Python keras.layers.core.Dense() Examples
The following are 30 code examples of keras.layers.core.Dense(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers.core, or try the search function.
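Before the project examples, here is a minimal, self-contained sketch of Dense in use. It is not taken from any of the projects below; it is an illustration written against the Keras 1.x-era API (output_dim, nb_epoch) that most of these examples target, with made-up toy data:

import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation

# Toy data: 100 samples with 8 features, binary labels.
X = np.random.rand(100, 8).astype('float32')
y = np.random.randint(0, 2, size=(100, 1))

model = Sequential()
# Dense(output_dim, ...) is a fully connected layer; input_dim is only
# needed on the first layer so Keras can infer the remaining shapes.
model.add(Dense(output_dim=16, input_dim=8, activation='relu'))
model.add(Dense(output_dim=1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd')
model.fit(X, y, nb_epoch=5, batch_size=16, verbose=0)

In Keras 2.x the same model would be written as Dense(16, input_dim=8, activation='relu') and fit(..., epochs=5); several examples below mix both argument styles.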
Example #1
Source File: example.py From residual_block_keras with GNU General Public License v3.0 | 7 votes |
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    model = keras.models.Sequential()
    first_layer_channel = 128
    if is_mnist:  # size to be changed to 32,32
        model.add(ZeroPadding2D((2, 2), input_shape=(img_channels, img_rows, img_cols)))  # resize (28,28)-->(32,32)
        # the first conv
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))

    # [residual-based Conv layers]
    residual_blocks = design_for_residual_blocks(num_channel_input=first_layer_channel)
    model.add(residual_blocks)
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))

    # [Classifier]
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    # [END]
    return model
Example #2
Source File: rbm.py From keras_bn_library with MIT License | 6 votes |
def get_config(self):
    config = {'output_dim': self.hidden_dim,
              'init': self.init.__name__,
              'activation': self.activation.__name__,
              'Wrbm_regularizer': self.Wrbm_regularizer.get_config() if self.Wrbm_regularizer else None,
              'bh_regularizer': self.bh_regularizer.get_config() if self.bh_regularizer else None,
              'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
              'Wrbm_constraint': self.Wrbm_constraint.get_config() if self.Wrbm_constraint else None,
              'bh_constraint': self.bh_constraint.get_config() if self.bh_constraint else None,
              'input_dim': self.input_dim}
    base_config = super(Dense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

# -------------
# RBM internals
# -------------
Example #3
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 6 votes |
def deep_mlp(self):
    """
    Deep Multilayer Perceptron.
    """
    if self._config.num_mlp_layers == 0:
        self.add(Dropout(0.5))
    else:
        for j in xrange(self._config.num_mlp_layers):
            self.add(Dense(self._config.mlp_hidden_dim))
            if self._config.mlp_activation == 'elu':
                self.add(ELU())
            elif self._config.mlp_activation == 'leaky_relu':
                self.add(LeakyReLU())
            elif self._config.mlp_activation == 'prelu':
                self.add(PReLU())
            else:
                self.add(Activation(self._config.mlp_activation))
            self.add(Dropout(0.5))
Example #4
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 6 votes |
def create(self):
    assert self._config.textual_embedding_dim == 0, \
        'Embedding cannot be learnt but must be fixed'

    language_forward = Sequential()
    language_forward.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim, return_sequences=False,
        input_shape=(self._config.max_input_time_steps, self._config.input_dim)))
    self.language_forward = language_forward

    language_backward = Sequential()
    language_backward.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim, return_sequences=False,
        go_backwards=True,
        input_shape=(self._config.max_input_time_steps, self._config.input_dim)))
    self.language_backward = language_backward

    self.add(Merge([language_forward, language_backward]))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example #5
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 6 votes |
def create(self):
    assert self._config.merge_mode in ['max', 'ave', 'sum'], \
        'Merge mode of this model is either max, ave or sum'

    self.textual_embedding(self, mask_zero=False)
    #self.textual_embedding(self, mask_zero=True)
    self.add(MaskedConvolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=self._config.language_cnn_filter_length,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    self.temporal_pooling(self)
    #self.add(DropMask())
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example #6
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 6 votes |
def create(self):
    self.textual_embedding(self, mask_zero=False)
    self.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=self._config.language_cnn_filter_length,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    #self.add(MaxPooling1D(pool_length=self._config.language_max_pool_length))
    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim, return_sequences=False,
        go_backwards=False))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example #7
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 6 votes |
def create(self):
    language_model = Sequential()
    self.textual_embedding(language_model, mask_zero=True)
    self.temporal_pooling(language_model)
    language_model.add(DropMask())
    #language_model.add(BatchNormalization(mode=1))
    self.language_model = language_model

    visual_model_factory = \
        select_sequential_visual_model[self._config.trainable_perception_name](
            self._config.visual_dim)
    visual_model = visual_model_factory.create()
    visual_dimensionality = visual_model_factory.get_dimensionality()
    self.visual_embedding(visual_model, visual_dimensionality)
    #visual_model.add(BatchNormalization(mode=1))
    self.visual_model = visual_model

    if self._config.multimodal_merge_mode == 'dot':
        self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,), (1,)]))
    else:
        self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example #8
Source File: sequential.py From keras2pmml with MIT License | 6 votes |
def setUp(self):
    iris = load_iris()
    theano.config.floatX = 'float32'
    X = iris.data.astype(theano.config.floatX)
    y = iris.target.astype(np.int32)
    y_ohe = np_utils.to_categorical(y)

    model = Sequential()
    model.add(Dense(input_dim=X.shape[1], output_dim=5, activation='tanh'))
    model.add(Dense(input_dim=5, output_dim=y_ohe.shape[1], activation='sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    model.fit(X, y_ohe, nb_epoch=10, batch_size=1, verbose=3, validation_data=None)

    params = {
        'copyright': 'Václav Čadek',
        'model_name': 'Iris Model'
    }
    self.model = model
    self.pmml = keras2pmml(self.model, **params)
    self.num_inputs = self.model.input_shape[1]
    self.num_outputs = self.model.output_shape[1]
    self.num_connection_layers = len(self.model.layers)
    self.features = ['x{}'.format(i) for i in range(self.num_inputs)]
    self.class_values = ['y{}'.format(i) for i in range(self.num_outputs)]
Example #9
Source File: lstm.py From copper_price_forecast with GNU General Public License v3.0 | 6 votes |
def build_model(layers):
    """
    Define the model.
    """
    model = Sequential()
    model.add(LSTM(units=layers[1], input_shape=(layers[1], layers[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(layers[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=layers[3]))
    model.add(Activation("tanh"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Example #10
Source File: cnn.py From DeepFashion with Apache License 2.0 | 6 votes |
def model_create(input_shape, num_classes):
    logging.debug('input_shape {}'.format(input_shape))

    model = Sequential()
    model.add(Conv2D(32, (3, 3), border_mode='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    # use binary_crossentropy if there are just 2 prediction classes (yes or no)
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])

    return model
Example #11
Source File: test_graph_model.py From CAPTCHA-breaking with MIT License | 6 votes |
def test_1o_1i(self):
    print('test a non-sequential graph with 1 input and 1 output')
    graph = Graph()
    graph.add_input(name='input1', ndim=2)

    graph.add_node(Dense(32, 16), name='dense1', input='input1')
    graph.add_node(Dense(32, 4), name='dense2', input='input1')
    graph.add_node(Dense(16, 4), name='dense3', input='dense1')

    graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
    graph.compile('rmsprop', {'output1': 'mse'})

    history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
    out = graph.predict({'input1': X_test})
    assert(type(out) == dict)
    assert(len(out) == 1)

    loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
    loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
    loss = graph.evaluate({'input1': X_test, 'output1': y_test})
    print(loss)
    assert(loss < 2.5)
Example #12
Source File: test_graph_model.py From CAPTCHA-breaking with MIT License | 6 votes |
def test_1o_1i_2(self):
    print('test a more complex non-sequential graph with 1 input and 1 output')
    graph = Graph()
    graph.add_input(name='input1', ndim=2)

    graph.add_node(Dense(32, 16), name='dense1', input='input1')
    graph.add_node(Dense(32, 4), name='dense2-0', input='input1')
    graph.add_node(Activation('relu'), name='dense2', input='dense2-0')
    graph.add_node(Dense(4, 16), name='dense3', input='dense2')
    graph.add_node(Dense(16, 4), name='dense4', inputs=['dense1', 'dense3'], merge_mode='sum')

    graph.add_output(name='output1', inputs=['dense2', 'dense4'], merge_mode='sum')
    graph.compile('rmsprop', {'output1': 'mse'})

    history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
    out = graph.predict({'input1': X_train})
    assert(type(out) == dict)
    assert(len(out) == 1)

    loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
    loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
    loss = graph.evaluate({'input1': X_test, 'output1': y_test})
    print(loss)
    assert(loss < 2.5)
    graph.get_config(verbose=1)
Example #13
Source File: test_graph_model.py From CAPTCHA-breaking with MIT License | 6 votes |
def test_recursive(self):
    print('test layer-like API')
    graph = containers.Graph()
    graph.add_input(name='input1', ndim=2)
    graph.add_node(Dense(32, 16), name='dense1', input='input1')
    graph.add_node(Dense(32, 4), name='dense2', input='input1')
    graph.add_node(Dense(16, 4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, 32, name='first_seq_dense'))
    seq.add(graph)
    seq.add(Dense(4, 4, name='last_seq_dense'))
    seq.compile('rmsprop', 'mse')

    history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test, y_test)
    print(loss)
    assert(loss < 2.5)

    loss = seq.evaluate(X_test, y_test, show_accuracy=True)
    pred = seq.predict(X_test)
    seq.get_config(verbose=1)
Example #14
Source File: co_lstm_predict_day.py From copper_price_forecast with GNU General Public License v3.0 | 6 votes |
def build_model():
    """
    Define the model.
    """
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Example #15
Source File: GMF.py From neural_collaborative_filtering with Apache License 2.0 | 6 votes |
def get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim, name='user_embedding',
                                  init=init_normal, W_regularizer=l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim, name='item_embedding',
                                  init=init_normal, W_regularizer=l2(regs[1]), input_length=1)

    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))

    # Element-wise product of user and item embeddings
    predict_vector = merge([user_latent, item_latent], mode='mul')

    # Final prediction layer
    #prediction = Lambda(lambda x: K.sigmoid(K.sum(x)), output_shape=(1,))(predict_vector)
    prediction = Dense(1, activation='sigmoid', init='lecun_uniform', name='prediction')(predict_vector)

    model = Model(input=[user_input, item_input], output=prediction)
    return model
Example #16
Source File: liver_model.py From MCF-3D-CNN with MIT License | 6 votes |
def cnn_3D(self, input_shape, modual=''):
    # Build the model
    model_in = Input(input_shape)
    model = Convolution3D(
        filters=6,
        kernel_size=(3, 3, 3),
        input_shape=input_shape,
        activation='relu',
        kernel_initializer='he_normal',
        name=modual + 'conv1'
    )(model_in)  # now 30x30x3x6
    model = MaxPooling3D(pool_size=(2, 2, 1))(model)  # now 15x15x3x6

    model = Convolution3D(
        filters=8,
        kernel_size=(4, 4, 3),
        activation='relu',
        kernel_initializer='he_normal',
        name=modual + 'conv2'
    )(model)  # now 12x12x1x8
    model = MaxPooling3D(pool_size=(2, 2, 1))(model)  # now 6x6x1x8

    model = Flatten()(model)
    model = Dropout(0.5)(model)
    model_out = Dense(100, activation='relu', name=modual + 'fc1')(model)
    return model_in, model_out
Example #17
Source File: liver_model.py From MCF-3D-CNN with MIT License | 6 votes |
def build_3dcnn_model(self, fusion_type, Fusion):
    if len(Fusion[0]) == 1:
        input_shape = (32, 32, len(Fusion))
        model_in, model = self.cnn_2D(input_shape)
    else:
        input_shape = (32, 32, 5, len(Fusion))
        model_in, model = self.cnn_3D(input_shape)

    model = Dropout(0.5)(model)
    model = Dense(32, activation='relu', name='fc2')(model)
    model = Dense(self.config.classes, activation='softmax', name='fc3')(model)
    model = Model(input=model_in, output=model)

    # Parameter statistics
    # model.summary()
    plot_model(model, to_file='experiments/img/' + str(Fusion) + fusion_type + r'_model.png', show_shapes=True)
    print(' Saving model Architecture')

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    # model.compile(optimizer=adam, loss=self.mycrossentropy, metrics=['accuracy'])  # improves results, but unstable
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
Example #18
Source File: Build_Model.py From DOVE with GNU General Public License v3.0 | 6 votes |
def makecnn(learningrate, regular, decay, channel_number):
    # model structure
    model = Sequential()
    model.add(Conv3D(100, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     input_shape=(20, 20, 20, channel_number), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1), use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular), kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))

    model.add(Conv3D(200, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1), use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular), kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_last'))
    model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True, scale=True,
                                 beta_initializer='zeros', gamma_initializer='ones',
                                 moving_mean_initializer='zeros', moving_variance_initializer='ones',
                                 beta_regularizer=None, gamma_regularizer=None,
                                 beta_constraint=None, gamma_constraint=None))

    model.add(Conv3D(400, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1), use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular), kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_last'))

    model.add(Flatten())
    model.add(Dropout(0.3))
    model.add(Dense(1000, use_bias=True, input_shape=(32000,),
                    kernel_initializer='glorot_normal', bias_initializer='zeros',
                    kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular), kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(100, use_bias=True,
                    kernel_initializer='glorot_normal', bias_initializer='zeros',
                    kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular), kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(1, activation='sigmoid', use_bias=True,
                    kernel_initializer='glorot_normal', bias_initializer='zeros',
                    kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular), kernel_constraint=None, bias_constraint=None))

    nadam = Nadam(lr=learningrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=decay)
    model.compile(loss='binary_crossentropy', optimizer=nadam,
                  metrics=['accuracy', f1score, precision, recall])
    return model
Example #19
Source File: networks.py From C51-DDQN-Keras with MIT License | 6 votes |
def value_distribution_network(input_shape, num_atoms, action_size, learning_rate):
    """Model Value Distribution

    With States as inputs and output Probability Distributions for all Actions
    """
    state_input = Input(shape=(input_shape))
    cnn_feature = Convolution2D(32, 8, 8, subsample=(4, 4), activation='relu')(state_input)
    cnn_feature = Convolution2D(64, 4, 4, subsample=(2, 2), activation='relu')(cnn_feature)
    cnn_feature = Convolution2D(64, 3, 3, activation='relu')(cnn_feature)
    cnn_feature = Flatten()(cnn_feature)
    cnn_feature = Dense(512, activation='relu')(cnn_feature)

    distribution_list = []
    for i in range(action_size):
        distribution_list.append(Dense(num_atoms, activation='softmax')(cnn_feature))

    model = Model(input=state_input, output=distribution_list)

    adam = Adam(lr=learning_rate)
    model.compile(loss='categorical_crossentropy', optimizer=adam)

    return model
Example #20
Source File: test_tasks.py From CAPTCHA-breaking with MIT License | 6 votes |
def test_img_clf(self):
    print('image classification data:')
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200,
                                                         input_shape=(3, 32, 32),
                                                         classification=True, nb_class=2)
    print('X_train:', X_train.shape)
    print('X_test:', X_test.shape)
    print('y_train:', y_train.shape)
    print('y_test:', y_test.shape)

    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential()
    model.add(Convolution2D(32, 3, 32, 32))
    model.add(Activation('sigmoid'))
    model.add(Flatten())
    model.add(Dense(32, y_test.shape[-1]))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16,
                        validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
    self.assertTrue(history.history['val_acc'][-1] > 0.9)
Example #21
Source File: test_tasks.py From CAPTCHA-breaking with MIT License | 6 votes |
def test_vector_reg(self):
    nb_hidden = 10
    print('vector regression data:')
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200,
                                                         input_shape=(10,),
                                                         output_shape=(2,),
                                                         classification=False)
    print('X_train:', X_train.shape)
    print('X_test:', X_test.shape)
    print('y_train:', y_train.shape)
    print('y_test:', y_test.shape)

    model = Sequential()
    model.add(Dense(X_train.shape[-1], nb_hidden))
    model.add(Activation('tanh'))
    model.add(Dense(nb_hidden, y_train.shape[-1]))

    model.compile(loss='hinge', optimizer='adagrad')
    history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16,
                        validation_data=(X_test, y_test), verbose=2)
    self.assertTrue(history.history['val_loss'][-1] < 0.9)
Example #22
Source File: test_tasks.py From CAPTCHA-breaking with MIT License | 6 votes |
def test_vector_clf(self):
    nb_hidden = 10
    print('vector classification data:')
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200,
                                                         input_shape=(10,),
                                                         classification=True, nb_class=2)
    print('X_train:', X_train.shape)
    print('X_test:', X_test.shape)
    print('y_train:', y_train.shape)
    print('y_test:', y_test.shape)

    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential()
    model.add(Dense(X_train.shape[-1], nb_hidden))
    model.add(Activation('relu'))
    model.add(Dense(nb_hidden, y_train.shape[-1]))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16,
                        validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
    print(history.history)
    self.assertTrue(history.history['val_acc'][-1] > 0.9)
Example #23
Source File: test_graph_model.py From CAPTCHA-breaking with MIT License | 6 votes |
def test_1o_2i(self):
    print('test a non-sequential graph with 2 inputs and 1 output')
    graph = Graph()
    graph.add_input(name='input1', ndim=2)
    graph.add_input(name='input2', ndim=2)

    graph.add_node(Dense(32, 16), name='dense1', input='input1')
    graph.add_node(Dense(32, 4), name='dense2', input='input2')
    graph.add_node(Dense(16, 4), name='dense3', input='dense1')

    graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
    graph.compile('rmsprop', {'output1': 'mse'})

    history = graph.fit({'input1': X_train, 'input2': X2_train, 'output1': y_train}, nb_epoch=10)
    out = graph.predict({'input1': X_test, 'input2': X2_test})
    assert(type(out) == dict)
    assert(len(out) == 1)

    loss = graph.test_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test})
    loss = graph.train_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test})
    loss = graph.evaluate({'input1': X_test, 'input2': X2_test, 'output1': y_test})
    print(loss)
    assert(loss < 3.0)
    graph.get_config(verbose=1)
Example #24
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 5 votes |
def create(self):
    assert self._config.merge_mode in ['max', 'ave', 'sum'], \
        'Merge mode of this model is either max, ave or sum'

    unigram = Sequential()
    self.textual_embedding(unigram, mask_zero=True)
    unigram.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=1,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    self.temporal_pooling(unigram)

    bigram = Sequential()
    self.textual_embedding(bigram, mask_zero=True)
    bigram.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=2,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    self.temporal_pooling(bigram)

    trigram = Sequential()
    self.textual_embedding(trigram, mask_zero=True)
    trigram.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=3,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    self.temporal_pooling(trigram)

    self.add(Merge([unigram, bigram, trigram], mode='concat'))

    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example #25
Source File: test_regularizers.py From CAPTCHA-breaking with MIT License | 5 votes |
def create_model(weight_reg=None, activity_reg=None):
    model = Sequential()
    model.add(Dense(784, 50))
    model.add(Activation('relu'))
    model.add(Dense(50, 10, W_regularizer=weight_reg, activity_regularizer=activity_reg))
    model.add(Activation('softmax'))
    return model
Example #26
Source File: test_loss_weighting.py From CAPTCHA-breaking with MIT License | 5 votes |
def create_sequential_model():
    model = Sequential()
    model.add(Dense(784, 50))
    model.add(Activation('relu'))
    model.add(Dense(50, 10))
    model.add(Activation('softmax'))
    return model
Example #27
Source File: test_embeddings.py From CAPTCHA-breaking with MIT License | 5 votes |
def test_unitnorm_constraint(self):
    lookup = Sequential()
    lookup.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm()))
    lookup.add(Flatten())
    lookup.add(Dense(2, 1))
    lookup.add(Activation('sigmoid'))
    lookup.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary')
    lookup.train_on_batch(self.X1, np.array([[1], [0]], dtype='int32'))
    norm = np.linalg.norm(lookup.params[0].get_value(), axis=1)
    self.assertTrue(np.allclose(norm, np.ones_like(norm).astype('float32')))
Example #28
Source File: test_optimizers.py From CAPTCHA-breaking with MIT License | 5 votes |
def get_model(input_dim, nb_hidden, output_dim):
    model = Sequential()
    model.add(Dense(input_dim, nb_hidden))
    model.add(Activation('relu'))
    model.add(Dense(nb_hidden, output_dim))
    model.add(Activation('softmax'))
    return model
Example #29
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 5 votes |
def create(self):
    self.textual_embedding_fixed_length(self, mask_zero=False)
    self.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=self._config.language_cnn_filter_length,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    self.add(MaxPooling1D(pool_length=self._config.language_max_pool_length))
    self.add(Flatten())
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example #30
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 5 votes |
def create(self):
    language_model = Sequential()
    self.textual_embedding(language_model, mask_zero=True)
    self.stacked_RNN(language_model)
    language_model.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim, return_sequences=False,
        go_backwards=self._config.go_backwards))
    self.language_model = language_model

    visual_model_factory = \
        select_sequential_visual_model[self._config.trainable_perception_name](
            self._config.visual_dim)
    visual_model = visual_model_factory.create()
    visual_dimensionality = visual_model_factory.get_dimensionality()
    self.visual_embedding(visual_model, visual_dimensionality)
    self.visual_model = visual_model

    if self._config.multimodal_merge_mode == 'dot':
        self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,), (1,)]))
    else:
        self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))