Python keras.layers.core.Dropout() Examples
The following are 30 code examples of keras.layers.core.Dropout(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers.core, or try the search function.
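
As a quick orientation before the examples: Dropout(rate) randomly sets a fraction rate of its input units to zero at each update during training, which helps prevent overfitting; at inference time the layer passes inputs through unchanged. A minimal sketch of typical usage (the layer sizes here are illustrative, not drawn from any example below):

# Minimal illustrative sketch of keras.layers.core.Dropout usage.
from keras.models import Sequential
from keras.layers.core import Dense, Dropout

model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(20,)))
model.add(Dropout(0.5))  # drops 50% of the units at each training update
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')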
Example #1
Source File: TransferLearning.py From Intelligent-Projects-Using-Python with MIT License | 7 votes |
def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# VGG16 Model for transfer Learning
Example #2
Source File: TransferLearning_reg.py From Intelligent-Projects-Using-Python with MIT License | 7 votes |
def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(1)(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# VGG16 Model for transfer Learning
Example #3
Source File: TransferLearning.py From Intelligent-Projects-Using-Python with MIT License | 6 votes |
def inception_pseudo(self, dim=224, freeze_layers=30, full_freeze='N'):
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# ResNet50 Model for transfer Learning
Example #4
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 6 votes |
def create(self):
    self.textual_embedding(self, mask_zero=True)
    self.stacked_RNN(self)
    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=self._config.go_backwards))
    self.add(Dropout(0.5))
    self.add(RepeatVector(self._config.max_output_time_steps))
    self.add(self._config.recurrent_decoder(
        self._config.hidden_state_dim,
        return_sequences=True))
    self.add(Dropout(0.5))
    self.add(TimeDistributedDense(self._config.output_dim))
    self.add(Activation('softmax'))

###
# Multimodal models
###
Example #5
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 6 votes |
def deep_mlp(self):
    """
    Deep Multilayer Perceptron.
    """
    if self._config.num_mlp_layers == 0:
        self.add(Dropout(0.5))
    else:
        for j in xrange(self._config.num_mlp_layers):
            self.add(Dense(self._config.mlp_hidden_dim))
            if self._config.mlp_activation == 'elu':
                self.add(ELU())
            elif self._config.mlp_activation == 'leaky_relu':
                self.add(LeakyReLU())
            elif self._config.mlp_activation == 'prelu':
                self.add(PReLU())
            else:
                self.add(Activation(self._config.mlp_activation))
            self.add(Dropout(0.5))
Example #6
Source File: DenseNet.py From DenseNet-Cifar10 with MIT License | 6 votes |
def conv_block(input, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, Relu 3x3, Conv2D, optional dropout

    Args:
        input: Input keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor with batch_norm, relu and convolution2d added
    '''
    x = Activation('relu')(input)
    x = Convolution2D(nb_filter, (3, 3), kernel_initializer="he_uniform", padding="same",
                      use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    return x
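
For context, conv_block units like this one are usually chained inside a dense block, with each block's output concatenated onto the running feature map. Below is a minimal sketch of that loop, assuming the Keras 2 functional API; dense_block and growth_rate are illustrative names, not taken from DenseNet.py above:

# Hypothetical sketch: chaining conv_block calls into a dense block.
from keras.layers import concatenate

def dense_block(x, nb_layers, growth_rate, dropout_rate=None):
    # Each conv_block contributes growth_rate new feature maps; concatenation
    # keeps every earlier feature map visible to later layers.
    for _ in range(nb_layers):
        cb = conv_block(x, growth_rate, dropout_rate)
        x = concatenate([x, cb], axis=-1)
    return x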
Example #7
Source File: DenseNet.py From DenseNet-Cifar10 with MIT License | 6 votes |
def transition_block(input, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, Relu 1x1, Conv2D, optional dropout and Maxpooling2D

    Args:
        input: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor, after applying batch_norm, relu-conv, dropout, maxpool
    '''
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    x = Convolution2D(nb_filter, (1, 1), kernel_initializer="he_uniform", padding="same",
                      use_bias=False, kernel_regularizer=l2(weight_decay))(input)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    return x
Example #8
Source File: model.py From DeepSequenceClassification with GNU General Public License v2.0 | 6 votes |
def gen_model(vocab_size=100, embedding_size=128, maxlen=100, output_size=6,
              hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %
                (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Model")
    model = Sequential()
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add(Embedding(vocab_size, embedding_size, input_length=maxlen))
    logger.info("Added Embedding Layer")
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    for i in xrange(num_hidden_layers):
        model.add(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid',
                            inner_activation='hard_sigmoid', return_sequences=True))
        logger.info("Added %s Layer" % RNN_LAYER_TYPE)
        model.add(Dropout(0.5))
        logger.info("Added Dropout Layer")
    model.add(RNN_CLASS(output_dim=output_size, activation='sigmoid',
                        inner_activation='hard_sigmoid', return_sequences=True))
    logger.info("Added %s Layer" % RNN_LAYER_TYPE)
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    model.add(TimeDistributedDense(output_size, activation="softmax"))
    logger.info("Added TimeDistributedDense Layer")
    logger.info("Created model with following config:\n%s" % json.dumps(model.get_config(), indent=4))
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model
Example #9
Source File: cnn.py From DeepFashion with Apache License 2.0 | 6 votes |
def model_create(input_shape, num_classes):
    logging.debug('input_shape {}'.format(input_shape))

    model = Sequential()
    model.add(Conv2D(32, (3, 3), border_mode='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    # Use binary_crossentropy if there are just 2 classes (yes/no).
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])

    return model
Example #10
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 6 votes |
def create(self):
    self._input_name = 'text'
    self._output_name = 'output'

    self.add_input(
        name=self._input_name,
        input_shape=(self._config.max_input_time_steps, self._config.input_dim,))
    self.inputs['text'].input = T.imatrix()
    self.add_node(Embedding(
        self._config.input_dim,
        self._config.textual_embedding_dim,
        mask_zero=True),
        name='embedding', input='text')
    self.add_node(
        self._config.recurrent_encoder(
            self._config.hidden_state_dim,
            return_sequences=False,
            go_backwards=self._config.go_backwards),
        name='recurrent', input='embedding')
    self.add_node(Dropout(0.5), name='dropout', input='recurrent')
    self.add_node(Dense(self._config.output_dim), name='dense', input='dropout')
    self.add_node(Activation('softmax'), name='softmax', input='dense')
    self.add_output(name=self._output_name, input='softmax')
Example #11
Source File: helpers.py From timecop with Apache License 2.0 | 6 votes |
def fit_model_new(train_X, train_Y, window_size=1):
    model2 = Sequential()
    model2.add(LSTM(input_shape=(window_size, 1), units=window_size, return_sequences=True))
    model2.add(Dropout(0.5))
    model2.add(LSTM(256))
    model2.add(Dropout(0.5))
    model2.add(Dense(1))
    model2.add(Activation("linear"))
    model2.compile(loss="mse", optimizer="adam")
    model2.summary()

    # Fit the first model.
    model2.fit(train_X, train_Y, epochs=80, batch_size=1, verbose=2)
    return model2
Example #12
Source File: Build_Model.py From DOVE with GNU General Public License v3.0 | 6 votes |
def makecnn(learningrate, regular, decay, channel_number):
    # model structure
    model = Sequential()
    model.add(Conv3D(100, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     input_shape=(20, 20, 20, channel_number), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1), use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular), kernel_constraint=None,
                     bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))

    model.add(Conv3D(200, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1), use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular), kernel_constraint=None,
                     bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid',
                           data_format='channels_last'))
    model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True,
                                 scale=True, beta_initializer='zeros',
                                 gamma_initializer='ones',
                                 moving_mean_initializer='zeros',
                                 moving_variance_initializer='ones',
                                 beta_regularizer=None, gamma_regularizer=None,
                                 beta_constraint=None, gamma_constraint=None))

    model.add(Conv3D(400, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1), use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular), kernel_constraint=None,
                     bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid',
                           data_format='channels_last'))

    model.add(Flatten())
    model.add(Dropout(0.3))
    model.add(Dense(1000, use_bias=True, input_shape=(32000,),
                    kernel_initializer='glorot_normal', bias_initializer='zeros',
                    kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular), kernel_constraint=None,
                    bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(100, use_bias=True, kernel_initializer='glorot_normal',
                    bias_initializer='zeros', kernel_regularizer=None,
                    bias_regularizer=None, activity_regularizer=l2(regular),
                    kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(1, activation='sigmoid', use_bias=True,
                    kernel_initializer='glorot_normal', bias_initializer='zeros',
                    kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular), kernel_constraint=None,
                    bias_constraint=None))

    nadam = Nadam(lr=learningrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                  schedule_decay=decay)
    model.compile(loss='binary_crossentropy', optimizer=nadam,
                  metrics=['accuracy', f1score, precision, recall])
    return model
Example #13
Source File: densenet_fast.py From semantic-embeddings with MIT License | 6 votes |
def transition_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, Relu 1x1, Conv2D, optional dropout and Maxpooling2D

    Args:
        ip: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor, after applying batch_norm, relu-conv, dropout, maxpool
    '''
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    x = Convolution2D(nb_filter, 1, 1, init="he_uniform", border_mode="same", bias=False,
                      W_regularizer=l2(weight_decay))(ip)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    return x
Example #14
Source File: densenet_fast.py From semantic-embeddings with MIT License | 6 votes |
def conv_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, Relu 3x3, Conv2D, optional dropout

    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor with batch_norm, relu and convolution2d added
    '''
    x = Activation('relu')(ip)
    x = Convolution2D(nb_filter, 3, 3, init="he_uniform", border_mode="same", bias=False,
                      W_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
Example #15
Source File: liver_model.py From MCF-3D-CNN with MIT License | 6 votes |
def build_3dcnn_model(self, fusion_type, Fusion):
    if len(Fusion[0]) == 1:
        input_shape = (32, 32, len(Fusion))
        model_in, model = self.cnn_2D(input_shape)
    else:
        input_shape = (32, 32, 5, len(Fusion))
        model_in, model = self.cnn_3D(input_shape)
    model = Dropout(0.5)(model)
    model = Dense(32, activation='relu', name='fc2')(model)
    model = Dense(self.config.classes, activation='softmax', name='fc3')(model)
    model = Model(inputs=model_in, outputs=model)

    # Print a parameter summary
    # model.summary()
    plot_model(model, to_file='experiments/img/' + str(Fusion) + fusion_type + r'_model.png',
               show_shapes=True)
    print(' Saving model Architecture')

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    # model.compile(optimizer=adam, loss=self.mycrossentropy, metrics=['accuracy'])  # improves results, but unstable
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
Example #16
Source File: RNN-example_using_keras.py From QUANTAXIS with MIT License | 6 votes |
def build_model2(layers):
    d = 0.2
    model = Sequential()
    model.add(LSTM(128, input_shape=(layers[1], layers[0]), return_sequences=True))
    model.add(Dropout(d))
    model.add(LSTM(64, input_shape=(layers[1], layers[0]), return_sequences=False))
    model.add(Dropout(d))
    model.add(Dense(16, init='uniform', activation='relu'))
    model.add(Dense(1, init='uniform', activation='relu'))
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    return model
Example #17
Source File: RNN-example_using_keras.py From QUANTAXIS with MIT License | 6 votes |
def build_model(layers):
    model = Sequential()

    model.add(LSTM(
        input_dim=layers[0],
        output_dim=layers[1],
        return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(
        layers[2],
        return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(
        output_dim=layers[2]))
    model.add(Activation("linear"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop", metrics=['accuracy'])
    print("Compilation Time : ", time.time() - start)
    return model
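
A usage sketch for build_model (illustrative only: the layers list packs [input_dim, units of the first LSTM, units of the second LSTM and output], the random arrays below stand in for real windowed series, and the nb_epoch keyword assumes the legacy Keras 1.x API used above):

# Illustrative only: fit build_model on random stand-in data.
import numpy as np

X = np.random.rand(100, 50, 1)    # 100 windows of 50 timesteps, 1 feature
y = np.random.rand(100, 100)      # targets sized to layers[2]

model = build_model([1, 50, 100])
model.fit(X, y, batch_size=32, nb_epoch=2, validation_split=0.1)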
Example #18
Source File: co_lstm_predict_sequence.py From copper_price_forecast with GNU General Public License v3.0 | 6 votes |
def build_model():
    """
    Define the model.
    """
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]),
                   return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Example #19
Source File: co_lstm_predict_day.py From copper_price_forecast with GNU General Public License v3.0 | 6 votes |
def build_model():
    """
    Define the model.
    """
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]),
                   return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Example #20
Source File: lstm.py From copper_price_forecast with GNU General Public License v3.0 | 6 votes |
def build_model(layers):
    """
    Model definition.
    """
    model = Sequential()
    model.add(LSTM(units=layers[1], input_shape=(layers[1], layers[0]),
                   return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(layers[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=layers[3]))
    model.add(Activation("tanh"))
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Example #21
Source File: TransferLearning_reg.py From Intelligent-Projects-Using-Python with MIT License | 6 votes |
def inception_pseudo(self, dim=224, freeze_layers=30, full_freeze='N'):
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(1)(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# ResNet50 Model for transfer Learning
Example #22
Source File: TransferLearning_ffd.py From Intelligent-Projects-Using-Python with MIT License | 6 votes |
def inception_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# ResNet50 Model for transfer Learning
Example #23
Source File: TransferLearning_ffd.py From Intelligent-Projects-Using-Python with MIT License | 6 votes |
def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# VGG16 Model for transfer Learning
Example #24
Source File: liver_model.py From MCF-3D-CNN with MIT License | 6 votes |
def cnn_3D(self, input_shape, modual=''):
    # Build the model
    model_in = Input(input_shape)
    model = Convolution3D(
        filters=6,
        kernel_size=(3, 3, 3),
        input_shape=input_shape,
        activation='relu',
        kernel_initializer='he_normal',
        name=modual + 'conv1'
    )(model_in)  # now 30x30x3x6
    model = MaxPooling3D(pool_size=(2, 2, 1))(model)  # now 15x15x3x6
    model = Convolution3D(
        filters=8,
        kernel_size=(4, 4, 3),
        activation='relu',
        kernel_initializer='he_normal',
        name=modual + 'conv2'
    )(model)  # now 12x12x1x8
    model = MaxPooling3D(pool_size=(2, 2, 1))(model)  # now 6x6x1x8
    model = Flatten()(model)
    model = Dropout(0.5)(model)
    model_out = Dense(100, activation='relu', name=modual + 'fc1')(model)
    return model_in, model_out
Example #25
Source File: TransferLearning.py From Intelligent-Projects-Using-Python with MIT License | 5 votes |
def VGG16_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = VGG16(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final
Example #26
Source File: model.py From keras-steering-angle-visualizations with MIT License | 5 votes |
def steering_net():
    model = Sequential()
    model.add(Convolution2D(24, 5, 5, init=normal_init, subsample=(2, 2), name='conv1_1', input_shape=(66, 200, 3)))
    model.add(Activation('relu'))
    model.add(Convolution2D(36, 5, 5, init=normal_init, subsample=(2, 2), name='conv2_1'))
    model.add(Activation('relu'))
    model.add(Convolution2D(48, 5, 5, init=normal_init, subsample=(2, 2), name='conv3_1'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=normal_init, subsample=(1, 1), name='conv4_1'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=normal_init, subsample=(1, 1), name='conv4_2'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(1164, init=normal_init, name="dense_0"))
    model.add(Activation('relu'))
    # model.add(Dropout(p))
    model.add(Dense(100, init=normal_init, name="dense_1"))
    model.add(Activation('relu'))
    # model.add(Dropout(p))
    model.add(Dense(50, init=normal_init, name="dense_2"))
    model.add(Activation('relu'))
    # model.add(Dropout(p))
    model.add(Dense(10, init=normal_init, name="dense_3"))
    model.add(Activation('relu'))
    model.add(Dense(1, init=normal_init, name="dense_4"))
    model.add(Lambda(atan_layer, output_shape=atan_layer_shape, name="atan_0"))
    return model
Example #27
Source File: densenet-checkpoint.py From CBAM-keras with MIT License | 5 votes |
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    ''' Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout

    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        bottleneck: add bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua
        x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal', padding='same',
                   use_bias=False, kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
Example #28
Source File: model.py From DeepSequenceClassification with GNU General Public License v2.0 | 5 votes |
def gen_model_brnn(vocab_size=100, embedding_size=128, maxlen=100, output_size=6,
                   hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %
                (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Graph model for Bidirectional RNN")
    model = Graph()
    model.add_input(name='input', input_shape=(maxlen,), dtype=int)
    logger.info("Added Input node")
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add_node(Embedding(vocab_size, embedding_size, input_length=maxlen), name='embedding', input='input')
    logger.info("Added Embedding node")
    model.add_node(Dropout(0.5), name="dropout_0", input="embedding")
    logger.info("Added Dropout Node")
    for i in xrange(num_hidden_layers):
        last_dropout_name = "dropout_%s" % i
        forward_name, backward_name, dropout_name = ["%s_%s" % (k, i + 1) for k in ["forward", "backward", "dropout"]]
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid',
                                 inner_activation='hard_sigmoid', return_sequences=True),
                       name=forward_name, input=last_dropout_name)
        logger.info("Added %s forward node[%s]" % (RNN_LAYER_TYPE, i + 1))
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid',
                                 inner_activation='hard_sigmoid', return_sequences=True,
                                 go_backwards=True),
                       name=backward_name, input=last_dropout_name)
        logger.info("Added %s backward node[%s]" % (RNN_LAYER_TYPE, i + 1))
        model.add_node(Dropout(0.5), name=dropout_name, inputs=[forward_name, backward_name])
        logger.info("Added Dropout node[%s]" % (i + 1))
    model.add_node(TimeDistributedDense(output_size, activation="softmax"), name="tdd", input=dropout_name)
    logger.info("Added TimeDistributedDense node")
    model.add_output(name="output", input="tdd")
    logger.info("Added Output node")
    logger.info("Created model with following config:\n%s" % model.get_config())
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(optimizer, {"output": 'categorical_crossentropy'})
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model
Example #29
Source File: TransferLearning_ffd.py From Intelligent-Projects-Using-Python with MIT License | 5 votes |
def VGG16_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = VGG16(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final
Example #30
Source File: lstm_cnn.py From stock-price-predict with MIT License | 5 votes |
def base_model(feature_len=1, after_day=1, input_shape=(20, 1)):
    model = Sequential()

    model.add(Conv1D(10, kernel_size=5, input_shape=input_shape, activation='relu',
                     padding='valid', strides=1))
    model.add(LSTM(100, return_sequences=False, input_shape=input_shape))
    model.add(Dropout(0.25))

    # one to many
    model.add(RepeatVector(after_day))
    model.add(LSTM(200, return_sequences=True))
    model.add(Dropout(0.25))

    model.add(TimeDistributed(Dense(100, activation='relu', kernel_initializer='uniform')))
    model.add(TimeDistributed(Dense(feature_len, activation='linear', kernel_initializer='uniform')))

    return model
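
A closing note common to all of the examples above: Dropout is only active while training. Keras uses inverted dropout, scaling the retained activations by 1/(1 - rate) during training so that at inference the layer is simply the identity and predict() is deterministic. A small illustrative check (the shapes and data below are made up for the sketch, not taken from any project above):

# Illustrative sketch: Dropout does not perturb inference-mode predictions.
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Dropout

model = Sequential()
model.add(Dense(16, activation='relu', input_shape=(8,)))
model.add(Dropout(0.5))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')

x = np.random.rand(4, 8)
# predict() runs in inference mode, so repeated calls agree exactly.
assert np.allclose(model.predict(x), model.predict(x))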