Python keras.layers.advanced_activations.PReLU() Examples
The following are 30 code examples of keras.layers.advanced_activations.PReLU().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module keras.layers.advanced_activations, or try the search function.
Example #1
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (100-300-50-20) with dropout and a
    2-way softmax head, compiled with Nesterov SGD, wrapped in KerasClassifier."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(Dropout(0.1))
    net.add(Dense(input_dim=100, output_dim=300, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=300, output_dim=50, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=50, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #2
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 3-hidden-layer MLP (310-252-128) mixing LeakyReLU and PReLU
    with batch norm and dropout, softmax output, compiled with Nesterov SGD."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=310, init='he_normal'))
    net.add(LeakyReLU(alpha=.001))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=310, output_dim=252, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(input_dim=252, output_dim=128, init='he_normal'))
    net.add(LeakyReLU(alpha=.001))
    net.add(BatchNormalization())
    net.add(Dropout(0.4))
    net.add(Dense(input_dim=128, output_dim=2, init='he_normal', activation='softmax'))
    # net.add(Activation('softmax'))
    opt = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #3
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a compact 3-hidden-layer MLP (62-158-20) with LeakyReLU/PReLU
    activations, dropout, and a 2-way softmax head, compiled with Nesterov SGD."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=62, init='he_normal'))
    net.add(LeakyReLU(alpha=.001))
    net.add(Dropout(0.3))
    net.add(Dense(input_dim=62, output_dim=158, init='he_normal'))
    net.add(LeakyReLU(alpha=.001))
    net.add(Dropout(0.25))
    net.add(Dense(input_dim=158, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    # net.add(Activation('softmax'))
    opt = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #4
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (100-380-50-20) with dropout and a
    2-way softmax head, compiled with Nesterov SGD, wrapped in KerasClassifier."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=100, output_dim=380, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=380, output_dim=50, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=50, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #5
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (105-280-60-20) with batch norm and
    dropout after every layer, softmax output, Nesterov SGD (momentum 0.99)."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=105, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=105, output_dim=280, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=280, output_dim=60, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=60, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #6
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (100-180-50-30) with batch norm and
    dropout after every layer, softmax output, compiled with Nesterov SGD."""
    net = Sequential()
    net.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=100, output_dim=180, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=180, output_dim=50, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(input_dim=50, output_dim=30, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=30, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #7
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (100-360-50-20) with batch norm and
    dropout after every layer, softmax output, compiled with Nesterov SGD."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=100, output_dim=360, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=360, output_dim=50, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=50, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #8
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (110-350-50-20) with batch norm and
    dropout after every layer, softmax output, compiled with Nesterov SGD."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=110, output_dim=350, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=350, output_dim=50, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=50, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #9
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (110-300-60-20) with batch norm and
    dropout after every layer, softmax output, compiled with Nesterov SGD."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.3))
    net.add(Dense(input_dim=110, output_dim=300, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(input_dim=300, output_dim=60, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=60, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #10
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License | 6 votes |
def deep_mlp(self):
    """Append a deep multilayer-perceptron head to this model.

    With zero MLP layers configured, only a single Dropout is added;
    otherwise each layer is Dense -> configured activation -> Dropout.
    """
    cfg = self._config
    if cfg.num_mlp_layers == 0:
        self.add(Dropout(0.5))
        return
    for _ in xrange(cfg.num_mlp_layers):
        self.add(Dense(cfg.mlp_hidden_dim))
        act = cfg.mlp_activation
        if act == 'elu':
            self.add(ELU())
        elif act == 'leaky_relu':
            self.add(LeakyReLU())
        elif act == 'prelu':
            self.add(PReLU())
        else:
            # Fall back to a plain keras Activation by name.
            self.add(Activation(act))
        self.add(Dropout(0.5))
Example #11
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (105-200-60-20) with batch norm and
    dropout after every layer, softmax output, Nesterov SGD (momentum 0.99)."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=105, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=105, output_dim=200, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=200, output_dim=60, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(input_dim=60, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.1))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #12
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (140-380-50-20) with batch norm and
    dropout after every layer, softmax output, compiled with Nesterov SGD."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=140, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=140, output_dim=380, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=380, output_dim=50, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=50, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #13
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (100-360-50-20) with batch norm and
    dropout after every layer, softmax output, Nesterov SGD (lr 0.007)."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=100, output_dim=360, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=360, output_dim=50, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=50, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.1))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.007, decay=1e-10, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #14
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (110-350-150-20) with batch norm and
    dropout after every layer, softmax output, Nesterov SGD (lr 0.02)."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=110, output_dim=350, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=350, output_dim=150, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=150, output_dim=20, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(input_dim=20, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.02, decay=1e-10, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #15
Source File: ikki_NN_1.py From stacking with MIT License | 6 votes |
def build_model(self):
    """Build a 4-hidden-layer PReLU MLP (110-200-60-80) with batch norm and
    dropout after every layer, softmax output, compiled with Nesterov SGD."""
    net = Sequential()
    net.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    net.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.3))
    net.add(Dense(input_dim=110, output_dim=200, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(input_dim=200, output_dim=60, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.6))
    net.add(Dense(input_dim=60, output_dim=80, init='he_normal'))
    net.add(PReLU(init='zero'))
    net.add(BatchNormalization())
    net.add(Dropout(0.3))
    net.add(Dense(input_dim=80, output_dim=2, init='he_normal', activation='softmax'))
    opt = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=net, **self.params)
Example #16
Source File: mtcnn_model.py From SmooFaceEngine with Apache License 2.0 | 6 votes |
def create_Kao_Onet(weight_path='model48.h5'):
    """Build the MTCNN O-net (48x48x3 input) and load pretrained weights.

    Returns a Model with three outputs: face classifier, bounding-box
    regression, and landmark regression.
    """
    inp = Input(shape=[48, 48, 3])
    net = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(inp)
    net = PReLU(shared_axes=[1, 2], name='prelu1')(net)
    net = MaxPool2D(pool_size=3, strides=2, padding='same')(net)
    net = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu2')(net)
    net = MaxPool2D(pool_size=3, strides=2)(net)
    net = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu3')(net)
    net = MaxPool2D(pool_size=2)(net)
    net = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu4')(net)
    net = Permute((3, 2, 1))(net)
    net = Flatten()(net)
    net = Dense(256, name='conv5')(net)
    net = PReLU(name='prelu5')(net)
    classifier = Dense(2, activation='softmax', name='conv6-1')(net)
    bbox_regress = Dense(4, name='conv6-2')(net)
    landmark_regress = Dense(10, name='conv6-3')(net)
    model = Model([inp], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)
    return model
Example #17
Source File: mtcnn_model.py From SmooFaceEngine with Apache License 2.0 | 6 votes |
def create_Kao_Rnet(weight_path='model24.h5'):
    """Build the MTCNN R-net (24x24x3 input) and load pretrained weights.

    Change the input shape to [None, None, 3] to allow arbitrary sizes.
    Returns a Model with two outputs: face classifier and bbox regression.
    """
    inp = Input(shape=[24, 24, 3])
    net = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(inp)
    net = PReLU(shared_axes=[1, 2], name='prelu1')(net)
    net = MaxPool2D(pool_size=3, strides=2, padding='same')(net)
    net = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu2')(net)
    net = MaxPool2D(pool_size=3, strides=2)(net)
    net = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu3')(net)
    net = Permute((3, 2, 1))(net)
    net = Flatten()(net)
    net = Dense(128, name='conv4')(net)
    net = PReLU(name='prelu4')(net)
    classifier = Dense(2, activation='softmax', name='conv5-1')(net)
    bbox_regress = Dense(4, name='conv5-2')(net)
    model = Model([inp], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
Example #18
Source File: mtcnn.py From mtcnn-keras with MIT License | 6 votes |
def create_Pnet(weight_path):
    """Build the fully-convolutional MTCNN P-net (arbitrary input size) and
    load pretrained weights. Returns classifier and bbox-regression outputs."""
    inp = Input(shape=[None, None, 3])
    net = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(inp)
    net = PReLU(shared_axes=[1, 2], name='PReLU1')(net)
    net = MaxPool2D(pool_size=2)(net)
    net = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(net)
    net = PReLU(shared_axes=[1, 2], name='PReLU2')(net)
    net = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(net)
    net = PReLU(shared_axes=[1, 2], name='PReLU3')(net)
    classifier = Conv2D(2, (1, 1), activation='softmax', name='conv4-1')(net)
    # No activation function: linear output.
    bbox_regress = Conv2D(4, (1, 1), name='conv4-2')(net)
    model = Model([inp], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model


# -----------------------------#
#   Stage 2 of MTCNN:
#   refine candidate boxes
# -----------------------------#
Example #19
Source File: encoder.py From enet-keras with MIT License | 6 votes |
def build(inp, dropout_rate=0.01):
    """Build the ENet encoder stack on top of `inp` and return its output tensor."""
    net = initial_block(inp)
    # enet_unpooling uses momentum of 0.1; the keras default is 0.99.
    net = BatchNormalization(momentum=0.1)(net)
    net = PReLU(shared_axes=[1, 2])(net)
    net = bottleneck(net, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    for _ in range(4):
        net = bottleneck(net, 64, dropout_rate=dropout_rate)  # bottleneck 1.i
    net = bottleneck(net, 128, downsample=True)  # bottleneck 2.0
    # bottleneck 2.x and 3.x
    for _ in range(2):
        net = bottleneck(net, 128)                 # bottleneck 2.1
        net = bottleneck(net, 128, dilated=2)      # bottleneck 2.2
        net = bottleneck(net, 128, asymmetric=5)   # bottleneck 2.3
        net = bottleneck(net, 128, dilated=4)      # bottleneck 2.4
        net = bottleneck(net, 128)                 # bottleneck 2.5
        net = bottleneck(net, 128, dilated=8)      # bottleneck 2.6
        net = bottleneck(net, 128, asymmetric=5)   # bottleneck 2.7
        net = bottleneck(net, 128, dilated=16)     # bottleneck 2.8
    return net
Example #20
Source File: encoder.py From enet-keras with MIT License | 6 votes |
def build(inp, dropout_rate=0.01):
    """Build the ENet encoder with max-unpooling support.

    Returns the encoder output tensor plus the list of pooling indices
    collected at each downsampling stage (needed by the decoder).
    """
    pooling_indices = []
    net, indices_single = initial_block(inp)
    # enet_unpooling uses momentum of 0.1; the keras default is 0.99.
    net = BatchNormalization(momentum=0.1)(net)
    net = PReLU(shared_axes=[1, 2])(net)
    pooling_indices.append(indices_single)
    net, indices_single = bottleneck(net, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    pooling_indices.append(indices_single)
    for _ in range(4):
        net = bottleneck(net, 64, dropout_rate=dropout_rate)  # bottleneck 1.i
    net, indices_single = bottleneck(net, 128, downsample=True)  # bottleneck 2.0
    pooling_indices.append(indices_single)
    # bottleneck 2.x and 3.x
    for _ in range(2):
        net = bottleneck(net, 128)                 # bottleneck 2.1
        net = bottleneck(net, 128, dilated=2)      # bottleneck 2.2
        net = bottleneck(net, 128, asymmetric=5)   # bottleneck 2.3
        net = bottleneck(net, 128, dilated=4)      # bottleneck 2.4
        net = bottleneck(net, 128)                 # bottleneck 2.5
        net = bottleneck(net, 128, dilated=8)      # bottleneck 2.6
        net = bottleneck(net, 128, asymmetric=5)   # bottleneck 2.7
        net = bottleneck(net, 128, dilated=16)     # bottleneck 2.8
    return net, pooling_indices
Example #21
Source File: train_predict_krs1.py From kaggler-template with GNU General Public License v3.0 | 6 votes |
def nn_model(dims):
    """Build a 3-hidden-layer PReLU MLP (400-200-50) with batch norm and
    dropout, sigmoid output, compiled with adadelta for binary classification.

    :param dims: number of input features.
    """
    net = Sequential()
    net.add(Dense(400, input_dim=dims, kernel_initializer='he_normal'))
    net.add(PReLU())
    net.add(BatchNormalization())
    net.add(Dropout(0.4))
    net.add(Dense(200, kernel_initializer='he_normal'))
    net.add(PReLU())
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(50, kernel_initializer='he_normal'))
    net.add(PReLU())
    net.add(BatchNormalization())
    net.add(Dropout(0.2))
    net.add(Dense(1, kernel_initializer='he_normal', activation='sigmoid'))
    net.compile(loss='binary_crossentropy', optimizer='adadelta')
    return net
Example #22
Source File: CoarseNet_model.py From MinutiaeNet with MIT License | 6 votes |
def conv_bn_prelu(bottom, w_size, name, strides=(1,1), dilation_rate=(1,1)):
    """Conv2D -> BatchNorm -> PReLU block.

    :param bottom: input tensor.
    :param w_size: (filters, kernel_h, kernel_w) triple.
    :param name: suffix used to name the conv/bn/prelu layers.
    """
    # Dilated convs get a distinct layer-name prefix so weights stay distinguishable.
    conv_type = 'conv' if dilation_rate == (1,1) else 'atrousconv'
    top = Conv2D(w_size[0], (w_size[1], w_size[2]),
                 kernel_regularizer=l2(5e-5),
                 padding='same',
                 strides=strides,
                 dilation_rate=dilation_rate,
                 name=conv_type+name)(bottom)
    top = BatchNormalization(name='bn-'+name)(top)
    top = PReLU(alpha_initializer='zero', shared_axes=[1,2], name='prelu-'+name)(top)
    # top = Dropout(0.25)(top)
    return top
Example #23
Source File: MTCNN.py From keras-mtcnn with MIT License | 6 votes |
def create_Kao_Rnet(weight_path='model24.h5'):
    """Build the MTCNN R-net (24x24x3 input) and load pretrained weights.

    Change the input shape to [None, None, 3] to allow arbitrary sizes.
    Returns a Model with two outputs: face classifier and bbox regression.
    """
    inp = Input(shape=[24, 24, 3])
    net = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(inp)
    net = PReLU(shared_axes=[1, 2], name='prelu1')(net)
    net = MaxPool2D(pool_size=3, strides=2, padding='same')(net)
    net = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu2')(net)
    net = MaxPool2D(pool_size=3, strides=2)(net)
    net = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu3')(net)
    net = Permute((3, 2, 1))(net)
    net = Flatten()(net)
    net = Dense(128, name='conv4')(net)
    net = PReLU(name='prelu4')(net)
    classifier = Dense(2, activation='softmax', name='conv5-1')(net)
    bbox_regress = Dense(4, name='conv5-2')(net)
    model = Model([inp], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
Example #24
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_tiny_conv_prelu_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    """Round-trip a small Conv2D + PReLU model through the coreml converter
    with random weights and check numeric agreement."""
    np.random.seed(1988)

    # Define a tiny keras model: one same-padded conv followed by PReLU
    # shared across the spatial axes.
    from keras.layers.advanced_activations import PReLU

    model = Sequential()
    model.add(Conv2D(input_shape=(10, 10, 3),
                     filters=3,
                     kernel_size=(5, 5),
                     padding="same"))
    model.add(PReLU(shared_axes=[1, 2]))

    # Randomize every weight tensor in place.
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Convert and compare against the keras reference.
    self._test_model(model, model_precision=model_precision)
Example #25
Source File: test_keras_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_tiny_conv_prelu_random(self):
    """Round-trip a small Convolution2D + PReLU model (keras 1 API) through
    the coreml converter with random weights and check numeric agreement."""
    np.random.seed(1988)

    # Define a tiny keras model: one same-padded conv followed by PReLU
    # shared across the spatial axes.
    from keras.layers.advanced_activations import PReLU

    model = Sequential()
    model.add(Convolution2D(input_shape=(10, 10, 3),
                            nb_filter=3,
                            nb_row=5,
                            nb_col=5,
                            border_mode="same"))
    model.add(PReLU(shared_axes=[1, 2]))

    # Randomize every weight tensor in place.
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Convert and compare against the keras reference.
    self._test_keras_model(model)
Example #26
Source File: co_lstm_predict_sequence.py From copper_price_forecast with GNU General Public License v3.0 | 6 votes |
def build_model():
    """Build and compile the stacked-LSTM regression model (layer sizes from Conf)."""
    net = Sequential()
    net.add(LSTM(units=Conf.LAYERS[1],
                 input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]),
                 return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(units=Conf.LAYERS[3]))
    # net.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    net.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # net.add(act)
    start = time.time()
    net.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return net
Example #27
Source File: co_lstm_predict_day.py From copper_price_forecast with GNU General Public License v3.0 | 6 votes |
def build_model():
    """Build and compile the stacked-LSTM regression model (layer sizes from Conf)."""
    net = Sequential()
    net.add(LSTM(units=Conf.LAYERS[1],
                 input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]),
                 return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(units=Conf.LAYERS[3]))
    # net.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    net.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # net.add(act)
    start = time.time()
    net.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return net
Example #28
Source File: MTCNN.py From keras-mtcnn with MIT License | 6 votes |
def create_Kao_Onet(weight_path='model48.h5'):
    """Build the MTCNN O-net (48x48x3 input) and load pretrained weights.

    Returns a Model with three outputs: face classifier, bounding-box
    regression, and landmark regression.
    """
    inp = Input(shape=[48, 48, 3])
    net = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(inp)
    net = PReLU(shared_axes=[1, 2], name='prelu1')(net)
    net = MaxPool2D(pool_size=3, strides=2, padding='same')(net)
    net = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu2')(net)
    net = MaxPool2D(pool_size=3, strides=2)(net)
    net = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu3')(net)
    net = MaxPool2D(pool_size=2)(net)
    net = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu4')(net)
    net = Permute((3, 2, 1))(net)
    net = Flatten()(net)
    net = Dense(256, name='conv5')(net)
    net = PReLU(name='prelu5')(net)
    classifier = Dense(2, activation='softmax', name='conv6-1')(net)
    bbox_regress = Dense(4, name='conv6-2')(net)
    landmark_regress = Dense(10, name='conv6-3')(net)
    model = Model([inp], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)
    return model
Example #29
Source File: models.py From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License | 5 votes |
def build_segan_discriminator(noisy_input_shape, clean_input_shape,
                              n_filters=[64, 128, 256, 512, 1024],
                              kernel_size=(1, 31)):
    """Build the SEGAN discriminator.

    Concatenates clean and noisy waveforms, applies a strided conv/BN/PReLU
    stack, then PReLU-separated dense layers down to a single logit.
    """
    clean_input = Input(shape=clean_input_shape)
    noisy_input = Input(shape=noisy_input_shape)
    net = Concatenate(-1)([clean_input, noisy_input])

    # Convolution layers: stride (1, 4) downsamples along time at each step.
    for filters in n_filters:
        net = Conv2D(filters=filters, kernel_size=kernel_size,
                     strides=(1, 4), padding='same', use_bias=True,
                     kernel_initializer=weight_init)(net)
        net = BatchNormalization(epsilon=1e-5, momentum=0.1)(net)
        net = PReLU()(net)
    net = Reshape((16384, ))(net)

    # Dense layers ending in an unactivated scalar output.
    net = Dense(256, activation=None, use_bias=True)(net)
    net = PReLU()(net)
    net = Dense(128, activation=None, use_bias=True)(net)
    net = PReLU()(net)
    net = Dense(1, activation=None, use_bias=True)(net)

    # Create the model graph.
    model = Model(inputs=[noisy_input, clean_input], outputs=net,
                  name='Discriminator')
    print("\nDiscriminator")
    model.summary()
    return model
Example #30
Source File: mtcnn.py From mtcnn-keras with MIT License | 5 votes |
def create_Onet(weight_path):
    """Build the MTCNN O-net (48x48x3 input) and load pretrained weights.

    Returns a Model with three outputs: face classifier (2), bounding-box
    regression (4), and landmark regression (10).
    """
    inp = Input(shape=[48, 48, 3])
    # 48,48,3 -> 23,23,32
    net = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(inp)
    net = PReLU(shared_axes=[1, 2], name='prelu1')(net)
    net = MaxPool2D(pool_size=3, strides=2, padding='same')(net)
    # 23,23,32 -> 10,10,64
    net = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu2')(net)
    net = MaxPool2D(pool_size=3, strides=2)(net)
    # 8,8,64 -> 4,4,64
    net = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu3')(net)
    net = MaxPool2D(pool_size=2)(net)
    # 4,4,64 -> 3,3,128
    net = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(net)
    net = PReLU(shared_axes=[1, 2], name='prelu4')(net)
    # 3,3,128 -> 128,12,12
    net = Permute((3, 2, 1))(net)
    # 1152 -> 256
    net = Flatten()(net)
    net = Dense(256, name='conv5')(net)
    net = PReLU(name='prelu5')(net)
    # Heads: 256 -> 2, 256 -> 4, 256 -> 10
    classifier = Dense(2, activation='softmax', name='conv6-1')(net)
    bbox_regress = Dense(4, name='conv6-2')(net)
    landmark_regress = Dense(10, name='conv6-3')(net)
    model = Model([inp], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)
    return model