Python keras.layers.advanced_activations.LeakyReLU() Examples
The following are 30 code examples of keras.layers.advanced_activations.LeakyReLU().
You can go to the original project or source file by following the link above each example.
You may also want to check out all available functions/classes of the module
keras.layers.advanced_activations, or try the search function.
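Before the project examples, here is a minimal, self-contained sketch of the layer itself (the layer sizes and alpha below are arbitrary illustration choices, not taken from any of the projects). LeakyReLU(alpha) computes f(x) = x for x >= 0 and f(x) = alpha * x for x < 0, and is added to a model as its own layer rather than via an activation string:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.advanced_activations import LeakyReLU

model = Sequential()
model.add(Dense(8, input_dim=4))
model.add(LeakyReLU(alpha=0.2))  # f(x) = x if x >= 0 else 0.2 * x

# Negative pre-activations are scaled by alpha instead of being zeroed.
print(model.predict(np.random.randn(2, 4)))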
Example #1
Source File: model.py From Pix2Pix-Keras with MIT License

def creat_discriminator(self):
    # layer 0
    image_A = Input(shape=self.image_shape)
    image_B = Input(shape=self.image_shape)
    combined_images = Concatenate(axis=-1)([image_A, image_B])
    # layer 1
    d1 = Conv2D(filters=64, kernel_size=4, strides=2, padding='same')(combined_images)
    d1 = LeakyReLU(alpha=0.2)(d1)
    # layer 2
    d2 = Conv2D(filters=128, kernel_size=4, strides=2, padding='same')(d1)
    d2 = LeakyReLU(alpha=0.2)(d2)
    d2 = BatchNormalization(momentum=0.8)(d2)
    # layer 3
    d3 = Conv2D(filters=128, kernel_size=4, strides=2, padding='same')(d2)
    d3 = LeakyReLU(alpha=0.2)(d3)
    d3 = BatchNormalization(momentum=0.8)(d3)
    # layer 4
    d4 = Conv2D(filters=128, kernel_size=4, strides=2, padding='same')(d3)
    d4 = LeakyReLU(alpha=0.2)(d4)
    d4 = BatchNormalization(momentum=0.8)(d4)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

    return Model([image_A, image_B], validity)
Example #2
Source File: context_encoder.py From Keras-GAN with MIT License

def build_discriminator(self):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.missing_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.missing_shape)
    validity = model(img)

    return Model(img, validity)
Example #3
Source File: cogan.py From Keras-GAN with MIT License

def build_discriminators(self):
    img1 = Input(shape=self.img_shape)
    img2 = Input(shape=self.img_shape)

    # Shared discriminator layers
    model = Sequential()
    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))

    img1_embedding = model(img1)
    img2_embedding = model(img2)

    # Discriminator 1
    validity1 = Dense(1, activation='sigmoid')(img1_embedding)
    # Discriminator 2
    validity2 = Dense(1, activation='sigmoid')(img2_embedding)

    return Model(img1, validity1), Model(img2, validity2)
Example #4
Source File: discogan.py From Keras-GAN with MIT License

def build_discriminator(self):

    def d_layer(layer_input, filters, f_size=4, normalization=True):
        """Discriminator layer"""
        d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if normalization:
            d = InstanceNormalization()(d)
        return d

    img = Input(shape=self.img_shape)

    d1 = d_layer(img, self.df, normalization=False)
    d2 = d_layer(d1, self.df*2)
    d3 = d_layer(d2, self.df*4)
    d4 = d_layer(d3, self.df*8)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

    return Model(img, validity)
Example #5
Source File: lsgan.py From Keras-GAN with MIT License

def build_generator(self):
    model = Sequential()
    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))
    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #6
Source File: pixelda.py From Keras-GAN with MIT License

def build_classifier(self):

    def clf_layer(layer_input, filters, f_size=4, normalization=True):
        """Classifier layer"""
        d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if normalization:
            d = InstanceNormalization()(d)
        return d

    img = Input(shape=self.img_shape)

    c1 = clf_layer(img, self.cf, normalization=False)
    c2 = clf_layer(c1, self.cf*2)
    c3 = clf_layer(c2, self.cf*4)
    c4 = clf_layer(c3, self.cf*8)
    c5 = clf_layer(c4, self.cf*8)

    class_pred = Dense(self.num_classes, activation='softmax')(Flatten()(c5))

    return Model(img, class_pred)
Example #7
Source File: bigan.py From Keras-GAN with MIT License

def build_encoder(self):
    model = Sequential()
    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(self.latent_dim))
    model.summary()

    img = Input(shape=self.img_shape)
    z = model(img)

    return Model(img, z)
Example #8
Source File: ccgan.py From Keras-GAN with MIT License

def build_discriminator(self):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape))
    model.add(LeakyReLU(alpha=0.8))
    model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(InstanceNormalization())
    model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(InstanceNormalization())
    model.summary()

    img = Input(shape=self.img_shape)
    features = model(img)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)

    label = Flatten()(features)
    label = Dense(self.num_classes+1, activation="softmax")(label)

    return Model(img, [validity, label])
Example #9
Source File: bigan.py From Keras-GAN with MIT License

def build_discriminator(self):
    z = Input(shape=(self.latent_dim, ))
    img = Input(shape=self.img_shape)
    d_in = concatenate([z, Flatten()(img)])

    model = Dense(1024)(d_in)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    validity = Dense(1, activation="sigmoid")(model)

    return Model([z, img], validity)
Example #10
Source File: discriminator.py From Generative-Adversarial-Networks-Cookbook with MIT License

def model(self):
    input_layer = Input(self.SHAPE)

    up_layer_1 = Convolution2D(64, kernel_size=4, strides=2, padding='same',
                               activation=LeakyReLU(alpha=0.2))(input_layer)
    up_layer_2 = Convolution2D(64*2, kernel_size=4, strides=2, padding='same',
                               activation=LeakyReLU(alpha=0.2))(up_layer_1)
    norm_layer_1 = InstanceNormalization()(up_layer_2)
    up_layer_3 = Convolution2D(64*4, kernel_size=4, strides=2, padding='same',
                               activation=LeakyReLU(alpha=0.2))(norm_layer_1)
    norm_layer_2 = InstanceNormalization()(up_layer_3)
    up_layer_4 = Convolution2D(64*8, kernel_size=4, strides=2, padding='same',
                               activation=LeakyReLU(alpha=0.2))(norm_layer_2)
    norm_layer_3 = InstanceNormalization()(up_layer_4)

    output_layer = Convolution2D(1, kernel_size=4, strides=1, padding='same')(norm_layer_3)
    output_layer_1 = Flatten()(output_layer)
    output_layer_2 = Dense(1, activation='sigmoid')(output_layer_1)

    return Model(input_layer, output_layer_2)
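A note on Example #10: it passes a LeakyReLU instance directly as the activation argument of Convolution2D. Advanced activations are layer objects, and with standalone Keras this inline style has historically been fragile (for example around model serialization), which is why the other examples on this page add LeakyReLU as a separate layer. A minimal sketch of both spellings, with arbitrary layer sizes:

from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers.advanced_activations import LeakyReLU

# Inline spelling, as in Example #10 above.
inline = Sequential()
inline.add(Conv2D(64, kernel_size=4, strides=2, padding='same',
                  activation=LeakyReLU(alpha=0.2), input_shape=(32, 32, 3)))

# Separate-layer spelling, as in most other examples on this page.
layered = Sequential()
layered.add(Conv2D(64, kernel_size=4, strides=2, padding='same',
                   input_shape=(32, 32, 3)))
layered.add(LeakyReLU(alpha=0.2))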
Example #11
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License

def test_tiny_conv_leaky_relu_random(self):
    np.random.seed(1988)

    # Define a model
    from keras.layers.advanced_activations import LeakyReLU

    model = Sequential()
    model.add(Conv2D(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding="same"))
    model.add(LeakyReLU(alpha=0.3))

    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_model(model)
Example #12
Source File: pixelda.py From Keras-GAN with MIT License

def build_discriminator(self):

    def d_layer(layer_input, filters, f_size=4, normalization=True):
        """Discriminator layer"""
        d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if normalization:
            d = InstanceNormalization()(d)
        return d

    img = Input(shape=self.img_shape)

    d1 = d_layer(img, self.df, normalization=False)
    d2 = d_layer(d1, self.df*2)
    d3 = d_layer(d2, self.df*4)
    d4 = d_layer(d3, self.df*8)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

    return Model(img, validity)
Example #13
Source File: test_keras_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License

def test_tiny_conv_leaky_relu_random(self):
    np.random.seed(1988)

    # Define a model
    from keras.layers.advanced_activations import LeakyReLU

    model = Sequential()
    model.add(Convolution2D(input_shape=(10, 10, 3), nb_filter=3, nb_row=5, nb_col=5,
                            border_mode="same"))
    model.add(LeakyReLU(alpha=0.3))

    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_keras_model(model)
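Examples #11 and #13 are the same coremltools test written against Keras 2 and Keras 1 respectively, and side by side they document the main argument renames between the two APIs: nb_filter became filters, nb_row/nb_col became kernel_size, and border_mode became padding (likewise, as in Example #15 below, subsample became strides). A sketch of the Keras 2 layer with the Keras 1 spelling noted alongside:

from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers.advanced_activations import LeakyReLU

model = Sequential()
# Keras 1 spelling of the same layer (Example #13):
#   Convolution2D(nb_filter=3, nb_row=5, nb_col=5, border_mode="same")
model.add(Conv2D(filters=3, kernel_size=(5, 5), padding="same",
                 input_shape=(10, 10, 3)))
model.add(LeakyReLU(alpha=0.3))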
Example #14
Source File: trainer.py From CT-GAN with MIT License

def build_discriminator(self):

    def d_layer(layer_input, filters, f_size=4, bn=True):
        """Discriminator layer"""
        d = Conv3D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if bn:
            d = BatchNormalization(momentum=0.8)(d)
        return d

    img_A = Input(shape=self.img_shape)
    img_B = Input(shape=self.img_shape)

    # Concatenate image and conditioning image by channels to produce input
    model_input = Concatenate(axis=-1)([img_A, img_B])

    d1 = d_layer(model_input, self.df, bn=False)
    d2 = d_layer(d1, self.df * 2)
    d3 = d_layer(d2, self.df * 4)
    d4 = d_layer(d3, self.df * 8)

    validity = Conv3D(1, kernel_size=4, strides=1, padding='same')(d4)

    return Model([img_A, img_B], validity)
Example #15
Source File: dcgan_mnist.py From keras-examples with MIT License

def discriminator_model():
    model = Sequential()
    # Use subsample (strides) instead of pooling (halves the output size)
    model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode='same',
                            input_shape=(28, 28, 1)))  # in tf mode the channel axis comes last!
    model.add(LeakyReLU(0.2))
    model.add(Convolution2D(128, 5, 5, subsample=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Example #16
Source File: cyclegan.py From Keras-GAN with MIT License

def build_discriminator(self):

    def d_layer(layer_input, filters, f_size=4, normalization=True):
        """Discriminator layer"""
        d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if normalization:
            d = InstanceNormalization()(d)
        return d

    img = Input(shape=self.img_shape)

    d1 = d_layer(img, self.df, normalization=False)
    d2 = d_layer(d1, self.df*2)
    d3 = d_layer(d2, self.df*4)
    d4 = d_layer(d3, self.df*8)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

    return Model(img, validity)
Example #17
Source File: pix2pix.py From Keras-GAN with MIT License

def build_discriminator(self):

    def d_layer(layer_input, filters, f_size=4, bn=True):
        """Discriminator layer"""
        d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if bn:
            d = BatchNormalization(momentum=0.8)(d)
        return d

    img_A = Input(shape=self.img_shape)
    img_B = Input(shape=self.img_shape)

    # Concatenate image and conditioning image by channels to produce input
    combined_imgs = Concatenate(axis=-1)([img_A, img_B])

    d1 = d_layer(combined_imgs, self.df, bn=False)
    d2 = d_layer(d1, self.df*2)
    d3 = d_layer(d2, self.df*4)
    d4 = d_layer(d3, self.df*8)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

    return Model([img_A, img_B], validity)
Example #18
Source File: dualgan.py From Keras-GAN with MIT License

def build_generator(self):
    X = Input(shape=(self.img_dim,))

    model = Sequential()
    model.add(Dense(256, input_dim=self.img_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dropout(0.4))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dropout(0.4))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dropout(0.4))
    model.add(Dense(self.img_dim, activation='tanh'))

    X_translated = model(X)

    return Model(X, X_translated)
Example #19
Source File: Build_Model.py From DOVE with GNU General Public License v3.0

def makecnn(learningrate, regular, decay, channel_number):
    # model structure
    model = Sequential()
    model.add(Conv3D(100, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     input_shape=(20, 20, 20, channel_number), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1), use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular),
                     kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))

    model.add(Conv3D(200, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1), use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular),
                     kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid',
                           data_format='channels_last'))
    model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True,
                                 scale=True, beta_initializer='zeros',
                                 gamma_initializer='ones',
                                 moving_mean_initializer='zeros',
                                 moving_variance_initializer='ones',
                                 beta_regularizer=None, gamma_regularizer=None,
                                 beta_constraint=None, gamma_constraint=None))

    model.add(Conv3D(400, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1), use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular),
                     kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid',
                           data_format='channels_last'))

    model.add(Flatten())
    model.add(Dropout(0.3))
    model.add(Dense(1000, use_bias=True, input_shape=(32000,),
                    kernel_initializer='glorot_normal', bias_initializer='zeros',
                    kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular),
                    kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(100, use_bias=True, kernel_initializer='glorot_normal',
                    bias_initializer='zeros', kernel_regularizer=None,
                    bias_regularizer=None, activity_regularizer=l2(regular),
                    kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(1, activation='sigmoid', use_bias=True,
                    kernel_initializer='glorot_normal', bias_initializer='zeros',
                    kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular),
                    kernel_constraint=None, bias_constraint=None))

    nadam = Nadam(lr=learningrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                  schedule_decay=decay)
    # f1score, precision and recall are custom metrics defined elsewhere in the project
    model.compile(loss='binary_crossentropy', optimizer=nadam,
                  metrics=['accuracy', f1score, precision, recall])
    return model
Example #20
Source File: gan.py From Keras-GAN with MIT License

def build_generator(self):
    model = Sequential()
    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))
    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #21
Source File: gan.py From Keras-GAN with MIT License

def build_discriminator(self):
    model = Sequential()
    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.img_shape)
    validity = model(img)

    return Model(img, validity)
Example #22
Source File: aae.py From Keras-GAN with MIT License

def build_encoder(self):
    # Encoder
    img = Input(shape=self.img_shape)

    h = Flatten()(img)
    h = Dense(512)(h)
    h = LeakyReLU(alpha=0.2)(h)
    h = Dense(512)(h)
    h = LeakyReLU(alpha=0.2)(h)
    mu = Dense(self.latent_dim)(h)
    log_var = Dense(self.latent_dim)(h)
    latent_repr = merge([mu, log_var],
                        mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
                        output_shape=lambda p: p[0])

    return Model(img, latent_repr)
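The merge(..., mode=lambda ...) call above is the Keras 1 functional merge API, which was removed in Keras 2. Under Keras 2, the same reparameterization trick (z = mu + epsilon * exp(log_var / 2), with epsilon ~ N(0, I)) is usually written with a Lambda layer. A minimal sketch, assuming mu and log_var are the tensors defined above:

import keras.backend as K
from keras.layers import Lambda

def sampling(args):
    # Draw z = mu + epsilon * sigma, with epsilon ~ N(0, I)
    mu, log_var = args
    epsilon = K.random_normal(shape=K.shape(mu))
    return mu + epsilon * K.exp(log_var / 2)

latent_repr = Lambda(sampling)([mu, log_var])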
Example #23
Source File: aae.py From Keras-GAN with MIT License

def build_decoder(self):
    model = Sequential()
    model.add(Dense(512, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))
    model.summary()

    z = Input(shape=(self.latent_dim,))
    img = model(z)

    return Model(z, img)
Example #24
Source File: resnext.py From CBAM-keras with MIT License

def __initial_conv_block_inception(input, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the inception resnext
    Args:
        input: input tensor
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (7, 7), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
               strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
Example #25
Source File: bgan.py From Keras-GAN with MIT License

def build_discriminator(self):
    model = Sequential()
    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.img_shape)
    validity = model(img)

    return Model(img, validity)
Example #26
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License

def deep_mlp(self):
    """
    Deep Multilayer Perceptron.
    """
    if self._config.num_mlp_layers == 0:
        self.add(Dropout(0.5))
    else:
        for j in xrange(self._config.num_mlp_layers):
            self.add(Dense(self._config.mlp_hidden_dim))
            if self._config.mlp_activation == 'elu':
                self.add(ELU())
            elif self._config.mlp_activation == 'leaky_relu':
                self.add(LeakyReLU())
            elif self._config.mlp_activation == 'prelu':
                self.add(PReLU())
            else:
                self.add(Activation(self._config.mlp_activation))
            self.add(Dropout(0.5))
Example #27
Source File: ae_model.py From Pix2Pose with MIT License

def DCGAN_discriminator():
    nb_filters = 64
    nb_conv = int(np.floor(np.log(128) / np.log(2)))
    list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]

    input_img = Input(shape=(128, 128, 3))
    x = Conv2D(list_filters[0], (3, 3), strides=(2, 2), name="disc_conv2d_1",
               padding="same")(input_img)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(0.2)(x)

    # Next convs
    for i, f in enumerate(list_filters[1:]):
        name = "disc_conv2d_%s" % (i + 2)
        x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same")(x)
        x = BatchNormalization(axis=-1)(x)
        x = LeakyReLU(0.2)(x)

    x_flat = Flatten()(x)
    x_out = Dense(1, activation="sigmoid", name="disc_dense")(x_flat)

    discriminator_model = Model(inputs=input_img, outputs=[x_out])
    return discriminator_model
Example #28
Source File: captcha_gan.py From Intelligent-Projects-Using-Python with MIT License

def discriminator(img_dim, alpha=0.2):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=5, strides=2, padding='same', input_shape=img_dim))
    model.add(LeakyReLU(alpha))
    model.add(Conv2D(128, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2D(256, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Flatten())
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model

# Define a combination of Generator and Discriminator
Example #29
Source File: co_lstm_predict_day.py From copper_price_forecast with GNU General Public License v3.0

def build_model():
    """
    Define the model
    """
    # Conf.LAYERS holds the layer sizes configured elsewhere in the project
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]),
                   return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Example #30
Source File: co_lstm_predict_sequence.py From copper_price_forecast with GNU General Public License v3.0

def build_model():
    """
    Define the model
    """
    # Conf.LAYERS holds the layer sizes configured elsewhere in the project
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]),
                   return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model