Python keras.layers.convolutional.Convolution1D() Examples
The following are 19 code examples of keras.layers.convolutional.Convolution1D(). You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the keras.layers.convolutional module.
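
Most of these examples target the legacy Keras 1.x API, in which the layer is named Convolution1D and is configured with nb_filter, filter_length, border_mode and subsample_length; in Keras 2 the layer was renamed Conv1D and those arguments became filters, kernel_size, padding and strides. The sketch below is not taken from any of the projects listed (the shapes are arbitrary); it only illustrates the legacy call, with the corresponding Keras 2 keywords noted in comments.

# Minimal sketch assuming the legacy Keras 1.x API; comments give the Keras 2
# (Conv1D) equivalents. Shapes are arbitrary illustrative values.
from keras.models import Sequential
from keras.layers.convolutional import Convolution1D

model = Sequential()
model.add(Convolution1D(nb_filter=64,           # Keras 2: filters=64
                        filter_length=3,        # Keras 2: kernel_size=3
                        border_mode='same',     # Keras 2: padding='same'
                        subsample_length=1,     # Keras 2: strides=1
                        activation='relu',
                        input_shape=(100, 8)))  # (timesteps, input_dim)
print(model.output_shape)  # (None, 100, 64)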
Example #1
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License
def create(self):
    self.textual_embedding(self, mask_zero=False)
    self.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=self._config.language_cnn_filter_length,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    #self.add(MaxPooling1D(pool_length=self._config.language_max_pool_length))
    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=False))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example #2
Source File: model.py From keras-molecules with MIT License
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
        z_mean_, z_log_var_ = args
        batch_size = K.shape(z_mean_)[0]
        epsilon = K.random_normal(shape=(batch_size, latent_rep_size),
                                  mean=0., std=epsilon_std)
        return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
        x = K.flatten(x)
        x_decoded_mean = K.flatten(x_decoded_mean)
        xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return xent_loss + kl_loss

    return (vae_loss, Lambda(sampling,
                             output_shape=(latent_rep_size,),
                             name='lambda')([z_mean, z_log_var]))
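
The nested sampling closure above implements the standard VAE reparameterization trick, and vae_loss adds the usual Kullback-Leibler penalty against a unit-Gaussian prior (this code averages rather than sums over the latent dimensions). With z_mean as \mu and z_log_var as \log\sigma^{2}:

z = \mu + \exp\!\left(\tfrac{1}{2}\log\sigma^{2}\right)\epsilon, \qquad \epsilon \sim \mathcal{N}(0, I)

\mathrm{KL}\!\left(\mathcal{N}(\mu,\sigma^{2}) \,\|\, \mathcal{N}(0, I)\right) = -\tfrac{1}{2}\sum_{j}\left(1 + \log\sigma_{j}^{2} - \mu_{j}^{2} - \sigma_{j}^{2}\right)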
Example #3
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License
def create(self):
    assert self._config.merge_mode in ['max', 'ave', 'sum'], \
        'Merge mode of this model is either max, ave or sum'

    model_list = [None] * self._config.language_cnn_views
    for j in xrange(1, self._config.language_cnn_views + 1):
        current_view = Sequential()
        self.textual_embedding(current_view, mask_zero=True)
        current_view.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=j,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.temporal_pooling(current_view)
        model_list[j - 1] = current_view

    self.add(Merge(model_list, mode='concat'))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example #4
Source File: deep_models.py From urgent-care-comparative with GNU General Public License v3.0
def hierarchical_cnn(input_shape, aux_shape, targets=1, hidden=256, multiclass=False, learn_rate=1e-4):
    x = Input(shape=input_shape, name='x')
    xx = Convolution1D(nb_filter=64, filter_length=3, border_mode='same', activation='relu')(x)
    xx = MaxPooling1D(pool_length=3)(xx)
    xx = Bidirectional(LSTM(256, activation='relu'), merge_mode='concat')(xx)
    xx = Dropout(0.5)(xx)
    dx = Input(shape=aux_shape, name='aux')
    xx = concatenate([xx, dx])
    if multiclass:
        y = Dense(targets, activation='softmax')(xx)
        model = Model(inputs=[x, dx], outputs=[y])
        model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=learn_rate),
                      metrics=['categorical_accuracy'])
    else:
        y = Dense(targets, activation='sigmoid')(xx)
        model = Model(inputs=[x, dx], outputs=[y])
        model.compile(loss='binary_crossentropy', optimizer=Adam(lr=learn_rate),
                      metrics=['accuracy'])
    return (model)
Example #5
Source File: deep_models.py From urgent-care-comparative with GNU General Public License v3.0
def cnn_model(input_shape, hidden=256, targets=1, learn_rate=1e-4):
    model = Sequential()
    model.add(Convolution1D(input_shape=input_shape, nb_filter=64, filter_length=3,
                            border_mode='same', activation='relu'))
    model.add(MaxPooling1D(pool_length=3))
    model.add(Bidirectional(LSTM(hidden), merge_mode='concat'))
    model.add(Activation('tanh'))
    model.add(Dropout(0.5))
    model.add(Dense(targets))
    # NOTE: `multiclass` is not a parameter of this function; it is assumed to be
    # defined in the enclosing scope of the original module.
    if multiclass:
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=learn_rate, beta_1=.5),
                      metrics=['categorical_accuracy'])
    else:
        model.add(Activation('sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=learn_rate, beta_1=.5),
                      metrics=['accuracy'])
    return (model)
Example #6
Source File: cnn_lstm.py From hyperas with MIT License
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
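
For context, a hyperas template like model() above is normally passed to optim.minimize together with a data() function whose return values match the template's parameters; the {{uniform(...)}} and {{choice(...)}} expressions are resolved during the search. The sketch below is an assumption modelled on the hyperas README rather than part of this example; the IMDB loader and the max_evals value are illustrative only.

# Hedged usage sketch for the hyperas template above; data() is hypothetical
# and must return values matching model()'s parameter list.
from hyperopt import Trials, tpe
from hyperas import optim

def data():
    from keras.datasets import imdb
    from keras.preprocessing import sequence
    maxlen = 100
    max_features = 20000
    (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)  # num_words in Keras 2
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    return X_train, X_test, y_train, y_test, maxlen, max_features

if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials())
    print(best_run)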
Example #7
Source File: models.py From eXposeDeepNeuralNetwork with BSD 3-Clause "New" or "Revised" License
def getconvmodel(filter_length, nb_filter):
    model = Sequential()
    model.add(Convolution1D(nb_filter=nb_filter,
                            input_shape=(100, 32),
                            filter_length=filter_length,
                            border_mode='same',
                            activation='relu',
                            subsample_length=1))
    model.add(Lambda(sum_1d, output_shape=(nb_filter,)))
    #model.add(BatchNormalization(mode=0))
    model.add(Dropout(0.5))
    return model
Example #8
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License
def create(self):
    self.textual_embedding_fixed_length(self, mask_zero=False)
    self.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=self._config.language_cnn_filter_length,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    self.add(MaxPooling1D(pool_length=self._config.language_max_pool_length))
    self.add(Flatten())
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example #9
Source File: model_keras_cnn_rnn.py From Sentiment-Analysis-in-Event-Driven-Stock-Price-Movement-Prediction with MIT License
def embeddingCNN(shape, clusters=2, embedLayer=200, middle=100):
    top_words = 2001
    lossType = 'binary_crossentropy' if clusters == 2 else 'categorical_crossentropy'
    model = Sequential()
    model.add(Embedding(top_words, embedLayer, input_length=shape))
    model.add(Convolution1D(nb_filter=embedLayer, filter_length=3,
                            border_mode='same', activation='relu'))
    model.add(MaxPooling1D(pool_length=2))
    model.add(Flatten())
    model.add(Dense(middle, activation='relu'))
    model.add(Dense(clusters, activation='sigmoid'))
    model.compile(loss=lossType, optimizer='adam', metrics=['accuracy'])
    return model
Example #10
Source File: model.py From deepchem with MIT License
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
        z_mean_, z_log_var_ = args
        batch_size = K.shape(z_mean_)[0]
        epsilon = K.random_normal(
            shape=(batch_size, latent_rep_size), mean=0., std=epsilon_std)
        return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
        x = K.flatten(x)
        x_decoded_mean = K.flatten(x_decoded_mean)
        xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = -0.5 * K.mean(
            1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return xent_loss + kl_loss

    return (vae_loss, Lambda(
        sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
Example #11
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License
def create(self):
    assert self._config.merge_mode in ['max', 'ave', 'sum'], \
        'Merge mode of this model is either max, ave or sum'

    unigram = Sequential()
    self.textual_embedding(unigram, mask_zero=True)
    unigram.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=1,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    self.temporal_pooling(unigram)

    bigram = Sequential()
    self.textual_embedding(bigram, mask_zero=True)
    bigram.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=2,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    self.temporal_pooling(bigram)

    trigram = Sequential()
    self.textual_embedding(trigram, mask_zero=True)
    trigram.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=3,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    self.temporal_pooling(trigram)

    self.add(Merge([unigram, bigram, trigram], mode='concat'))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example #12
Source File: audiounet.py From audio-super-res with MIT License
def create_model(self, n_dim, r):
    # load inputs
    X, _, _ = self.inputs
    K.set_session(self.sess)

    with tf.name_scope('generator'):
        x = X
        L = self.layers
        # dim/layer: 4096, 2048, 1024, 512, 256, 128, 64, 32,
        n_filters = [128, 384, 512, 512, 512, 512, 512, 512]
        n_filtersizes = [65, 33, 17, 9, 9, 9, 9, 9, 9]
        downsampling_l = []

        print 'building model...'

        # downsampling layers
        for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
            with tf.name_scope('downsc_conv%d' % l):
                x = (Convolution1D(nb_filter=nf, filter_length=fs,
                                   activation=None, border_mode='same',
                                   init=orthogonal_init, subsample_length=2))(x)
                # if l > 0: x = BatchNormalization(mode=2)(x)
                x = LeakyReLU(0.2)(x)
                print 'D-Block: ', x.get_shape()
                downsampling_l.append(x)

        # bottleneck layer
        with tf.name_scope('bottleneck_conv'):
            x = (Convolution1D(nb_filter=n_filters[-1], filter_length=n_filtersizes[-1],
                               activation=None, border_mode='same',
                               init=orthogonal_init, subsample_length=2))(x)
            x = Dropout(p=0.5)(x)
            x = LeakyReLU(0.2)(x)

        # upsampling layers
        for l, nf, fs, l_in in reversed(zip(range(L), n_filters, n_filtersizes, downsampling_l)):
            with tf.name_scope('upsc_conv%d' % l):
                # (-1, n/2, 2f)
                x = (Convolution1D(nb_filter=2 * nf, filter_length=fs,
                                   activation=None, border_mode='same',
                                   init=orthogonal_init))(x)
                x = Dropout(p=0.5)(x)
                x = Activation('relu')(x)
                # (-1, n, f)
                x = SubPixel1D(x, r=2)
                # (-1, n, 2f)
                x = K.concatenate(tensors=[x, l_in], axis=2)
                print 'U-Block: ', x.get_shape()

        # final conv layer
        with tf.name_scope('lastconv'):
            x = Convolution1D(nb_filter=2, filter_length=9,
                              activation=None, border_mode='same',
                              init=normal_init)(x)
            x = SubPixel1D(x, r=2)
            print x.get_shape()

        g = merge([x, X], mode='sum')

    return g
Example #13
Source File: Q_Learning_Agent.py From rf_helicopter with MIT License
def create_neural_network_rnn(self):
    """
    Create the Neural Network Model

    :return: Keras Model
    """
    model = Sequential()

    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(12,   # Number of Features from State Space
                        300,  # Vector Size
                        input_length=self.input_dim))

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=self.nb_filter,
                            filter_length=self.filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    # we use standard max pooling (halving the output of the previous
    # layer):
    model.add(MaxPooling1D(pool_length=self.pool_length))
    model.add(Dropout(self.dropout))
    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    model.add(Flatten())
    # We add a vanilla hidden layer:
    model.add(Dense(self.neurons))
    model.add(Dropout(self.dropout))
    model.add(Activation('relu'))
    # We project onto a single unit output layer, and squash it with a
    # sigmoid:
    model.add(Dense(len(self.actions)))
    model.add(Activation('linear'))

    model.compile(loss='mse',
                  optimizer=Adadelta(lr=0.00025))

    print(model.summary())
    return model
Example #14
Source File: p3_cnn.py From DeepLearn with MIT License
def trainCNN(obj, dataset_headLines, dataset_body):
    embedding_dim = 300
    LSTM_neurons = 50
    dense_neuron = 16
    dimx = 100
    dimy = 200
    lamda = 0.0
    nb_filter = 100
    filter_length = 4
    vocab_size = 10000
    batch_size = 50
    epochs = 5
    ntn_out = 16
    ntn_in = nb_filter
    state = False

    train_head, train_body, embedding_matrix = obj.process_data(sent_Q=dataset_headLines,
                                                                sent_A=dataset_body,
                                                                dimx=dimx, dimy=dimy,
                                                                wordVec_model=wordVec_model)

    inpx = Input(shape=(dimx,), dtype='int32', name='inpx')
    #x = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimx)(inpx)
    x = word2vec_embedding_layer(embedding_matrix)(inpx)
    inpy = Input(shape=(dimy,), dtype='int32', name='inpy')
    #y = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimy)(inpy)
    y = word2vec_embedding_layer(embedding_matrix)(inpy)

    ques = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                         border_mode='valid', activation='relu',
                         subsample_length=1)(x)

    ans = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                        border_mode='valid', activation='relu',
                        subsample_length=1)(y)

    #hx = Lambda(max_1d, output_shape=(nb_filter,))(ques)
    #hy = Lambda(max_1d, output_shape=(nb_filter,))(ans)
    hx = GlobalMaxPooling1D()(ques)
    hy = GlobalMaxPooling1D()(ans)
    #wordVec_model = []
    #h = Merge(mode="concat",name='h')([hx,hy])

    h1 = Multiply()([hx, hy])
    h2 = Abs()([hx, hy])
    h = Merge(mode="concat", name='h')([h1, h2])

    #h = NeuralTensorLayer(output_dim=1,input_dim=ntn_in)([hx,hy])
    #h = ntn_layer(ntn_in,ntn_out,activation=None)([hx,hy])
    #score = h
    wrap = Dense(dense_neuron, activation='relu', name='wrap')(h)
    #score = Dense(1,activation='sigmoid',name='score')(h)
    #wrap = Dense(dense_neuron,activation='relu',name='wrap')(h)
    score = Dense(4, activation='softmax', name='score')(wrap)
    #score=K.clip(score,1e-7,1.0-1e-7)
    #corr = CorrelationRegularization(-lamda)([hx,hy])
    #model = Model( [inpx,inpy],[score,corr])
    model = Model([inpx, inpy], score)
    model.compile(loss='categorical_crossentropy', optimizer="adadelta", metrics=['accuracy'])

    return model, train_head, train_body
Example #15
Source File: multiCNN.py From MusiteDeep with GNU General Public License v2.0
def copy_model(input_row, input_col):
    input = Input(shape=(input_row, input_col))
    filtersize1 = 1
    filtersize2 = 9
    filtersize3 = 10
    filter1 = 200
    filter2 = 150
    filter3 = 200
    dropout1 = 0.75
    dropout2 = 0.75
    dropout4 = 0.75
    dropout5 = 0.75
    dropout6 = 0
    L1CNN = 0
    nb_classes = 2
    batch_size = 1200
    actfun = "relu"
    optimization = 'adam'
    attentionhidden_x = 10
    attentionhidden_xr = 8
    attention_reg_x = 0.151948
    attention_reg_xr = 2
    dense_size1 = 149
    dense_size2 = 8
    dropout_dense1 = 0.298224
    dropout_dense2 = 0

    input = Input(shape=(input_row, input_col))
    x = conv.Convolution1D(filter1, filtersize1, kernel_initializer='he_normal',
                           kernel_regularizer=l1(L1CNN), padding="same")(input)
    x = Dropout(dropout1)(x)
    x = Activation(actfun)(x)
    x = conv.Convolution1D(filter2, filtersize2, kernel_initializer='he_normal',
                           kernel_regularizer=l1(L1CNN), padding="same")(x)
    x = Dropout(dropout2)(x)
    x = Activation(actfun)(x)
    x = conv.Convolution1D(filter3, filtersize3, kernel_initializer='he_normal',
                           kernel_regularizer=l1(L1CNN), padding="same")(x)
    x = Activation(actfun)(x)
    x_reshape = core.Reshape((x._keras_shape[2], x._keras_shape[1]))(x)

    x = Dropout(dropout4)(x)
    x_reshape = Dropout(dropout5)(x_reshape)

    decoder_x = Attention(hidden=attentionhidden_x, activation='linear',
                          init='he_normal', W_regularizer=l1(attention_reg_x))  # success
    decoded_x = decoder_x(x)
    output_x = myFlatten(x._keras_shape[2])(decoded_x)

    decoder_xr = Attention(hidden=attentionhidden_xr, activation='linear',
                           init='he_normal', W_regularizer=l1(attention_reg_xr))
    decoded_xr = decoder_xr(x_reshape)
    output_xr = myFlatten(x_reshape._keras_shape[2])(decoded_xr)

    output = merge([output_x, output_xr], mode='concat')
    output = Dropout(dropout6)(output)
    output = Dense(dense_size1, kernel_initializer='he_normal', activation='relu')(output)
    output = Dropout(dropout_dense1)(output)
    output = Dense(dense_size2, activation="relu", kernel_initializer='he_normal')(output)
    output = Dropout(dropout_dense2)(output)
    out = Dense(nb_classes, kernel_initializer='he_normal', activation='softmax')(output)
    cp_model = Model(input, out)

    return cp_model
Example #16
Source File: models.py From ResGAN with MIT License
def time_glot_model(timesteps=128, input_dim=22, output_dim=400, model_name="time_glot_model"):

    ac_input = Input(shape=(timesteps, input_dim), name="ac_input")

    x_t = ac_input
    x_t = GRU(50, activation='relu', kernel_initializer='glorot_normal',
              return_sequences=False, unroll=False)(x_t)

    x = x_t
    x = Dense(output_dim)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Reshape((output_dim, 1))(x)

    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = Convolution1D(filters=1, kernel_size=15, padding='same', strides=1)(x)

    # remove singleton outer dimension
    x = Reshape((output_dim,))(x)

    x_t = x
    x_fft = fft_layer(x)

    model = Model(inputs=[ac_input], outputs=[x_t, x_fft], name=model_name)

    return model
Example #17
Source File: models.py From ResGAN with MIT License
def generator(input_dim=400, ac_dim=22, output_dim=400):

    pls_input = Input(shape=(input_dim,), name="pls_input")
    noise_input = Input(shape=(input_dim,), name="noise_input")

    pls = Reshape((input_dim, 1))(pls_input)
    noise = Reshape((input_dim, 1))(noise_input)

    x = concatenate([pls, noise], axis=2)  # concat as different channels

    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = concatenate([pls, x], axis=2)  # concat as different channels
    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = concatenate([pls, x], axis=2)  # concat as different channels
    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = concatenate([pls, x], axis=2)  # concat as different channels
    x = Convolution1D(filters=1, kernel_size=15, padding='same', strides=1)(x)
    x = Activation('tanh')(x)

    # force additivity
    x = add([pls, x])

    # remove singleton outer dimension
    x = Reshape((output_dim,))(x)

    # add fft channel to output
    x_fft = fft_layer(x)

    model = Model(inputs=[pls_input, noise_input], outputs=[x, x_fft], name="generator")

    return model
Example #18
Source File: models.py From ResGAN with MIT License
def discriminator(input_dim=400):

    pls_input = Input(shape=(input_dim,), name="pls_input")
    fft_input = Input(shape=(input_dim,), name="fft_input")

    x = Reshape((input_dim, 1))(pls_input)
    x_fft = Reshape((input_dim, 1))(fft_input)

    x = concatenate([x, x_fft], axis=2)  # concat as different channels

    # input shape batch_size x 1 (number of channels) x 400 (length of pulse)
    x = Convolution1D(filters=64, kernel_size=7, strides=3)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    # shape [batch_size x 64 x 132]
    x = Convolution1D(filters=128, kernel_size=7, strides=3)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    # shape [batch_size x 128 x 42]
    x = Convolution1D(filters=256, kernel_size=7, strides=3)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    peek_output = x  # used for generator training regularization

    # shape [batch_size x 256 x 12]
    x = Convolution1D(filters=128, kernel_size=5, strides=2)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    # shape [batch_size x 128 x 4]
    #nn.Sigmoid()  # use sigmoid for normal gan, commented out for LS-GAN
    x = Convolution1D(filters=1, kernel_size=3, strides=2)(x)

    # shape [batch_size x 1 x 1]
    x = Reshape((1,))(x)

    model = Model(inputs=[pls_input, fft_input], outputs=[x, peek_output], name="discriminator")

    return model
Example #19
Source File: models.py From chemical_vae with Apache License 2.0
def encoder_model(params):
    # K_params is dictionary of keras variables
    x_in = Input(shape=(params['MAX_LEN'], params['NCHARS']), name='input_molecule_smi')

    # Convolution layers
    x = Convolution1D(int(params['conv_dim_depth'] * params['conv_d_growth_factor']),
                      int(params['conv_dim_width'] * params['conv_w_growth_factor']),
                      activation='tanh',
                      name="encoder_conv0")(x_in)
    if params['batchnorm_conv']:
        x = BatchNormalization(axis=-1, name="encoder_norm0")(x)

    for j in range(1, params['conv_depth'] - 1):
        x = Convolution1D(int(params['conv_dim_depth'] * params['conv_d_growth_factor'] ** (j)),
                          int(params['conv_dim_width'] * params['conv_w_growth_factor'] ** (j)),
                          activation='tanh',
                          name="encoder_conv{}".format(j))(x)
        if params['batchnorm_conv']:
            x = BatchNormalization(axis=-1, name="encoder_norm{}".format(j))(x)

    x = Flatten()(x)

    # Middle layers
    if params['middle_layer'] > 0:
        middle = Dense(int(params['hidden_dim'] *
                           params['hg_growth_factor'] ** (params['middle_layer'] - 1)),
                       activation=params['activation'], name='encoder_dense0')(x)
        if params['dropout_rate_mid'] > 0:
            middle = Dropout(params['dropout_rate_mid'])(middle)
        if params['batchnorm_mid']:
            middle = BatchNormalization(axis=-1, name='encoder_dense0_norm')(middle)

        for i in range(2, params['middle_layer'] + 1):
            middle = Dense(int(params['hidden_dim'] *
                               params['hg_growth_factor'] ** (params['middle_layer'] - i)),
                           activation=params['activation'],
                           name='encoder_dense{}'.format(i))(middle)
            if params['dropout_rate_mid'] > 0:
                middle = Dropout(params['dropout_rate_mid'])(middle)
            if params['batchnorm_mid']:
                middle = BatchNormalization(axis=-1,
                                            name='encoder_dense{}_norm'.format(i))(middle)
    else:
        middle = x

    z_mean = Dense(params['hidden_dim'], name='z_mean_sample')(middle)

    # return both mean and last encoding layer for std dev sampling
    return Model(x_in, [z_mean, middle], name="encoder")
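
To call this builder, params must contain at least the keys the function reads; the dictionary below is a hypothetical sketch with placeholder values (not the chemical_vae defaults), included only to show the expected structure.

# Hypothetical params covering only the keys encoder_model() reads above;
# values are illustrative placeholders, not the chemical_vae project defaults.
params = {
    'MAX_LEN': 120,
    'NCHARS': 35,
    'conv_dim_depth': 8,
    'conv_d_growth_factor': 1.15,
    'conv_dim_width': 8,
    'conv_w_growth_factor': 1.15,
    'conv_depth': 4,
    'batchnorm_conv': True,
    'middle_layer': 1,
    'hidden_dim': 100,
    'hg_growth_factor': 1.5,
    'activation': 'tanh',
    'dropout_rate_mid': 0.0,
    'batchnorm_mid': False,
}

encoder = encoder_model(params)
encoder.summary()  # Model with outputs [z_mean, middle]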