Python keras.layers.Flatten() Examples
The following are 30 code examples of keras.layers.Flatten().
You may also want to check out all available functions/classes of the module keras.layers.
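Before the project examples, here is a minimal, self-contained sketch of what Flatten does: it collapses every non-batch dimension into one, so a (batch, 4, 4, 8) tensor becomes (batch, 128). The shapes below are illustrative and not taken from any of the projects.

from keras.layers import Flatten, Input
from keras.models import Model

inp = Input(shape=(4, 4, 8))       # a small feature map
out = Flatten()(inp)               # 4 * 4 * 8 = 128 features per sample
model = Model(inp, out)
model.summary()                    # output shape: (None, 128)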
Example #1
Source File: train_ann.py From subsync with Apache License 2.0
def ann_model(input_shape):
    inp = Input(shape=input_shape, name='mfcc_in')
    model = inp

    model = Conv1D(filters=12, kernel_size=(3), activation='relu')(model)
    model = Conv1D(filters=12, kernel_size=(3), activation='relu')(model)
    model = Flatten()(model)

    model = Dense(56)(model)
    model = Activation('relu')(model)
    model = BatchNormalization()(model)
    model = Dropout(0.2)(model)

    model = Dense(28)(model)
    model = Activation('relu')(model)
    model = BatchNormalization()(model)

    model = Dense(1)(model)
    model = Activation('sigmoid')(model)

    model = Model(inp, model)
    return model
Example #2
Source File: context_encoder.py From Keras-GAN with MIT License
def build_discriminator(self):
    model = Sequential()

    model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.missing_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.missing_shape)
    validity = model(img)

    return Model(img, validity)
Example #3
Source File: cogan.py From Keras-GAN with MIT License
def build_discriminators(self):
    img1 = Input(shape=self.img_shape)
    img2 = Input(shape=self.img_shape)

    # Shared discriminator layers
    model = Sequential()
    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))

    img1_embedding = model(img1)
    img2_embedding = model(img2)

    # Discriminator 1
    validity1 = Dense(1, activation='sigmoid')(img1_embedding)
    # Discriminator 2
    validity2 = Dense(1, activation='sigmoid')(img2_embedding)

    return Model(img1, validity1), Model(img2, validity2)
Example #4
Source File: breakout_a3c.py From reinforcement-learning with MIT License
def build_model(self):
    input = Input(shape=self.state_size)
    conv = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(input)
    conv = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(conv)
    conv = Flatten()(conv)
    fc = Dense(256, activation='relu')(conv)
    policy = Dense(self.action_size, activation='softmax')(fc)
    value = Dense(1, activation='linear')(fc)

    actor = Model(inputs=input, outputs=policy)
    critic = Model(inputs=input, outputs=value)

    actor._make_predict_function()
    critic._make_predict_function()

    actor.summary()
    critic.summary()

    return actor, critic

# make loss function for Policy Gradient
# [log(action probability) * advantages] will be input for the back prop
# we add entropy of action probability to loss
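The three comment lines above describe the actor update defined after this method in the source file. A minimal sketch of such a policy-gradient loss with an entropy bonus, assuming it runs inside the agent class (so self.actor and self.action_size are in scope), that K is the Keras backend, and that the placeholder names and the 0.01 entropy weight are illustrative choices rather than the project's exact values:

from keras import backend as K

# Hypothetical sketch of the loss the comments above describe.
action = K.placeholder(shape=[None, self.action_size])    # one-hot actions taken
advantages = K.placeholder(shape=[None, ])                # advantage estimates

policy = self.actor.output
action_prob = K.sum(action * policy, axis=1)              # pi(a|s) for the taken action
cross_entropy = K.log(action_prob + 1e-10) * advantages   # log pi(a|s) * A(s, a)
loss = -K.sum(cross_entropy)                              # maximize by minimizing the negative

entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)   # negative entropy of pi
loss = loss + 0.01 * K.sum(entropy)                       # entropy bonus encourages exploration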
Example #5
Source File: ccgan.py From Keras-GAN with MIT License
def build_discriminator(self):
    img = Input(shape=self.img_shape)

    model = Sequential()
    model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape))
    model.add(LeakyReLU(alpha=0.8))
    model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(InstanceNormalization())
    model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(InstanceNormalization())
    model.summary()

    img = Input(shape=self.img_shape)
    features = model(img)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)

    label = Flatten()(features)
    label = Dense(self.num_classes+1, activation="softmax")(label)

    return Model(img, [validity, label])
Example #6
Source File: bigan.py From Keras-GAN with MIT License
def build_encoder(self):
    model = Sequential()

    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(self.latent_dim))
    model.summary()

    img = Input(shape=self.img_shape)
    z = model(img)

    return Model(img, z)
Example #7
Source File: bigan.py From Keras-GAN with MIT License
def build_discriminator(self):
    z = Input(shape=(self.latent_dim, ))
    img = Input(shape=self.img_shape)
    d_in = concatenate([z, Flatten()(img)])

    model = Dense(1024)(d_in)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    validity = Dense(1, activation="sigmoid")(model)

    return Model([z, img], validity)
Example #8
Source File: pixelda.py From Keras-GAN with MIT License
def build_classifier(self):

    def clf_layer(layer_input, filters, f_size=4, normalization=True):
        """Classifier layer"""
        d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if normalization:
            d = InstanceNormalization()(d)
        return d

    img = Input(shape=self.img_shape)

    c1 = clf_layer(img, self.cf, normalization=False)
    c2 = clf_layer(c1, self.cf*2)
    c3 = clf_layer(c2, self.cf*4)
    c4 = clf_layer(c3, self.cf*8)
    c5 = clf_layer(c4, self.cf*8)

    class_pred = Dense(self.num_classes, activation='softmax')(Flatten()(c5))

    return Model(img, class_pred)
Example #9
Source File: bgan.py From Keras-GAN with MIT License
def build_discriminator(self):
    model = Sequential()

    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.img_shape)
    validity = model(img)

    return Model(img, validity)
Example #10
Source File: gan.py From Keras-GAN with MIT License
def build_discriminator(self):
    model = Sequential()

    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.img_shape)
    validity = model(img)

    return Model(img, validity)
Example #11
Source File: networks.py From C51-DDQN-Keras with MIT License
def value_distribution_network(input_shape, num_atoms, action_size, learning_rate):
    """Model Value Distribution

    With States as inputs and output Probability Distributions for all Actions
    """
    state_input = Input(shape=(input_shape))
    cnn_feature = Convolution2D(32, 8, 8, subsample=(4,4), activation='relu')(state_input)
    cnn_feature = Convolution2D(64, 4, 4, subsample=(2,2), activation='relu')(cnn_feature)
    cnn_feature = Convolution2D(64, 3, 3, activation='relu')(cnn_feature)
    cnn_feature = Flatten()(cnn_feature)
    cnn_feature = Dense(512, activation='relu')(cnn_feature)

    distribution_list = []
    for i in range(action_size):
        distribution_list.append(Dense(num_atoms, activation='softmax')(cnn_feature))

    model = Model(input=state_input, output=distribution_list)

    adam = Adam(lr=learning_rate)
    model.compile(loss='categorical_crossentropy', optimizer=adam)

    return model
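In C51, action values are recovered from the predicted distributions by taking an expectation over a fixed support of atoms. A minimal sketch of greedy action selection on top of the network above; the v_min/v_max bounds and atom count are illustrative assumptions, not values from the project:

import numpy as np

v_min, v_max, num_atoms = -10.0, 10.0, 51          # hypothetical support bounds
z = np.linspace(v_min, v_max, num_atoms)           # fixed support atoms z_i

def greedy_action(model, state):
    dists = model.predict(state)                   # one (1, num_atoms) softmax per action
    q_values = [np.dot(z, d[0]) for d in dists]    # Q(s, a) = sum_i z_i * p_i(s, a)
    return int(np.argmax(q_values))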
Example #12
Source File: architectures.py From deepJDOT with MIT License
def assda_feat_ext(main_input, l2_weight=0.0, small_model=False):
    padding = 'same'
    maxpool_strides = 2 if small_model else 1
    net = dnn.Convolution2D(32, (3, 3), padding=padding, activation='relu',
                            kernel_regularizer=dnn.keras.regularizers.l2(l2_weight))(main_input)
    net = dnn.Convolution2D(32, (3, 3), padding=padding, activation='relu',
                            kernel_regularizer=dnn.keras.regularizers.l2(l2_weight))(net)
    net = dnn.MaxPooling2D(pool_size=(2, 2), strides=maxpool_strides)(net)
    # net = dnn.Dropout(0.5)(net)
    net = dnn.Convolution2D(64, (3, 3), padding=padding, activation='relu',
                            kernel_regularizer=dnn.keras.regularizers.l2(l2_weight))(net)
    net = dnn.Convolution2D(64, (3, 3), padding=padding, activation='relu',
                            kernel_regularizer=dnn.keras.regularizers.l2(l2_weight))(net)
    net = dnn.MaxPooling2D(pool_size=(2, 2), strides=maxpool_strides)(net)
    # net = dnn.Dropout(0.5)(net)
    net = dnn.Convolution2D(128, (3, 3), padding=padding, activation='relu',
                            kernel_regularizer=dnn.keras.regularizers.l2(l2_weight))(net)
    net = dnn.Convolution2D(128, (3, 3), padding=padding, activation='relu',
                            kernel_regularizer=dnn.keras.regularizers.l2(l2_weight))(net)
    net = dnn.MaxPooling2D(pool_size=(2, 2), strides=maxpool_strides)(net)
    #
    net = dnn.Flatten()(net)
    net = dnn.Dense(128, activation='sigmoid',
                    kernel_regularizer=dnn.keras.regularizers.l2(l2_weight), name='feat_ext')(net)
    return net
Example #13
Source File: bigan.py From Keras-BiGAN with MIT License
def encoder(self):
    if self.E:
        return self.E

    inp = Input(shape = [im_size, im_size, 3])

    x = d_block(inp, 1 * cha)   #64
    x = d_block(x, 2 * cha)     #32
    x = d_block(x, 3 * cha)     #16
    x = d_block(x, 4 * cha)     #8
    x = d_block(x, 8 * cha)     #4
    x = d_block(x, 16 * cha, p = False)   #4

    x = Flatten()(x)

    x = Dense(16 * cha, kernel_initializer = 'he_normal')(x)
    x = LeakyReLU(0.2)(x)

    x = Dense(latent_size, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x)

    self.E = Model(inputs = inp, outputs = x)

    return self.E
Example #14
Source File: parallel_model.py From dataiku-contrib with Apache License 2.0
def build_model(x_train, num_classes):
    # Reset default graph. Keras leaves old ops in the graph,
    # which are ignored for execution but clutter graph
    # visualization in TensorBoard.
    tf.reset_default_graph()

    inputs = KL.Input(shape=x_train.shape[1:], name="input_image")
    x = KL.Conv2D(32, (3, 3), activation='relu', padding="same", name="conv1")(inputs)
    x = KL.Conv2D(64, (3, 3), activation='relu', padding="same", name="conv2")(x)
    x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x)
    x = KL.Flatten(name="flat1")(x)
    x = KL.Dense(128, activation='relu', name="dense1")(x)
    x = KL.Dense(num_classes, activation='softmax', name="dense2")(x)
    return KM.Model(inputs, x, "digit_classifier_model")

# Load MNIST Data
Example #15
Source File: breakout_a3c.py From reinforcement-learning with MIT License
def build_localmodel(self):
    input = Input(shape=self.state_size)
    conv = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(input)
    conv = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(conv)
    conv = Flatten()(conv)
    fc = Dense(256, activation='relu')(conv)
    policy = Dense(self.action_size, activation='softmax')(fc)
    value = Dense(1, activation='linear')(fc)

    actor = Model(inputs=input, outputs=policy)
    critic = Model(inputs=input, outputs=value)

    actor._make_predict_function()
    critic._make_predict_function()

    actor.set_weights(self.actor.get_weights())
    critic.set_weights(self.critic.get_weights())

    actor.summary()
    critic.summary()

    return actor, critic
Example #16
Source File: localizer.py From cnn-levelset with MIT License
def __init__(self, model_path=None):
    if model_path is not None:
        self.model = self.load_model(model_path)
    else:
        # VGG16 last conv features
        inputs = Input(shape=(7, 7, 512))
        x = Convolution2D(128, 1, 1)(inputs)
        x = Flatten()(x)

        # Cls head
        h_cls = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
        h_cls = Dropout(p=0.5)(h_cls)
        cls_head = Dense(20, activation='softmax', name='cls')(h_cls)

        # Reg head
        h_reg = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
        h_reg = Dropout(p=0.5)(h_reg)
        reg_head = Dense(4, activation='linear', name='reg')(h_reg)

        # Joint model
        self.model = Model(input=inputs, output=[cls_head, reg_head])
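Because the joint model has two named outputs, a caller would typically compile it with one loss per head. A hypothetical example (the class name, optimizer, and loss choices are assumptions, not taken from the project):

localizer = Localizer()                        # hypothetical class name for this module
localizer.model.compile(
    optimizer='adam',
    loss={'cls': 'categorical_crossentropy',   # 20-way classification head
          'reg': 'mse'})                       # 4-value box-regression head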
Example #17
Source File: mnist.py From blackbox-attacks with MIT License
def modelA():
    model = Sequential()
    model.add(Conv2D(64, (5, 5), padding='valid'))
    model.add(Activation('relu'))

    model.add(Conv2D(64, (5, 5)))
    model.add(Activation('relu'))

    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))

    model.add(Dropout(0.5))
    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
Example #18
Source File: mnist.py From blackbox-attacks with MIT License
def modelB():
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS)))
    model.add(Convolution2D(64, 8, 8, subsample=(2, 2), border_mode='same'))
    model.add(Activation('relu'))

    model.add(Convolution2D(128, 6, 6, subsample=(2, 2), border_mode='valid'))
    model.add(Activation('relu'))

    model.add(Convolution2D(128, 5, 5, subsample=(1, 1)))
    model.add(Activation('relu'))

    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
Example #19
Source File: mnist.py From blackbox-attacks with MIT License
def modelC():
    model = Sequential()
    model.add(Convolution2D(128, 3, 3, border_mode='valid',
                            input_shape=(FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS)))
    model.add(Activation('relu'))

    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))

    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))

    model.add(Dropout(0.5))
    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
Example #20
Source File: mnist.py From blackbox-attacks with MIT License
def modelD():
    model = Sequential()

    model.add(Flatten(input_shape=(FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS)))

    model.add(Dense(300, init='he_normal', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(300, init='he_normal', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(300, init='he_normal', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(300, init='he_normal', activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
Example #21
Source File: play_a3c_model.py From reinforcement-learning with MIT License
def build_model(self):
    input = Input(shape=self.state_size)
    conv = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(input)
    conv = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(conv)
    conv = Flatten()(conv)
    fc = Dense(256, activation='relu')(conv)
    policy = Dense(self.action_size, activation='softmax')(fc)
    value = Dense(1, activation='linear')(fc)

    actor = Model(inputs=input, outputs=policy)
    critic = Model(inputs=input, outputs=value)

    actor.summary()
    critic.summary()

    return actor, critic
Example #22
Source File: keras_sql_udf_test.py From spark-deep-learning with Apache License 2.0
def test_simple_keras_udf(self):
    """ Simple Keras sequential model """
    # Notice that the input layer for an image UDF model
    # must be of shape (width, height, numChannels)
    # The leading batch size is taken care of by Keras
    with IsolatedSession(using_keras=True) as issn:
        model = Sequential()
        # Make the test model simpler to increase the stability of travis tests
        model.add(Flatten(input_shape=(640, 480, 3)))
        # model.add(Dense(64, activation='relu'))
        model.add(Dense(16, activation='softmax'))
        # Initialize the variables
        init_op = tf.global_variables_initializer()
        issn.run(init_op)
        makeGraphUDF(issn.graph,
                     'my_keras_model_udf',
                     model.outputs,
                     {tfx.op_name(model.inputs[0], issn.graph): 'image_col'})
        # Run the training procedure
        # Export the graph in this IsolatedSession as a GraphFunction
        # gfn = issn.asGraphFunction(model.inputs, model.outputs)
        fh_name = "test_keras_simple_sequential_model"
        registerKerasImageUDF(fh_name, model)

    self._assert_function_exists(fh_name)
Example #23
Source File: cnn_main.py From Convolutional-Networks-for-Stock-Predicting with MIT License
def create_model():
    model = Sequential()

    model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(100, 100, 3)))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(2))
    model.add(Activation('softmax'))
    return model
Example #24
Source File: play_a3c_model.py From reinforcement-learning-kr with MIT License
def build_model(self):
    input = Input(shape=self.state_size)
    conv = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(input)
    conv = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(conv)
    conv = Flatten()(conv)
    fc = Dense(256, activation='relu')(conv)
    policy = Dense(self.action_size, activation='softmax')(fc)
    value = Dense(1, activation='linear')(fc)

    actor = Model(inputs=input, outputs=policy)
    critic = Model(inputs=input, outputs=value)

    actor.summary()
    critic.summary()

    return actor, critic
Example #25
Source File: breakout_a3c.py From reinforcement-learning-kr with MIT License
def build_model(self):
    input = Input(shape=self.state_size)
    conv = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(input)
    conv = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(conv)
    conv = Flatten()(conv)
    fc = Dense(256, activation='relu')(conv)

    policy = Dense(self.action_size, activation='softmax')(fc)
    value = Dense(1, activation='linear')(fc)

    actor = Model(inputs=input, outputs=policy)
    critic = Model(inputs=input, outputs=value)

    # create the functions that predict value and policy
    actor._make_predict_function()
    critic._make_predict_function()

    actor.summary()
    critic.summary()

    return actor, critic

# function that updates the policy network
Example #26
Source File: breakout_a3c.py From reinforcement-learning-kr with MIT License
def build_local_model(self):
    input = Input(shape=self.state_size)
    conv = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(input)
    conv = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(conv)
    conv = Flatten()(conv)
    fc = Dense(256, activation='relu')(conv)

    policy = Dense(self.action_size, activation='softmax')(fc)
    value = Dense(1, activation='linear')(fc)

    local_actor = Model(inputs=input, outputs=policy)
    local_critic = Model(inputs=input, outputs=value)

    local_actor._make_predict_function()
    local_critic._make_predict_function()

    local_actor.set_weights(self.actor.get_weights())
    local_critic.set_weights(self.critic.get_weights())

    local_actor.summary()
    local_critic.summary()

    return local_actor, local_critic

# update the local network with the global network's weights
Example #27
Source File: ae_model.py From Pix2Pose with MIT License
def DCGAN_discriminator():
    nb_filters = 64
    nb_conv = int(np.floor(np.log(128) / np.log(2)))
    list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]

    input_img = Input(shape=(128, 128, 3))
    x = Conv2D(list_filters[0], (3, 3), strides=(2, 2), name="disc_conv2d_1", padding="same")(input_img)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(0.2)(x)

    # Next convs
    for i, f in enumerate(list_filters[1:]):
        name = "disc_conv2d_%s" % (i + 2)
        x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same")(x)
        x = BatchNormalization(axis=-1)(x)
        x = LeakyReLU(0.2)(x)

    x_flat = Flatten()(x)
    x_out = Dense(1, activation="sigmoid", name="disc_dense")(x_flat)

    discriminator_model = Model(inputs=input_img, outputs=[x_out])
    return discriminator_model
Example #28
Source File: keras.py From stagesepx with MIT License
def create_model(self) -> Sequential:
    """ model structure. you can override this method to build your own model """
    logger.info(f"creating keras sequential model")
    if K.image_data_format() == "channels_first":
        input_shape = (1, *self.data_size)
    else:
        input_shape = (*self.data_size, 1)

    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(6))
    model.add(Activation("softmax"))

    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer="rmsprop",
        metrics=["accuracy"],
    )
    logger.info("model created")
    return model
Example #29
Source File: caption_generator.py From caption_generator with MIT License
def create_model(self, ret_model = False):
    # base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # base_model.trainable = False

    image_model = Sequential()
    # image_model.add(base_model)
    # image_model.add(Flatten())
    image_model.add(Dense(EMBEDDING_DIM, input_dim=4096, activation='relu'))
    image_model.add(RepeatVector(self.max_cap_len))

    lang_model = Sequential()
    lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_cap_len))
    lang_model.add(LSTM(256, return_sequences=True))
    lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

    model = Sequential()
    model.add(Merge([image_model, lang_model], mode='concat'))
    model.add(LSTM(1000, return_sequences=False))
    model.add(Dense(self.vocab_size))
    model.add(Activation('softmax'))

    print("Model created!")

    if ret_model == True:
        return model

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
Example #30
Source File: BBalpha_dropout.py From Dropout_BBalpha with MIT License
def get_logit_cnn_layers(nb_units, p, wd, nb_classes, layers = [], dropout = False):
    # number of convolutional filters to use
    nb_filters = 32
    # size of pooling area for max pooling
    pool_size = (2, 2)
    # convolution kernel size
    kernel_size = (3, 3)

    if dropout == 'MC':
        D = Dropout_mc
    if dropout == 'pW':
        D = pW
    if dropout == 'none':
        D = Identity

    layers.append(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                border_mode='valid', W_regularizer=l2(wd)))
    layers.append(Activation('relu'))
    layers.append(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                W_regularizer=l2(wd)))
    layers.append(Activation('relu'))
    layers.append(MaxPooling2D(pool_size=pool_size))

    layers.append(Flatten())
    layers.append(D(p))
    layers.append(Dense(nb_units, W_regularizer=l2(wd)))
    layers.append(Activation('relu'))
    layers.append(D(p))
    layers.append(Dense(nb_classes, W_regularizer=l2(wd)))
    return layers
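Since this helper returns a plain list of layers rather than a model, the caller is expected to chain them onto an input tensor. A minimal usage sketch; the input shape and hyperparameter values below are illustrative assumptions, not the project's settings:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(28, 28, 1))                     # e.g. MNIST; shape is an assumption
layers = get_logit_cnn_layers(nb_units=128, p=0.5, wd=1e-4,
                              nb_classes=10, layers=[], dropout='MC')
x = inp
for layer in layers:
    x = layer(x)                                   # apply each layer in order
model = Model(inputs=inp, outputs=x)               # outputs are logits (no softmax)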