Python keras.models.Sequential() Examples
The following are 30 code examples of keras.models.Sequential(), drawn from open-source projects and ordered by community votes. The originating project and source file are noted above each example. You may also want to check out the other available functions and classes of the keras.models module.
Example #1
Source File: recurrent.py From keras-anomaly-detection with MIT License | 18 votes |
def create_model(time_window_size, metric):
    model = Sequential()
    model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu',
                     input_shape=(time_window_size, 1)))
    model.add(MaxPooling1D(pool_size=4))
    model.add(LSTM(64))
    model.add(Dense(units=time_window_size, activation='linear'))

    model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
    # model.compile(optimizer="sgd", loss="mse", metrics=[metric])
    print(model.summary())
    return model
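A minimal usage sketch follows; the window size and metric name are hypothetical placeholders, not values taken from the repository:

# Hypothetical call: a 64-step window, scored with mean absolute error.
model = create_model(time_window_size=64, metric='mae')
# The model maps (batch, 64, 1) windows back to 64 values, so it can be
# trained autoencoder-style and its reconstruction error used for anomaly scoring.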
Example #2
Source File: sgan.py From Keras-GAN with MIT License | 8 votes |
def build_generator(self):
    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(1, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #3
Source File: example.py From residual_block_keras with GNU General Public License v3.0 | 7 votes |
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    model = keras.models.Sequential()
    first_layer_channel = 128
    if is_mnist:  # size to be changed to 32,32
        model.add(ZeroPadding2D((2, 2), input_shape=(img_channels, img_rows, img_cols)))  # resize (28,28)-->(32,32)
        # the first conv
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))

    # [residual-based Conv layers]
    residual_blocks = design_for_residual_blocks(num_channel_input=first_layer_channel)
    model.add(residual_blocks)
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))

    # [Classifier]
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    # [END]
    return model
Example #4
Source File: cogan.py From Keras-GAN with MIT License | 7 votes |
def build_discriminators(self):
    img1 = Input(shape=self.img_shape)
    img2 = Input(shape=self.img_shape)

    # Shared discriminator layers
    model = Sequential()
    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))

    img1_embedding = model(img1)
    img2_embedding = model(img2)

    # Discriminator 1
    validity1 = Dense(1, activation='sigmoid')(img1_embedding)
    # Discriminator 2
    validity2 = Dense(1, activation='sigmoid')(img2_embedding)

    return Model(img1, validity1), Model(img2, validity2)
Example #5
Source File: context_encoder.py From Keras-GAN with MIT License | 7 votes |
def build_discriminator(self):
    model = Sequential()

    model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.missing_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.missing_shape)
    validity = model(img)

    return Model(img, validity)
Example #6
Source File: reaction.py From armchair-expert with MIT License | 6 votes |
def __init__(self, path: str = None, use_gpu=False):
    import tensorflow as tf
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.backend import set_session

    self.model = Sequential()
    self.model.add(Dense(AOLReactionFeatureAnalyzer.NUM_FEATURES, activation='relu',
                         input_dim=AOLReactionFeatureAnalyzer.NUM_FEATURES))
    self.model.add(Dense(AOLReactionFeatureAnalyzer.NUM_FEATURES - 2, activation='relu'))
    self.model.add(Dense(1, activation='sigmoid'))
    self.model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

    if use_gpu:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))
Example #7
Source File: lsgan.py From Keras-GAN with MIT License | 6 votes |
def build_generator(self):
    model = Sequential()

    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #8
Source File: bgan.py From Keras-GAN with MIT License | 6 votes |
def build_discriminator(self):
    model = Sequential()

    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.img_shape)
    validity = model(img)

    return Model(img, validity)
Example #9
Source File: HandWritingRecognition.py From Jtyoui with MIT License | 6 votes |
def nn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize
    x_train = x_train.reshape(x_train.shape[0], -1) / 255.
    # One-hot encode
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)
    # constant(value=1.) is a custom constant initializer; constant(value=1.) === one()
    # Build the model: 784 input neurons, 10 output neurons
    model = Sequential([
        Dense(units=200, input_dim=784, bias_initializer=constant(value=1.), activation=tanh),
        Dense(units=100, bias_initializer=one(), activation=tanh),
        Dense(units=10, bias_initializer=one(), activation=softmax),
    ])
    opt = SGD(lr=0.2, clipnorm=1.)  # optimizer
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['acc', 'mae'])  # compile
    model.fit(x_train, y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5')
Example #10
Source File: BuildModel.py From HDLTex with MIT License | 6 votes |
def buildModel_DNN(Shape, nClasses, nLayers=3, Number_Node=100, dropout=0.5):
    """
    buildModel_DNN(Shape, nClasses, nLayers=3, Number_Node=100, dropout=0.5)
    Build a deep neural network (multi-layer perceptron) model for text classification.
    Shape: size of the input feature space
    nClasses: number of classes
    nLayers: number of hidden layers
    Number_Node: number of units in each hidden layer
    dropout: dropout rate, used to mitigate overfitting
    """
    model = Sequential()
    model.add(Dense(Number_Node, input_dim=Shape))
    model.add(Dropout(dropout))
    for i in range(0, nLayers):
        model.add(Dense(Number_Node, activation='relu'))
        model.add(Dropout(dropout))
    model.add(Dense(nClasses, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy'])
    return model
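A hedged usage sketch; the feature count and class count below are made up for illustration:

# Hypothetical: 5000 TF-IDF features, 10 document classes.
dnn = buildModel_DNN(Shape=5000, nClasses=10, nLayers=3, Number_Node=100, dropout=0.5)
dnn.summary()
# sparse_categorical_crossentropy expects integer class labels, e.g.:
# dnn.fit(X_train, y_train, epochs=5, batch_size=128)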
Example #11
Source File: cnn_main.py From Convolutional-Networks-for-Stock-Predicting with MIT License | 6 votes |
def create_model():
    model = Sequential()

    model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(100, 100, 3)))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    return model
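The function returns an uncompiled network. A minimal training sketch on random stand-in data, using the old Keras 1 API that the example itself uses; the optimizer choice and dummy arrays are assumptions, not the repository's pipeline:

import numpy as np
from keras.utils import np_utils

model = create_model()
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
x = np.random.rand(8, 100, 100, 3).astype('float32')         # dummy batch of 100x100 RGB images
y = np_utils.to_categorical(np.random.randint(2, size=8), 2)  # dummy binary labels
model.fit(x, y, batch_size=4, nb_epoch=1)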
Example #12
Source File: chapter_06_002.py From Python-Deep-Learning-SE with MIT License | 6 votes |
def build_discriminator():
    """
    Build discriminator network
    """
    model = Sequential([
        Flatten(input_shape=(28, 28, 1)),
        Dense(256),
        LeakyReLU(alpha=0.2),
        Dense(128),
        LeakyReLU(alpha=0.2),
        Dense(1, activation='sigmoid'),
    ], name='discriminator')

    model.summary()

    image = Input(shape=(28, 28, 1))
    output = model(image)

    return Model(image, output)
Example #13
Source File: NER.py From Jtyoui with MIT License | 6 votes |
def train_model():
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    n = np.array(label, dtype=np.float)
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True, input_length=length,
                  weights=[embedding_matrix], trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path)
Example #14
Source File: structure.py From armchair-expert with MIT License | 6 votes |
def __init__(self, use_gpu: bool = False):
    import tensorflow as tf
    from keras.models import Sequential
    from keras.layers import Dense, Embedding
    from keras.layers import LSTM
    from keras.backend import set_session

    latent_dim = StructureModel.SEQUENCE_LENGTH * 8

    model = Sequential()
    model.add(Embedding(StructureFeatureAnalyzer.NUM_FEATURES, StructureFeatureAnalyzer.NUM_FEATURES,
                        input_length=StructureModel.SEQUENCE_LENGTH))
    model.add(LSTM(latent_dim, dropout=0.2, return_sequences=False))
    model.add(Dense(StructureFeatureAnalyzer.NUM_FEATURES, activation='softmax'))
    model.summary()
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    self.model = model

    if use_gpu:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))
Example #15
Source File: mnist.py From blackbox-attacks with MIT License | 6 votes |
def modelD():
    model = Sequential()

    model.add(Flatten(input_shape=(FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS)))

    model.add(Dense(300, init='he_normal', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(300, init='he_normal', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(300, init='he_normal', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(300, init='he_normal', activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
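Note that modelD returns raw logits with no final activation and no compile step; in the source repository the loss is applied to the logits elsewhere. A hedged sketch of training it standalone (the added softmax and compile settings are assumptions):

from keras.layers import Activation

model = modelD()
model.add(Activation('softmax'))  # assumption: squash logits for standard training
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])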
Example #16
Source File: sequential.py From keras2pmml with MIT License | 6 votes |
def setUp(self):
    iris = load_iris()
    theano.config.floatX = 'float32'
    X = iris.data.astype(theano.config.floatX)
    y = iris.target.astype(np.int32)
    y_ohe = np_utils.to_categorical(y)

    model = Sequential()
    model.add(Dense(input_dim=X.shape[1], output_dim=5, activation='tanh'))
    model.add(Dense(input_dim=5, output_dim=y_ohe.shape[1], activation='sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    model.fit(X, y_ohe, nb_epoch=10, batch_size=1, verbose=3, validation_data=None)

    params = {'copyright': 'Václav Čadek', 'model_name': 'Iris Model'}
    self.model = model
    self.pmml = keras2pmml(self.model, **params)
    self.num_inputs = self.model.input_shape[1]
    self.num_outputs = self.model.output_shape[1]
    self.num_connection_layers = len(self.model.layers)
    self.features = ['x{}'.format(i) for i in range(self.num_inputs)]
    self.class_values = ['y{}'.format(i) for i in range(self.num_outputs)]
Example #17
Source File: models.py From tartarus with MIT License | 6 votes |
def get_model_41(params):
    embedding_weights = pickle.load(open("../data/datasets/train_data/embedding_weights_w2v-google_MSD-AG.pk", "rb"))
    # main sequential model
    model = Sequential()
    model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'],
                        input_length=params['sequence_length'], weights=embedding_weights))
    # model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(LSTM(2048))
    # model.add(Dropout(params['dropout_prob'][1]))
    model.add(Dense(output_dim=params["n_out"], init="uniform"))
    model.add(Activation(params['final_activation']))
    logging.debug("Output CNN: %s" % str(model.output_shape))
    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))
    return model

# CRNN Arch for audio
Example #18
Source File: ccgan.py From Keras-GAN with MIT License | 6 votes |
def build_discriminator(self):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape))
    model.add(LeakyReLU(alpha=0.8))
    model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(InstanceNormalization())
    model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(InstanceNormalization())

    model.summary()

    img = Input(shape=self.img_shape)
    features = model(img)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)

    label = Flatten()(features)
    label = Dense(self.num_classes + 1, activation="softmax")(label)

    return Model(img, [validity, label])
Example #19
Source File: wgan_gp.py From Keras-GAN with MIT License | 6 votes |
def build_generator(self):
    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #20
Source File: mnist.py From blackbox-attacks with MIT License | 6 votes |
def modelA():
    model = Sequential()
    model.add(Conv2D(64, (5, 5), padding='valid'))
    model.add(Activation('relu'))

    model.add(Conv2D(64, (5, 5)))
    model.add(Activation('relu'))

    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))

    model.add(Dropout(0.5))
    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
Example #21
Source File: infogan.py From Keras-GAN with MIT License | 6 votes |
def build_generator(self):
    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
    model.add(Activation("tanh"))

    gen_input = Input(shape=(self.latent_dim,))
    img = model(gen_input)

    model.summary()

    return Model(gen_input, img)
Example #22
Source File: lsgan.py From Keras-GAN with MIT License | 6 votes |
def build_discriminator(self):
    model = Sequential()

    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    # (!!!) No softmax
    model.add(Dense(1))
    model.summary()

    img = Input(shape=self.img_shape)
    validity = model(img)

    return Model(img, validity)
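The missing sigmoid/softmax is deliberate: LSGAN scores real and fake samples with a least-squares objective on the raw output. A sketch of the matching compile step, as it would appear inside the class's constructor (the optimizer settings are assumptions):

from keras.optimizers import Adam

discriminator = self.build_discriminator()
discriminator.compile(loss='mse',  # least-squares GAN objective on unsquashed output
                      optimizer=Adam(0.0002, 0.5),
                      metrics=['accuracy'])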
Example #23
Source File: mnist.py From blackbox-attacks with MIT License | 6 votes |
def modelB():
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS)))
    model.add(Convolution2D(64, 8, 8, subsample=(2, 2), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 6, 6, subsample=(2, 2), border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 5, 5, subsample=(1, 1)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
Example #24
Source File: keras_sql_udf_test.py From spark-deep-learning with Apache License 2.0 | 6 votes |
def test_simple_keras_udf(self):
    """ Simple Keras sequential model """
    # Notice that the input layer for an image UDF model
    # must be of shape (width, height, numChannels).
    # The leading batch size is taken care of by Keras.
    with IsolatedSession(using_keras=True) as issn:
        model = Sequential()
        # Make the test model simpler to increase the stability of Travis tests
        model.add(Flatten(input_shape=(640, 480, 3)))
        # model.add(Dense(64, activation='relu'))
        model.add(Dense(16, activation='softmax'))

        # Initialize the variables
        init_op = tf.global_variables_initializer()
        issn.run(init_op)
        makeGraphUDF(issn.graph,
                     'my_keras_model_udf',
                     model.outputs,
                     {tfx.op_name(model.inputs[0], issn.graph): 'image_col'})
        # Run the training procedure
        # Export the graph in this IsolatedSession as a GraphFunction
        # gfn = issn.asGraphFunction(model.inputs, model.outputs)
        fh_name = "test_keras_simple_sequential_model"
        registerKerasImageUDF(fh_name, model)

    self._assert_function_exists(fh_name)
Example #25
Source File: dualgan.py From Keras-GAN with MIT License | 6 votes |
def build_generator(self):
    X = Input(shape=(self.img_dim,))

    model = Sequential()
    model.add(Dense(256, input_dim=self.img_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dropout(0.4))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dropout(0.4))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dropout(0.4))
    model.add(Dense(self.img_dim, activation='tanh'))

    X_translated = model(X)

    return Model(X, X_translated)
Example #26
Source File: mnist.py From blackbox-attacks with MIT License | 6 votes |
def modelC():
    model = Sequential()
    model.add(Convolution2D(128, 3, 3, border_mode='valid',
                            input_shape=(FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS)))
    model.add(Activation('relu'))

    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))

    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))

    model.add(Dropout(0.5))
    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
Example #27
Source File: keras_transformer_test.py From spark-deep-learning with Apache License 2.0 | 6 votes |
def test_keras_transformer_single_dim(self):
    """
    Test that KerasTransformer correctly handles single-dimensional input data.
    """
    # Construct a model for simple binary classification (with a single hidden layer)
    model = Sequential()
    input_shape = [10]
    model.add(Dense(units=10, input_shape=input_shape,
                    bias_initializer=self._getKerasModelWeightInitializer(),
                    kernel_initializer=self._getKerasModelWeightInitializer()))
    model.add(Activation('relu'))
    model.add(Dense(units=1,
                    bias_initializer=self._getKerasModelWeightInitializer(),
                    kernel_initializer=self._getKerasModelWeightInitializer()))
    model.add(Activation('sigmoid'))
    # Compare KerasTransformer output to raw Keras model output
    self._test_keras_transformer_helper(model, model_filename="keras_transformer_single_dim")
Example #28
Source File: gan.py From Keras-GAN with MIT License | 6 votes |
def build_generator(self):
    model = Sequential()

    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #29
Source File: gan.py From Keras-GAN with MIT License | 6 votes |
def build_discriminator(self):
    model = Sequential()

    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.img_shape)
    validity = model(img)

    return Model(img, validity)
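Examples #28 and #29 are the two halves of the same GAN; they are typically wired into the standard two-phase training graph inside the class's constructor. A sketch of that wiring (the optimizer hyperparameters are assumptions):

from keras.optimizers import Adam

optimizer = Adam(0.0002, 0.5)  # assumed learning rate / beta_1

self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])

self.generator = self.build_generator()
z = Input(shape=(self.latent_dim,))
img = self.generator(z)

self.discriminator.trainable = False  # freeze D while training G through the stack
validity = self.discriminator(img)

self.combined = Model(z, validity)    # stacked generator -> discriminator model
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)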
Example #30
Source File: aae.py From Keras-GAN with MIT License | 6 votes |
def build_decoder(self):
    model = Sequential()

    model.add(Dense(512, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))

    model.summary()

    z = Input(shape=(self.latent_dim,))
    img = model(z)

    return Model(z, img)