Python tensorflow.keras.models.Sequential() Examples
The following are 30 code examples of tensorflow.keras.models.Sequential().
You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the module tensorflow.keras.models.
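Before the project-specific examples below, here is a minimal, self-contained sketch of the typical Sequential workflow (build, compile, fit, predict). The layer sizes, activations, and synthetic data are illustrative assumptions only; they are not taken from any of the projects listed.

import numpy as np
from tensorflow.keras import layers, models

# Build a small MLP classifier layer by layer (sizes are arbitrary for illustration).
model = models.Sequential()
model.add(layers.Dense(32, activation='relu', input_shape=(16,)))
model.add(layers.Dense(3, activation='softmax'))

# Compile with an optimizer, a loss, and metrics.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Train on random data and run a prediction; only the shapes matter here.
x = np.random.random((64, 16))
y = np.random.randint(0, 3, size=(64,))
model.fit(x, y, epochs=1, batch_size=8, verbose=0)
print(model.predict(x[:2]).shape)  # (2, 3)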
Example #1
Source File: osnet.py From keras_imagenet with MIT License
def get_aggregation_gate(in_filters, reduction=16):
    """Get the "aggregation gate (AG)" op.

    # Arguments
        reduction: channel reduction for the hidden layer.

    # Returns
        The AG op (a models.Sequential module).
    """
    gate = models.Sequential()
    gate.add(layers.GlobalAveragePooling2D())
    gate.add(layers.Dense(in_filters // reduction, use_bias=False))
    gate.add(layers.BatchNormalization())
    gate.add(layers.Activation('relu'))
    gate.add(layers.Dense(in_filters))
    gate.add(layers.Activation('sigmoid'))
    gate.add(layers.Reshape((1, 1, -1)))  # reshape as (H, W, C)
    return gate
Example #2
Source File: rerank_terms.py From nlp-architect with Apache License 2.0
def generate_model(self, input_vector_dimension):
    """Generate MLP model.

    Args:
        input_vector_dimension (int): word emb vec length

    Returns:
    """
    mlp_model = Sequential()
    mlp_model.add(Dense(128, activation=self.activation_1, input_dim=input_vector_dimension))
    mlp_model.add(Dropout(0.5))
    mlp_model.add(Dense(64, activation=self.activation_2))
    mlp_model.add(Dropout(0.5))
    mlp_model.add(Dense(1, activation=self.activation_3))
    mlp_model.compile(metrics=["accuracy"], loss=self.loss, optimizer=self.optimizer)
    return mlp_model
Example #3
Source File: gin_conv.py From spektral with MIT License
def build(self, input_shape):
    assert len(input_shape) >= 2
    layer_kwargs = dict(
        kernel_initializer=self.kernel_initializer,
        bias_initializer=self.bias_initializer,
        kernel_regularizer=self.kernel_regularizer,
        bias_regularizer=self.bias_regularizer,
        kernel_constraint=self.kernel_constraint,
        bias_constraint=self.bias_constraint
    )
    self.mlp = Sequential([
        Dense(channels, self.mlp_activation, **layer_kwargs)
        for channels in self.mlp_hidden
    ] + [Dense(self.channels, self.activation, use_bias=self.use_bias, **layer_kwargs)])

    if self.epsilon is None:
        self.eps = self.add_weight(shape=(1,), initializer='zeros', name='eps')
    else:
        # If epsilon is given, keep it constant
        self.eps = K.constant(self.epsilon)

    self.built = True
Example #4
Source File: appnp.py From spektral with MIT License
def build(self, input_shape):
    assert len(input_shape) >= 2
    layer_kwargs = dict(
        kernel_initializer=self.kernel_initializer,
        bias_initializer=self.bias_initializer,
        kernel_regularizer=self.kernel_regularizer,
        bias_regularizer=self.bias_regularizer,
        kernel_constraint=self.kernel_constraint,
        bias_constraint=self.bias_constraint
    )
    mlp_layers = []
    for i, channels in enumerate(self.mlp_hidden):
        mlp_layers.extend([
            Dropout(self.dropout_rate),
            Dense(channels, self.mlp_activation, **layer_kwargs)
        ])
    mlp_layers.append(
        Dense(self.channels, 'linear', **layer_kwargs)
    )
    self.mlp = Sequential(mlp_layers)

    self.built = True
Example #5
Source File: test_discrete.py From keras-rl2 with MIT License
def test_double_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=True)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Example #6
Source File: test_discrete.py From keras-rl2 with MIT License
def test_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=False)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Example #7
Source File: test_continuous.py From keras-rl2 with MIT License
def test_ddpg():
    # TODO: replace this with a simpler environment where we can actually test if it finds a solution
    env = gym.make('Pendulum-v0')
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.shape[0]

    actor = Sequential()
    actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(nb_actions))
    actor.add(Activation('linear'))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
    flattened_observation = Flatten()(observation_input)
    x = Concatenate()([action_input, flattened_observation])
    x = Dense(16)(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=1000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3)
    agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic,
                      critic_action_input=action_input, memory=memory,
                      nb_steps_warmup_critic=50, nb_steps_warmup_actor=50,
                      random_process=random_process, gamma=.99, target_model_update=1e-3)
    agent.compile([Adam(lr=1e-3), Adam(lr=1e-3)])

    agent.fit(env, nb_steps=400, visualize=False, verbose=0, nb_max_episode_steps=100)
    h = agent.test(env, nb_episodes=2, visualize=False, nb_max_episode_steps=100)
    # TODO: evaluate history
Example #8
Source File: test_ddpg.py From keras-rl2 with MIT License
def test_single_ddpg_input():
    nb_actions = 2

    actor = Sequential()
    actor.add(Flatten(input_shape=(2, 3)))
    actor.add(Dense(nb_actions))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(2, 3), name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    x = Dense(1)(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = DDPGAgent(actor=actor, critic=critic, critic_action_input=action_input,
                      memory=memory, nb_actions=2, nb_steps_warmup_critic=5,
                      nb_steps_warmup_actor=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10)
Example #9
Source File: test_discrete.py From keras-rl2 with MIT License
def test_cem():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = EpisodeParameterMemory(limit=1000, window_length=1)
    dqn = CEMAgent(model=model, nb_actions=nb_actions, memory=memory)
    dqn.compile()

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=1)
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Example #10
Source File: edge_conv.py From spektral with MIT License
def build(self, input_shape):
    assert len(input_shape) >= 2
    layer_kwargs = dict(
        kernel_initializer=self.kernel_initializer,
        bias_initializer=self.bias_initializer,
        kernel_regularizer=self.kernel_regularizer,
        bias_regularizer=self.bias_regularizer,
        kernel_constraint=self.kernel_constraint,
        bias_constraint=self.bias_constraint
    )
    self.mlp = Sequential([
        Dense(channels, self.mlp_activation, **layer_kwargs)
        for channels in self.mlp_hidden
    ] + [Dense(self.channels, self.activation, use_bias=self.use_bias, **layer_kwargs)])

    self.built = True
Example #11
Source File: test_discrete.py From keras-rl2 with MIT License
def test_sarsa():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions, activation='linear'))

    policy = EpsGreedyQPolicy(eps=.1)
    sarsa = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=50, policy=policy)
    sarsa.compile(Adam(lr=1e-3))

    sarsa.fit(env, nb_steps=20000, visualize=False, verbose=0)
    policy.eps = 0.
    h = sarsa.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Example #12
Source File: ml_agent.py From Grid2Op with Mozilla Public License 2.0
def construct_q_network(self):
    # replacement of the Convolution layers by Dense layers, and change the size of the input space and output space
    # Uses the network architecture found in DeepMind paper
    self.model = Sequential()
    input_layer = Input(shape=(self.observation_size * self.training_param.NUM_FRAMES,))
    layer1 = Dense(self.observation_size * self.training_param.NUM_FRAMES)(input_layer)
    layer1 = Activation('relu')(layer1)
    layer2 = Dense(self.observation_size)(layer1)
    layer2 = Activation('relu')(layer2)
    layer3 = Dense(self.observation_size)(layer2)
    layer3 = Activation('relu')(layer3)
    layer4 = Dense(2 * self.action_size)(layer3)
    layer4 = Activation('relu')(layer4)
    output = Dense(self.action_size)(layer4)

    self.model = Model(inputs=[input_layer], outputs=[output])
    self.model.compile(loss='mse', optimizer=Adam(lr=self.lr_))

    self.target_model = Model(inputs=[input_layer], outputs=[output])
    self.target_model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
    self.target_model.set_weights(self.model.get_weights())
Example #13
Source File: keras_policy.py From rasa_core with Apache License 2.0
def __init__(self,
             featurizer: Optional[TrackerFeaturizer] = None,
             priority: int = 1,
             model: Optional[tf.keras.models.Sequential] = None,
             graph: Optional[tf.Graph] = None,
             session: Optional[tf.Session] = None,
             current_epoch: int = 0,
             max_history: Optional[int] = None,
             **kwargs: Any
             ) -> None:
    if not featurizer:
        featurizer = self._standard_featurizer(max_history)
    super(KerasPolicy, self).__init__(featurizer, priority)

    self._load_params(**kwargs)
    self.model = model
    # by default keras uses default tf graph and global tf session
    # we are going to either load them or create them in train(...)
    self.graph = graph
    self.session = session
    self.current_epoch = current_epoch
Example #14
Source File: convert_test.py From tf-encrypted with Apache License 2.0
def _keras_depthwise_conv2d_core(shape=None, data=None):
    assert shape is None or data is None
    if shape is None:
        shape = data.shape
    init = tf.keras.initializers.RandomNormal(seed=1)

    model = Sequential()
    c2d = DepthwiseConv2D(
        (3, 3),
        depthwise_initializer=init,
        data_format="channels_last",
        use_bias=False,
        input_shape=shape[1:],
    )
    model.add(c2d)

    if data is None:
        data = np.random.uniform(size=shape)
    out = model.predict(data)
    return model, out
Example #15
Source File: test_network.py From ivis with GNU General Public License v2.0
def test_triplet_network():
    X = np.zeros(shape=(10, 5))
    embedding_dims = 3
    base_model = Sequential()
    base_model.add(Dense(8, input_shape=(X.shape[-1],)))

    model, _, _, _ = triplet_network(base_model,
                                     embedding_dims=embedding_dims,
                                     embedding_l2=0.1)
    encoder = model.layers[3]

    assert model.layers[3].output_shape == (None, 3)
    assert np.all(base_model.get_weights()[0] == encoder.get_weights()[0])
    assert np.all([isinstance(layer, keras.layers.InputLayer) for layer in model.layers[:3]])
    assert encoder.output_shape == (None, embedding_dims)
Example #16
Source File: run.py From polyaxon-examples with Apache License 2.0
def get_model(args):
    model = models.Sequential()
    model.add(
        layers.Conv2D(args.conv1_size, (3, 3),
                      activation=args.conv_activation,
                      input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation=args.conv_activation))
    model.add(layers.Dropout(args.dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(args.hidden1_size, activation=args.dense_activation))
    model.add(layers.Dense(10, activation='softmax'))
    model.summary()
    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])
    return model
Example #17
Source File: test_conv_layer.py From TensorNetwork with Apache License 2.0
def make_model(dummy_data):  # pylint: disable=redefined-outer-name
    data, _ = dummy_data
    model = Sequential()
    model.add(
        Conv2DMPO(filters=4,
                  kernel_size=3,
                  num_nodes=2,
                  bond_dim=10,
                  padding='same',
                  input_shape=data.shape[1:],
                  name=LAYER_NAME)
    )
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    return model
Example #18
Source File: keras_policy.py From rasa-for-botfront with Apache License 2.0
def __init__(
    self,
    featurizer: Optional[TrackerFeaturizer] = None,
    priority: int = DEFAULT_POLICY_PRIORITY,
    model: Optional[tf.keras.models.Sequential] = None,
    current_epoch: int = 0,
    max_history: Optional[int] = None,
    **kwargs: Any,
) -> None:
    if not featurizer:
        featurizer = self._standard_featurizer(max_history)
    super().__init__(featurizer, priority)

    self._load_params(**kwargs)
    self.model = model
    self.current_epoch = current_epoch

    common_utils.raise_warning(
        "'KerasPolicy' is deprecated and will be removed in version "
        "2.0. Use 'TEDPolicy' instead.",
        category=FutureWarning,
        docs=DOCS_URL_MIGRATION_GUIDE,
    )
Example #19
Source File: run.py From polyaxon with Apache License 2.0
def get_model(args):
    model = models.Sequential()
    model.add(
        layers.Conv2D(args.conv1_size, (3, 3),
                      activation=args.conv_activation,
                      input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation=args.conv_activation))
    model.add(layers.Dropout(args.dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(args.hidden1_size, activation=args.dense_activation))
    model.add(layers.Dense(10, activation='softmax'))
    model.summary()
    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])
    return model
Example #20
Source File: cnnmodule.py From RSN with MIT License
def _cnn_(cnn_input_shape, name=None):
    with tf.variable_scope(name or 'convnet', reuse=tf.AUTO_REUSE):
        convnet = Sequential()
        convnet.add(Conv1D(230, 3,
                           input_shape=cnn_input_shape,
                           kernel_initializer=W_init,
                           bias_initializer=b_init_conv,
                           kernel_regularizer=l2(2e-4)))
        convnet.add(MaxPooling1D(pool_size=cnn_input_shape[0] - 4))
        convnet.add(Activation('relu'))
        convnet.add(Flatten())
        convnet.add(Dense(cnn_input_shape[-1] * 230,
                          activation='sigmoid',
                          kernel_initializer=W_init,
                          bias_initializer=b_init_dense,
                          kernel_regularizer=l2(1e-3)))
        return convnet
Example #21
Source File: ml_agent.py From Grid2Op with Mozilla Public License 2.0
def construct_q_network(self):
    # construct double Q networks
    self.model_Q = self._build_q_NN()
    self.model_Q2 = self._build_q_NN()

    # state value function approximation
    self.model_value = self._build_model_value()
    self.model_value_target = self._build_model_value()
    self.model_value_target.set_weights(self.model_value.get_weights())

    # policy function approximation
    self.model_policy = Sequential()
    # proba of choosing action a depending on policy pi
    input_states = Input(shape=(self.observation_size,))
    lay1 = Dense(self.observation_size)(input_states)
    lay1 = Activation('relu')(lay1)
    lay2 = Dense(self.observation_size)(lay1)
    lay2 = Activation('relu')(lay2)
    lay3 = Dense(2 * self.action_size)(lay2)
    lay3 = Activation('relu')(lay3)
    soft_proba = Dense(self.action_size, activation="softmax", kernel_initializer='uniform')(lay3)

    self.model_policy = Model(inputs=[input_states], outputs=[soft_proba])
    self.model_policy.compile(loss='categorical_crossentropy', optimizer=Adam(lr=self.lr_))

    print("Successfully constructed networks.")
Example #22
Source File: convert_test.py From tf-encrypted with Apache License 2.0
def _keras_conv2d_core(shape=None, data=None):
    assert shape is None or data is None
    if shape is None:
        shape = data.shape
    init = tf.keras.initializers.RandomNormal(seed=1)

    model = Sequential()
    c2d = Conv2D(
        2,
        (3, 3),
        data_format="channels_last",
        use_bias=False,
        kernel_initializer=init,
        input_shape=shape[1:],
    )
    model.add(c2d)

    if data is None:
        data = np.random.uniform(size=shape)
    out = model.predict(data)
    return model, out
Example #23
Source File: model.py From kryptoflow with GNU General Public License v3.0
def build_model(self):
    model = Sequential()
    model.add(LSTM(32, input_shape=self.input_shape, return_sequences=False))
    adam = Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.01, amsgrad=False)
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer=adam)
    return model
Example #24
Source File: test_keras_model_io.py From kryptoflow with GNU General Public License v3.0
def keras_model():
    x, y = make_classification(n_features=2, n_redundant=0, n_informative=1,
                               n_clusters_per_class=1)
    model = Sequential()
    model.add(Dense(64, input_dim=2, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    model.fit(x, y)
    return model
Example #25
Source File: test_dqn.py From keras-rl2 with MIT License
def test_single_dqn_input():
    model = Sequential()
    model.add(Flatten(input_shape=(2, 3)))
    model.add(Dense(2))

    memory = SequentialMemory(limit=10, window_length=2)
    for double_dqn in (True, False):
        agent = DQNAgent(model, memory=memory, nb_actions=2, nb_steps_warmup=5,
                         batch_size=4, enable_double_dqn=double_dqn)
        agent.compile('sgd')
        agent.fit(MultiInputTestEnv((3,)), nb_steps=10)
Example #26
Source File: cifar_tf_example.py From ray with Apache License 2.0
def create_model(config):
    import tensorflow as tf

    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation("softmax"))

    # initiate RMSprop optimizer
    opt = tf.keras.optimizers.RMSprop(lr=0.001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(
        loss="categorical_crossentropy",
        optimizer=opt,
        metrics=["accuracy"])
    return model
Example #27
Source File: tensorflow_train_example.py From ray with Apache License 2.0
def simple_model(config):
    model = Sequential([Dense(10, input_shape=(1, )), Dense(1)])

    model.compile(
        optimizer="sgd",
        loss="mean_squared_error",
        metrics=["mean_squared_error"])

    return model
Example #28
Source File: dqn_agent.py From Multi-Commander with Apache License 2.0
def _build_model(self):
    # Neural Net for Deep-Q learning Model
    # input: state; output: action value
    model = Sequential()
    model.add(Dense(256, input_dim=self.state_size, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.3))
    # model.add(LSTM(128))
    model.add(Dense(self.action_size, activation='linear'))
    model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
    return model
Example #29
Source File: model.py From kryptoflow with GNU General Public License v3.0
def build_model(self):
    model = Sequential()
    model.add(LSTM(32, input_shape=self.input_shape, return_sequences=False))
    adam = Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.01, amsgrad=False)
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer=adam)
    return model
Example #30
Source File: mpo_test.py From TensorNetwork with Apache License 2.0
def test_shape_sanity_check(in_dim_base, dim1, dim2, num_nodes, bond_dim):
    model = Sequential([
        Input(in_dim_base**num_nodes),
        mpo.DenseMPO(dim1**num_nodes, num_nodes=num_nodes, bond_dim=bond_dim),
        mpo.DenseMPO(dim2**num_nodes, num_nodes=num_nodes, bond_dim=bond_dim),
    ])
    # Hard code batch size.
    result = model.predict(np.ones((32, in_dim_base**num_nodes)))
    assert result.shape == (32, dim2**num_nodes)