Python keras.Input() Examples
The following are 15 code examples of keras.Input(). You can go to the original project or source file by following the links above each example, or check out all the other available functions and classes of the keras module.
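keras.Input() creates a symbolic placeholder tensor that serves as the entry point of a functional-API model; its shape argument excludes the batch dimension. Before the examples from real projects below, here is a minimal usage sketch (the layer sizes and dummy data are arbitrary, chosen only for illustration):

import numpy as np
import keras

# Symbolic input: accepts batches of 8-dimensional vectors
# (the batch dimension is left implicit).
inputs = keras.Input(shape=(8,))
hidden = keras.layers.Dense(16, activation='relu')(inputs)
outputs = keras.layers.Dense(1)(hidden)

# The Input tensor and the final output together define the model.
model = keras.models.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.random.random((4, 8)), np.random.random((4, 1)))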
Example #1
Source File: recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_stacked_rnn_attributes():
    cells = [recurrent.LSTMCell(3),
             recurrent.LSTMCell(3, kernel_regularizer='l2')]
    layer = recurrent.RNN(cells)
    layer.build((None, None, 5))

    # Test regularization losses
    assert len(layer.losses) == 1

    # Test weights
    assert len(layer.trainable_weights) == 6
    cells[0].trainable = False
    assert len(layer.trainable_weights) == 3
    assert len(layer.non_trainable_weights) == 3

    # Test `get_losses_for`
    x = keras.Input((None, 5))
    y = K.sum(x)
    cells[0].add_loss(y, inputs=x)
    assert layer.get_losses_for(x) == [y]
Example #2
Source File: recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_initial_states_as_other_inputs(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1

    # Test with Keras tensor
    main_inputs = Input((timesteps, embedding_dim))
    initial_state = [Input((units,)) for _ in range(num_states)]
    inputs = [main_inputs] + initial_state

    layer = layer_class(units)
    output = layer(inputs)
    assert initial_state[0] in layer._inbound_nodes[0].input_tensors

    model = Model(inputs, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [np.random.random((num_samples, units))
                     for _ in range(num_states)]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([main_inputs] + initial_state, targets)
Example #3
Source File: core.py From bi-lstm-crf with Apache License 2.0
def __build_model(self, emb_matrix=None):
    word_input = Input(shape=(None,), dtype='int32', name="word_input")

    word_emb = Embedding(self.vocab_size + 1, self.embed_dim,
                         weights=[emb_matrix] if emb_matrix is not None else None,
                         trainable=True if emb_matrix is None else False,
                         name='word_emb')(word_input)

    bilstm_output = Bidirectional(LSTM(self.bi_lstm_units // 2,
                                       return_sequences=True))(word_emb)
    bilstm_output = Dropout(self.dropout_rate)(bilstm_output)

    output = Dense(self.chunk_size + 1, kernel_initializer="he_normal")(bilstm_output)
    output = CRF(self.chunk_size + 1, sparse_target=self.sparse_target)(output)

    model = Model([word_input], [output])
    parallel_model = model
    if self.num_gpu > 1:
        parallel_model = multi_gpu_model(model, gpus=self.num_gpu)

    parallel_model.compile(optimizer=self.optimizer, loss=crf_loss,
                           metrics=[crf_accuracy])

    return model, parallel_model
Example #4
Source File: cudnn_recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_return_state():
    input_size = 10
    timesteps = 6
    units = 2
    num_samples = 32

    for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
        num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1

        inputs = keras.Input(batch_shape=(num_samples, timesteps, input_size))
        layer = layer_class(units, return_state=True, stateful=True)
        outputs = layer(inputs)
        output, state = outputs[0], outputs[1:]
        assert len(state) == num_states
        model = keras.models.Model(inputs, state[0])

        inputs = np.random.random((num_samples, timesteps, input_size))
        state = model.predict(inputs)
        np.testing.assert_allclose(
            keras.backend.eval(layer.states[0]), state, atol=1e-4)
Example #5
Source File: cudnn_recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_specify_initial_state_keras_tensor():
    input_size = 10
    timesteps = 6
    units = 2
    num_samples = 32

    for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
        num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1

        inputs = keras.Input((timesteps, input_size))
        initial_state = [keras.Input((units,)) for _ in range(num_states)]
        layer = layer_class(units)
        if len(initial_state) == 1:
            output = layer(inputs, initial_state=initial_state[0])
        else:
            output = layer(inputs, initial_state=initial_state)
        assert initial_state[0] in layer._inbound_nodes[0].input_tensors

        model = keras.models.Model([inputs] + initial_state, output)
        model.compile(loss='categorical_crossentropy', optimizer='adam')

        inputs = np.random.random((num_samples, timesteps, input_size))
        initial_state = [np.random.random((num_samples, units))
                         for _ in range(num_states)]
        targets = np.random.random((num_samples, units))
        model.fit([inputs] + initial_state, targets)
Example #6
Source File: recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_specify_initial_state_non_keras_tensor(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1

    # Test with non-Keras tensor
    inputs = Input((timesteps, embedding_dim))
    initial_state = [K.random_normal_variable((num_samples, units), 0, 1)
                     for _ in range(num_states)]
    layer = layer_class(units)
    output = layer(inputs, initial_state=initial_state)

    model = Model(inputs, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    targets = np.random.random((num_samples, units))
    model.fit(inputs, targets)
Example #7
Source File: recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_specify_state_with_masking(layer_class):
    '''This test is based on a previously failing issue here:
    https://github.com/keras-team/keras/issues/1567
    '''
    num_states = 2 if layer_class is recurrent.LSTM else 1

    inputs = Input((timesteps, embedding_dim))
    _ = Masking()(inputs)
    initial_state = [Input((units,)) for _ in range(num_states)]
    output = layer_class(units)(inputs, initial_state=initial_state)

    model = Model([inputs] + initial_state, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [np.random.random((num_samples, units))
                     for _ in range(num_states)]
    targets = np.random.random((num_samples, units))
    model.fit([inputs] + initial_state, targets)
Example #8
Source File: my_social_model.py From social_lstm_keras_tf with GNU General Public License v3.0
def __init__(self, config: ModelConfig) -> None:
    self.x_input = Input((config.obs_len, config.max_n_peds, pxy_dim))
    # y_input = Input((config.obs_len, config.max_n_peds, pxy_dim))
    self.grid_input = Input(
        (config.obs_len, config.max_n_peds, config.max_n_peds,
         config.grid_side_squared))
    self.zeros_input = Input(
        (config.obs_len, config.max_n_peds, config.lstm_state_dim))

    # Social LSTM layers
    self.lstm_layer = LSTM(config.lstm_state_dim, return_state=True)
    self.W_e_relu = Dense(config.emb_dim, activation="relu")
    self.W_a_relu = Dense(config.emb_dim, activation="relu")
    self.W_p = Dense(out_dim)

    self._build_model(config)
Example #9
Source File: recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_specify_initial_state_keras_tensor(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1

    # Test with Keras tensor
    inputs = Input((timesteps, embedding_dim))
    initial_state = [Input((units,)) for _ in range(num_states)]
    layer = layer_class(units)
    if len(initial_state) == 1:
        output = layer(inputs, initial_state=initial_state[0])
    else:
        output = layer(inputs, initial_state=initial_state)
    assert initial_state[0] in layer._inbound_nodes[0].input_tensors

    model = Model([inputs] + initial_state, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [np.random.random((num_samples, units))
                     for _ in range(num_states)]
    targets = np.random.random((num_samples, units))
    model.fit([inputs] + initial_state, targets)
Example #10
Source File: recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_minimal_rnn_cell_non_layer_multiple_states():

    class MinimalRNNCell(object):

        def __init__(self, units, input_dim):
            self.units = units
            self.state_size = (units, units)
            self.kernel = keras.backend.variable(
                np.random.random((input_dim, units)))

        def call(self, inputs, states):
            prev_output_1 = states[0]
            prev_output_2 = states[1]
            output = keras.backend.dot(inputs, self.kernel)
            output += prev_output_1
            output -= prev_output_2
            return output, [output * 2, output * 3]

    # Basic test case.
    cell = MinimalRNNCell(32, 5)
    x = keras.Input((None, 5))
    layer = recurrent.RNN(cell)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

    # Test stacking.
    cells = [MinimalRNNCell(8, 5),
             MinimalRNNCell(16, 8),
             MinimalRNNCell(32, 16)]
    layer = recurrent.RNN(cells)
    assert layer.cell.state_size == (32, 32, 16, 16, 8, 8)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
Example #11
Source File: recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_stacked_rnn_dropout():
    cells = [recurrent.LSTMCell(3, dropout=0.1, recurrent_dropout=0.1),
             recurrent.LSTMCell(3, dropout=0.1, recurrent_dropout=0.1)]
    layer = recurrent.RNN(cells)

    x = keras.Input((None, 5))
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile('sgd', 'mse')
    x_np = np.random.random((6, 5, 5))
    y_np = np.random.random((6, 3))
    model.train_on_batch(x_np, y_np)
Example #12
Source File: optimizers_test.py From DeepLearning_Wavelet-LSTM with MIT License
def _test_no_grad(optimizer):
    inp = Input([3])
    x = Dense(10)(inp)
    x = Lambda(lambda l: 1.0 * K.reshape(K.cast(K.argmax(l), 'float32'),
                                         [-1, 1]))(x)
    mod = Model(inp, x)
    mod.compile(optimizer, 'mse')
    with pytest.raises(ValueError):
        mod.fit(np.zeros([10, 3]), np.zeros([10, 1], np.float32),
                batch_size=10, epochs=10)
Example #13
Source File: multi_gpu_test.py From DeepLearning_Wavelet-LSTM with MIT License
def multi_gpu_test_multi_io_model():
    print('####### test multi-io model')
    num_samples = 1000
    input_dim_a = 10
    input_dim_b = 5
    output_dim_a = 1
    output_dim_b = 2
    hidden_dim = 10
    gpus = 8
    target_gpu_id = [0, 2, 4]
    epochs = 2

    input_a = keras.Input((input_dim_a,))
    input_b = keras.Input((input_dim_b,))
    a = keras.layers.Dense(hidden_dim)(input_a)
    b = keras.layers.Dense(hidden_dim)(input_b)
    c = keras.layers.concatenate([a, b])
    output_a = keras.layers.Dense(output_dim_a)(c)
    output_b = keras.layers.Dense(output_dim_b)(c)
    model = keras.models.Model([input_a, input_b], [output_a, output_b])

    a_x = np.random.random((num_samples, input_dim_a))
    b_x = np.random.random((num_samples, input_dim_b))
    a_y = np.random.random((num_samples, output_dim_a))
    b_y = np.random.random((num_samples, output_dim_b))

    parallel_model = multi_gpu_model(model, gpus=gpus)
    parallel_model.compile(loss='mse', optimizer='rmsprop')
    parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs)

    parallel_model = multi_gpu_model(model, gpus=target_gpu_id)
    parallel_model.compile(loss='mse', optimizer='rmsprop')
    parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs)
Example #14
Source File: test_sequential_model.py From DeepLearning_Wavelet-LSTM with MIT License
def test_clone_sequential_model():
    val_a = np.random.random((10, 4))
    val_out = np.random.random((10, 4))

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(4, input_shape=(4,)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dropout(0.5))
    model.add(keras.layers.Dense(4))

    if K.backend() == 'tensorflow':
        # Everything should work in a new session.
        K.clear_session()

    # With placeholder creation
    new_model = keras.models.clone_model(model)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(val_a, val_out)

    # On top of new tensor
    input_a = keras.Input(shape=(4,))
    new_model = keras.models.clone_model(
        model, input_tensors=input_a)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(val_a, val_out)

    # On top of new, non-Keras tensor
    input_a = keras.backend.variable(val_a)
    new_model = keras.models.clone_model(
        model, input_tensors=input_a)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(None, val_out)
Example #15
Source File: cudnn_recurrent_test.py From DeepLearning_Wavelet-LSTM with MIT License
def test_cudnn_rnn_timing(rnn_type):
    input_size = 1000
    timesteps = 60
    units = 256
    num_samples = 10000

    times = []
    for use_cudnn in [True, False]:
        start_time = time.time()
        inputs = keras.layers.Input(shape=(None, input_size))
        if use_cudnn:
            if rnn_type == 'lstm':
                layer = keras.layers.CuDNNLSTM(units)
            else:
                layer = keras.layers.CuDNNGRU(units)
        else:
            if rnn_type == 'lstm':
                layer = keras.layers.LSTM(units)
            else:
                layer = keras.layers.GRU(units)
        outputs = layer(inputs)

        model = keras.models.Model(inputs, outputs)
        model.compile('sgd', 'mse')

        x = np.random.random((num_samples, timesteps, input_size))
        y = np.random.random((num_samples, units))
        model.fit(x, y, epochs=4, batch_size=32)
        times.append(time.time() - start_time)

    speedup = times[1] / times[0]
    print(rnn_type, 'speedup', speedup)
    assert speedup > 3