Python keras.layers.wrappers.Bidirectional() Examples
The following are 17 code examples of keras.layers.wrappers.Bidirectional(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers.wrappers, or try the search function.
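Before the project examples, here is a minimal usage sketch for orientation: Bidirectional wraps a recurrent layer, runs one copy of it forwards and one backwards over the input sequence, and merges the two outputs (concatenation by default). The toy model below is an illustrative assumption for this page, not taken from any of the projects; the vocabulary size, sequence length, and unit counts are arbitrary.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from keras.layers.wrappers import Bidirectional

# Toy sequence classifier (hypothetical hyperparameters). The wrapper
# runs one LSTM forwards and one backwards, then concatenates the two
# final outputs (merge_mode='concat' is the default), giving
# 2 * 32 = 64 features per sequence.
model = Sequential()
model.add(Embedding(input_dim=1000, output_dim=16, input_length=20))
model.add(Bidirectional(LSTM(32)))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')

# Smoke test on random integer sequences (random data, for shape
# checking only).
x = np.random.randint(0, 1000, size=(8, 20))
y = np.random.randint(0, 2, size=(8, 1))
model.train_on_batch(x, y)

Besides the default merge_mode='concat', the wrapper also accepts 'sum', 'mul', 'ave', and None (which returns the forward and backward outputs as a list).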
Example #1
Source File: models.py From keras-image-captioning with MIT License | 6 votes |
def _build_sequence_model(self, sequence_input):
    RNN = GRU if self._rnn_type == 'gru' else LSTM

    def rnn():
        rnn = RNN(units=self._rnn_output_size,
                  return_sequences=True,
                  dropout=self._dropout_rate,
                  recurrent_dropout=self._dropout_rate,
                  kernel_regularizer=self._regularizer,
                  kernel_initializer=self._initializer,
                  implementation=2)
        rnn = Bidirectional(rnn) if self._bidirectional_rnn else rnn
        return rnn

    input_ = sequence_input
    for _ in range(self._rnn_layers):
        input_ = BatchNormalization(axis=-1)(input_)
        rnn_out = rnn()(input_)
        input_ = rnn_out
    time_dist_dense = TimeDistributed(Dense(units=self._vocab_size))(rnn_out)

    return time_dist_dense
Example #2
Source File: rnn_tagger.py From neural_complete with MIT License | 6 votes |
def bilstm_layer(input_layer, lstm_dims, rnn_layers, dropout):
    lstm = None
    if isinstance(lstm_dims, (list, tuple)):
        lstm_dims = lstm_dims
    else:
        assert isinstance(lstm_dims, int)
        lstm_dims = [lstm_dims] * rnn_layers
    for i in range(rnn_layers):
        if i == 0:
            nested = input_layer
        else:
            nested = lstm
        wrapped = LSTM(
            output_dim=lstm_dims[i], activation='tanh',
            return_sequences=True, dropout_W=dropout, dropout_U=dropout,
            name='bistm_%d' % i)
        lstm = Bidirectional(wrapped, merge_mode='sum')(nested)
    return lstm
Example #3
Source File: models.py From neurowriter with MIT License | 6 votes |
def create(inputtokens, vocabsize, units=16, dropout=0, embedding=32):
    input_ = Input(shape=(inputtokens,), dtype='int32')

    # Embedding layer
    net = Embedding(input_dim=vocabsize, output_dim=embedding,
                    input_length=inputtokens)(input_)
    net = Dropout(dropout)(net)

    # Bidirectional LSTM layer
    net = BatchNormalization()(net)
    net = Bidirectional(CuDNNLSTM(units))(net)
    net = Dropout(dropout)(net)

    # Output layer
    net = Dense(vocabsize, activation='softmax')(net)
    model = Model(inputs=input_, outputs=net)

    # Make data-parallel
    ngpus = len(get_available_gpus())
    if ngpus > 1:
        model = make_parallel(model, ngpus)

    return model
Example #4
Source File: wrappers_test.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def test_Bidirectional_state_reuse():
    rnn = layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3

    input1 = Input((timesteps, dim))
    layer = wrappers.Bidirectional(rnn(units, return_state=True,
                                       return_sequences=True))
    state = layer(input1)[1:]

    # test passing invalid initial_state: passing a tensor
    input2 = Input((timesteps, dim))
    with pytest.raises(ValueError):
        output = wrappers.Bidirectional(rnn(units))(input2,
                                                    initial_state=state[0])

    # test valid usage: passing a list
    output = wrappers.Bidirectional(rnn(units))(input2, initial_state=state)
    model = Model([input1, input2], output)
    assert len(model.layers) == 4
    assert isinstance(model.layers[-1].input, list)
    inputs = [np.random.rand(samples, timesteps, dim),
              np.random.rand(samples, timesteps, dim)]
    outputs = model.predict(inputs)
Example #5
Source File: keras_models.py From krnnt with GNU Lesser General Public License v3.0 | 6 votes |
def create_model(self):
    features_length = self.parameters.pref['features_length']

    inputs = Input(shape=(None, features_length))
    x = inputs
    x = Masking(mask_value=0., input_shape=(None, features_length))(x)
    x = Bidirectional(
        GRU(self.parameters.pref['internal_neurons'], return_sequences=True,
            dropout=0.0, recurrent_dropout=0.5, implementation=1),
        input_shape=(None, features_length))(x)
    x = Bidirectional(
        GRU(self.parameters.pref['internal_neurons'], return_sequences=True,
            dropout=0.0, recurrent_dropout=0.5, implementation=1),
        input_shape=(None, features_length))(x)
    x = Dropout(0.5)(x)
    x = TimeDistributed(Dense(self.parameters.pref['output_length'],
                              activation='softmax'))(x)

    self.model = Model(inputs=inputs, outputs=x)
    self.loss = 'categorical_crossentropy'
    self.optimizer = keras.optimizers.Nadam()
Example #6
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_small_no_sequence_bidir_random(self):
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 1

    # Define a model
    model = Sequential()
    model.add(
        Bidirectional(
            LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
            input_shape=(input_length, input_dim),
        )
    )

    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )

    # Test the keras model
    self._test_model(model)
Example #7
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_tiny_no_sequence_bidir_random(
    self, model_precision=_MLMODEL_FULL_PRECISION
):
    np.random.seed(1988)
    input_dim = 1
    input_length = 1
    num_channels = 1
    num_samples = 1

    # Define a model
    model = Sequential()
    model.add(
        Bidirectional(
            LSTM(num_channels, implementation=1, recurrent_activation="sigmoid"),
            input_shape=(input_length, input_dim),
        )
    )

    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )

    # Test the keras model
    self._test_model(model, model_precision=model_precision)
Example #8
Source File: finetuning.py From DeepMoji with MIT License | 6 votes |
def change_trainable(layer, trainable, verbose=False):
    """ Helper method that fixes some of Keras' issues with wrappers and
        trainability. Freezes or unfreezes a given layer.

    # Arguments:
        layer: Layer to be modified.
        trainable: Whether the layer should be frozen or unfrozen.
        verbose: Verbosity flag.
    """
    layer.trainable = trainable

    if type(layer) == Bidirectional:
        layer.backward_layer.trainable = trainable
        layer.forward_layer.trainable = trainable

    if type(layer) == TimeDistributed:
        # TimeDistributed wraps a single inner layer, exposed as `layer.layer`;
        # the original line read `layer.backward_layer`, an attribute the
        # TimeDistributed wrapper does not have.
        layer.layer.trainable = trainable

    if verbose:
        action = 'Unfroze' if trainable else 'Froze'
        print("{} {}".format(action, layer.name))
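A short usage sketch of the helper above (the model variable is hypothetical, not part of the DeepMoji source): freezing every layer of a model, including the inner forward/backward layers of any Bidirectional wrappers.

# Hypothetical usage: freeze all layers of an existing Keras model,
# letting change_trainable also reach inside wrapper layers.
for layer in model.layers:
    change_trainable(layer, trainable=False, verbose=True)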
Example #9
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_medium_no_sequence_bidir_random(self):
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 10

    # Define a model
    model = Sequential()
    model.add(
        Bidirectional(
            LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
            input_shape=(input_length, input_dim),
        )
    )

    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )

    # Test the keras model
    self._test_model(model)
Example #10
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_tiny_no_sequence_bidir_random_gpu(
    self, model_precision=_MLMODEL_FULL_PRECISION
):
    np.random.seed(1988)
    input_dim = 1
    input_length = 1
    num_channels = 1
    num_samples = 1

    # Define a model
    model = Sequential()
    model.add(
        Bidirectional(
            LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
            input_shape=(input_length, input_dim),
        )
    )

    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )

    # Test the keras model
    self._test_model(model, model_precision=model_precision)
Example #11
Source File: NMT.py From keras-attention with GNU Affero General Public License v3.0 | 5 votes |
def simpleNMT(pad_length=100,
              n_chars=105,
              n_labels=6,
              embedding_learnable=False,
              encoder_units=256,
              decoder_units=256,
              trainable=True,
              return_probabilities=False):
    """
    Builds a Neural Machine Translator that has alignment attention
    :param pad_length: the size of the input sequence
    :param n_chars: the number of characters in the vocabulary
    :param n_labels: the number of possible labelings for each character
    :param embedding_learnable: decides if the one-hot embedding should be refinable.
    :return: keras.models.Model that can be compiled and fit'ed

    *** REFERENCES ***
    Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio.
    "Neural Machine Translation by Jointly Learning to Align and Translate"
    """
    input_ = Input(shape=(pad_length,), dtype='float32')
    input_embed = Embedding(n_chars, n_chars,
                            input_length=pad_length,
                            trainable=embedding_learnable,
                            weights=[np.eye(n_chars)],
                            name='OneHot')(input_)

    rnn_encoded = Bidirectional(LSTM(encoder_units, return_sequences=True),
                                name='bidirectional_1',
                                merge_mode='concat',
                                trainable=trainable)(input_embed)

    y_hat = AttentionDecoder(decoder_units,
                             name='attention_decoder_1',
                             output_dim=n_labels,
                             return_probabilities=return_probabilities,
                             trainable=trainable)(rnn_encoded)

    model = Model(inputs=input_, outputs=y_hat)
    return model
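A quick way to exercise simpleNMT is to build and compile it; the optimizer and loss below are illustrative assumptions, not taken from the keras-attention training script.

# Hypothetical smoke test: build the translator and inspect its layers.
model = simpleNMT(pad_length=100, n_chars=105, n_labels=6)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()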
Example #12
Source File: wrappers_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_Bidirectional_trainable():
    # test layers that need learning_phase to be set
    x = Input(shape=(3, 2))
    layer = wrappers.Bidirectional(layers.SimpleRNN(3))
    _ = layer(x)
    assert len(layer.trainable_weights) == 6
    layer.trainable = False
    assert len(layer.trainable_weights) == 0
    layer.trainable = True
    assert len(layer.trainable_weights) == 6
Example #13
Source File: wrappers_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_Bidirectional_dropout(merge_mode):
    rnn = layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3
    X = [np.random.rand(samples, timesteps, dim)]

    inputs = Input((timesteps, dim))
    wrapped = wrappers.Bidirectional(rnn(units, dropout=0.2,
                                         recurrent_dropout=0.2),
                                     merge_mode=merge_mode)
    outputs = _to_list(wrapped(inputs, training=True))
    assert all(not getattr(x, '_uses_learning_phase') for x in outputs)

    inputs = Input((timesteps, dim))
    wrapped = wrappers.Bidirectional(rnn(units, dropout=0.2,
                                         return_state=True),
                                     merge_mode=merge_mode)
    outputs = _to_list(wrapped(inputs))
    assert all(x._uses_learning_phase for x in outputs)

    model = Model(inputs, outputs)
    assert model.uses_learning_phase
    y1 = _to_list(model.predict(X))
    y2 = _to_list(model.predict(X))
    for x1, x2 in zip(y1, y2):
        assert_allclose(x1, x2, atol=1e-5)
Example #14
Source File: model.py From LipNet with MIT License | 5 votes |
def build(self):
    if K.image_data_format() == 'channels_first':
        input_shape = (self.img_c, self.frames_n, self.img_w, self.img_h)
    else:
        input_shape = (self.frames_n, self.img_w, self.img_h, self.img_c)

    self.input_data = Input(name='the_input', shape=input_shape, dtype='float32')

    self.zero1 = ZeroPadding3D(padding=(1, 2, 2), name='zero1')(self.input_data)
    self.conv1 = Conv3D(32, (3, 5, 5), strides=(1, 2, 2), activation='relu',
                        kernel_initializer='he_normal', name='conv1')(self.zero1)
    self.maxp1 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                              name='max1')(self.conv1)
    self.drop1 = Dropout(0.5)(self.maxp1)

    self.zero2 = ZeroPadding3D(padding=(1, 2, 2), name='zero2')(self.drop1)
    self.conv2 = Conv3D(64, (3, 5, 5), strides=(1, 1, 1), activation='relu',
                        kernel_initializer='he_normal', name='conv2')(self.zero2)
    self.maxp2 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                              name='max2')(self.conv2)
    self.drop2 = Dropout(0.5)(self.maxp2)

    self.zero3 = ZeroPadding3D(padding=(1, 1, 1), name='zero3')(self.drop2)
    self.conv3 = Conv3D(96, (3, 3, 3), strides=(1, 1, 1), activation='relu',
                        kernel_initializer='he_normal', name='conv3')(self.zero3)
    self.maxp3 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                              name='max3')(self.conv3)
    self.drop3 = Dropout(0.5)(self.maxp3)

    self.resh1 = TimeDistributed(Flatten())(self.drop3)

    self.gru_1 = Bidirectional(GRU(256, return_sequences=True,
                                   kernel_initializer='Orthogonal', name='gru1'),
                               merge_mode='concat')(self.resh1)
    self.gru_2 = Bidirectional(GRU(256, return_sequences=True,
                                   kernel_initializer='Orthogonal', name='gru2'),
                               merge_mode='concat')(self.gru_1)

    # transforms RNN output to character activations:
    self.dense1 = Dense(self.output_size, kernel_initializer='he_normal',
                        name='dense1')(self.gru_2)
    self.y_pred = Activation('softmax', name='softmax')(self.dense1)

    self.labels = Input(name='the_labels',
                        shape=[self.absolute_max_string_len], dtype='float32')
    self.input_length = Input(name='input_length', shape=[1], dtype='int64')
    self.label_length = Input(name='label_length', shape=[1], dtype='int64')

    self.loss_out = CTC('ctc',
                        [self.y_pred, self.labels,
                         self.input_length, self.label_length])

    self.model = Model(inputs=[self.input_data, self.labels,
                               self.input_length, self.label_length],
                       outputs=self.loss_out)
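Because the CTC layer above emits the loss value itself, models like this are usually compiled with an identity loss that just forwards y_pred; a minimal sketch, assuming an Adam optimizer (the LipNet source may use a different one):

# The 'ctc' output already is the loss computed inside the graph, so the
# Keras-level loss is a pass-through on y_pred (a common CTC pattern).
self.model.compile(loss={'ctc': lambda y_true, y_pred: y_pred},
                   optimizer='adam')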
Example #15
Source File: topcoder_crnn.py From crnn-lid with GNU General Public License v3.0 | 5 votes |
def create_model(input_shape, config, is_training=True):
    weight_decay = 0.001

    model = Sequential()

    model.add(Convolution2D(16, 7, 7, W_regularizer=l2(weight_decay),
                            activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(32, 5, 5, W_regularizer=l2(weight_decay),
                            activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(64, 3, 3, W_regularizer=l2(weight_decay),
                            activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay),
                            activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay),
                            activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # (bs, y, x, c) --> (bs, x, y, c)
    model.add(Permute((2, 1, 3)))

    # (bs, x, y, c) --> (bs, x, y * c)
    bs, x, y, c = model.layers[-1].output_shape
    model.add(Reshape((x, y * c)))

    model.add(Bidirectional(LSTM(512, return_sequences=False),
                            merge_mode="concat"))
    model.add(Dense(config["num_classes"], activation="softmax"))

    return model
Example #16
Source File: inceptionv3_crnn.py From crnn-lid with GNU General Public License v3.0 | 5 votes |
def create_model(input_shape, config):
    input_tensor = Input(shape=input_shape)  # this assumes K.image_dim_ordering() == 'tf'
    inception_model = InceptionV3(include_top=False, weights=None,
                                  input_tensor=input_tensor)
    # inception_model.load_weights("logs/2016-12-18-13-56-44/weights.21.model", by_name=True)

    for layer in inception_model.layers:
        layer.trainable = False

    x = inception_model.output
    # x = GlobalAveragePooling2D()(x)

    # (bs, y, x, c) --> (bs, x, y, c)
    x = Permute((2, 1, 3))(x)

    # (bs, x, y, c) --> (bs, x, y * c)
    _x, _y, _c = [int(s) for s in x._shape[1:]]
    x = Reshape((_x, _y * _c))(x)

    x = Bidirectional(LSTM(512, return_sequences=False), merge_mode="concat")(x)
    predictions = Dense(config["num_classes"], activation='softmax')(x)

    model = Model(input=inception_model.input, output=predictions)
    model.load_weights("logs/2017-01-02-13-39-41/weights.06.model")

    return model
Example #17
Source File: models.py From neurowriter with MIT License | 5 votes |
def create(inputtokens, vocabsize, layers=1, units=16, dropout=0, embedding=32):
    input_ = Input(shape=(inputtokens,), dtype='int32')

    # Embedding layer
    net = Embedding(input_dim=vocabsize, output_dim=embedding,
                    input_length=inputtokens)(input_)
    net = Dropout(dropout)(net)

    # Bidirectional LSTM layer
    net = BatchNormalization()(net)
    net = Bidirectional(CuDNNLSTM(units, return_sequences=(layers > 1)))(net)
    net = Dropout(dropout)(net)

    # Rest of LSTM layers with residual connections (if any)
    for i in range(1, layers):
        if i < layers - 1:
            block = BatchNormalization()(net)
            block = CuDNNLSTM(2 * units, return_sequences=True)(block)
            block = Dropout(dropout)(block)
            net = add([block, net])
        else:
            net = BatchNormalization()(net)
            net = CuDNNLSTM(2 * units)(net)
            net = Dropout(dropout)(net)

    # Output layer
    net = Dense(vocabsize, activation='softmax')(net)
    model = Model(inputs=input_, outputs=net)

    # Make data-parallel
    ngpus = len(get_available_gpus())
    if ngpus > 1:
        model = make_parallel(model, ngpus)

    return model