Python keras.layers.Masking() Examples

The following are 27 code examples of keras.layers.Masking(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
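Before the project snippets, here is a minimal, self-contained sketch (not taken from any project below) of what the layer does: a timestep whose features all equal mask_value is skipped by downstream mask-aware layers such as LSTM.

import numpy as np
from keras.models import Sequential
from keras.layers import Masking, LSTM

model = Sequential()
model.add(Masking(mask_value=0., input_shape=(3, 2)))  # (timesteps, features)
model.add(LSTM(4))

x = np.array([[[1., 2.], [0., 0.], [3., 4.]]])  # the all-zero middle timestep is masked
print(model.predict(x).shape)  # (1, 4)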
Example #1
Source File: example.py    From CTCModel with MIT License
def create_network(nb_features, nb_labels, padding_value):

    # Define the network architecture
    input_data = Input(name='input', shape=(None, nb_features)) # nb_features = image height

    masking = Masking(mask_value=padding_value)(input_data)
    noise = GaussianNoise(0.01)(masking)
    blstm = Bidirectional(LSTM(128, return_sequences=True, dropout=0.1))(noise)
    blstm = Bidirectional(LSTM(128, return_sequences=True, dropout=0.1))(blstm)
    blstm = Bidirectional(LSTM(128, return_sequences=True, dropout=0.1))(blstm)

    dense = TimeDistributed(Dense(nb_labels + 1, name="dense"))(blstm)
    outrnn = Activation('softmax', name='softmax')(dense)

    network = CTCModel([input_data], [outrnn])
    network.compile(Adam(lr=0.0001))

    return network 
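A hypothetical call to the factory above (values are illustrative; the snippet assumes the usual Keras imports plus the project's CTCModel class):

network = create_network(nb_features=64,    # image height
                         nb_labels=29,      # label count; the Dense layer adds one unit for the CTC blank
                         padding_value=255)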
Example #2
Source File: baseline.py    From MELD with GNU General Public License v3.0
def get_audio_model(self):

		# Modality specific hyperparameters
		self.epochs = 100
		self.batch_size = 50

		# Modality specific parameters
		self.embedding_dim = self.train_x.shape[2]

		print("Creating Model...")
		
		inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
		masked = Masking(mask_value=0)(inputs)
		lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4))(masked)
		lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(lstm)
		output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)

		model = Model(inputs, output)
		return model 
Example #3
Source File: baseline.py    From MELD with GNU General Public License v3.0
def get_bimodal_model(self):

		# Modality specific hyperparameters
		self.epochs = 100
		self.batch_size = 10

		# Modality specific parameters
		self.embedding_dim = self.train_x.shape[2]

		print("Creating Model...")
		
		inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
		masked = Masking(mask_value=0)(inputs)
		lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(masked)
		output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)

		model = Model(inputs, output)
		return model 
Example #4
Source File: mom_example.py    From rasa_wechat with Apache License 2.0
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.
        :param max_history_len: The maximum number of historical turns used to
                                decide on next action"""
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # size of hidden layer in LSTM
        # Build Model
        batch_shape = (None, max_history_len, num_features)

        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(num_actions))  # Keras 2: `units` replaces the legacy input_dim/output_dim pair
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Example #5
Source File: restaurant_example.py    From rasa_wechat with Apache License 2.0
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.
        :param max_history_len: The maximum number of historical turns used to
                                decide on next action"""
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # size of hidden layer in LSTM
        # Build Model
        batch_shape = (None, max_history_len, num_features)

        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(num_actions))  # Keras 2: `units` replaces the legacy input_dim/output_dim pair
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Example #6
Source File: policy.py    From rasa_wechat with Apache License 2.0
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.
        :param max_history_len: The maximum number of historical turns used to
                                decide on next action"""
        from keras.layers import Activation, Masking, Dense, SimpleRNN
        from keras.models import Sequential

        n_hidden = 8  # size of hidden layer in RNN
        # Build Model
        batch_input_shape = (None, max_history_len, num_features)

        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_input_shape))
        model.add(SimpleRNN(n_hidden, batch_input_shape=batch_input_shape))
        model.add(Dense(num_actions))  # Keras 2: `units` replaces the legacy input_dim/output_dim pair
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Example #7
Source File: keras_policy.py    From rasa_wechat with Apache License 2.0
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.

        :param max_history_len: The maximum number of historical
                                turns used to decide on next action
        """
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # size of hidden layer in LSTM
        batch_shape = (None, max_history_len, num_features)
        # Build Model
        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(num_actions))  # input_dim is redundant after an LSTM layer
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Example #8
Source File: bot.py    From rasa_bot with Apache License 2.0
def model_architecture(self, num_features, num_actions, max_history_len):
        """Build a Keras model and return a compiled model."""
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # size of hidden layer in LSTM
        # Build Model
        batch_shape = (None, max_history_len, num_features)

        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(num_actions))  # Keras 2: `units` replaces the legacy input_dim/output_dim pair
        model.add(Activation("softmax"))

        model.compile(loss="categorical_crossentropy",
                      optimizer="adam",
                      metrics=["accuracy"])

        logger.debug(model.summary())
        return model 
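Examples #4 through #8 are variations of a single template: mask dialogue histories padded with -1, encode them with a recurrent layer, and pick the next action with a softmax. A self-contained sketch of that pattern, with illustrative dimensions and dummy data:

import numpy as np
from keras.layers import LSTM, Activation, Masking, Dense
from keras.models import Sequential

max_history_len, num_features, num_actions = 5, 20, 8
model = Sequential()
model.add(Masking(-1, batch_input_shape=(None, max_history_len, num_features)))
model.add(LSTM(32))
model.add(Dense(num_actions))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

X = np.random.rand(16, max_history_len, num_features)
X[:, 3:, :] = -1.0  # shorter histories are padded with the mask value
y = np.eye(num_actions)[np.random.randint(num_actions, size=16)]  # one-hot next actions
model.fit(X, y, epochs=2, batch_size=8)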
Example #9
Source File: test_l1_normalize.py    From deep_qa with Apache License 2.0
def test_squeeze_case_mask(self):
        input_length = 4
        mask_value = 3

        input_layer = Input(shape=(input_length, 1), dtype='float32', name="input")
        mask_layer = Masking(mask_value=mask_value)
        masked_input = mask_layer(input_layer)
        l1_normalize_layer = L1Normalize()
        normalized_input = l1_normalize_layer(masked_input)

        model = Model([input_layer], normalized_input)
        unnormalized_vector = np.array([[[1.0], [2.0], [3.0], [4.0]]])
        result = model.predict([unnormalized_vector])
        assert_array_almost_equal(result, np.array([[0.14285715, 0.2857143,
                                                     0, 0.5714286]]))
        assert_array_almost_equal(np.sum(result, axis=1), np.ones(1))

        # Testing general masked batched case
        unnormalized_matrix = np.array([[[1.0], [2.0], [3.0], [4.0]],
                                        [[3.0], [2.0], [3.0], [4.0]]])

        result = model.predict([unnormalized_matrix])
        assert_array_almost_equal(result, np.array([[0.14285715, 0.2857143,
                                                     0, 0.5714286],
                                                    [0, 2.0/6.0, 0, 4.0/6.0]]))
        assert_array_almost_equal(np.sum(result, axis=1), np.ones(2)) 
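The expected values follow from excluding masked entries from the L1 sum: with mask_value=3, [1, 2, 3, 4] is normalized over 1 + 2 + 4 = 7, giving [1/7, 2/7, 0, 4/7], and [3, 2, 3, 4] over 2 + 4 = 6, giving [0, 2/6, 0, 4/6]. (L1Normalize is a deep_qa layer, not a Keras built-in.)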
Example #10
Source File: layers_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_merge_mask_2d():
    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    # inputs
    input_a = layers.Input(shape=(3,))
    input_b = layers.Input(shape=(3,))

    # masks
    masked_a = layers.Masking(mask_value=0)(input_a)
    masked_b = layers.Masking(mask_value=0)(input_b)

    # three different types of merging
    merged_sum = legacy_layers.merge([masked_a, masked_b], mode='sum')
    merged_concat = legacy_layers.merge([masked_a, masked_b], mode='concat', concat_axis=1)
    merged_concat_mixed = legacy_layers.merge([masked_a, input_b], mode='concat', concat_axis=1)

    # test sum
    model_sum = models.Model([input_a, input_b], [merged_sum])
    model_sum.compile(loss='mse', optimizer='sgd')
    model_sum.fit([rand(2, 3), rand(2, 3)], [rand(2, 3)], epochs=1)

    # test concatenation
    model_concat = models.Model([input_a, input_b], [merged_concat])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], epochs=1)

    # test concatenation with masked and non-masked inputs
    model_concat = models.Model([input_a, input_b], [merged_concat_mixed])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], epochs=1) 
Example #11
Source File: baseline_lstm.py    From AVEC2018 with MIT License
def emotion_model(max_seq_len, num_features, learning_rate, num_units_1, num_units_2, bidirectional, dropout, num_targets):
    # Input layer
    inputs = Input(shape=(max_seq_len, num_features))
    
    # Masking zero input - shorter sequences
    net = Masking()(inputs)
    
    # 1st layer
    if bidirectional:
        net = Bidirectional(LSTM(num_units_1, return_sequences=True, dropout=dropout, recurrent_dropout=dropout))(net)
    else:
        net = LSTM(num_units_1, return_sequences=True, dropout=dropout, recurrent_dropout=dropout)(net)

    # 2nd layer
    if bidirectional:
        net = Bidirectional(LSTM(num_units_2, return_sequences=True, dropout=dropout, recurrent_dropout=dropout))(net)
    else:
        net = LSTM(num_units_2, return_sequences=True, dropout=dropout, recurrent_dropout=dropout)(net)
    
    # Output layer
    outputs = []
    out1 = TimeDistributed(Dense(1))(net)  # linear activation
    outputs.append(out1)
    if num_targets >= 2:
        out2 = TimeDistributed(Dense(1))(net)  # linear activation
        outputs.append(out2)
    if num_targets == 3:
        out3 = TimeDistributed(Dense(1))(net)  # linear activation
        outputs.append(out3)
    
    # Create and compile model
    rmsprop = RMSprop(lr=learning_rate)
    model   = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=rmsprop, loss=ccc_loss)  # CCC-based loss function
    return model 
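Here ccc_loss is the project's concordance correlation coefficient (CCC) loss, defined elsewhere in the AVEC2018 baseline code.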
Example #12
Source File: networks.py    From Speech_emotion_recognition_BLSTM with MIT License
def create_softmax_la_network(input_shape, nb_lstm_cells=128, nb_classes=7):
    '''
    input_shape: (time_steps, features,)
    '''

    with K.name_scope('BLSTMLayer'):
        # Bi-directional Long Short-Term Memory for learning the temporal aggregation
        input_feature = Input(shape=input_shape)
        x = Masking(mask_value=globalvars.masking_value)(input_feature)
        x = Dense(globalvars.nb_hidden_units, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(globalvars.nb_hidden_units, activation='relu')(x)
        x = Dropout(0.5)(x)
        y = Bidirectional(LSTM(nb_lstm_cells, return_sequences=True, dropout=0.5))(x)

    with K.name_scope('AttentionLayer'):
        # Logistic regression for learning the attention parameters with a standalone feature as input
        input_attention = Input(shape=(nb_lstm_cells * 2,))
        u = Dense(nb_lstm_cells * 2, activation='softmax')(input_attention)

        # To compute the final weights for the frames which sum to unity
        alpha = dot([u, y], axes=-1)  # inner prod.
        alpha = Activation('softmax')(alpha)

    with K.name_scope('WeightedPooling'):
        # Weighted pooling to get the utterance-level representation
        z = dot([alpha, y], axes=1)

    # Get posterior probability for each emotional class
    output = Dense(nb_classes, activation='softmax')(z)

    return Model(inputs=[input_attention, input_feature], outputs=output) 
Example #13
Source File: core_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_masking():
    layer_test(layers.Masking,
               kwargs={},
               input_shape=(3, 2, 3)) 
Example #14
Source File: test_loss_masking.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_masking():
    np.random.seed(1337)
    x = np.array([[[1], [1]],
                  [[0], [0]]])
    model = Sequential()
    model.add(Masking(mask_value=0, input_shape=(2, 1)))
    model.add(TimeDistributed(Dense(1, kernel_initializer='one')))
    model.compile(loss='mse', optimizer='sgd')
    y = np.array([[[1], [1]],
                  [[1], [1]]])
    loss = model.train_on_batch(x, y)
    assert loss == 0 
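The loss comes out exactly zero because the all-ones Dense kernel maps the first sample's inputs onto its targets, while the second sample consists entirely of the mask value and therefore contributes nothing to the loss.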
Example #15
Source File: test_views.py    From Fabrik with GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input2'], 'l1': net['Masking']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = masking(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Masking')


Example #16
Source File: batch_dot_test.py    From deep_qa with Apache License 2.0
def test_a_smaller_than_b(self):
        batch_size = 3
        tensor_a = numpy.random.randint(7, size=(batch_size, 5))
        tensor_b = numpy.random.randint(7, size=(batch_size, 2, 5))

        # Manually set some values to 0 here; sub-tensors that are entirely
        # equal to the mask value (0) get masked by the Masking layers below
        tensor_a[0] = 0
        tensor_b[0][1] = 0

        input_tensor_a = Input(shape=(5,))
        masked_tensor_a = Masking(mask_value=0)(input_tensor_a)
        input_tensor_b = Input(shape=(2, 5))
        masked_tensor_b = Masking(mask_value=0)(input_tensor_b)

        a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])

        a_dot_b_mask = OutputMask()(a_dot_b)
        model = Model(inputs=[input_tensor_a, input_tensor_b],
                      outputs=[a_dot_b, a_dot_b_mask])
        # a_dot_b and mask_tensor are of shape (3, 2).
        a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])
        # Test that the dot happened like we expected.
        for i in range(batch_size):
            # each dot product should be of shape (2,)
            assert_almost_equal(a_dot_b_tensor[i],
                                numpy.einsum("i,mi->m", tensor_a[i], tensor_b[i]))
        # Check that the values in the output mask are 0 where the
        # inputs were zeroed out (and hence masked) above.
        assert mask_tensor[0][0] == 0
        assert mask_tensor[0][1] == 0 
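BatchDot and OutputMask are deep_qa layers rather than Keras built-ins; OutputMask simply exposes a tensor's mask as a model output so the test can inspect it.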
Example #17
Source File: batch_dot_test.py    From deep_qa with Apache License 2.0
def test_a_larger_than_b(self):
        batch_size = 3
        tensor_a = numpy.random.randint(7, size=(batch_size, 2, 5))
        tensor_b = numpy.random.randint(7, size=(batch_size, 5))

        # Manually set some values to 0 here; sub-tensors that are entirely
        # equal to the mask value (0) get masked by the Masking layers below
        tensor_a[0][1] = 0
        tensor_b[0] = 0

        input_tensor_a = Input(shape=(2, 5))
        masked_tensor_a = Masking(mask_value=0)(input_tensor_a)
        input_tensor_b = Input(shape=(5,))
        masked_tensor_b = Masking(mask_value=0)(input_tensor_b)

        a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])

        a_dot_b_mask = OutputMask()(a_dot_b)
        model = Model(inputs=[input_tensor_a, input_tensor_b],
                      outputs=[a_dot_b, a_dot_b_mask])
        # a_dot_b and mask_tensor are of shape (3, 2).
        a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])
        # Test that the dot happened like we expected.
        for i in range(batch_size):
            # each dot product should be of shape (2,)
            assert_almost_equal(a_dot_b_tensor[i],
                                numpy.einsum("mi,i->m", tensor_a[i], tensor_b[i]))
        # Check that the values in the output mask are 0 where the
        # inputs were zeroed out (and hence masked) above.
        assert mask_tensor[0][0] == 0
        assert mask_tensor[0][1] == 0 
Example #18
Source File: batch_dot_test.py    From deep_qa with Apache License 2.0
def test_a_smaller_than_b_higher_dimension(self):
        batch_size = 3
        tensor_a = numpy.random.randint(7, size=(batch_size, 4, 5))
        tensor_b = numpy.random.randint(7, size=(batch_size, 4, 2, 5))

        # Manually set some values to 0 here; sub-tensors that are entirely
        # equal to the mask value (0) get masked by the Masking layers below
        tensor_a[0][1] = 0
        tensor_a[1][3] = 0
        tensor_b[0][1][1] = 0
        tensor_b[0][2][1] = 0

        input_tensor_a = Input(shape=(4, 5))
        masked_tensor_a = Masking(mask_value=0)(input_tensor_a)
        input_tensor_b = Input(shape=(4, 2, 5))
        masked_tensor_b = Masking(mask_value=0)(input_tensor_b)
        a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])
        a_dot_b_mask = OutputMask()(a_dot_b)
        model = Model(inputs=[input_tensor_a, input_tensor_b],
                      outputs=[a_dot_b, a_dot_b_mask])
        # a_dot_b and mask_tensor are of shape (3, 4, 2).
        a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])
        # Test that the dot happened like we expected.
        for i in range(batch_size):
            # each dot product should be of shape (4, 2)
            assert_almost_equal(a_dot_b_tensor[i],
                                numpy.einsum("ij,imj->im", tensor_a[i], tensor_b[i]))
        # Check that the values in the output mask are 0 where the
        # inputs were zeroed out (and hence masked) above.
        assert mask_tensor[0][1][0] == 0
        assert mask_tensor[0][1][1] == 0
        assert mask_tensor[0][2][1] == 0
        assert mask_tensor[1][3][0] == 0
        assert mask_tensor[1][3][1] == 0 
Example #19
Source File: batch_dot_test.py    From deep_qa with Apache License 2.0
def test_a_larger_than_b_higher_dimension(self):
        batch_size = 3
        tensor_a = numpy.random.randint(7, size=(batch_size, 4, 2, 5))
        tensor_b = numpy.random.randint(7, size=(batch_size, 4, 5))

        # Manually set some values to 0 here; sub-tensors that are entirely
        # equal to the mask value (0) get masked by the Masking layers below
        tensor_a[0][1][1] = 0
        tensor_a[0][2][1] = 0
        tensor_b[0][1] = 0
        tensor_b[1][3] = 0

        input_tensor_a = Input(shape=(4, 2, 5))
        masked_tensor_a = Masking(mask_value=0)(input_tensor_a)
        input_tensor_b = Input(shape=(4, 5))
        masked_tensor_b = Masking(mask_value=0)(input_tensor_b)
        a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])
        a_dot_b_mask = OutputMask()(a_dot_b)
        model = Model(inputs=[input_tensor_a, input_tensor_b],
                      outputs=[a_dot_b, a_dot_b_mask])
        # a_dot_b and mask_tensor are of shape (3, 4, 2).
        a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])
        # Test that the dot happened like we expected.
        for i in range(batch_size):
            # each dot product should be of shape (4, 2)
            assert_almost_equal(a_dot_b_tensor[i],
                                numpy.einsum("imj,ij->im", tensor_a[i], tensor_b[i]))
        # Check that the values in the output mask are 0 where the
        # inputs were zeroed out (and hence masked) above.
        assert mask_tensor[0][1][0] == 0
        assert mask_tensor[0][1][1] == 0
        assert mask_tensor[0][2][1] == 0
        assert mask_tensor[1][3][0] == 0
        assert mask_tensor[1][3][1] == 0 
Example #20
Source File: MTL2_RumEval_VeracityStance.py    From Multitask4Veracity with MIT License
def build_model(params, num_features):
    
    num_lstm_units = int(params['num_lstm_units'])
    num_lstm_layers = int(params['num_lstm_layers'])
    num_dense_layers = int(params['num_dense_layers'])
    num_dense_units = int(params['num_dense_units'])
    l2reg = params['l2reg']

    inputs_ab = Input(shape=(None,num_features))
    mask_ab = Masking(mask_value=0.)(inputs_ab)
    lstm_ab = LSTM(num_lstm_units, return_sequences=True)(mask_ab)
    for nl in range(num_lstm_layers-1): 
        lstm_ab2 = LSTM(num_lstm_units, return_sequences=True)(lstm_ab)
        lstm_ab = lstm_ab2
    hidden1_a = TimeDistributed(Dense(num_dense_units))(lstm_ab)
    for nl in range(num_dense_layers-1):
        hidden2_a = TimeDistributed(Dense(num_dense_units))(hidden1_a)
        hidden1_a = hidden2_a
    dropout_a = Dropout(0.5)(hidden1_a)
    softmax_a = TimeDistributed(
        Dense(4, activation='softmax',
              activity_regularizer=regularizers.l2(l2reg)))(dropout_a)
    lstm_b = LSTM(num_lstm_units, return_sequences=False)(lstm_ab)
    hidden1_b = Dense(num_dense_units)(lstm_b)
    for nl in range(num_dense_layers-1):
        hidden2_b = Dense(num_dense_units)(hidden1_b)
        hidden1_b = hidden2_b
    dropout_b = Dropout(0.5)(hidden1_b)
    softmax_b = Dense(3, activation='softmax',
                      activity_regularizer=regularizers.l2(l2reg))(dropout_b)
    model = Model(inputs=inputs_ab, outputs=[softmax_a, softmax_b])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model 
Example #21
Source File: LSTM.py    From neural_complete with MIT License
def myLSTM():
	model = Sequential()
	model.add(Masking(mask_value=0., input_shape=(hypers["timesteps"], hypers["feature_size"])))
	model.add(LSTM(hypers["lstm_hidden"]))
	model.add(Dense(7))
	model.add(Activation('softmax'))
	return model 
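The hypers dict is defined elsewhere in the project; a hypothetical stand-in so the snippet can run:

hypers = {"timesteps": 50, "feature_size": 300, "lstm_hidden": 128}  # illustrative values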
Example #22
Source File: layers_export.py    From Fabrik with GNU General Public License v3.0
def masking(layer, layer_in, layerId, tensor=True):
    out = {layerId: Masking(mask_value=layer['params']['mask_value'])}
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out
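A hypothetical call to the exporter above (the layer dict layout mirrors the Fabrik test in Example #15):

from keras.layers import Input

inp = Input(shape=(100, 5))
layer = {'params': {'mask_value': 0.0}}
out = masking(layer, [inp], 'l1')  # {'l1': <masked tensor>}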


Example #23
Source File: test_views.py    From Fabrik with GNU General Public License v3.0
def test_keras_import(self):
        model = Sequential()
        model.add(Masking(mask_value=0., input_shape=(100, 5)))
        model.build()
        self.keras_type_test(model, 0, 'Masking')


Example #24
Source File: va-rnn.py    From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License
def creat_model(input_shape, num_class):

    init = initializers.Orthogonal(gain=args.norm)
    sequence_input = Input(shape=input_shape)
    mask = Masking(mask_value=0.)(sequence_input)
    if args.aug:
        mask = augmentaion()(mask)
    X = Noise(0.075)(mask)
    if args.model[0:2]=='VA':
        # VA
        trans = LSTM(args.nhid,recurrent_activation='sigmoid',return_sequences=True,implementation=2,recurrent_initializer=init)(X)
        trans = Dropout(0.5)(trans)
        trans = TimeDistributed(Dense(3,kernel_initializer='zeros'))(trans)
        rot = LSTM(args.nhid,recurrent_activation='sigmoid',return_sequences=True,implementation=2,recurrent_initializer=init)(X)
        rot = Dropout(0.5)(rot)
        rot = TimeDistributed(Dense(3,kernel_initializer='zeros'))(rot)
        transform = Concatenate()([rot,trans])
        X = VA()([mask,transform])

    X = LSTM(args.nhid,recurrent_activation='sigmoid',return_sequences=True,implementation=2,recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = LSTM(args.nhid,recurrent_activation='sigmoid',return_sequences=True,implementation=2,recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = LSTM(args.nhid,recurrent_activation='sigmoid',return_sequences=True,implementation=2,recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = TimeDistributed(Dense(num_class))(X)
    X = MeanOverTime()(X)
    X = Activation('softmax')(X)

    model=Model(sequence_input,X)
    return model 
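Note that augmentaion, Noise, VA and MeanOverTime are custom layers defined elsewhere in this project, not Keras built-ins.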
Example #25
Source File: test_keras.py    From wtte-rnn with MIT License
def model_masking(discrete_time, init_alpha, max_beta):
    model = Sequential()

    model.add(Masking(mask_value=mask_value,
                      input_shape=(n_timesteps, n_features)))
    model.add(TimeDistributed(Dense(2)))
    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete', reduce_loss=False).loss_function
    else:
        loss = wtte.loss(kind='continuous', reduce_loss=False).loss_function

    model.compile(loss=loss, optimizer=RMSprop(
        lr=lr), sample_weight_mode='temporal')
    return model 
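Here mask_value, n_timesteps, n_features and lr are module-level constants of the test, and wtte provides the output_lambda and loss helpers from the wtte-rnn package.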
Example #26
Source File: lstm.py    From user-behavior-anomaly-detector with MIT License
def create_model(self):
        model = Sequential()
        #model.add(Masking(mask_value=0, input_shape=(1, self.settings.getint("LSTM", "max_vector_length"))))
        model.add(LSTM_CELL(self.settings.getint("LSTM", "hidden_layers"),
                            input_shape=(self.settings.getint("LSTM", "time_series"), self.settings.getint("LSTM", "max_vector_length")),
                            return_sequences=True))
        model.add(LSTM_CELL(self.settings.getint("LSTM", "hidden_layers")))
        model.add(Dropout(self.settings.getfloat("LSTM", "dropout")))
        model.add(Dense(self.settings.getint('LSTM', 'max_vector_length')))

        return model 
Example #27
Source File: char2ir_gpu.py    From plastering with MIT License
def learn_model(self, features, labels, degrade_mask, epochs=30, batch_size=None, model=None):
        print('learning model')
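        # NOTE: the leading "True or" makes this condition always true, so the
        # elif/else branches below are dead code and a fresh model is built on
        # every call.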
        if True or not model and not self.model:
            model = Sequential()
            masking = Masking(mask_value=0.0, input_shape=(features.shape[1], features.shape[2],))
            model.add(masking)
            crf = CRF(#input_shape=(features.shape[1], features.shape[2],),
                      units=labels.shape[-1],
                      sparse_target=False,
                      kernel_regularizer=keras.regularizers.l1_l2(0.0001, 0.0001),
                      #bias_regularizer=keras.regularizers.l2(0.005),
                      #chain_regularizer=keras.regularizers.l2(0.005),
                      #boundary_regularizer=keras.regularizers.l2(0.005),
                      learn_mode='marginal',
                      test_mode='marginal',
                      unroll=self.unroll_flag,
                     )
            model.add(crf)
            model.compile(optimizer=self.opt,
                          loss=crf_loss,
                          #loss=crf.loss_function,
                          metrics=[crf_accuracy],
                          #metrics=[crf.accuracy],
                          )
        elif self.model:
            model = self.model
        else:
            assert model

        #assert features.shape[0] == len(self.degrade_mask)
        #weights = self._weight_logic(features, degrade_mask)

        model.fit(features,
                  labels,
                  epochs=epochs,
                  batch_size=batch_size,
                  verbose=1,
                  #sample_weight=weights,
                  )
        return model