Python keras.layers.core.TimeDistributedDense() Examples
The following are 9 code examples of keras.layers.core.TimeDistributedDense().
You may also want to check out all available functions and classes of the module keras.layers.core.
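Before the examples, a quick orientation: in the pre-1.0 Keras API these snippets target, TimeDistributedDense applies one shared Dense layer independently at every timestep of a 3D (samples, timesteps, features) tensor. A minimal sketch of the typical sequence-labelling setup, with hypothetical dimensions and assuming a Keras 0.x-era install where these import paths exist:

from keras.models import Sequential
from keras.layers.core import Dropout, TimeDistributedDense
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM

model = Sequential()
model.add(Embedding(1000, 64, input_length=20))            # (batch, 20) token ids -> (batch, 20, 64)
model.add(LSTM(output_dim=128, return_sequences=True))     # keep the full sequence of hidden states
model.add(Dropout(0.5))
model.add(TimeDistributedDense(10, activation='softmax'))  # same Dense weights applied at each of the 20 steps
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

In Keras 1.0 and later this layer was folded into the TimeDistributed wrapper, i.e. TimeDistributed(Dense(10)).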
Example #1
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License
def create(self):
    self.textual_embedding(self, mask_zero=True)
    self.stacked_RNN(self)
    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=self._config.go_backwards))
    self.add(Dropout(0.5))
    self.add(RepeatVector(self._config.max_output_time_steps))
    self.add(self._config.recurrent_decoder(
        self._config.hidden_state_dim,
        return_sequences=True))
    self.add(Dropout(0.5))
    self.add(TimeDistributedDense(self._config.output_dim))
    self.add(Activation('softmax'))


###
# Multimodal models
###
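The RepeatVector step above is the classic fixed-size encoder-decoder recipe: encode the input sequence to a single vector (return_sequences=False), copy that vector across max_output_time_steps, decode it back into a sequence, and score every step with a shared TimeDistributedDense. Stripped of the class plumbing, the same pattern looks roughly like this (hypothetical dimensions; input_shape on the first layer assumes a ~0.3-era Keras):

from keras.models import Sequential
from keras.layers.core import Activation, RepeatVector, TimeDistributedDense
from keras.layers.recurrent import LSTM

seq2seq = Sequential()
seq2seq.add(LSTM(128, input_shape=(12, 64)))   # encode 12 steps of 64-d input into one 128-d vector
seq2seq.add(RepeatVector(7))                   # copy it across the 7 output timesteps
seq2seq.add(LSTM(128, return_sequences=True))  # decode back into a sequence
seq2seq.add(TimeDistributedDense(40))          # shared per-step projection to 40 classes
seq2seq.add(Activation('softmax'))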
Example #2
Source File: model.py From DeepSequenceClassification with GNU General Public License v2.0
def gen_model(vocab_size=100, embedding_size=128, maxlen=100, output_size=6,
              hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %
                (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Model")
    model = Sequential()
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add(Embedding(vocab_size, embedding_size, input_length=maxlen))
    logger.info("Added Embedding Layer")
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    for i in xrange(num_hidden_layers):
        model.add(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid',
                            inner_activation='hard_sigmoid', return_sequences=True))
        logger.info("Added %s Layer" % RNN_LAYER_TYPE)
        model.add(Dropout(0.5))
        logger.info("Added Dropout Layer")
    model.add(RNN_CLASS(output_dim=output_size, activation='sigmoid',
                        inner_activation='hard_sigmoid', return_sequences=True))
    logger.info("Added %s Layer" % RNN_LAYER_TYPE)
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    model.add(TimeDistributedDense(output_size, activation="softmax"))
    logger.info("Added TimeDistributedDense Layer")
    logger.info("Created model with following config:\n%s" % json.dumps(model.get_config(), indent=4))
    logger.info("Compiling model with optimizer %s" % optimizer)  # `optimizer` is a module-level name in model.py
    start_time = time.time()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model
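A rough usage sketch with hypothetical shapes (note that gen_model reads optimizer as a module-level name in model.py, so something like optimizer = 'adam' must be defined first):

import numpy as np

model = gen_model(vocab_size=50, embedding_size=32, maxlen=20,
                  output_size=6, hidden_layer_size=64, RNN_LAYER_TYPE="GRU")
X = np.random.randint(0, 50, size=(100, 20))             # 100 sequences of 20 token ids
y = np.eye(6)[np.random.randint(0, 6, size=(100, 20))]   # one-hot label per timestep: (100, 20, 6)
model.fit(X, y, batch_size=16, nb_epoch=2)               # nb_epoch is the old-Keras spelling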
Example #3
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License
def create(self):
    assert self._config.merge_mode in ['max', 'ave', 'sum'], \
        'Merge mode of this model is either max, ave or sum'
    self.textual_embedding(self, mask_zero=False)
    self.stacked_RNN(self)
    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=True,
        go_backwards=self._config.go_backwards))
    self.add(Dropout(0.5))
    self.add(TimeDistributedDense(self._config.output_dim))
    self.temporal_pooling(self)
    self.add(Activation('softmax'))
Example #4
Source File: model_zoo.py From visual_turing_test-tutorial with MIT License
def create(self):
    language_model = Sequential()
    self.textual_embedding(language_model, mask_zero=True)
    self.stacked_RNN(language_model)
    language_model.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=self._config.go_backwards))
    self.language_model = language_model

    visual_model_factory = \
        select_sequential_visual_model[self._config.trainable_perception_name](
            self._config.visual_dim)
    visual_model = visual_model_factory.create()
    visual_dimensionality = visual_model_factory.get_dimensionality()
    self.visual_embedding(visual_model, visual_dimensionality)
    #visual_model = Sequential()
    #self.visual_embedding(visual_model)
    self.visual_model = visual_model

    if self._config.multimodal_merge_mode == 'dot':
        self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,), (1,)]))
    else:
        self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

    self.add(Dropout(0.5))
    self.add(Dense(self._config.output_dim))
    self.add(RepeatVector(self._config.max_output_time_steps))
    self.add(self._config.recurrent_decoder(
        self._config.hidden_state_dim,
        return_sequences=True))
    self.add(Dropout(0.5))
    self.add(TimeDistributedDense(self._config.output_dim))
    self.add(Activation('softmax'))


###
# Graph-based models
###
Example #5
Source File: check_autoencoder.py From CAPTCHA-breaking with MIT License
def build_lstm_autoencoder(autoencoder, X_train, X_test):
    # Insert a singleton timestep axis: (samples, features) -> (samples, 1, features)
    X_train = X_train[:, np.newaxis, :]
    X_test = X_test[:, np.newaxis, :]
    print("Modified X_train: ", X_train.shape)
    print("Modified X_test: ", X_test.shape)

    # The TimeDistributedDense isn't really necessary, however you need a lot of GPU memory to do 784x394-394x784
    # (`input_dim` and `activation` are module-level names in the original script)
    autoencoder.add(TimeDistributedDense(input_dim, 16))
    autoencoder.add(AutoEncoder(encoder=LSTM(16, 8, activation=activation, return_sequences=True),
                                decoder=LSTM(8, input_dim, activation=activation, return_sequences=True),
                                output_reconstruction=False))
    return autoencoder, X_train, X_test
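This file predates the single-argument API: layers take (input_dim, output_dim) positionally, so TimeDistributedDense(input_dim, 16) maps each timestep's input_dim-dimensional vector to 16 units. A hedged usage sketch with hypothetical values, written as if everything lives in one script the way the original file does:

import numpy as np
from keras.models import Sequential

input_dim = 784         # module-level globals the function reads
activation = 'sigmoid'
X_train = np.random.random((256, input_dim)).astype('float32')
X_test = np.random.random((64, input_dim)).astype('float32')

autoencoder = Sequential()
autoencoder, X_train, X_test = build_lstm_autoencoder(autoencoder, X_train, X_test)
autoencoder.compile(loss='mean_squared_error', optimizer='rmsprop')
autoencoder.fit(X_train, X_train, nb_epoch=1, batch_size=32)   # train to reconstruct the input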
Example #6
Source File: test_core.py From CAPTCHA-breaking with MIT License
def test_time_dist_dense(self):
    layer = core.TimeDistributedDense(10, 10)
    self._runner(layer)
Example #7
Source File: test_tasks.py From CAPTCHA-breaking with MIT License
def test_seq_to_seq(self):
    print('sequence to sequence data:')
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200,
                                                         input_shape=(5, 10),
                                                         output_shape=(5, 10),
                                                         classification=False)
    print('X_train:', X_train.shape)
    print('X_test:', X_test.shape)
    print('y_train:', y_train.shape)
    print('y_test:', y_test.shape)

    model = Sequential()
    model.add(TimeDistributedDense(X_train.shape[-1], y_train.shape[-1]))
    model.compile(loss='hinge', optimizer='rmsprop')
    history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16,
                        validation_data=(X_test, y_test), verbose=2)
    self.assertTrue(history.history['val_loss'][-1] < 0.75)
Example #8
Source File: model.py From DeepSequenceClassification with GNU General Public License v2.0
def gen_model_brnn(vocab_size=100, embedding_size=128, maxlen=100, output_size=6,
                   hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %
                (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Graph model for Bidirectional RNN")
    model = Graph()
    model.add_input(name='input', input_shape=(maxlen,), dtype=int)
    logger.info("Added Input node")
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add_node(Embedding(vocab_size, embedding_size, input_length=maxlen), name='embedding', input='input')
    logger.info("Added Embedding node")
    model.add_node(Dropout(0.5), name="dropout_0", input="embedding")
    logger.info("Added Dropout Node")
    for i in xrange(num_hidden_layers):
        last_dropout_name = "dropout_%s" % i
        forward_name, backward_name, dropout_name = ["%s_%s" % (k, i + 1) for k in ["forward", "backward", "dropout"]]
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid',
                                 inner_activation='hard_sigmoid', return_sequences=True),
                       name=forward_name, input=last_dropout_name)
        logger.info("Added %s forward node[%s]" % (RNN_LAYER_TYPE, i + 1))
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid',
                                 inner_activation='hard_sigmoid', return_sequences=True, go_backwards=True),
                       name=backward_name, input=last_dropout_name)
        logger.info("Added %s backward node[%s]" % (RNN_LAYER_TYPE, i + 1))
        model.add_node(Dropout(0.5), name=dropout_name, inputs=[forward_name, backward_name])
        logger.info("Added Dropout node[%s]" % (i + 1))
    model.add_node(TimeDistributedDense(output_size, activation="softmax"), name="tdd", input=dropout_name)
    logger.info("Added TimeDistributedDense node")
    model.add_output(name="output", input="tdd")
    logger.info("Added Output node")
    logger.info("Created model with following config:\n%s" % model.get_config())
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(optimizer, {"output": 'categorical_crossentropy'})
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model
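Because this returns a Graph model, training uses the old dict-based API, with arrays keyed by the node names declared above. A hypothetical call with made-up shapes:

import numpy as np

model = gen_model_brnn(vocab_size=50, embedding_size=32, maxlen=20, output_size=6)
X = np.random.randint(0, 50, size=(100, 20))             # token ids
y = np.eye(6)[np.random.randint(0, 6, size=(100, 20))]   # one-hot labels: (100, 20, 6)
model.fit({"input": X, "output": y}, batch_size=16, nb_epoch=2)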
Example #9
Source File: model.py From DeepSequenceClassification with GNU General Public License v2.0
def gen_model_brnn_multitask(vocab_size=100, embedding_size=128, maxlen=100, output_size=[6, 96],
                             hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %
                (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Graph model for Bidirectional RNN")
    model = Graph()
    model.add_input(name='input', input_shape=(maxlen,), dtype=int)
    logger.info("Added Input node")
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add_node(Embedding(vocab_size, embedding_size, input_length=maxlen, mask_zero=True), name='embedding', input='input')
    logger.info("Added Embedding node")
    model.add_node(Dropout(0.5), name="dropout_0", input="embedding")
    logger.info("Added Dropout Node")
    for i in xrange(num_hidden_layers):
        last_dropout_name = "dropout_%s" % i
        forward_name, backward_name, dropout_name = ["%s_%s" % (k, i + 1) for k in ["forward", "backward", "dropout"]]
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid',
                                 inner_activation='hard_sigmoid', return_sequences=True),
                       name=forward_name, input=last_dropout_name)
        logger.info("Added %s forward node[%s]" % (RNN_LAYER_TYPE, i + 1))
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid',
                                 inner_activation='hard_sigmoid', return_sequences=True, go_backwards=True),
                       name=backward_name, input=last_dropout_name)
        logger.info("Added %s backward node[%s]" % (RNN_LAYER_TYPE, i + 1))
        model.add_node(Dropout(0.5), name=dropout_name, inputs=[forward_name, backward_name])
        logger.info("Added Dropout node[%s]" % (i + 1))
    output_names = []
    for i, output_task_size in enumerate(output_size):
        tdd_name, output_name = "tdd_%s" % i, "output_%s" % i
        model.add_node(TimeDistributedDense(output_task_size, activation="softmax"), name=tdd_name, input=dropout_name)
        logger.info("Added TimeDistributedDense node %s with output_size %s" % (i, output_task_size))
        model.add_output(name=output_name, input=tdd_name)
        output_names.append(output_name)
    logger.info("Added Output node")
    logger.info("Created model with following config:\n%s" % model.get_config())
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(optimizer, {k: 'categorical_crossentropy' for k in output_names})
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model, output_names
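The multitask variant declares one output node per task, so the training dict gains one label array per entry of output_size. A hypothetical two-task call using the returned output_names:

import numpy as np

model, output_names = gen_model_brnn_multitask(vocab_size=50, maxlen=20, output_size=[6, 96])
X = np.random.randint(0, 50, size=(100, 20))
data = {"input": X}
for name, n_classes in zip(output_names, [6, 96]):   # e.g. coarse and fine label sets
    data[name] = np.eye(n_classes)[np.random.randint(0, n_classes, size=(100, 20))]
model.fit(data, batch_size=16, nb_epoch=2)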