Python tensorflow.keras.layers.Bidirectional() Examples
The following are 7 code examples of tensorflow.keras.layers.Bidirectional().
Each example links back to the original project and source file it was taken from.
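Before diving into the examples, here is a minimal sketch of the wrapper itself; the layer size and input shape are arbitrary illustration values, not taken from any of the projects below.

    import tensorflow as tf
    from tensorflow.keras.layers import Bidirectional, LSTM

    # Bidirectional wraps any Keras RNN layer and runs it over the input in
    # both directions; by default the two outputs are concatenated
    # (merge_mode='concat'), doubling the feature dimension.
    inputs = tf.keras.Input(shape=(10, 8))     # (timesteps, features)
    outputs = Bidirectional(LSTM(16))(inputs)  # output shape: (batch, 32)
    model = tf.keras.Model(inputs, outputs)
    model.summary()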
Example #1
Source File: mpm_siamese_network.py From DeepPavlov with Apache License 2.0
    def create_lstm_layer_2(self):
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        # Aggregation Bi-LSTM: return_sequences=False collapses each sequence
        # into a single vector; 'concat' joins the two directions.
        bioutp = Bidirectional(LSTM(self.aggregation_dim,
                                    input_shape=(self.max_sequence_length,
                                                 8 * self.perspective_num,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=False),
                               merge_mode='concat',
                               name="sentence_embedding")
        return bioutp
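Since merge_mode='concat' and return_sequences=False, the layer above collapses each input sequence into a single vector of size 2 * self.aggregation_dim. A standalone shape check with made-up dimensions (none of the names or values below come from DeepPavlov):

    import numpy as np
    from tensorflow.keras.layers import Bidirectional, LSTM

    aggregation_dim = 64                        # hypothetical value
    layer = Bidirectional(LSTM(aggregation_dim, return_sequences=False),
                          merge_mode='concat')
    x = np.zeros((2, 5, 8), dtype='float32')    # (batch, timesteps, features)
    print(layer(x).shape)                       # (2, 128): forward + backward concatenated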
Example #2
Source File: autoencoder.py From alibi-detect with Apache License 2.0
    def __init__(self, latent_dim: int, name: str = 'encoder_lstm') -> None:
        """
        Bidirectional LSTM encoder.

        Parameters
        ----------
        latent_dim
            Latent dimension. Must be an even number given the bidirectional encoder.
        name
            Name of encoder.
        """
        super(EncoderLSTM, self).__init__(name=name)
        self.encoder_net = Bidirectional(LSTM(latent_dim // 2,
                                              return_state=True,
                                              return_sequences=True))
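With return_state=True inside the wrapper, calling the encoder returns five tensors: the output sequence plus the hidden and cell state of each direction. The halved unit count is also why latent_dim must be even, since concatenating the two directions restores the full dimension. A quick shape check under assumed dimensions:

    import numpy as np
    from tensorflow.keras.layers import Bidirectional, LSTM

    latent_dim = 32                                  # must be even
    enc = Bidirectional(LSTM(latent_dim // 2,
                             return_state=True, return_sequences=True))
    x = np.zeros((2, 7, 4), dtype='float32')
    seq, fwd_h, fwd_c, bwd_h, bwd_c = enc(x)
    print(seq.shape)                                 # (2, 7, 32)
    print(fwd_h.shape, bwd_c.shape)                  # (2, 16) (2, 16)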
Example #3
Source File: mpm_siamese_network.py From DeepPavlov with Apache License 2.0
    def create_lstm_layer_1(self):
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        # Context Bi-LSTM: merge_mode=None returns the forward and backward
        # sequences as two separate tensors instead of concatenating them.
        bioutp = Bidirectional(LSTM(self.hidden_dim,
                                    input_shape=(self.max_sequence_length,
                                                 self.embedding_dim,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=True),
                               merge_mode=None)
        return bioutp
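In contrast to Example #1, merge_mode=None keeps the two directions separate: the wrapper returns a list of two tensors instead of one concatenated tensor. A minimal illustration with arbitrary sizes:

    import numpy as np
    from tensorflow.keras.layers import Bidirectional, LSTM

    layer = Bidirectional(LSTM(8, return_sequences=True), merge_mode=None)
    x = np.zeros((2, 5, 3), dtype='float32')
    forward, backward = layer(x)    # two (2, 5, 8) tensors, not one (2, 5, 16)
    print(forward.shape, backward.shape)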
Example #4
Source File: basic.py From autokeras with MIT License
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        shape = input_node.shape.as_list()
        if len(shape) != 3:
            raise ValueError(
                'Expect the input tensor to have '
                'exactly 3 dimensions for rnn models, '
                'but got {shape}'.format(shape=input_node.shape))
        feature_size = shape[-1]
        output_node = input_node

        bidirectional = self.bidirectional
        if bidirectional is None:
            bidirectional = hp.Boolean('bidirectional', default=True)
        layer_type = self.layer_type or hp.Choice(
            'layer_type', ['gru', 'lstm'], default='lstm')
        num_layers = self.num_layers or hp.Choice(
            'num_layers', [1, 2, 3], default=2)
        rnn_layers = {
            'gru': layers.GRU,
            'lstm': layers.LSTM
        }
        in_layer = rnn_layers[layer_type]
        for i in range(num_layers):
            return_sequences = True
            if i == num_layers - 1:
                return_sequences = self.return_sequences
            if bidirectional:
                output_node = layers.Bidirectional(
                    in_layer(feature_size,
                             return_sequences=return_sequences))(output_node)
            else:
                output_node = in_layer(
                    feature_size,
                    return_sequences=return_sequences)(output_node)
        return output_node
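The same stacking pattern works outside AutoKeras with plain Python values standing in for the hp choices; everything below (sizes, shapes, the final return_sequences=False) is illustrative rather than an AutoKeras default.

    import tensorflow as tf
    from tensorflow.keras import layers

    bidirectional, layer_type, num_layers = True, 'lstm', 2  # stand-ins for hp choices
    in_layer = {'gru': layers.GRU, 'lstm': layers.LSTM}[layer_type]

    inputs = tf.keras.Input(shape=(20, 16))        # (timesteps, feature_size)
    output_node = inputs
    for i in range(num_layers):
        return_sequences = i < num_layers - 1      # last layer emits a single vector here
        rnn = in_layer(16, return_sequences=return_sequences)
        if bidirectional:
            output_node = layers.Bidirectional(rnn)(output_node)
        else:
            output_node = rnn(output_node)
    model = tf.keras.Model(inputs, output_node)
    model.summary()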
Example #5
Source File: chemnet_models.py From deepchem with MIT License
    def _build_graph(self):
        """Build the model."""
        smiles_seqs = Input(dtype=tf.int32, shape=(self.max_seq_len,), name='Input')
        rnn_input = tf.keras.layers.Embedding(
            input_dim=len(self.char_to_idx),
            output_dim=self.embedding_dim)(smiles_seqs)

        if self.use_conv:
            rnn_input = Conv1D(
                filters=self.filters,
                kernel_size=self.kernel_size,
                strides=self.strides,
                activation=tf.nn.relu,
                name='Conv1D')(rnn_input)

        rnn_embeddings = rnn_input
        for idx, rnn_type in enumerate(self.rnn_types[:-1]):
            rnn_layer = RNN_DICT[rnn_type]
            layer = rnn_layer(units=self.rnn_sizes[idx], return_sequences=True)
            if self.use_bidir:
                layer = Bidirectional(layer)
            rnn_embeddings = layer(rnn_embeddings)

        # Last layer sequences not returned.
        layer = RNN_DICT[self.rnn_types[-1]](units=self.rnn_sizes[-1])
        if self.use_bidir:
            layer = Bidirectional(layer)
        rnn_embeddings = layer(rnn_embeddings)

        if self.mode == "classification":
            logits = Dense(self.n_tasks * self.n_classes)(rnn_embeddings)
            logits = Reshape((self.n_tasks, self.n_classes))(logits)
            if self.n_classes == 2:
                output = Activation(activation='sigmoid')(logits)
                loss = SigmoidCrossEntropy()
            else:
                output = Softmax()(logits)
                loss = SoftmaxCrossEntropy()
            outputs = [output, logits]
            output_types = ['prediction', 'loss']
        else:
            output = Dense(self.n_tasks * 1, name='Dense')(rnn_embeddings)
            output = Reshape((self.n_tasks, 1), name='Reshape')(output)
            outputs = [output]
            output_types = ['prediction']
            loss = L2Loss()

        model = tf.keras.Model(inputs=[smiles_seqs], outputs=outputs)
        return model, loss, output_types
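Stripped of the DeepChem plumbing, the embedding-to-bidirectional-RNN core of this model looks roughly like the sketch below; the vocabulary size, embedding width, filter count, and layer sizes are invented for illustration.

    import tensorflow as tf
    from tensorflow.keras.layers import (Bidirectional, Conv1D, Dense,
                                         Embedding, Input, LSTM)

    max_seq_len, vocab_size = 100, 35               # illustrative values
    smiles_seqs = Input(dtype=tf.int32, shape=(max_seq_len,))
    x = Embedding(input_dim=vocab_size, output_dim=50)(smiles_seqs)
    x = Conv1D(filters=192, kernel_size=3, activation='relu')(x)
    x = Bidirectional(LSTM(224, return_sequences=True))(x)
    x = Bidirectional(LSTM(384))(x)                 # final RNN layer drops the time axis
    output = Dense(1)(x)                            # single regression task
    model = tf.keras.Model(inputs=smiles_seqs, outputs=output)
    model.summary()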
Example #6
Source File: model.py From CRNN.tf2 with MIT License
def build_model(num_classes, image_width=None, channels=1):
    """build CNN-RNN model"""

    def vgg_style(input_tensor):
        """
        The original feature extraction structure from CRNN paper.
        Related paper: https://ieeexplore.ieee.org/abstract/document/7801919
        """
        x = layers.Conv2D(
            filters=64, kernel_size=3, padding='same', activation='relu')(input_tensor)
        x = layers.MaxPool2D(pool_size=2, padding='same')(x)
        x = layers.Conv2D(
            filters=128, kernel_size=3, padding='same', activation='relu')(x)
        x = layers.MaxPool2D(pool_size=2, padding='same')(x)
        x = layers.Conv2D(filters=256, kernel_size=3, padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu')(x)
        x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 1), padding='same')(x)
        x = layers.Conv2D(filters=512, kernel_size=3, padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(filters=512, kernel_size=3, padding='same', activation='relu')(x)
        x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 1), padding='same')(x)
        x = layers.Conv2D(filters=512, kernel_size=2)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        return x

    img_input = keras.Input(shape=(32, image_width, channels))
    x = vgg_style(img_input)
    x = layers.Reshape((-1, 512))(x)
    x = layers.Bidirectional(layers.LSTM(units=256, return_sequences=True))(x)
    x = layers.Bidirectional(layers.LSTM(units=256, return_sequences=True))(x)
    x = layers.Dense(units=num_classes)(x)
    return keras.Model(inputs=img_input, outputs=x, name='CRNN')
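The bridge to the recurrent half is layers.Reshape((-1, 512)): after the VGG-style stack has pooled the 32-pixel height down to 1, the remaining width dimension becomes the time axis. Assuming the source file's usual imports (from tensorflow import keras, from tensorflow.keras import layers), a quick shape check might look like:

    import numpy as np

    model = build_model(num_classes=80, image_width=100)
    x = np.zeros((1, 32, 100, 1), dtype='float32')
    # Width 100 shrinks to 24 time steps after the pooling stages and the
    # final valid-padded conv, so this prints (1, 24, 80).
    print(model(x).shape)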
Example #7
Source File: models.py From medaka with Mozilla Public License 2.0
def build_model(feature_len, num_classes, gru_size=128,
                classify_activation='softmax', time_steps=None,
                allow_cudnn=True):
    """Build a bidirectional GRU model with CuDNNGRU support.

    CuDNNGRU implementation is claimed to give speed-up on GPU of 7x.
    The function will build a model capable of running on GPU with
    CuDNNGRU provided a) a GPU is present, and b) the option has been
    allowed by the `allow_cudnn` argument; otherwise a compatible
    (but not CuDNNGRU-accelerated) model is built.

    :param feature_len: int, number of features for each pileup column.
    :param num_classes: int, number of output class labels.
    :param gru_size: int, size of each GRU layer.
    :param classify_activation: str, activation to use in classification layer.
    :param time_steps: int, number of pileup columns in a sample.
    :param allow_cudnn: bool, opt-in to cudnn when using a GPU.

    :returns: `keras.models.Sequential` object.
    """
    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, GRU, CuDNNGRU, Bidirectional

    # Determine whether to use CuDNNGRU or not
    cudnn = False
    if tf.test.is_gpu_available(cuda_only=True) and allow_cudnn:
        cudnn = True

    logger.info("Building model with cudnn optimization: {}".format(cudnn))

    model = Sequential()
    input_shape = (time_steps, feature_len)
    for i in [1, 2]:
        name = 'gru{}'.format(i)
        # Options here are to be mutually compatible: train with CuDNNGRU
        # but allow inference with GRU (on cpu).
        # https://gist.github.com/bzamecnik/bd3786a074f8cb891bc2a397343070f1
        if cudnn:
            gru = CuDNNGRU(gru_size, return_sequences=True, name=name)
        else:
            gru = GRU(
                gru_size, reset_after=True, recurrent_activation='sigmoid',
                return_sequences=True, name=name)
        model.add(Bidirectional(gru, input_shape=input_shape))

    # see keras #10417 for why we specify input shape
    model.add(Dense(
        num_classes, activation=classify_activation, name='classify',
        input_shape=(time_steps, 2 * gru_size)))

    return model
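CuDNNGRU and tf.test.is_gpu_available are TensorFlow 1.x APIs. In TensorFlow 2.x the plain GRU layer dispatches to the cuDNN kernel on its own whenever its arguments permit (the defaults reset_after=True and recurrent_activation='sigmoid' among them), so a rough 2.x equivalent of the stack above reduces to the following sketch with illustrative sizes:

    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, GRU, Bidirectional

    model = Sequential([
        # TF 2.x GRU uses the fused cuDNN kernel automatically on a GPU when
        # the layer arguments are cuDNN-compatible; the same layer runs on CPU.
        Bidirectional(GRU(128, return_sequences=True), input_shape=(None, 10)),
        Bidirectional(GRU(128, return_sequences=True)),
        Dense(5, activation='softmax', name='classify'),
    ])
    model.summary()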