Python layers.LSTM Examples

The following are code examples of layers.LSTM(), drawn from 30 usages of graphs.py across public projects. All of the usages come from the adversarial_text code in tensorflow/models and its forks, so each distinct snippet is shown once, followed by a list of the repositories that carry a verbatim copy.
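For orientation, this is how the layer is constructed in every snippet below. The concrete values are hypothetical stand-ins for the FLAGS used in the original code, and the import line assumes the adversarial_text layers module is on the path:

import layers as layers_lib  # the adversarial_text layers module

# Hypothetical values standing in for FLAGS.rnn_cell_size,
# FLAGS.rnn_num_layers, and FLAGS.keep_prob_lstm_out.
rnn_cell_size = 512       # hidden units per LSTM cell
rnn_num_layers = 1        # number of stacked LSTM layers
keep_prob_lstm_out = 0.5  # dropout keep probability on LSTM outputs

lstm = layers_lib.LSTM(rnn_cell_size, rnn_num_layers, keep_prob_lstm_out)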
Example #1
Source File: graphs.py From models with Apache License 2.0

def language_model_graph(self, compute_loss=True):
  """Constructs LM graph from inputs to LM loss.

  * Caches the VatxtInput object in `self.lm_inputs`
  * Caches tensors: `lm_embedded`

  Args:
    compute_loss: bool, whether to compute and return the loss or stop
      after the LSTM computation.

  Returns:
    loss: scalar float.
  """
  inputs = _inputs('train', pretrain=True)
  self.lm_inputs = inputs
  return self._lm_loss(inputs, compute_loss=compute_loss)

Verbatim copies of this snippet appear in graphs.py of: object_detection_with_tensorflow (MIT License), object_detection_kitti (Apache License 2.0), g-tensorflow-models (Apache License 2.0), hands-detection (MIT License), Gun-Detector (Apache License 2.0), yolo_v2 (Apache License 2.0), multilabel-image-classification-tensorflow (MIT License), and DOTA_models (Apache License 2.0).
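A hypothetical driver for this method follows. The class name VatxtModel is inferred from the VatxtBidirModel subclass shown later, and the optimizer choice is an assumption, not taken from the source:

import tensorflow as tf  # TF 1.x, as used throughout these snippets

# Hypothetical pretraining sketch; VatxtModel is assumed to come from
# graphs.py, and AdamOptimizer with lr=0.001 is an arbitrary choice.
model = VatxtModel()
lm_loss = model.language_model_graph(compute_loss=True)
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(
    lm_loss, global_step=model.global_step)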
Example #2
Source File: graphs.py From multilabel-image-classification-tensorflow with MIT License

def __init__(self, cl_logits_input_dim=None):
  self.global_step = tf.train.get_or_create_global_step()
  self.vocab_freqs = _get_vocab_freqs()

  # Cache VatxtInput objects
  self.cl_inputs = None
  self.lm_inputs = None

  # Cache intermediate Tensors that are reused
  self.tensors = {}

  # Construct layers which are reused in constructing the LM and
  # Classification graphs. Instantiating them all once here ensures that
  # variable reuse works correctly.
  self.layers = {}
  self.layers['embedding'] = layers_lib.Embedding(
      FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
      self.vocab_freqs, FLAGS.keep_prob_emb)
  self.layers['lstm'] = layers_lib.LSTM(
      FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
  self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
      FLAGS.vocab_size,
      FLAGS.num_candidate_samples,
      self.vocab_freqs,
      name='LM_loss')

  cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
  self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
      [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
      FLAGS.num_classes, FLAGS.keep_prob_cl_hidden)

Verbatim copies of this snippet appear in graphs.py of: models (Apache License 2.0), g-tensorflow-models (Apache License 2.0), and object_detection_with_tensorflow (MIT License).
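The cached layer is invoked later through self.layers['lstm']. The following call pattern is only a sketch, under the assumption that the LSTM object is callable on an embedded sequence as in the adversarial_text layers module; all names and shapes are invented for illustration:

# Sketch of the assumed call pattern (names and shapes illustrative only):
# embedded:      [batch, timesteps, embedding_dims] float32 Tensor
# initial_state: LSTM state carried over from a previous unroll
# seq_length:    [batch] int Tensor of true sequence lengths
lstm_out, next_state = self.layers['lstm'](embedded, initial_state, seq_length)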
Example #3
Source File: graphs.py From models with Apache License 2.0

def language_model_graph(self, compute_loss=True):
  """Constructs forward and reverse LM graphs from inputs to LM losses.

  * Caches the VatxtInput objects in `self.lm_inputs`
  * Caches tensors: `lm_embedded`, `lm_embedded_reverse`

  Args:
    compute_loss: bool, whether to compute and return the loss or stop
      after the LSTM computation.

  Returns:
    loss: scalar float, sum of forward and reverse losses.
  """
  inputs = _inputs('train', pretrain=True, bidir=True)
  self.lm_inputs = inputs
  f_inputs, r_inputs = inputs
  f_loss = self._lm_loss(f_inputs, compute_loss=compute_loss)
  r_loss = self._lm_loss(
      r_inputs,
      emb_key='lm_embedded_reverse',
      lstm_layer='lstm_reverse',
      lm_loss_layer='lm_loss_reverse',
      loss_name='lm_loss_reverse',
      compute_loss=compute_loss)
  if compute_loss:
    return f_loss + r_loss

Note that when compute_loss is False the method falls off the end and implicitly returns None; in that case the useful outputs are the cached `self.lm_inputs` and the cached embedding tensors.

Verbatim copies of this snippet appear in graphs.py of: multilabel-image-classification-tensorflow (MIT License), g-tensorflow-models (Apache License 2.0), object_detection_with_tensorflow (MIT License), object_detection_kitti (Apache License 2.0), hands-detection (MIT License), and Gun-Detector (Apache License 2.0).
Example #4
Source File: graphs.py From models with Apache License 2.0

def __init__(self):
  super(VatxtBidirModel,
        self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)

  # Reverse LSTM and LM loss for bidirectional models
  self.layers['lstm_reverse'] = layers_lib.LSTM(
      FLAGS.rnn_cell_size,
      FLAGS.rnn_num_layers,
      FLAGS.keep_prob_lstm_out,
      name='LSTM_Reverse')
  self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
      FLAGS.vocab_size,
      FLAGS.num_candidate_samples,
      self.vocab_freqs,
      name='LM_loss_reverse')

Verbatim copies of this snippet appear in graphs.py of: g-tensorflow-models (Apache License 2.0), multilabel-image-classification-tensorflow (MIT License), object_detection_with_tensorflow (MIT License), object_detection_kitti (Apache License 2.0), hands-detection (MIT License), and Gun-Detector (Apache License 2.0).
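The doubled cl_logits_input_dim is best read as the classifier consuming the forward and reverse LSTM outputs concatenated together; here is a minimal shape sketch under that assumption, with the batch size and cell size invented:

import tensorflow as tf

rnn_cell_size = 512  # hypothetical value for FLAGS.rnn_cell_size
f_out = tf.zeros([8, rnn_cell_size])  # dummy forward LSTM final output
r_out = tf.zeros([8, rnn_cell_size])  # dummy reverse LSTM final output
cl_input = tf.concat([f_out, r_out], axis=1)  # shape [8, rnn_cell_size * 2]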
Example #5
Source File: graphs.py From DOTA_models with Apache License 2.0

This variant differs from Example #2 only in the global-step helper: it uses tf.contrib.framework.get_or_create_global_step(), the older alias that was later deprecated in favor of tf.train.get_or_create_global_step().

def __init__(self, cl_logits_input_dim=None):
  self.global_step = tf.contrib.framework.get_or_create_global_step()
  self.vocab_freqs = _get_vocab_freqs()

  # Cache VatxtInput objects
  self.cl_inputs = None
  self.lm_inputs = None

  # Cache intermediate Tensors that are reused
  self.tensors = {}

  # Construct layers which are reused in constructing the LM and
  # Classification graphs. Instantiating them all once here ensures that
  # variable reuse works correctly.
  self.layers = {}
  self.layers['embedding'] = layers_lib.Embedding(
      FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
      self.vocab_freqs, FLAGS.keep_prob_emb)
  self.layers['lstm'] = layers_lib.LSTM(
      FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
  self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
      FLAGS.vocab_size,
      FLAGS.num_candidate_samples,
      self.vocab_freqs,
      name='LM_loss')

  cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
  self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
      [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
      FLAGS.num_classes, FLAGS.keep_prob_cl_hidden)

Verbatim copies of this snippet appear in graphs.py of: object_detection_kitti (Apache License 2.0) and hands-detection (MIT License).
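A small TF 1.x-only sketch of why the two helpers are interchangeable here (this reasoning is mine, not from the source): both look up the global step in the graph's collections and create it only when absent, so within one graph they should yield the same variable.

import tensorflow as tf  # TF 1.x only; tf.contrib was removed in TF 2.x

step_a = tf.train.get_or_create_global_step()
step_b = tf.contrib.framework.get_or_create_global_step()
assert step_a is step_b  # same underlying global_step variable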