Python layers.SoftmaxLoss() Examples

The following are 18 code examples of layers.SoftmaxLoss(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module layers, or try the search function.
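All 18 examples below come from copies of the same graphs.py (TensorFlow's adversarial_text model, carried along in each listed project), so the constructor call is uniform: SoftmaxLoss takes the vocabulary size, the number of candidate samples for sampled softmax, and the per-token vocabulary frequencies. Below is a minimal, hypothetical sketch of that call with placeholder values standing in for the FLAGS used in the real code; treat the import path and every literal as an assumption, not code from any of these projects.

# Hedged sketch: constructing a SoftmaxLoss layer with placeholder values.
import layers as layers_lib  # the adversarial_text layers module

vocab_freqs = [320, 120, 80, 40]  # per-token corpus frequencies (placeholders)
lm_loss_layer = layers_lib.SoftmaxLoss(
    len(vocab_freqs),  # vocab_size: size of the output vocabulary
    2,                 # num_candidate_samples used for sampled softmax
    vocab_freqs,       # frequencies that drive the candidate sampler
    name='LM_loss')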
Example #1
Source File: graphs.py    From object_detection_kitti with Apache License 2.0
def __init__(self):
    super(VatxtBidirModel,
          self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)

    # Reverse LSTM and LM loss for bidirectional models
    self.layers['lstm_reverse'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size,
        FLAGS.rnn_num_layers,
        FLAGS.keep_prob_lstm_out,
        name='LSTM_Reverse')
    self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss_reverse') 
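The cl_logits_input_dim=FLAGS.rnn_cell_size * 2 passed to the base class reflects that, in the bidirectional model, the classifier consumes the forward and reverse LSTM outputs concatenated along the feature axis, hence the doubled input dimension. Here is a hedged illustration of that shape arithmetic with placeholder dimensions; none of these names appear on this page:

import tensorflow as tf

batch, time, rnn_cell_size = 8, 20, 512            # placeholder dimensions
fwd_out = tf.zeros([batch, time, rnn_cell_size])   # stand-in for forward LSTM output
rev_out = tf.zeros([batch, time, rnn_cell_size])   # stand-in for reverse LSTM output
cl_input = tf.concat([fwd_out, rev_out], axis=-1)  # feature dim = rnn_cell_size * 2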
Example #2
Source File: graphs.py    From multilabel-image-classification-tensorflow with MIT License
def __init__(self):
    super(VatxtBidirModel,
          self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)

    # Reverse LSTM and LM loss for bidirectional models
    self.layers['lstm_reverse'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size,
        FLAGS.rnn_num_layers,
        FLAGS.keep_prob_lstm_out,
        name='LSTM_Reverse')
    self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss_reverse') 
Example #3
Source File: graphs.py    From multilabel-image-classification-tensorflow with MIT License
def __init__(self, cl_logits_input_dim=None):
    self.global_step = tf.train.get_or_create_global_step()
    self.vocab_freqs = _get_vocab_freqs()

    # Cache VatxtInput objects
    self.cl_inputs = None
    self.lm_inputs = None

    # Cache intermediate Tensors that are reused
    self.tensors = {}

    # Construct layers which are reused in constructing the LM and
    # Classification graphs. Instantiating them all once here ensures that
    # variable reuse works correctly.
    self.layers = {}
    self.layers['embedding'] = layers_lib.Embedding(
        FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
        self.vocab_freqs, FLAGS.keep_prob_emb)
    self.layers['lstm'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
    self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss')

    cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
    self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
        [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
        FLAGS.num_classes, FLAGS.keep_prob_cl_hidden) 
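Example #3 shows the base-class constructor that the bidirectional __init__ above extends. As a rough, hypothetical sketch of how these shared layers chain together for the LM loss, assuming an inputs object shaped like the cached VatxtInput objects (the tokens, state, length, labels, and weights attributes are assumptions, not code from this page):

# Hypothetical fragment of a method on the same model class; not runnable
# on its own and not taken from any of the listed projects.
embedded = self.layers['embedding'](inputs.tokens)       # token ids -> embeddings
lstm_out, next_state = self.layers['lstm'](
    embedded, inputs.state, inputs.length)               # run the recurrent stack
lm_loss = self.layers['lm_loss'](
    [lstm_out, inputs.labels, inputs.weights])           # sampled-softmax LM loss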
Example #4
Source File: graphs.py    From models with Apache License 2.0
def __init__(self):
    super(VatxtBidirModel,
          self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)

    # Reverse LSTM and LM loss for bidirectional models
    self.layers['lstm_reverse'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size,
        FLAGS.rnn_num_layers,
        FLAGS.keep_prob_lstm_out,
        name='LSTM_Reverse')
    self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss_reverse') 
Example #5
Source File: graphs.py    From models with Apache License 2.0
def __init__(self, cl_logits_input_dim=None):
    self.global_step = tf.train.get_or_create_global_step()
    self.vocab_freqs = _get_vocab_freqs()

    # Cache VatxtInput objects
    self.cl_inputs = None
    self.lm_inputs = None

    # Cache intermediate Tensors that are reused
    self.tensors = {}

    # Construct layers which are reused in constructing the LM and
    # Classification graphs. Instantiating them all once here ensures that
    # variable reuse works correctly.
    self.layers = {}
    self.layers['embedding'] = layers_lib.Embedding(
        FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
        self.vocab_freqs, FLAGS.keep_prob_emb)
    self.layers['lstm'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
    self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss')

    cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
    self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
        [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
        FLAGS.num_classes, FLAGS.keep_prob_cl_hidden) 
Example #6
Source File: graphs.py    From g-tensorflow-models with Apache License 2.0
def __init__(self):
    super(VatxtBidirModel,
          self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)

    # Reverse LSTM and LM loss for bidirectional models
    self.layers['lstm_reverse'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size,
        FLAGS.rnn_num_layers,
        FLAGS.keep_prob_lstm_out,
        name='LSTM_Reverse')
    self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss_reverse') 
Example #7
Source File: graphs.py    From g-tensorflow-models with Apache License 2.0
def __init__(self, cl_logits_input_dim=None):
    self.global_step = tf.train.get_or_create_global_step()
    self.vocab_freqs = _get_vocab_freqs()

    # Cache VatxtInput objects
    self.cl_inputs = None
    self.lm_inputs = None

    # Cache intermediate Tensors that are reused
    self.tensors = {}

    # Construct layers which are reused in constructing the LM and
    # Classification graphs. Instantiating them all once here ensures that
    # variable reuse works correctly.
    self.layers = {}
    self.layers['embedding'] = layers_lib.Embedding(
        FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
        self.vocab_freqs, FLAGS.keep_prob_emb)
    self.layers['lstm'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
    self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss')

    cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
    self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
        [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
        FLAGS.num_classes, FLAGS.keep_prob_cl_hidden) 
Example #8
Source File: graphs.py    From object_detection_with_tensorflow with MIT License
def __init__(self):
    super(VatxtBidirModel,
          self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)

    # Reverse LSTM and LM loss for bidirectional models
    self.layers['lstm_reverse'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size,
        FLAGS.rnn_num_layers,
        FLAGS.keep_prob_lstm_out,
        name='LSTM_Reverse')
    self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss_reverse') 
Example #9
Source File: graphs.py    From object_detection_with_tensorflow with MIT License
def __init__(self, cl_logits_input_dim=None):
    self.global_step = tf.train.get_or_create_global_step()
    self.vocab_freqs = _get_vocab_freqs()

    # Cache VatxtInput objects
    self.cl_inputs = None
    self.lm_inputs = None

    # Cache intermediate Tensors that are reused
    self.tensors = {}

    # Construct layers which are reused in constructing the LM and
    # Classification graphs. Instantiating them all once here ensures that
    # variable reuse works correctly.
    self.layers = {}
    self.layers['embedding'] = layers_lib.Embedding(
        FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
        self.vocab_freqs, FLAGS.keep_prob_emb)
    self.layers['lstm'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
    self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss')

    cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
    self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
        [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
        FLAGS.num_classes, FLAGS.keep_prob_cl_hidden) 
Example #10
Source File: graphs.py    From DOTA_models with Apache License 2.0
def __init__(self, cl_logits_input_dim=None):
    self.global_step = tf.contrib.framework.get_or_create_global_step()
    self.vocab_freqs = _get_vocab_freqs()

    # Cache VatxtInput objects
    self.cl_inputs = None
    self.lm_inputs = None

    # Cache intermediate Tensors that are reused
    self.tensors = {}

    # Construct layers which are reused in constructing the LM and
    # Classification graphs. Instantiating them all once here ensures that
    # variable reuse works correctly.
    self.layers = {}
    self.layers['embedding'] = layers_lib.Embedding(
        FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
        self.vocab_freqs, FLAGS.keep_prob_emb)
    self.layers['lstm'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
    self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss')

    cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
    self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
        [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
        FLAGS.num_classes, FLAGS.keep_prob_cl_hidden) 
Example #11
Source File: graphs.py    From object_detection_kitti with Apache License 2.0
def __init__(self, cl_logits_input_dim=None):
    self.global_step = tf.contrib.framework.get_or_create_global_step()
    self.vocab_freqs = _get_vocab_freqs()

    # Cache VatxtInput objects
    self.cl_inputs = None
    self.lm_inputs = None

    # Cache intermediate Tensors that are reused
    self.tensors = {}

    # Construct layers which are reused in constructing the LM and
    # Classification graphs. Instantiating them all once here ensures that
    # variable reuse works correctly.
    self.layers = {}
    self.layers['embedding'] = layers_lib.Embedding(
        FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
        self.vocab_freqs, FLAGS.keep_prob_emb)
    self.layers['lstm'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
    self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss')

    cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
    self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
        [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
        FLAGS.num_classes, FLAGS.keep_prob_cl_hidden) 
Example #12
Source File: graphs.py    From hands-detection with MIT License
def __init__(self):
    super(VatxtBidirModel,
          self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)

    # Reverse LSTM and LM loss for bidirectional models
    self.layers['lstm_reverse'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size,
        FLAGS.rnn_num_layers,
        FLAGS.keep_prob_lstm_out,
        name='LSTM_Reverse')
    self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss_reverse') 
Example #13
Source File: graphs.py    From hands-detection with MIT License
def __init__(self, cl_logits_input_dim=None):
    self.global_step = tf.contrib.framework.get_or_create_global_step()
    self.vocab_freqs = _get_vocab_freqs()

    # Cache VatxtInput objects
    self.cl_inputs = None
    self.lm_inputs = None

    # Cache intermediate Tensors that are reused
    self.tensors = {}

    # Construct layers which are reused in constructing the LM and
    # Classification graphs. Instantiating them all once here ensures that
    # variable reuse works correctly.
    self.layers = {}
    self.layers['embedding'] = layers_lib.Embedding(
        FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
        self.vocab_freqs, FLAGS.keep_prob_emb)
    self.layers['lstm'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
    self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss')

    cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
    self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
        [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
        FLAGS.num_classes, FLAGS.keep_prob_cl_hidden) 
Example #14
Source File: graphs.py    From Gun-Detector with Apache License 2.0
def __init__(self):
    super(VatxtBidirModel,
          self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)

    # Reverse LSTM and LM loss for bidirectional models
    self.layers['lstm_reverse'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size,
        FLAGS.rnn_num_layers,
        FLAGS.keep_prob_lstm_out,
        name='LSTM_Reverse')
    self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss_reverse') 
Example #15
Source File: graphs.py    From Gun-Detector with Apache License 2.0
def __init__(self, cl_logits_input_dim=None):
    self.global_step = tf.train.get_or_create_global_step()
    self.vocab_freqs = _get_vocab_freqs()

    # Cache VatxtInput objects
    self.cl_inputs = None
    self.lm_inputs = None

    # Cache intermediate Tensors that are reused
    self.tensors = {}

    # Construct layers which are reused in constructing the LM and
    # Classification graphs. Instantiating them all once here ensures that
    # variable reuse works correctly.
    self.layers = {}
    self.layers['embedding'] = layers_lib.Embedding(
        FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
        self.vocab_freqs, FLAGS.keep_prob_emb)
    self.layers['lstm'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
    self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss')

    cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
    self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
        [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
        FLAGS.num_classes, FLAGS.keep_prob_cl_hidden) 
Example #16
Source File: graphs.py    From yolo_v2 with Apache License 2.0
def __init__(self):
    super(VatxtBidirModel,
          self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)

    # Reverse LSTM and LM loss for bidirectional models
    self.layers['lstm_reverse'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size,
        FLAGS.rnn_num_layers,
        FLAGS.keep_prob_lstm_out,
        name='LSTM_Reverse')
    self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss_reverse') 
Example #17
Source File: graphs.py    From yolo_v2 with Apache License 2.0
def __init__(self, cl_logits_input_dim=None):
    self.global_step = tf.train.get_or_create_global_step()
    self.vocab_freqs = _get_vocab_freqs()

    # Cache VatxtInput objects
    self.cl_inputs = None
    self.lm_inputs = None

    # Cache intermediate Tensors that are reused
    self.tensors = {}

    # Construct layers which are reused in constructing the LM and
    # Classification graphs. Instantiating them all once here ensures that
    # variable reuse works correctly.
    self.layers = {}
    self.layers['embedding'] = layers_lib.Embedding(
        FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
        self.vocab_freqs, FLAGS.keep_prob_emb)
    self.layers['lstm'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
    self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss')

    cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
    self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
        [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
        FLAGS.num_classes, FLAGS.keep_prob_cl_hidden) 
Example #18
Source File: graphs.py    From DOTA_models with Apache License 2.0
def __init__(self):
    super(VatxtBidirModel,
          self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)

    # Reverse LSTM and LM loss for bidirectional models
    self.layers['lstm_reverse'] = layers_lib.LSTM(
        FLAGS.rnn_cell_size,
        FLAGS.rnn_num_layers,
        FLAGS.keep_prob_lstm_out,
        name='LSTM_Reverse')
    self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
        FLAGS.vocab_size,
        FLAGS.num_candidate_samples,
        self.vocab_freqs,
        name='LM_loss_reverse')