Python tensorflow.nce_loss() Examples
The following are 8 code examples of tensorflow.nce_loss().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the tensorflow module.
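All eight examples call the same TF 1.x API, so as a quick orientation, here is a minimal sketch of the call signature they share (hypothetical sizes, not taken from any of the projects below): tf.nn.nce_loss takes class-major weights of shape [num_classes, dim], biases of shape [num_classes], integer labels of shape [batch_size, num_true], and forward activations of shape [batch_size, dim].

import tensorflow as tf  # TF 1.x graph-mode API

# Hypothetical sizes, for illustration only.
num_classes, dim, batch_size, num_sampled = 10000, 128, 32, 64

weights = tf.Variable(tf.truncated_normal([num_classes, dim]))  # [num_classes, dim]
biases = tf.Variable(tf.zeros([num_classes]))                   # [num_classes]
inputs = tf.placeholder(tf.float32, [batch_size, dim])          # forward activations
labels = tf.placeholder(tf.int64, [batch_size, 1])              # target class ids

loss = tf.reduce_mean(
    tf.nn.nce_loss(weights=weights, biases=biases,
                   labels=labels, inputs=inputs,
                   num_sampled=num_sampled, num_classes=num_classes))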
Example #1
Source File: p8_TextRNN_model_multi_layers.py, from the text_classification project (MIT License).
def loss_nce(self, l2_lambda=0.0001):  # 0.0001 --> 0.001
    """calculate loss using (NCE) cross entropy here"""
    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    if self.is_training:  # training
        # labels = tf.reshape(self.input_y, [-1])  # [batch_size,1] ------> [batch_size,]
        labels = tf.expand_dims(self.input_y, 1)   # [batch_size,]  -----> [batch_size,1]
        loss = tf.reduce_mean(
            # inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network.
            tf.nn.nce_loss(weights=tf.transpose(self.W_projection),  # [hidden_size*2, num_classes] ---> [num_classes, hidden_size*2]; nce weights must be `[num_classes, dim]`.
                           biases=self.b_projection,                 # [num_classes]; nce biases must be `[num_classes]`.
                           labels=labels,                            # [batch_size, 1]; target classes, `int64` of shape `[batch_size, num_true]`.
                           inputs=self.output_rnn_last,              # [batch_size, hidden_size*2]; forward activations of shape `[batch_size, dim]`.
                           num_sampled=self.num_sampled,             # scalar, e.g. 100
                           num_classes=self.num_classes,             # scalar, e.g. 1999
                           partition_strategy="div"))
    l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
    loss = loss + l2_losses
    return loss
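The transpose above exists only because tf.nn.nce_loss wants weights of shape [num_classes, dim], while the model stores W_projection as [hidden_size*2, num_classes]. A minimal sketch of the matching full-softmax inference path that such a projection typically feeds (an assumption about the surrounding class, not code copied from the repository):

# Hypothetical inference path inside the same class: full (un-sampled) logits.
logits = tf.matmul(self.output_rnn_last, self.W_projection) + self.b_projection  # [batch_size, num_classes]
predictions = tf.argmax(logits, axis=1)                                          # [batch_size]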
Example #2
Source File: p8_TextRNN_model.py, from the text_classification project (MIT License).
def loss_nce(self, l2_lambda=0.0001):  # 0.0001 --> 0.001
    """calculate loss using (NCE) cross entropy here"""
    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    if self.is_training:  # training
        # labels = tf.reshape(self.input_y, [-1])  # [batch_size,1] ------> [batch_size,]
        labels = tf.expand_dims(self.input_y, 1)   # [batch_size,]  -----> [batch_size,1]
        loss = tf.reduce_mean(
            # inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network.
            tf.nn.nce_loss(weights=tf.transpose(self.W_projection),  # [hidden_size*2, num_classes] ---> [num_classes, hidden_size*2]; nce weights must be `[num_classes, dim]`.
                           biases=self.b_projection,                 # [num_classes]; nce biases must be `[num_classes]`.
                           labels=labels,                            # [batch_size, 1]; target classes, `int64` of shape `[batch_size, num_true]`.
                           inputs=self.output_rnn_last,              # [batch_size, hidden_size*2]; forward activations of shape `[batch_size, dim]`.
                           num_sampled=self.num_sampled,             # scalar, e.g. 100
                           num_classes=self.num_classes,             # scalar, e.g. 1999
                           partition_strategy="div"))
    l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
    loss = loss + l2_losses
    return loss
Example #3
Source File: p8_TextRNN_model_multi_layers.py, from the pynlp project (MIT License).
def loss_nce(self, l2_lambda=0.0001):  # 0.0001 --> 0.001
    """calculate loss using (NCE) cross entropy here"""
    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    if self.is_training:  # training
        # labels = tf.reshape(self.input_y, [-1])  # [batch_size,1] ------> [batch_size,]
        labels = tf.expand_dims(self.input_y, 1)   # [batch_size,]  -----> [batch_size,1]
        loss = tf.reduce_mean(
            # inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network.
            tf.nn.nce_loss(weights=tf.transpose(self.W_projection),  # [hidden_size*2, num_classes] ---> [num_classes, hidden_size*2]; nce weights must be `[num_classes, dim]`.
                           biases=self.b_projection,                 # [num_classes]; nce biases must be `[num_classes]`.
                           labels=labels,                            # [batch_size, 1]; target classes, `int64` of shape `[batch_size, num_true]`.
                           inputs=self.output_rnn_last,              # [batch_size, hidden_size*2]; forward activations of shape `[batch_size, dim]`.
                           num_sampled=self.num_sampled,             # scalar, e.g. 100
                           num_classes=self.num_classes,             # scalar, e.g. 1999
                           partition_strategy="div"))
    l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
    loss = loss + l2_losses
    return loss
Example #4
Source File: p8_TextRNN_model.py, from the pynlp project (MIT License).
def loss_nce(self, l2_lambda=0.0001):  # 0.0001 --> 0.001
    """calculate loss using (NCE) cross entropy here"""
    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    if self.is_training:  # training
        # labels = tf.reshape(self.input_y, [-1])  # [batch_size,1] ------> [batch_size,]
        labels = tf.expand_dims(self.input_y, 1)   # [batch_size,]  -----> [batch_size,1]
        loss = tf.reduce_mean(
            # inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network.
            tf.nn.nce_loss(weights=tf.transpose(self.W_projection),  # [hidden_size*2, num_classes] ---> [num_classes, hidden_size*2]; nce weights must be `[num_classes, dim]`.
                           biases=self.b_projection,                 # [num_classes]; nce biases must be `[num_classes]`.
                           labels=labels,                            # [batch_size, 1]; target classes, `int64` of shape `[batch_size, num_true]`.
                           inputs=self.output_rnn_last,              # [batch_size, hidden_size*2]; forward activations of shape `[batch_size, dim]`.
                           num_sampled=self.num_sampled,             # scalar, e.g. 100
                           num_classes=self.num_classes,             # scalar, e.g. 1999
                           partition_strategy="div"))
    l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
    loss = loss + l2_losses
    return loss
Example #5
Source File: p5_fastTextB_model.py, from the text_classification project (MIT License).
def loss(self, l2_lambda=0.01):  # 0.0001 --> 0.001
    """calculate loss using (NCE) cross entropy here"""
    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    if self.is_training:  # training
        labels = tf.reshape(self.labels, [-1])  # [batch_size,1] ------> [batch_size,]
        labels = tf.expand_dims(labels, 1)      # [batch_size,]  -----> [batch_size,1]
        loss = tf.reduce_mean(
            # inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network.
            tf.nn.nce_loss(weights=tf.transpose(self.W),      # [embed_size, label_size] ---> [label_size, embed_size]; nce weights must be `[num_classes, dim]`.
                           biases=self.b,                     # [label_size]; nce biases must be `[num_classes]`.
                           labels=labels,                     # [batch_size, 1]; target classes, `int64` of shape `[batch_size, num_true]`.
                           inputs=self.sentence_embeddings,   # [None, embed_size]; forward activations of shape `[batch_size, dim]`.
                           num_sampled=self.num_sampled,      # scalar, e.g. 100
                           num_classes=self.label_size,       # scalar, e.g. 1999
                           partition_strategy="div"))
    else:  # eval/inference
        # logits = tf.matmul(self.sentence_embeddings, tf.transpose(self.W))
        # logits = tf.nn.bias_add(logits, self.b)
        labels_one_hot = tf.one_hot(self.labels, self.label_size)  # [batch_size] ----> [batch_size, label_size]
        # sigmoid_cross_entropy_with_logits computes sigmoid cross entropy given `logits`. It measures the probability
        # error in discrete classification tasks in which each class is independent and not mutually exclusive, e.g.
        # multilabel classification where a picture can contain both an elephant and a dog at the same time.
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_one_hot, logits=self.logits)  # labels: [batch_size, label_size]; logits: [batch_size, label_size]
        print("loss0:", loss)  # shape=(?, 1999)
        loss = tf.reduce_sum(loss, axis=1)
        print("loss1:", loss)  # shape=(?,)
    # Note: in this example l2_losses is computed but never added to the returned loss.
    l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
    return loss
Example #6
Source File: p6_fastTextB_model_multilabel.py, from the text_classification project (MIT License).
def loss(self, l2_lambda=0.0001):
    """calculate loss using (NCE) cross entropy here"""
    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    # if self.is_training:  # training
    #     labels = tf.reshape(self.labels, [-1])  # 3. [batch_size, max_label_per_example] ------> [batch_size*max_label_per_example,]
    #     labels = tf.expand_dims(labels, 1)      #    [batch_size*max_label_per_example,] -----> [batch_size*max_label_per_example, 1]
    #     # nce_loss notice: for now, if you have a variable number of target classes, you can pad them out to a
    #     # constant number by either repeating them or by padding with an otherwise unused class.
    #     loss = tf.reduce_mean(  # inputs' shape should be: [batch_size, dim]
    #         tf.nn.nce_loss(weights=tf.transpose(self.W),     # [embed_size, label_size] ---> [label_size, embed_size]; nce weights must be `[num_classes, dim]`.
    #                        biases=self.b,                    # [label_size]; nce biases must be `[num_classes]`.
    #                        labels=self.labels,               # 4. [batch_size, max_label_per_example]; target classes, `int64` of shape `[batch_size, num_true]`.
    #                        inputs=self.sentence_embeddings,  # [None, embed_size]; forward activations of shape `[batch_size, dim]`.
    #                        num_sampled=self.num_sampled,     # scalar, e.g. 100
    #                        num_true=self.max_label_per_example,
    #                        num_classes=self.label_size,      # scalar, e.g. 1999
    #                        partition_strategy="div"))
    # else:  # eval/inference
    labels_multi_hot = self.labels_l1999  # [batch_size, label_size]
    # sigmoid_cross_entropy_with_logits computes sigmoid cross entropy given `logits`. It measures the probability
    # error in discrete classification tasks in which each class is independent and not mutually exclusive, e.g.
    # multilabel classification where a picture can contain both an elephant and a dog at the same time.
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_multi_hot, logits=self.logits)  # labels: [batch_size, label_size]; logits: [batch_size, label_size]
    loss = tf.reduce_mean(tf.reduce_sum(loss, axis=1))
    print("loss:", loss)
    # adding regularization may result in not converging
    self.l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
    print("l2_losses:", self.l2_losses)
    loss = loss + self.l2_losses
    return loss
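The commented-out block above is the multi-label variant of the same call: tf.nn.nce_loss accepts more than one true class per example through num_true, provided every example is padded to the same number of labels. A minimal shape sketch with hypothetical names and sizes (not code from the project):

import tensorflow as tf

num_classes, dim, batch_size, num_true, num_sampled = 2000, 128, 32, 5, 100
nce_weights = tf.Variable(tf.truncated_normal([num_classes, dim]))   # [num_classes, dim]
nce_biases = tf.Variable(tf.zeros([num_classes]))                    # [num_classes]
sentence_embeddings = tf.placeholder(tf.float32, [batch_size, dim])  # [batch_size, dim]
padded_labels = tf.placeholder(tf.int64, [batch_size, num_true])     # each row padded to num_true class ids

loss = tf.reduce_mean(
    tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
                   labels=padded_labels, inputs=sentence_embeddings,
                   num_true=num_true, num_sampled=num_sampled,
                   num_classes=num_classes))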
Example #7
Source File: 4_1_word2vec_embedding.py, from the hyper-engine project (Apache License 2.0).
def word2vec_model(params):
    # Input data.
    inputs = tf.placeholder(dtype=tf.int32, shape=[None], name='input')
    labels = tf.placeholder(dtype=tf.int32, shape=[None, 1], name='label')

    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([params.vocab_size, params.embedding_size], -1.0, 1.0)
    )
    embed = tf.nn.embedding_lookup(embeddings, inputs)

    # Construct the variables for the NCE loss.
    nce_weights = tf.Variable(
        tf.truncated_normal(shape=[params.vocab_size, params.embedding_size],
                            stddev=1.0 / np.sqrt(params.embedding_size))
    )
    nce_biases = tf.Variable(tf.zeros([params.vocab_size]))

    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    loss = tf.reduce_mean(
        tf.nn.nce_loss(nce_weights, nce_biases,
                       labels=labels,
                       inputs=embed,
                       num_sampled=params.negative_samples,
                       num_classes=params.vocab_size),
        name='loss'
    )
    optimizer = tf.train.AdamOptimizer(params.learning_rate)
    optimizer.minimize(loss, name='minimize')
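Note that the function returns nothing: it builds named ops ('input', 'label', 'loss', 'minimize') into the current graph so the caller can address them by name. A minimal driver sketch under that assumption (not part of hyper-engine; the params object and batch arrays below are hypothetical stand-ins for what the library provides):

import numpy as np
import tensorflow as tf
from types import SimpleNamespace

# Hypothetical hyperparameters and a random skip-gram batch, for illustration only.
params = SimpleNamespace(vocab_size=10000, embedding_size=128,
                         negative_samples=64, learning_rate=1e-3)
batch_inputs = np.random.randint(0, params.vocab_size, size=32)       # [batch]
batch_labels = np.random.randint(0, params.vocab_size, size=(32, 1))  # [batch, 1]

graph = tf.Graph()
with graph.as_default():
    word2vec_model(params)  # adds 'input', 'label', 'loss' and 'minimize' to the graph
    with tf.Session(graph=graph) as sess:
        sess.run(tf.global_variables_initializer())
        _, loss_val = sess.run(['minimize', 'loss:0'],
                               feed_dict={'input:0': batch_inputs, 'label:0': batch_labels})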
Example #8
Source File: tensorlayer-layers.py, from the Conv3D_BICLSTM project (MIT License).
def __init__(
        self,
        inputs=None,
        train_labels=None,
        vocabulary_size=80000,
        embedding_size=200,
        num_sampled=64,
        nce_loss_args={},
        E_init=tf.random_uniform_initializer(minval=-1.0, maxval=1.0),
        E_init_args={},
        nce_W_init=tf.truncated_normal_initializer(stddev=0.03),
        nce_W_init_args={},
        nce_b_init=tf.constant_initializer(value=0.0),
        nce_b_init_args={},
        name='word2vec_layer',
):
    Layer.__init__(self, name=name)
    self.inputs = inputs
    print("  tensorlayer:Instantiate Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))

    # Look up embeddings for inputs.
    # Note: a row of 'embeddings' is the vector representation of a word.
    # For the sake of speed, it is better to slice the embedding matrix
    # instead of transferring a word id to a one-hot vector and then
    # multiplying by the embedding matrix.
    # `embed` is the output of the hidden (embedding) layer: a row vector
    # with 'embedding_size' values.
    with tf.variable_scope(name) as vs:
        embeddings = tf.get_variable(name='embeddings',
                                     shape=(vocabulary_size, embedding_size),
                                     initializer=E_init,
                                     **E_init_args)
        embed = tf.nn.embedding_lookup(embeddings, self.inputs)
        # Construct the variables for the NCE loss (i.e. negative sampling).
        nce_weights = tf.get_variable(name='nce_weights',
                                      shape=(vocabulary_size, embedding_size),
                                      initializer=nce_W_init,
                                      **nce_W_init_args)
        nce_biases = tf.get_variable(name='nce_biases',
                                     shape=(vocabulary_size,),
                                     initializer=nce_b_init,
                                     **nce_b_init_args)

    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels
    # each time we evaluate the loss.
    self.nce_cost = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
                       inputs=embed, labels=train_labels,
                       num_sampled=num_sampled, num_classes=vocabulary_size,
                       **nce_loss_args))
    self.outputs = embed
    self.normalized_embeddings = tf.nn.l2_normalize(embeddings, 1)

    self.all_layers = [self.outputs]
    self.all_params = [embeddings, nce_weights, nce_biases]
    self.all_drop = {}
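A minimal usage sketch, assuming the class above is TensorLayer's Word2vecEmbeddingInputlayer and a hypothetical skip-gram batch of size 128 (the placeholder shapes and hyperparameters are illustrative, not from the Conv3D_BICLSTM repository):

import tensorflow as tf

batch_size = 128
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])      # centre-word ids
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])   # context-word ids
emb_net = Word2vecEmbeddingInputlayer(inputs=train_inputs,
                                      train_labels=train_labels,
                                      vocabulary_size=50000,
                                      embedding_size=128,
                                      num_sampled=64,
                                      name='word2vec_layer')
# The layer exposes the NCE loss as emb_net.nce_cost; minimize it to train the embeddings.
train_op = tf.train.GradientDescentOptimizer(1.0).minimize(emb_net.nce_cost)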