Python tensorflow.count_nonzero() Examples

The following are 30 code examples of tensorflow.count_nonzero(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
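Before the project examples, here is a minimal, self-contained sketch (toy values, TF 1.x graph mode; in TF 2.x the same op is exposed as tf.math.count_nonzero) showing what the function computes: the number of non-zero entries of a tensor, optionally reduced along given axes and cast to a chosen dtype.

import tensorflow as tf

x = tf.constant([[0, 1, 2],
                 [0, 0, 3]])

total = tf.count_nonzero(x)                         # scalar int64: 3
per_row = tf.count_nonzero(x, axis=1)               # [2, 1]
as_float = tf.count_nonzero(x, dtype=tf.float32)    # 3.0, convenient for averaging

with tf.Session() as sess:
    print(sess.run([total, per_row, as_float]))     # [3, array([2, 1]), 3.0]

The examples below use the same op to count unmasked loss terms, correct predictions, non-padding tokens, and margin violations.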
Example #1
Source File: modules.py    From Tacotron-2 with MIT License    6 votes
def MaskedCrossEntropyLoss(outputs, targets, lengths=None, mask=None, max_len=None):
	if lengths is None and mask is None:
		raise RuntimeError('Please provide either lengths or mask')

	#[batch_size, time_length]
	if mask is None:
		mask = sequence_mask(lengths, max_len, False)

	#One hot encode targets (outputs.shape[-1] = hparams.quantize_channels)
	targets_ = tf.one_hot(targets, depth=tf.shape(outputs)[-1])

	with tf.control_dependencies([tf.assert_equal(tf.shape(outputs), tf.shape(targets_))]):
		losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=outputs, labels=targets_)

	with tf.control_dependencies([tf.assert_equal(tf.shape(mask), tf.shape(losses))]):
		masked_loss = losses * mask

	return tf.reduce_sum(masked_loss) / tf.count_nonzero(masked_loss, dtype=tf.float32) 
Example #2
Source File: yellowfin.py    From training_results_v0.5 with Apache License 2.0    6 votes
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset is
    # roughly estimated as the minibatch sparse gradient norm * sqrt(sparsity).
    # A possible extension is to correct only the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity,])
    with tf.control_dependencies([avg_op]):
      self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op 
Example #3
Source File: yellowfin.py    From BERT with Apache License 2.0    6 votes
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset is
    # roughly estimated as the minibatch sparse gradient norm * sqrt(sparsity).
    # A possible extension is to correct only the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity,])
    with tf.control_dependencies([avg_op]):
      self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op 
Example #4
Source File: cnn.py    From leaf with BSD 2-Clause "Simplified" License    6 votes
def create_model(self):
        input_ph = tf.placeholder(
            tf.float32, shape=(None, IMAGE_SIZE, IMAGE_SIZE, 3))
        out = input_ph
        for _ in range(4):
            out = tf.layers.conv2d(out, 32, 3, padding='same')
            out = tf.layers.batch_normalization(out, training=True)
            out = tf.layers.max_pooling2d(out, 2, 2, padding='same')
            out = tf.nn.relu(out)
        out = tf.reshape(out, (-1, int(np.prod(out.get_shape()[1:]))))
        logits = tf.layers.dense(out, self.num_classes)
        label_ph = tf.placeholder(tf.int64, shape=(None,))
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_ph,
            logits=logits)
        predictions = tf.argmax(logits, axis=-1)
        minimize_op = self.optimizer.minimize(
            loss=loss, global_step=tf.train.get_global_step())
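        # Count of non-zero (True) equality results, i.e. the number of correct predictions in the batch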
        eval_metric_ops = tf.count_nonzero(
            tf.equal(label_ph, tf.argmax(input=logits, axis=1)))
        return input_ph, label_ph, minimize_op, eval_metric_ops, tf.math.reduce_mean(loss) 
Example #5
Source File: log_reg.py    From leaf with BSD 2-Clause "Simplified" License    6 votes
def create_model(self):
        features = tf.placeholder(tf.float32, [None, self.input_dim])
        labels = tf.placeholder(tf.int64, [None])

        logits = tf.layers.dense(features, self.num_classes, activation=tf.nn.sigmoid)
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels,
            logits=logits)
        
        train_op = self.optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())

        predictions = tf.argmax(logits, axis=-1)
        correct_pred = tf.equal(predictions, labels)
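        # True comparisons count as non-zero, so this yields the number of correct predictions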
        eval_metric_ops = tf.count_nonzero(correct_pred)
        
        return features, labels, train_op, eval_metric_ops, tf.reduce_mean(loss) 
Example #6
Source File: model.py    From minimal-entropy-correlation-alignment with MIT License    6 votes
def log_coral_loss(self, h_src, h_trg, gamma=1e-3):
	# regularized covariances result in inf or nan
	# First: subtract the mean from the data matrix
	batch_size = tf.to_float(tf.shape(h_src)[0])
	h_src = h_src - tf.reduce_mean(h_src, axis=0)
	h_trg = h_trg - tf.reduce_mean(h_trg, axis=0)
	cov_source = (1. / (batch_size - 1)) * tf.matmul(h_src, h_src, transpose_a=True)  #+ gamma * tf.eye(self.hidden_repr_size)
	cov_target = (1. / (batch_size - 1)) * tf.matmul(h_trg, h_trg, transpose_a=True)  #+ gamma * tf.eye(self.hidden_repr_size)
	#eigen decomposition
	eig_source = tf.self_adjoint_eig(cov_source)
	eig_target = tf.self_adjoint_eig(cov_target)
	log_cov_source = tf.matmul(eig_source[1], tf.matmul(tf.diag(tf.log(eig_source[0])), eig_source[1], transpose_b=True))
	log_cov_target = tf.matmul(eig_target[1], tf.matmul(tf.diag(tf.log(eig_target[0])), eig_target[1], transpose_b=True))

	# Squared Frobenius distance between the log-covariances, averaged over entries
	return tf.reduce_mean(tf.square(tf.subtract(log_cov_source, log_cov_target)))
	#~ return tf.reduce_mean(tf.reduce_max(eig_target[0]))
	#~ return tf.to_float(tf.equal(tf.count_nonzero(h_src), tf.count_nonzero(h_src))) 
Example #7
Source File: model_frcnn.py    From tensorpack with Apache License 2.0    6 votes
def proposal_metrics(iou):
    """
    Add summaries for RPN proposals.

    Args:
        iou: nxm, #proposal x #gt
    """
    # find best roi for each gt, for summary only
    best_iou = tf.reduce_max(iou, axis=0)
    mean_best_iou = tf.reduce_mean(best_iou, name='best_iou_per_gt')
    summaries = [mean_best_iou]
    with tf.device('/cpu:0'):
        for th in [0.3, 0.5]:
            recall = tf.truediv(
                tf.count_nonzero(best_iou >= th),
                tf.size(best_iou, out_type=tf.int64),
                name='recall_iou{}'.format(th))
            summaries.append(recall)
    add_moving_summary(*summaries) 
Example #8
Source File: evaluate_model_utils.py    From lanenet-lane-detection with Apache License 2.0    6 votes
def calculate_model_fn(input_tensor, label_tensor):
    """
    calculate fn figure
    :param input_tensor:
    :param label_tensor:
    :return:
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(label_tensor, 1))
    pix_cls_ret = tf.gather_nd(final_output, idx)
    label_cls_ret = tf.gather_nd(label_tensor, tf.where(tf.equal(label_tensor, 1)))
    mis_pred = tf.cast(tf.shape(label_cls_ret)[0], tf.int64) - tf.count_nonzero(pix_cls_ret)

    return tf.divide(mis_pred, tf.cast(tf.shape(label_cls_ret)[0], tf.int64)) 
Example #9
Source File: evaluate_model_utils.py    From lanenet-lane-detection with Apache License 2.0    6 votes
def calculate_model_fp(input_tensor, label_tensor):
    """
    calculate fp figure
    :param input_tensor:
    :param label_tensor:
    :return:
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(final_output, 1))
    pix_cls_ret = tf.gather_nd(final_output, idx)
    false_pred = tf.cast(tf.shape(pix_cls_ret)[0], tf.int64) - tf.count_nonzero(
        tf.gather_nd(label_tensor, idx)
    )

    return tf.divide(false_pred, tf.cast(tf.shape(pix_cls_ret)[0], tf.int64)) 
Example #10
Source File: evaluate_model_utils.py    From lanenet-lane-detection with Apache License 2.0    6 votes
def calculate_model_precision(input_tensor, label_tensor):
    """
    calculate accuracy acc = correct_nums / ground_truth_nums
    :param input_tensor: binary segmentation logits
    :param label_tensor: binary segmentation label
    :return:
    """

    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(final_output, 1))
    pix_cls_ret = tf.gather_nd(label_tensor, idx)
    accuracy = tf.count_nonzero(pix_cls_ret)
    accuracy = tf.divide(
        accuracy,
        tf.cast(tf.shape(tf.gather_nd(label_tensor, tf.where(tf.equal(label_tensor, 1))))[0], tf.int64))

    return accuracy 
Example #11
Source File: yellowfin.py    From training_results_v0.5 with Apache License 2.0    6 votes
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset is
    # roughly estimated as the minibatch sparse gradient norm * sqrt(sparsity).
    # A possible extension is to correct only the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity,])
    with tf.control_dependencies([avg_op]):
      self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op 
Example #12
Source File: model.py    From ndvr-dml with Apache License 2.0    6 votes
def triplet_loss(self, anchor, positive, negative, gamma):
        """
          Triplet loss calculation.

          Args:
            anchor: anchor feature matrix (NxM)
            positive: positive feature matrix (NxM)
            negative: negative feature matrix (NxM)
            gamma: margin parameter

          Returns:
            loss: total triplet loss
            error: number of triplets with positive loss
        """
        with tf.name_scope('triplet_loss'):
            pos_dist = self.euclidean_distance(anchor, positive)
            neg_dist = self.euclidean_distance(anchor, negative)
            loss = tf.maximum(0., pos_dist - neg_dist + gamma)
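            # error: percentage of triplets in the batch that still violate the margin (loss > 0)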
            error = tf.count_nonzero(loss, dtype=tf.float32) / \
                    tf.cast(tf.shape(anchor)[0], tf.float32) * tf.constant(100.0)
            loss = tf.reduce_mean(loss)
            tf.summary.scalar('loss', loss)
            tf.summary.scalar('error', error)
            return loss, error 
Example #13
Source File: reduction_ops_test.py    From deep_image_model with Apache License 2.0    6 votes
def _compare(self,
               x,
               reduction_axes,
               keep_dims,
               use_gpu=False,
               feed_dict=None):
    np_ans = (x != 0).astype(np.int32)
    if reduction_axes is None:
      np_ans = np.sum(np_ans, keepdims=keep_dims)
    else:
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      for ra in reduction_axes.ravel()[::-1]:
        np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
    with self.test_session(use_gpu=use_gpu) as sess:
      tf_ans = tf.count_nonzero(x, reduction_axes, keep_dims)
      out = sess.run(tf_ans, feed_dict)
    self.assertAllClose(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans) 
Example #14
Source File: modules.py    From style-token_tacotron2 with MIT License    6 votes
def MaskedSigmoidCrossEntropy(targets, outputs, targets_lengths, hparams, mask=None):
    '''Computes a masked SigmoidCrossEntropy with logits
    '''

    # [batch_size, time_dimension]
    # example:
    # sequence_mask([1, 3, 2], 5) = [[1., 0., 0., 0., 0.],
    #							    [1., 1., 1., 0., 0.],
    #							    [1., 1., 0., 0., 0.]]
    # Note the maxlen argument that ensures mask shape is compatible with r>1
    # This will by default mask the extra paddings caused by r>1
    if mask is None:
        mask = sequence_mask(targets_lengths, hparams.outputs_per_step, False)

    with tf.control_dependencies([tf.assert_equal(tf.shape(targets), tf.shape(mask))]):
        # Use a weighted sigmoid cross entropy to measure the <stop_token> loss. Setting
        # hparams.cross_entropy_pos_weight to 1 has the same effect as vanilla tf.nn.sigmoid_cross_entropy_with_logits.
        losses = tf.nn.weighted_cross_entropy_with_logits(targets=targets, logits=outputs,
                                                          pos_weight=hparams.cross_entropy_pos_weight)

    with tf.control_dependencies([tf.assert_equal(tf.shape(mask), tf.shape(losses))]):
        masked_loss = losses * mask

    return tf.reduce_sum(masked_loss) / tf.count_nonzero(masked_loss, dtype=tf.float32) 
Example #15
Source File: modules.py    From style-token_tacotron2 with MIT License    6 votes
def MaskedCrossEntropyLoss(outputs, targets, lengths=None, mask=None, max_len=None):
	if lengths is None and mask is None:
		raise RuntimeError('Please provide either lengths or mask')

	#[batch_size, time_length]
	if mask is None:
		mask = sequence_mask(lengths, max_len, False)

	#One hot encode targets (outputs.shape[-1] = hparams.quantize_channels)
	targets_ = tf.one_hot(targets, depth=tf.shape(outputs)[-1])

	with tf.control_dependencies([tf.assert_equal(tf.shape(outputs), tf.shape(targets_))]):
		losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=outputs, labels=targets_)

	with tf.control_dependencies([tf.assert_equal(tf.shape(mask), tf.shape(losses))]):
		masked_loss = losses * mask

	return tf.reduce_sum(masked_loss) / tf.count_nonzero(masked_loss, dtype=tf.float32) 
Example #16
Source File: yellowfin.py    From fine-lm with MIT License    6 votes
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset is
    # roughly estimated as the minibatch sparse gradient norm * sqrt(sparsity).
    # A possible extension is to correct only the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity,])
    with tf.control_dependencies([avg_op]):
      self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op 
Example #17
Source File: modules.py    From Tacotron-2 with MIT License    6 votes
def MaskedSigmoidCrossEntropy(targets, outputs, targets_lengths, hparams, mask=None):
	'''Computes a masked SigmoidCrossEntropy with logits
	'''

	#[batch_size, time_dimension]
	#example:
	#sequence_mask([1, 3, 2], 5) = [[1., 0., 0., 0., 0.],
	#							    [1., 1., 1., 0., 0.],
	#							    [1., 1., 0., 0., 0.]]
	#Note the maxlen argument that ensures mask shape is compatible with r>1
	#This will by default mask the extra paddings caused by r>1
	if mask is None:
		mask = sequence_mask(targets_lengths, hparams.outputs_per_step, False)

	with tf.control_dependencies([tf.assert_equal(tf.shape(targets), tf.shape(mask))]):
		#Use a weighted sigmoid cross entropy to measure the <stop_token> loss. Setting hparams.cross_entropy_pos_weight
		#to 1 has the same effect as vanilla tf.nn.sigmoid_cross_entropy_with_logits.
		losses = tf.nn.weighted_cross_entropy_with_logits(targets=targets, logits=outputs, pos_weight=hparams.cross_entropy_pos_weight)

	with tf.control_dependencies([tf.assert_equal(tf.shape(mask), tf.shape(losses))]):
		masked_loss = losses * mask

	return tf.reduce_sum(masked_loss) / tf.count_nonzero(masked_loss, dtype=tf.float32) 
Example #18
Source File: deep_localization_weighted_loss.py    From SequenceOfDigitsRecognition with MIT License    6 votes
def evaluation(self, logits, labels, predicted_positions, positions):
        with tf.name_scope("evaluation"):
            labels = tf.to_int64(labels)
            labels = tf.argmax(labels, 2)
            logits = tf.argmax(logits, 2)
            difference = tf.subtract(labels, logits, name="sub")
            character_errors = tf.count_nonzero(difference, axis=1, name="count_nonzero")
            total_wrong_characters = tf.reduce_sum(character_errors)
            total_characters = tf.to_int64(tf.size(labels))
            total_correct_characters = total_characters - total_wrong_characters
            corrects = tf.less_equal(character_errors, 0, name="is_zero")

            position_error = tf.losses.mean_squared_error(positions, predicted_positions)

            return self.tf_count(corrects,
                                 True), corrects, logits, position_error, predicted_positions, total_correct_characters, total_characters 
Example #19
Source File: base_gattn.py    From GAT with MIT License    6 votes
def micro_f1(logits, labels, mask):
        """Accuracy with masking."""
        predicted = tf.round(tf.nn.sigmoid(logits))

        # Use integers to avoid any nasty FP behaviour
        predicted = tf.cast(predicted, dtype=tf.int32)
        labels = tf.cast(labels, dtype=tf.int32)
        mask = tf.cast(mask, dtype=tf.int32)

        # expand the mask so that broadcasting works ([nb_nodes, 1])
        mask = tf.expand_dims(mask, -1)
        
        # Count true positives, true negatives, false positives and false negatives.
        tp = tf.count_nonzero(predicted * labels * mask)
        tn = tf.count_nonzero((predicted - 1) * (labels - 1) * mask)
        fp = tf.count_nonzero(predicted * (labels - 1) * mask)
        fn = tf.count_nonzero((predicted - 1) * labels * mask)

        # Calculate accuracy, precision, recall and F1 score.
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        fmeasure = (2 * precision * recall) / (precision + recall)
        fmeasure = tf.cast(fmeasure, tf.float32)
        return fmeasure 
Example #20
Source File: model.py    From PReMVOS with MIT License    6 votes
def proposal_metrics(iou):
    """
    Add summaries for RPN proposals.

    Args:
        iou: nxm, #proposal x #gt
    """
    # find best roi for each gt, for summary only
    best_iou = tf.reduce_max(iou, axis=0)
    mean_best_iou = tf.reduce_mean(best_iou, name='best_iou_per_gt')
    summaries = [mean_best_iou]
    with tf.device('/cpu:0'):
        for th in [0.3, 0.5]:
            recall = tf.truediv(
                tf.count_nonzero(best_iou >= th),
                tf.size(best_iou, out_type=tf.int64),
                name='recall_iou{}'.format(th))
            summaries.append(recall)
    add_moving_summary(*summaries) 
Example #21
Source File: deep_localization_weighted_loss_variable_length.py    From SequenceOfDigitsRecognition with MIT License    6 votes
def evaluation(self, logits, labels, predicted_positions, positions):
        with tf.name_scope("evaluation"):
            labels = tf.to_int64(labels)
            labels = tf.argmax(labels, 2)
            logits = tf.argmax(logits, 2)
            difference = tf.subtract(labels, logits, name="sub")
            character_errors = tf.count_nonzero(difference, axis=1, name="count_nonzero")
            total_wrong_characters = tf.reduce_sum(character_errors)
            total_characters = tf.to_int64(tf.size(labels))
            total_correct_characters = total_characters - total_wrong_characters
            corrects = tf.less_equal(character_errors, 0, name="is_zero")

            position_error = tf.losses.mean_squared_error(positions, predicted_positions)

            return self.tf_count(corrects,
                                 True), corrects, logits, position_error, predicted_positions, total_correct_characters, total_characters 
Example #22
Source File: svhn_paper_convolution_dropout_output.py    From SequenceOfDigitsRecognition with MIT License    6 votes
def evaluation(self, logits, labels, predicted_positions, positions):
        with tf.name_scope("evaluation"):
            labels = tf.to_int64(labels)
            labels = tf.argmax(labels, 2)
            logits = tf.argmax(logits, 2)
            difference = tf.subtract(labels, logits, name="sub")
            character_errors = tf.count_nonzero(difference, axis=1, name="count_nonzero")
            total_wrong_characters = tf.reduce_sum(character_errors)
            total_characters = tf.to_int64(tf.size(labels))
            total_correct_characters = total_characters - total_wrong_characters
            corrects = tf.less_equal(character_errors, 0, name="is_zero")

            position_error = tf.losses.mean_squared_error(positions, predicted_positions)

            return self.tf_count(corrects,
                                 True), corrects, logits, position_error, predicted_positions, total_correct_characters, total_characters 
Example #23
Source File: svhn_transfer_learning_no_maxpool.py    From SequenceOfDigitsRecognition with MIT License    6 votes
def evaluation(self, logits, labels, predicted_positions, positions):
        with tf.name_scope("evaluation"):
            labels = tf.to_int64(labels)
            labels = tf.argmax(labels, 2)
            logits = tf.argmax(logits, 2)
            difference = tf.subtract(labels, logits, name="sub")
            character_errors = tf.count_nonzero(difference, axis=1, name="count_nonzero")
            total_wrong_characters = tf.reduce_sum(character_errors)
            total_characters = tf.to_int64(tf.size(labels))
            total_correct_characters = total_characters - total_wrong_characters
            corrects = tf.less_equal(character_errors, 0, name="is_zero")

            position_error = tf.losses.mean_squared_error(positions, predicted_positions)

            return self.tf_count(corrects,
                                 True), corrects, logits, position_error, predicted_positions, total_correct_characters, total_characters 
Example #24
Source File: deep_localization_weighted_loss_variable_length.py    From SequenceOfDigitsRecognition with MIT License    6 votes
def evaluation(self, logits, labels, predicted_positions, positions):
        with tf.name_scope("evaluation"):
            labels = tf.to_int64(labels)
            labels = tf.argmax(labels, 2)
            logits = tf.argmax(logits, 2)
            difference = tf.subtract(labels, logits, name="sub")
            character_errors = tf.count_nonzero(difference, axis=1, name="count_nonzero")
            total_wrong_characters = tf.reduce_sum(character_errors)
            total_characters = tf.to_int64(tf.size(labels))
            total_correct_characters = total_characters - total_wrong_characters
            corrects = tf.less_equal(character_errors, 0, name="is_zero")

            position_error = tf.losses.mean_squared_error(positions, predicted_positions)

            return self.tf_count(corrects,
                                 True), corrects, logits, position_error, predicted_positions, total_correct_characters, total_characters 
Example #25
Source File: deep_localization_weighted_loss_variable_length_deeper.py    From SequenceOfDigitsRecognition with MIT License    6 votes
def evaluation(self, logits, labels, predicted_positions, positions):
        with tf.name_scope("evaluation"):
            labels = tf.to_int64(labels)
            labels = tf.argmax(labels, 2)
            logits = tf.argmax(logits, 2)
            difference = tf.subtract(labels, logits, name="sub")
            character_errors = tf.count_nonzero(difference, axis=1, name="count_nonzero")
            total_wrong_characters = tf.reduce_sum(character_errors)
            total_characters = tf.to_int64(tf.size(labels))
            total_correct_characters = total_characters - total_wrong_characters
            corrects = tf.less_equal(character_errors, 0, name="is_zero")

            position_error = tf.losses.mean_squared_error(positions, predicted_positions)

            return self.tf_count(corrects,
                                 True), corrects, logits, position_error, predicted_positions, total_correct_characters, total_characters 
Example #26
Source File: GAT.py    From OpenHINE with MIT License    6 votes
def micro_f1(logits, labels, mask):
        """Accuracy with masking."""
        predicted = tf.round(tf.nn.sigmoid(logits))

        # Use integers to avoid any nasty FP behaviour
        predicted = tf.cast(predicted, dtype=tf.int32)
        labels = tf.cast(labels, dtype=tf.int32)
        mask = tf.cast(mask, dtype=tf.int32)

        # expand the mask so that broadcasting works ([nb_nodes, 1])
        mask = tf.expand_dims(mask, -1)

        # Count true positives, true negatives, false positives and false negatives.
        tp = tf.count_nonzero(predicted * labels * mask)
        tn = tf.count_nonzero((predicted - 1) * (labels - 1) * mask)
        fp = tf.count_nonzero(predicted * (labels - 1) * mask)
        fn = tf.count_nonzero((predicted - 1) * labels * mask)

        # Calculate accuracy, precision, recall and F1 score.
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        fmeasure = (2 * precision * recall) / (precision + recall)
        fmeasure = tf.cast(fmeasure, tf.float32)
        return fmeasure 
Example #27
Source File: text2text.py    From OpenSeq2Seq with Apache License 2.0    6 votes
def build_graph(self):
    file_pattern = os.path.join(self.params['data_dir'],
                                self.params['file_pattern'])
    self.batched_dataset = _read_and_batch_from_files(
      file_pattern=file_pattern,
      batch_size=self.params['batch_size'],
      max_length=self.params['max_length'],
      num_cpu_cores=self.params.get('num_cpu_cores', 2),
      shuffle=self.params['shuffle'],
      repeat=self.params['repeat'],
      num_workers=self._num_workers,
      worker_id=self._worker_id,
      batch_in_tokens=self.params.get('batch_in_tokens', True),
      pad2eight=self.params.get('pad_data_to_eight', False))

    self._iterator = self.batched_dataset.make_initializable_iterator()
    x, y = self.iterator.get_next()

    # Sequence lengths are recovered as the number of non-zero (non-padding) tokens per row
    len_x = tf.count_nonzero(x, axis=1, dtype=tf.int32)
    len_y = tf.count_nonzero(y, axis=1, dtype=tf.int32)
    if self.params['mode'] == 'train' or self.params['mode'] == 'eval':
      self._input_tensors['source_tensors'] = [x, len_x]
      self._input_tensors['target_tensors'] = [y, len_y]
    else:
      self._input_tensors['source_tensors'] = [x, len_x] 
Example #28
Source File: modules.py    From tacotron2-mandarin-griffin-lim with MIT License    6 votes
def MaskedSigmoidCrossEntropy(targets, outputs, targets_lengths, hparams, mask=None):
	'''Computes a masked SigmoidCrossEntropy with logits
	'''

	#[batch_size, time_dimension]
	#example:
	#sequence_mask([1, 3, 2], 5) = [[1., 0., 0., 0., 0.],
	#							    [1., 1., 1., 0., 0.],
	#							    [1., 1., 0., 0., 0.]]
	#Note the maxlen argument that ensures mask shape is compatible with r>1
	#This will by default mask the extra paddings caused by r>1
	if mask is None:
		mask = sequence_mask(targets_lengths, hparams.outputs_per_step, False)

	with tf.control_dependencies([tf.assert_equal(tf.shape(targets), tf.shape(mask))]):
		#Use a weighted sigmoid cross entropy to measure the <stop_token> loss. Setting hparams.cross_entropy_pos_weight
		#to 1 has the same effect as vanilla tf.nn.sigmoid_cross_entropy_with_logits.
		losses = tf.nn.weighted_cross_entropy_with_logits(targets=targets, logits=outputs, pos_weight=hparams.cross_entropy_pos_weight)

	with tf.control_dependencies([tf.assert_equal(tf.shape(mask), tf.shape(losses))]):
		masked_loss = losses * mask

	return tf.reduce_sum(masked_loss) / tf.count_nonzero(masked_loss, dtype=tf.float32) 
Example #29
Source File: ns_eu_dist.py    From openrec with Apache License 2.0    6 votes
def _build_training_graph(self):
        
        with tf.variable_scope(self._scope, reuse=self._reuse):
            tmp_user = tf.tile(tf.expand_dims(self._user, 1), [1, self._neg_num, 1])

            l2_user_pos = tf.tile(tf.reduce_sum(tf.square(tf.subtract(self._user, self._p_item)),
                                                reduction_indices=1,
                                                keep_dims=True, name="l2_user_pos"), [1, self._neg_num])
            l2_user_neg = tf.reduce_sum(tf.square(tf.subtract(tmp_user, self._n_item)),
                                        reduction_indices=2, 
                                        name="l2_user_neg")
            pos_score = (-l2_user_pos) + tf.tile(self._p_item_bias, [1, self._neg_num])
            neg_score = (-l2_user_neg) + tf.reduce_sum(self._n_item_bias, reduction_indices=2)
            scores = tf.maximum(self._margin - pos_score + neg_score, 0)
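            # Number of negative samples per user that violate the margin (non-zero hinge scores)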
            weights = tf.count_nonzero(scores, axis=1)
            weights = tf.log(tf.floor(self._max_item * tf.to_float(weights) / self._neg_num) + 1.0)
            self._loss = tf.reduce_sum(weights * tf.reduce_max(scores, axis=1))
            # self._loss = tf.reduce_sum(tf.tile(tf.reshape(weights, [-1, 1]), [1, self._neg_num]) * scores) 
Example #30
Source File: ns_log.py    From openrec with Apache License 2.0    6 votes
def _build_training_graph(self):

        with tf.variable_scope(self._scope, reuse=self._reuse):
            tmp_user = tf.tile(tf.expand_dims(self._user, 1), [1, self._neg_num, 1])
            dot_user_pos = tf.tile(tf.reduce_sum(tf.multiply(self._user, self._p_item),
                                         reduction_indices=1,
                                         keep_dims=True,
                                         name="dot_user_pos"),[1,self._neg_num])
            dot_user_neg = tf.reduce_sum(tf.multiply(tmp_user, self._n_item),
                                         reduction_indices=2,
                                         name="dot_user_neg")
            
            pos_score = dot_user_pos + tf.tile(self._p_item_bias, [1, self._neg_num])
            neg_score = dot_user_neg + tf.reduce_sum(self._n_item_bias, reduction_indices=2)
            diff = pos_score - neg_score
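            # Count how many negatives are ranked above the positive (diff < 0) for each user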
            weights = tf.count_nonzero(tf.less(diff, 0.0), axis=1)
            weights = tf.log(tf.floor(self._max_item * tf.to_float(weights) / self._neg_num) + 1.0)
            self._loss = - tf.reduce_sum(tf.log(tf.sigmoid(tf.maximum(weights * tf.reduce_min(diff, axis = 1),
                                                                      -30.0))))