Python tensorflow.negative() Examples

The following are code examples of tensorflow.negative(), collected from open-source projects. The source file and project that each example comes from are listed above it. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
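For orientation, here is a minimal standalone sketch of the function itself (not taken from any of the projects below); tf.negative(x) simply returns the element-wise numerical negative of x, i.e. the same result as -x:

import tensorflow as tf

x = tf.constant([1.0, -2.5, 3.0])
y = tf.negative(x)  # element-wise negation, equivalent to -x
# y is [-1.0, 2.5, -3.0]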
Example #1
Source File: loss_functions.py    From AmpliGraph with Apache License 2.0
def apply(self, scores_pos, scores_neg):
        """Interface to external world.
        This function does the input checks, preprocesses input and finally applies loss function.

        Parameters
        ----------
        scores_pos : tf.Tensor
            A tensor of scores assigned to positive statements.
        scores_neg : tf.Tensor
            A tensor of scores assigned to negative statements.

        Returns
        -------
        loss : tf.Tensor
            The loss value that must be minimized.
        """
        self._inputs_check(scores_pos, scores_neg)
        with tf.control_dependencies(self._dependencies):
            loss = self._apply(scores_pos, scores_neg)
        return loss 
Example #2
Source File: node_sequence.py    From graph-based-image-classification with MIT License
def node_sequence(sequence, width, stride):
    """Normalizes a given sequence to have a fixed width by striding over the
    sequence. The returned sequence is padded with -1 if its length is lower
    than the requested width.

    Args:
        sequence: A 1d tensor.
        width: The length of the returned sequence.
        stride: The distance between two selected nodes.

    Returns:
        A 1d tensor.
    """

    with tf.name_scope('node_sequence', values=[sequence, width, stride]):
        # Stride the sequence based on the given stride size.
        sequence = tf.strided_slice(sequence, [0], [width*stride], [stride])

        # Pad right with -1 if the sequence length is lower than width.
        padding = tf.ones([width - tf.shape(sequence)[0]], dtype=tf.int32)
        padding = tf.negative(padding)
        sequence = tf.concat([sequence, padding], 0)

    return sequence 
Example #3
Source File: dqn.py    From tf2rl with MIT License
def _train_body(self, states, actions, next_states, rewards, done, weights):
        with tf.device(self.device):
            with tf.GradientTape() as tape:
                if self._enable_categorical_dqn:
                    td_errors = self._compute_td_error_body_distributional(
                        states, actions, next_states, rewards, done)
                    q_func_loss = tf.reduce_mean(
                        huber_loss(tf.negative(td_errors),
                                   delta=self.max_grad) * weights)
                else:
                    td_errors = self._compute_td_error_body(
                        states, actions, next_states, rewards, done)
                    q_func_loss = tf.reduce_mean(
                        huber_loss(td_errors,
                                   delta=self.max_grad) * weights)

            q_func_grad = tape.gradient(
                q_func_loss, self.q_func.trainable_variables)
            self.q_func_optimizer.apply_gradients(
                zip(q_func_grad, self.q_func.trainable_variables))

            return td_errors, q_func_loss 
Example #4
Source File: aby3.py    From tf-encrypted with Apache License 2.0
def _negative_public(prot, x):
    assert isinstance(x, ABY3PublicTensor), type(x)

    x_on_0, x_on_1, x_on_2 = x.unwrapped

    with tf.name_scope("negative"):
        with tf.device(prot.servers[0].device_name):
            x_on_0_neg = -x_on_0
        with tf.device(prot.servers[1].device_name):
            x_on_1_neg = -x_on_1
        with tf.device(prot.servers[2].device_name):
            x_on_2_neg = -x_on_2
        x_neg = ABY3PublicTensor(
            prot, [x_on_0_neg, x_on_1_neg, x_on_2_neg], x.is_scaled, x.share_type
        )
    return x_neg


#
# mul helpers
# 
Example #5
Source File: test_utils.py    From zhusuan with MIT License
def testGetBackwardOpsSplit(self):
        # a -> b -> c
        #       \-> d
        a = tf.placeholder(tf.float32)
        b = tf.exp(a)
        c = tf.log(b)
        d = tf.negative(b)
        self.assertEqual(get_backward_ops([d]), [a.op, b.op, d.op])
        self.assertEqual(get_backward_ops([c]), [a.op, b.op, c.op])
        self.assertEqual(
            get_backward_ops([c, d]), [a.op, b.op, c.op, d.op])
        self.assertEqual(get_backward_ops([b, d]), [a.op, b.op, d.op])
        self.assertEqual(get_backward_ops([a, d]), [a.op, b.op, d.op])

        self.assertEqual(
            get_backward_ops([c, d], treat_as_inputs=[b]), [c.op, d.op])
        self.assertEqual(
            get_backward_ops([c], treat_as_inputs=[d]), [a.op, b.op, c.op]) 
Example #6
Source File: gather_and_scatter_mixin.py    From onnx-tensorflow with Apache License 2.0
def chk_idx_out_of_bounds_along_axis(cls, data, axis, indices):
    """ Check indices out of bounds for ScatterElement
    In Tensorflow GPU version, if an out of bound index is found,
    the index is ignored for ScatterND/TensorScatterNDUpdate.
    But ONNX spec state that it is an error if any index values
    are out of bounds. Therefore the converter need to run this
    function to verify all the indices are in bounds along the
    axis before send it to Tensoflow. If out of bound is detected
    then the caller of this function need to throw
    InvalidArgumentError exception.
    """
    data_shape = tf.cast(tf_shape(data), indices.dtype)
    limit = data_shape[axis]
    cond1 = tf.greater_equal(indices, tf.negative(limit))
    cond2 = tf.less(indices, limit)
    return tf.logical_and(cond1, cond2) 
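As a quick standalone illustration of the bounds test above (toy values, assuming eager execution; not part of onnx-tensorflow): for a data tensor of shape (3,) along axis 0, valid indices lie in the range [-3, 2].

import tensorflow as tf

data = tf.constant([10.0, 20.0, 30.0])
indices = tf.constant([0, -3, 2, 3], dtype=tf.int64)
limit = tf.cast(tf.shape(data)[0], indices.dtype)     # limit = 3
in_bounds = tf.logical_and(tf.greater_equal(indices, tf.negative(limit)),
                           tf.less(indices, limit))
# in_bounds evaluates to [True, True, True, False]; only index 3 is out of range.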
Example #7
Source File: gradient_reversal.py    From neuralmonkey with BSD 3-Clause "New" or "Revised" License
def _reverse_gradient(x: tf.Tensor) -> tf.Tensor:
    """Flips the sign of the incoming gradient during training."""

    grad_name = "gradient_reversal_{}".format(x.name)

    # pylint: disable=unused-variable,invalid-name,unused-argument
    @ops.RegisterGradient(grad_name)
    def _flip_gradients(op, grad):
        return [tf.negative(grad)]
    # pylint: enable=unused-variable,invalid-name,unused-argument

    from neuralmonkey.experiment import Experiment
    graph = Experiment.get_current().graph
    with graph.gradient_override_map({"Identity": grad_name}):
        y = tf.identity(x)

    return y 
Example #8
Source File: utils.py    From minimal-entropy-correlation-alignment with MIT License
def knn(X_test, X_ref, Y_ref, K=5):
    # Negative L1 distance from the test sample to every reference sample,
    # so that tf.nn.top_k picks out the K nearest neighbours.
    distance = tf.negative(tf.reduce_sum(tf.abs(tf.subtract(X_ref, X_test[0])), axis=1))
    values, indices = tf.nn.top_k(distance, k=K, sorted=False)

    # Collect the label (argmax over the one-hot label vector) of each neighbour.
    nearest_neighbors = []
    for k in range(K):
        nearest_neighbors.append(tf.argmax(Y_ref[indices[k]], 0))

    # Majority vote: return the label that occurs most often among the K neighbours.
    y, idx, count = tf.unique_with_counts(nearest_neighbors)
    preds = tf.slice(y, begin=[tf.argmax(count, 0)], size=tf.constant([1], dtype=tf.int64))[0]

    return preds
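A toy use of this helper might look like the following (a sketch assuming eager execution in TF 2.x; the data and shapes are invented, not from the original repository):

import tensorflow as tf

# Three one-hot-labelled reference points in 2-D and a single test point.
X_ref = tf.constant([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0]])
Y_ref = tf.constant([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
X_test = tf.constant([[0.05, 0.0]])

pred = knn(X_test, X_ref, Y_ref, K=3)
# Two of the three nearest neighbours carry label 0, so pred evaluates to 0.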
Example #9
Source File: sequence_erase.py    From onnx-tensorflow with Apache License 2.0
def chk_pos_in_bounds(cls, input_seq, pos):
    """
    Check the position is in-bounds with respect to the sequence.
    Accepted range for 'position' is in [-n, n - 1], where n is the
    number of tensors in 'input_sequence'.

    :param input_seq: input sequence
    :param pos: position of the output tensor

    :return: True if position is in-bounds 
    """
    seq_length = tf.shape(input_seq.to_sparse(), out_type=pos.dtype)[0]

    cond1 = tf.greater_equal(pos, tf.negative(seq_length))
    cond2 = tf.less_equal(pos, seq_length - 1)

    # pos >= -n and pos < n
    return tf.reduce_all(tf.logical_and(cond1, cond2)) 
Example #10
Source File: sequence_insert.py    From onnx-tensorflow with Apache License 2.0
def chk_pos_in_bounds(cls, input_seq, pos):
    """ 
    Check the position is in-bounds with respect to the sequence.
    Accepted range for 'position' is in [-n, n], where n is the 
    number of tensors in 'input_sequence'. 

    :param input_seq: input sequence
    :param pos: position to insert the tensor

    :return: True if position is in-bounds.
    """
    seq_length = tf.shape(input_seq.to_sparse(), out_type=pos.dtype)[0]

    cond1 = tf.greater_equal(pos, tf.negative(seq_length))
    cond2 = tf.less_equal(pos, seq_length)

    # pos >= -n and pos <= n
    return tf.reduce_all(tf.logical_and(cond1, cond2)) 
Example #11
Source File: pond.py    From tf-encrypted with Apache License 2.0
def _sub_public_private(prot, x, y):
    assert isinstance(x, PondPublicTensor), type(x)
    assert isinstance(y, PondPrivateTensor), type(y)
    assert x.is_scaled == y.is_scaled, "Cannot mix different encodings: {} {}".format(
        x.is_scaled, y.is_scaled
    )

    x_on_0, _ = x.unwrapped
    y0, y1 = y.unwrapped

    with tf.name_scope("sub"):

        with tf.device(prot.server_0.device_name):
            z0 = x_on_0 - y0

        with tf.device(prot.server_1.device_name):
            z1 = y1.negative()

    return PondPrivateTensor(prot, z0, z1, x.is_scaled) 
Example #12
Source File: losses.py    From youtube-8m with Apache License 2.0
def calculate_loss(self, predictions, labels, weights=None, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      if FLAGS.label_smoothing:
        float_labels = smoothing(labels)
      else:
        float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      if weights is not None:
        print(cross_entropy_loss, weights)
        weighted_loss = tf.einsum("ij,i->ij", cross_entropy_loss, weights)
        print "create weighted_loss", weighted_loss
        return tf.reduce_mean(tf.reduce_sum(weighted_loss, 1))
      else:
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1)) 
Example #13
Source File: losses.py    From youtube-8m with Apache License 2.0
def calculate_loss_distill_relabel(self, predictions, labels_distill, labels, **unused_params):
    with tf.name_scope("loss_distill_relabel"):
      print("loss_distill_relabel")
      epsilon = 10e-6
      float_labels = tf.cast(labels, tf.float32)
      sum_labels = tf.cast(tf.reduce_sum(float_labels),dtype=tf.int32)
      pos_distill, _ = tf.nn.top_k(tf.reshape(labels_distill,[-1]), k=sum_labels)
      labels_true = tf.ones(tf.shape(labels))
      labels_false = tf.zeros(tf.shape(labels))
      labels_add = tf.where(tf.greater_equal(labels_distill, pos_distill[-1]), labels_true, labels_false)
      print(labels_add.get_shape().as_list())
      float_labels = float_labels+labels_add*(1.0-float_labels)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)

      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1)) 
Example #14
Source File: loss_functions.py    From AmpliGraph with Apache License 2.0
def _apply(self, scores_pos, scores_neg):
        """Apply the loss function. Every inherited class must implement this function.
        (All the TF code must go in this function.)

        Parameters
        ----------
        scores_pos : tf.Tensor
            A tensor of scores assigned to positive statements.
        scores_neg : tf.Tensor
            A tensor of scores assigned to negative statements.

        Returns
        -------
        loss : tf.Tensor
            The loss value that must be minimized.
        """
        msg = 'This function is a placeholder in an abstract class.'
        logger.error(msg)
        raise NotImplementedError(msg)
Example #15
Source File: loss_functions.py    From AmpliGraph with Apache License 2.0
def _inputs_check(self, scores_pos, scores_neg):
        """Creates any dependencies that need to be checked before performing loss computations

        Parameters
        ----------
        scores_pos : tf.Tensor
            A tensor of scores assigned to positive statements.
        scores_neg : tf.Tensor
            A tensor of scores assigned to negative statements.
        """
        logger.debug('Creating dependencies before loss computations.')
        self._dependencies = []
        if LOSS_REGISTRY[self.name].class_params['require_same_size_pos_neg'] and self._loss_parameters['eta'] != 1:
            logger.debug('Dependencies found: \n\tRequired same size positive and negative. \n\tEta is not 1.')
            self._dependencies.append(tf.Assert(tf.equal(tf.shape(scores_pos)[0], tf.shape(scores_neg)[0]),
                                                [tf.shape(scores_pos)[0], tf.shape(scores_neg)[0]])) 
Example #16
Source File: loss_functions.py    From AmpliGraph with Apache License 2.0
def _apply(self, scores_pos, scores_neg):
        """Apply the loss function.

        Parameters
        ----------
        scores_pos : tf.Tensor, shape [n, 1]
            A tensor of scores assigned to positive statements.
        scores_neg : tf.Tensor, shape [n, 1]
            A tensor of scores assigned to negative statements.

        Returns
        -------
        loss : tf.Tensor
            The loss value that must be minimized.

        """
        margin = tf.constant(self._loss_parameters['margin'], dtype=tf.float32, name='margin')
        loss = tf.reduce_sum(tf.maximum(margin - scores_pos + scores_neg, 0))
        return loss 
Example #17
Source File: loss_functions.py    From AmpliGraph with Apache License 2.0
def _apply(self, scores_pos, scores_neg):
        """Apply the loss function.

        Parameters
        ----------
        scores_pos : tf.Tensor, shape [n, 1]
            A tensor of scores assigned to positive statements.
        scores_neg : tf.Tensor, shape [n, 1]
            A tensor of scores assigned to negative statements.

        Returns
        -------
        loss : tf.Tensor
            The loss value that must be minimized.

        """
        scores_neg = clip_before_exp(scores_neg)
        scores_pos = clip_before_exp(scores_pos)
        scores = tf.concat([-scores_pos, scores_neg], 0)
        return tf.reduce_sum(tf.log(1 + tf.exp(scores))) 
Example #18
Source File: path_model.py    From Gun-Detector with Apache License 2.0
def _word_dropout(words, input_keep_prob):
  """Drops words with probability 1 - input_keep_prob.

  Args:
    words: a list of lemmas from the paths.
    input_keep_prob: the probability to keep the word.

  Returns:
    The revised list where some of the words are <UNK>ed.
  """
  # Create the mask: (-1) to drop, 1 to keep
  prob = tf.random_uniform(tf.shape(words), 0, 1)
  condition = tf.less(prob, (1 - input_keep_prob))
  mask = tf.where(condition,
                  tf.negative(tf.ones_like(words)), tf.ones_like(words))

  # We need to keep zeros (<PAD>), and change other numbers to 1 (<UNK>)
  # if their mask is -1. First, we multiply the mask and the words.
  # Zeros will stay zeros, and words to drop will become negative.
  # Then, we change negative values to 1.
  masked_words = tf.multiply(mask, words)
  condition = tf.less(masked_words, 0)
  dropped_words = tf.where(condition, tf.ones_like(words), words)
  return dropped_words 
Example #19
Source File: losses_embedding.py    From youtube-8m with Apache License 2.0
def calculate_loss(self, predictions, labels, **unused_params):
        bound = FLAGS.softmax_bound
        vocab_size_1 = bound
        with tf.name_scope("loss_softmax"):
            epsilon = 10e-8
            float_labels = tf.cast(labels, tf.float32)
            labels_1 = float_labels[:,:vocab_size_1]
            predictions_1 = predictions[:,:vocab_size_1]
            cross_entropy_loss = CrossEntropyLoss().calculate_loss(predictions_1,labels_1)
            lables_2 = float_labels[:,vocab_size_1:]
            predictions_2 = predictions[:,vocab_size_1:]
            # l1 normalization (labels are no less than 0)
            label_rowsum = tf.maximum(
                tf.reduce_sum(lables_2, 1, keep_dims=True),
                epsilon)
            label_append = 1.0-tf.reduce_max(lables_2, 1, keep_dims=True)
            norm_float_labels = tf.concat((tf.div(lables_2, label_rowsum),label_append),axis=1)
            predictions_append = 1.0-tf.reduce_sum(predictions_2, 1, keep_dims=True)
            softmax_outputs = tf.concat((predictions_2,predictions_append),axis=1)
            softmax_loss = norm_float_labels * tf.log(softmax_outputs + epsilon) + (
                                                                                       1 - norm_float_labels) * tf.log(1 - softmax_outputs + epsilon)
            softmax_loss = tf.negative(tf.reduce_sum(softmax_loss, 1))
        return tf.reduce_mean(softmax_loss) + cross_entropy_loss 
Example #20
Source File: losses.py    From youtube-8m with Apache License 2.0
def calculate_loss(self, predictions, labels, **unused_params):
    bound = FLAGS.softmax_bound
    vocab_size_1 = bound
    with tf.name_scope("loss_softmax"):
      epsilon = 10e-8
      float_labels = tf.cast(labels, tf.float32)
      labels_1 = float_labels[:,:vocab_size_1]
      predictions_1 = predictions[:,:vocab_size_1]
      cross_entropy_loss = CrossEntropyLoss().calculate_loss(predictions_1,labels_1)
      lables_2 = float_labels[:,vocab_size_1:]
      predictions_2 = predictions[:,vocab_size_1:]
      # l1 normalization (labels are no less than 0)
      label_rowsum = tf.maximum(
          tf.reduce_sum(lables_2, 1, keep_dims=True),
          epsilon)
      label_append = 1.0-tf.reduce_max(lables_2, 1, keep_dims=True)
      norm_float_labels = tf.concat((tf.div(lables_2, label_rowsum),label_append),axis=1)
      predictions_append = 1.0-tf.reduce_sum(predictions_2, 1, keep_dims=True)
      softmax_outputs = tf.concat((predictions_2,predictions_append),axis=1)
      softmax_loss = norm_float_labels * tf.log(softmax_outputs + epsilon) + (
          1 - norm_float_labels) * tf.log(1 - softmax_outputs + epsilon)
      softmax_loss = tf.negative(tf.reduce_sum(softmax_loss, 1))
    return tf.reduce_mean(softmax_loss) + cross_entropy_loss 
Example #21
Source File: losses.py    From youtube-8m with Apache License 2.0
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      vocab_size = predictions.get_shape().as_list()[1]
      float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      neg_labels = 1 - float_labels
      predictions_pos = predictions*float_labels+10*neg_labels
      predictions_minpos = tf.reduce_min(predictions_pos,axis=1,keep_dims=True)
      predictions_neg = predictions*neg_labels-10*float_labels
      predictions_maxneg = tf.reduce_max(predictions_neg,axis=1,keep_dims=True)
      mask_1 = tf.cast(tf.greater_equal(predictions_neg, predictions_minpos),dtype=tf.float32)
      mask_2 = tf.cast(tf.less_equal(predictions_pos, predictions_maxneg),dtype=tf.float32)
      cross_entropy_loss = cross_entropy_loss*(mask_1+mask_2)*10 + cross_entropy_loss
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1)) 
Example #22
Source File: losses.py    From youtube-8m with Apache License 2.0
def calculate_loss_distill_boost(self, predictions, labels_distill, labels, **unused_params):
    with tf.name_scope("loss_distill_boost"):
      print("loss_distill_boost")
      epsilon = 10e-6
      float_labels = tf.cast(labels, tf.float32)
      batch_size = tf.shape(float_labels)[0]
      float_labels_distill = tf.cast(labels_distill, tf.float32)
      error = tf.negative(float_labels * tf.log(float_labels_distill + epsilon) + (
          1 - float_labels) * tf.log(1 - float_labels_distill + epsilon))
      error = tf.reduce_sum(error,axis=1,keep_dims=True)
      alpha = error / tf.reduce_sum(error) * tf.cast(batch_size,dtype=tf.float32)
      alpha = tf.clip_by_value(alpha, 0.5, 5)
      alpha = alpha / tf.reduce_sum(alpha) * tf.cast(batch_size,dtype=tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss * alpha)

      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1)) 
Example #23
Source File: TransE.py    From AmpliGraph with Apache License 2.0
def _fn(self, e_s, e_p, e_o):
        r"""The TransE scoring function.

        .. math::

            f_{TransE}=-||(\mathbf{e}_s + \mathbf{r}_p) - \mathbf{e}_o||_n

        Parameters
        ----------
        e_s : Tensor, shape [n]
            The embeddings of a list of subjects.
        e_p : Tensor, shape [n]
            The embeddings of a list of predicates.
        e_o : Tensor, shape [n]
            The embeddings of a list of objects.

        Returns
        -------
        score : TensorFlow operation
            The operation corresponding to the TransE scoring function.

        """

        return tf.negative(
            tf.norm(e_s + e_p - e_o, ord=self.embedding_model_params.get('norm', constants.DEFAULT_NORM_TRANSE),
                    axis=1)) 
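To make the scoring function concrete, here is a small self-contained sketch (plain TensorFlow on toy embeddings, not AmpliGraph internals; an L2 norm is used purely for illustration, whereas the original reads the norm order from the embedding model parameters):

import tensorflow as tf

# Toy embeddings for one (subject, predicate, object) triple, embedding dimension 3.
e_s = tf.constant([[1.0, 0.0, 0.0]])
e_p = tf.constant([[0.0, 1.0, 0.0]])
e_o = tf.constant([[1.0, 1.0, 0.0]])

# TransE: the closer e_s + e_p is to e_o, the higher (less negative) the score.
score = tf.negative(tf.norm(e_s + e_p - e_o, ord=2, axis=1))
# Here e_s + e_p equals e_o exactly, so the norm is 0 and the score is the maximum possible (0.0).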
Example #24
Source File: grl_op_grads.py    From Gun-Detector with Apache License 2.0
def _GradientReversalGrad(_, grad):
  """The gradients for `gradient_reversal`.

  Args:
    _: The `gradient_reversal` `Operation` that we are differentiating,
      which we can use to find the inputs and outputs of the original op.
    grad: Gradient with respect to the output of the `gradient_reversal` op.

  Returns:
    Gradient with respect to the input of `gradient_reversal`, which is simply
    the negative of the input gradient.

  """
  return tf.negative(grad) 
Example #25
Source File: grl_ops_test.py    From Gun-Detector with Apache License 2.0
def testGradientReversalOp(self):
    with tf.Graph().as_default():
      with self.test_session():
        # Test that in forward prop, gradient reversal op acts as the
        # identity operation.
        examples = tf.constant([5.0, 4.0, 3.0, 2.0, 1.0])
        output = grl_ops.gradient_reversal(examples)
        expected_output = examples
        self.assertAllEqual(output.eval(), expected_output.eval())

        # Test that shape inference works as expected.
        self.assertAllEqual(output.get_shape(), expected_output.get_shape())

        # Test that in backward prop, gradient reversal op multiplies
        # gradients by -1.
        examples = tf.constant([[1.0]])
        w = tf.get_variable(name='w', shape=[1, 1])
        b = tf.get_variable(name='b', shape=[1])
        init_op = tf.global_variables_initializer()
        init_op.run()
        features = tf.nn.xw_plus_b(examples, w, b)
        # Construct two outputs: features layer passes directly to output1, but
        # features layer passes through a gradient reversal layer before
        # reaching output2.
        output1 = features
        output2 = grl_ops.gradient_reversal(features)
        gold = tf.constant([1.0])
        loss1 = gold - output1
        loss2 = gold - output2
        opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
        grads_and_vars_1 = opt.compute_gradients(loss1,
                                                 tf.trainable_variables())
        grads_and_vars_2 = opt.compute_gradients(loss2,
                                                 tf.trainable_variables())
        self.assertAllEqual(len(grads_and_vars_1), len(grads_and_vars_2))
        for i in range(len(grads_and_vars_1)):
          g1 = grads_and_vars_1[i][0]
          g2 = grads_and_vars_2[i][0]
          # Verify that gradients of loss1 are the negative of gradients of
          # loss2.
          self.assertAllEqual(tf.negative(g1).eval(), g2.eval()) 
Example #26
Source File: aby3.py    From tf-encrypted with Apache License 2.0
def negative(self) -> "ABY3Tensor":
        """
        :See: tf.negative

        :rtype: ABY3Tensor
        :returns: A new tensor with the numerical negative value computed element-wise.
        """
        return self.prot.negative(self)
Example #27
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def run_neg(data):
    a = tf.placeholder(tf.float32, shape=data.shape, name="input")

    x = tf.negative(a)

    with tf.Session() as sess:
        output = sess.run(x, feed_dict={a: data})

    return output 
Example #28
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def export_neg(filename: str, input_shape: List[int]):
    a = tf.placeholder(tf.float32, shape=input_shape, name="input")

    x = tf.negative(a)

    return export(x, filename)