Python tensorflow.python.ops.array_ops.zeros_like() Examples

The following are 30 code examples of tensorflow.python.ops.array_ops.zeros_like(), drawn from open-source projects. The source file and project for each example are listed above its code. You may also want to check out all available functions and classes of the module tensorflow.python.ops.array_ops.
Example #1
Source File: math_grad.py    From lambda-packs with MIT License
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy 
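The zeros_like guard above only affects the forward value of log_x. As a hedged illustration of the same pattern with the public TF 2.x API (tf.where, tf.math.log and tf.zeros_like are assumed here in place of the internal array_ops module), the branch with no sensible real value is simply masked to zero:

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 2.0])
# Same guard as in _PowGrad: return 0 where log(x) has no sensible real value.
log_x = tf.where(x > 0, tf.math.log(x), tf.zeros_like(x))
print(log_x.numpy())  # approximately [0.  0.  0.6931472]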
Example #2
Source File: attention_decoder_fn.py    From auto-alt-text-lambda-api with MIT License
def _init_attention(encoder_state):
  """Initialize attention. Handling both LSTM and GRU.

  Args:
    encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.

  Returns:
    attn: initial zero attention vector.
  """

  # Multi- vs single-layer
  # TODO(thangluong): is this the best way to check?
  if isinstance(encoder_state, tuple):
    top_state = encoder_state[-1]
  else:
    top_state = encoder_state

  # LSTM vs GRU
  if isinstance(top_state, core_rnn_cell_impl.LSTMStateTuple):
    attn = array_ops.zeros_like(top_state.h)
  else:
    attn = array_ops.zeros_like(top_state)

  return attn 
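As a rough usage sketch (hypothetical shapes, public tf API assumed), the initial zero attention vector simply mirrors the shape and dtype of the top layer's hidden state:

import tensorflow as tf

batch_size, num_units = 2, 4
h = tf.random.normal([batch_size, num_units])  # stand-in for top_state.h of an LSTM
attn = tf.zeros_like(h)                        # initial zero attention, same shape/dtype as h
print(attn.shape)                              # (2, 4)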
Example #3
Source File: learning_test.py    From auto-alt-text-lambda-api with MIT License
def testMultipleGradientsWithVariables(self):
    gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
    variable = variables_lib.Variable(array_ops.zeros_like(gradient))
    grad_to_var = (gradient, variable)
    gradient_multipliers = {variable: self._multiplier}

    [grad_to_var] = learning.multiply_gradients([grad_to_var],
                                                gradient_multipliers)

    # Ensure the variable passed through.
    self.assertEqual(grad_to_var[1], variable)

    with self.test_session() as sess:
      actual_gradient = sess.run(grad_to_var[0])
    np_testing.assert_almost_equal(actual_gradient, self._multiplied_grad_vec,
                                   5) 
Example #4
Source File: losses_impl.py    From auto-alt-text-lambda-api with MIT License
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name) 
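A minimal sketch of the same safe-divide behaviour with the public TF 2.x API (tf.where, tf.ones_like and tf.zeros_like assumed in place of the internal modules): the denominator is patched to 1 where it is zero, and the final result is forced to 0 there.

import tensorflow as tf

num = tf.constant([4.0, 5.0, 6.0])
den = tf.constant([2.0, 0.0, 3.0])
safe = tf.where(den > 0,
                tf.math.divide(num, tf.where(tf.equal(den, 0), tf.ones_like(den), den)),
                tf.zeros_like(num))
print(safe.numpy())  # [2. 0. 2.]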
Example #5
Source File: losses_impl.py    From lambda-packs with MIT License
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name) 
Example #6
Source File: loss_ops.py    From auto-alt-text-lambda-api with MIT License
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name) 
Example #7
Source File: zero_out_grad_2.py    From tensorflow-kr with Apache License 2.0
def _zero_out_grad(op, grad):
  """The gradients for `zero_out`.

  Args:
    op: The `zero_out` `Operation` that we are differentiating, which we can use
      to find the inputs and outputs of the original op.
    grad: Gradient with respect to the output of the `zero_out` op.

  Returns:
    Gradients with respect to the input of `zero_out`.
  """
  to_zero = op.inputs[0]
  shape = array_ops.shape(to_zero)
  index = array_ops.zeros_like(shape)
  first_grad = array_ops.reshape(grad, [-1])[0]
  to_zero_grad = sparse_ops.sparse_to_dense(index, shape, first_grad, 0)
  return [to_zero_grad]  # List of one Tensor, since we have one input 
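Because zero_out keeps only the first element of its input, its gradient routes grad[0] to position [0, 0] and zeros everything else. A hedged re-creation of that index/shape trick with the public API (tf.scatter_nd is used here instead of the older sparse_to_dense; the tensors are hypothetical):

import tensorflow as tf

to_zero = tf.constant([[3.0, 1.0], [4.0, 1.0]])
grad = tf.constant([[0.5, 0.0], [0.0, 0.0]])        # hypothetical upstream gradient

shape = tf.shape(to_zero)                           # [2, 2]
index = tf.zeros_like(shape)                        # [0, 0]: the only position that receives gradient
first_grad = tf.reshape(grad, [-1])[0]              # 0.5
to_zero_grad = tf.scatter_nd(index[tf.newaxis, :], [first_grad], shape)
print(to_zero_grad.numpy())                         # [[0.5 0. ] [0.  0. ]]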
Example #8
Source File: mertric.py    From tf.fashionAI with Apache License 2.0
def _safe_div(numerator, denominator, name):
  """Divides two tensors element-wise, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  t = math_ops.truediv(numerator, denominator)
  zero = array_ops.zeros_like(t, dtype=denominator.dtype)
  condition = math_ops.greater(denominator, zero)
  zero = math_ops.cast(zero, t.dtype)
  return array_ops.where(condition, t, zero, name=name) 
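Unlike Examples #4-#6, this variant divides first and masks the (possibly infinite) quotient afterwards. A small sketch of that behaviour, assuming the public TF 2.x API:

import tensorflow as tf

num = tf.constant([4.0, 5.0])
den = tf.constant([2.0, 0.0])
t = tf.math.truediv(num, den)                 # [2., inf]: raw quotient, inf where den == 0
zero = tf.zeros_like(t)
print(tf.where(den > tf.zeros_like(den), t, zero).numpy())  # [2. 0.]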
Example #9
Source File: control_flow_ops.py    From lambda-packs with MIT License
def ZerosLikeOutsideLoop(op, index):
  """Create zeros_like for the specified output of an op."""
  val = op.outputs[index]
  if not IsSwitch(op):
    return array_ops.zeros_like(val, optimize=False)
  else:
    op_ctxt = op._get_control_flow_context()
    if op_ctxt:
      # We are in a cond context. Use a switch to create zeros only when needed.
      pred = op_ctxt.pred
      branch = op_ctxt.branch
      switch_val = switch(op.inputs[0], pred)[1 - branch]
      zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
      return array_ops.zeros(zeros_shape, dtype=val.dtype)
    else:
      return array_ops.zeros_like(val, optimize=False) 
Example #10
Source File: zero_out_grad_2.py    From deep_image_model with Apache License 2.0
def _zero_out_grad(op, grad):
  """The gradients for `zero_out`.

  Args:
    op: The `zero_out` `Operation` that we are differentiating, which we can use
      to find the inputs and outputs of the original op.
    grad: Gradient with respect to the output of the `zero_out` op.

  Returns:
    Gradients with respect to the input of `zero_out`.
  """
  to_zero = op.inputs[0]
  shape = array_ops.shape(to_zero)
  index = array_ops.zeros_like(shape)
  first_grad = array_ops.reshape(grad, [-1])[0]
  to_zero_grad = sparse_ops.sparse_to_dense(index, shape, first_grad, 0)
  return [to_zero_grad]  # List of one Tensor, since we have one input 
Example #11
Source File: loss_ops.py    From lambda-packs with MIT License
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name) 
Example #12
Source File: my_attention_decoder_fn.py    From SentenceFunction with Apache License 2.0
def _init_attention(encoder_state):
  """Initialize attention. Handling both LSTM and GRU.

  Args:
    encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.

  Returns:
    attn: initial zero attention vector.
  """

  # Multi- vs single-layer
  # TODO(thangluong): is this the best way to check?
  if isinstance(encoder_state, tuple):
    top_state = encoder_state[-1]
  else:
    top_state = encoder_state

  # LSTM vs GRU
  if isinstance(top_state, rnn_cell_impl.LSTMStateTuple):
    attn = array_ops.zeros_like(top_state.h)
  else:
    attn = array_ops.zeros_like(top_state)

  return attn 
Example #13
Source File: linear_operator_identity.py    From auto-alt-text-lambda-api with MIT License
def _assert_self_adjoint(self):
    imag_multiplier = math_ops.imag(self.multiplier)
    return check_ops.assert_equal(
        array_ops.zeros_like(imag_multiplier),
        imag_multiplier,
        message="LinearOperator was not self-adjoint") 
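A self-contained sketch of the same check with the public API (tf.math.imag and tf.debugging.assert_equal assumed): a scaled-identity operator is self-adjoint exactly when its multiplier has zero imaginary part.

import tensorflow as tf

multiplier = tf.constant([1.0 + 0.0j, 2.0 + 0.0j])   # purely real, so the operator is self-adjoint
imag_multiplier = tf.math.imag(multiplier)
tf.debugging.assert_equal(tf.zeros_like(imag_multiplier), imag_multiplier,
                          message="LinearOperator was not self-adjoint")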
Example #14
Source File: math_grad.py    From deep_image_model with Apache License 2.0
def _SelectGrad(op, grad):
  c = op.inputs[0]
  x = op.inputs[1]
  zeros = array_ops.zeros_like(x)
  return (None, math_ops.select(c, grad, zeros),
          math_ops.select(c, zeros, grad)) 
Example #15
Source File: control_flow_ops.py    From deep_image_model with Apache License 2.0
def ZerosLikeOutsideLoop(op, index):
  """Create zeros_like for the specified output of an op."""
  val = op.outputs[index]
  if not IsSwitch(op):
    return array_ops.zeros_like(val, optimize=False)
  else:
    op_ctxt = op._get_control_flow_context()
    pred = op_ctxt.pred
    branch = op_ctxt.branch
    switch_val = switch(op.inputs[0], pred)[1 - branch]
    zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
    return array_ops.zeros(zeros_shape, dtype=val.dtype) 
Example #16
Source File: ops_test.py    From auto-alt-text-lambda-api with MIT License
def test(self):
    like_lt = ops.zeros_like(self.original_lt)
    golden_lt = core.LabeledTensor(
        array_ops.zeros_like(self.original_lt.tensor), self.original_lt.axes)
    self.assertLabeledTensorsEqual(like_lt, golden_lt) 
Example #17
Source File: ops_test.py    From auto-alt-text-lambda-api with MIT License
def test_name(self):
    like_lt = ops.zeros_like(self.original_lt)
    self.assertIn('lt_zeros_like', like_lt.name) 
Example #18
Source File: sparse_ops.py    From tf-slim with Apache License 2.0
def sparse_row_envelope(sparse_input, row_axis=0, col_axis=1, name=None):
  """Returns the length of each 'row' in a `SparseTensor`.

  For example, if `sparse_input` has indices `[[0,0], [2, 0], [2, 1], [2, 2]]`
  and shape `[3, 3]`, this function will return `[1, 0, 3]`.

  Args:
    sparse_input: a `SparseTensor` of rank at least 2.
    row_axis: An integer. The axis for the row of the envelope matrix. Default
      is 0.
    col_axis: An integer. The axis for the col of the envelope matrix. Default
      is 1.
    name: A name for the operation (optional).

  Returns:
    A one-dimensional `Tensor` whose entries correspond to the length of each
    row of `SparseTensor`.

  Raises:
    ValueError: If row_axis and col_axis are the same axis or they are not
      integers.
  """
  if not (isinstance(row_axis, compat.integral_types) and
          isinstance(col_axis, compat.integral_types)):
    raise ValueError("`row_axis` and `col_axis` must be integers.")

  if row_axis == col_axis:
    raise ValueError("Row and column can not be the same axis.")

  with ops.name_scope(name, "sparse_row_envelope", [sparse_input]):
    indices = sparse_input.indices
    row_indices = indices[:, row_axis]
    col_indices = indices[:, col_axis]
    num_rows = math_ops.cast(sparse_input.dense_shape[row_axis], dtypes.int32)
    row_envelope = math_ops.unsorted_segment_max(
        col_indices + 1, row_indices, num_rows, name=name)
    zeros = array_ops.zeros_like(row_envelope)
    return array_ops.where(row_envelope > zeros, row_envelope, zeros) 
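The docstring example can be reproduced directly. Below is a minimal public-API sketch (tf.sparse.SparseTensor and tf.math.unsorted_segment_max assumed) in which row 1 has no entries and therefore gets length 0 via the zeros_like clamp:

import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 0], [2, 0], [2, 1], [2, 2]],
                            values=[1.0, 1.0, 1.0, 1.0],
                            dense_shape=[3, 3])
row_indices = sp.indices[:, 0]
col_indices = sp.indices[:, 1]
num_rows = tf.cast(sp.dense_shape[0], tf.int32)
row_envelope = tf.math.unsorted_segment_max(col_indices + 1, row_indices, num_rows)
zeros = tf.zeros_like(row_envelope)                  # empty rows come back as a huge negative sentinel
print(tf.where(row_envelope > zeros, row_envelope, zeros).numpy())  # [1 0 3]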
Example #19
Source File: target_column.py    From auto-alt-text-lambda-api with MIT License
def logits_to_predictions(self, logits, proba=False):
    if proba:
      raise ValueError(
          "logits to probabilities is not supported for _BinarySvmTargetColumn")

    logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
    return math_ops.argmax(logits, 1)


# TODO(zakaria): use contrib losses. 
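A quick sketch of what that concat does (public API, made-up logits): padding a zero column in front of a single-column logit turns it into two-class logits, so argmax yields 1 exactly when the original logit is positive.

import tensorflow as tf

logits = tf.constant([[-1.2], [0.7], [3.0]])                 # one logit per example
two_class = tf.concat([tf.zeros_like(logits), logits], 1)    # implicit class-0 logit is 0
print(tf.argmax(two_class, 1).numpy())                       # [0 1 1]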
Example #20
Source File: target_column.py    From auto-alt-text-lambda-api with MIT License
def logits_to_predictions(self, logits, proba=False):
    if self.num_label_columns == 1:
      logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1) 
Example #21
Source File: control_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def ZerosLikeOutsideLoop(op, index):
  """Create zeros_like for the specified output of an op."""
  val = op.outputs[index]
  if not IsSwitch(op):
    return array_ops.zeros_like(val, optimize=False)
  else:
    op_ctxt = op._get_control_flow_context()
    pred = op_ctxt.pred
    branch = op_ctxt.branch
    switch_val = switch(op.inputs[0], pred)[1 - branch]
    zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
    return array_ops.zeros(zeros_shape, dtype=val.dtype) 
Example #22
Source File: head.py    From auto-alt-text-lambda-api with MIT License
def _one_class_to_two_class_logits(logits):
  return array_ops.concat((array_ops.zeros_like(logits), logits), 1) 
Example #23
Source File: learning_test.py    From auto-alt-text-lambda-api with MIT License
def testMultipleOfNoneGradRaisesError(self):
    gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
    variable = variables_lib.Variable(array_ops.zeros_like(gradient))
    grad_to_var = (None, variable)
    gradient_multipliers = {variable: self._multiplier}
    with self.assertRaises(ValueError):
      learning.multiply_gradients(grad_to_var, gradient_multipliers) 
Example #24
Source File: learning_test.py    From auto-alt-text-lambda-api with MIT License
def testNonDictMultiplierRaisesError(self):
    gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
    variable = variables_lib.Variable(array_ops.zeros_like(gradient))
    grad_to_var = (gradient, variable)
    with self.assertRaises(ValueError):
      learning.multiply_gradients([grad_to_var], 3) 
Example #25
Source File: learning_test.py    From auto-alt-text-lambda-api with MIT License
def testEmptyMultiplesRaisesError(self):
    gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
    variable = variables_lib.Variable(array_ops.zeros_like(gradient))
    grad_to_var = (gradient, variable)
    with self.assertRaises(ValueError):
      learning.multiply_gradients([grad_to_var], {}) 
Example #26
Source File: linalg_grad.py    From auto-alt-text-lambda-api with MIT License
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  v = op.outputs[1]
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
  with ops.control_dependencies([grad_e.op, grad_v.op]):
    if grad_v is not None:
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only defined
      # up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.reciprocal(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e) + f * math_ops.matmul(
                  v, grad_v, adjoint_a=True),
              v,
              adjoint_b=True))
    else:
      grad_a = math_ops.matmul(
          v, math_ops.matmul(
              array_ops.matrix_diag(grad_e), v, adjoint_b=True))
    # The forward op only depends on the lower triangular part of a, so here we
    # symmetrize and take the lower triangle
    grad_a = array_ops.matrix_band_part(
        grad_a + array_ops.matrix_transpose(grad_a), -1, 0)
    grad_a = array_ops.matrix_set_diag(grad_a,
                                       0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a 
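The zeros_like(e) call above only serves to zero out the singular diagonal of the 1 / (e_i - e_j) matrix. A small hedged sketch of just that construction with the public API (tf.linalg.set_diag assumed; the eigenvalues are made up and distinct):

import tensorflow as tf

e = tf.constant([1.0, 2.0, 4.0])                          # hypothetical distinct eigenvalues
diff = tf.expand_dims(e, -2) - tf.expand_dims(e, -1)      # pairwise eigenvalue differences
f = tf.linalg.set_diag(tf.math.reciprocal(diff), tf.zeros_like(e))
print(f.numpy())   # off-diagonal: reciprocals of the differences; diagonal: 0 instead of inf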
Example #27
Source File: losses_impl.py    From auto-alt-text-lambda-api with MIT License
def _num_present(losses, weights, per_batch=False):
  """Computes the number of elements in the loss function induced by `weights`.

  A given weights tensor induces different numbers of usable elements in the
  `losses` tensor. The `weights` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,
  in effect, tiled to match the shape of `losses`. Following this effective
  tile, the total number of present elements is the number of non-zero weights.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: `Tensor` of shape `[]`, `[batch_size]` or
      `[batch_size, d1, ... dK]`, where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.

  Returns:
    The number of present (non-zero) elements in the losses tensor. If
      `per_batch` is `True`, the value is returned as a tensor of size
      `[batch_size]`. Otherwise, a single scalar tensor is returned.
  """
  with ops.name_scope(None, "num_present", (losses, weights)) as scope:
    weights = math_ops.to_float(weights)
    present = array_ops.where(
        math_ops.equal(weights, 0.0),
        array_ops.zeros_like(weights),
        array_ops.ones_like(weights))
    present = weights_broadcast_ops.broadcast_weights(present, losses)
    if per_batch:
      return math_ops.reduce_sum(
          present, axis=math_ops.range(1, array_ops.rank(present)),
          keep_dims=True, name=scope)
    return math_ops.reduce_sum(present, name=scope) 
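A compact sketch of the zeros_like / ones_like masking step on its own (public API, hypothetical weights), showing both the total and the per-batch counts:

import tensorflow as tf

weights = tf.constant([[0.0, 2.0, 1.0],
                       [0.0, 0.0, 3.0]])
present = tf.where(tf.equal(weights, 0.0),
                   tf.zeros_like(weights),
                   tf.ones_like(weights))
print(tf.reduce_sum(present).numpy())                          # 3.0 non-zero weights in total
print(tf.reduce_sum(present, axis=1, keepdims=True).numpy())   # [[2.] [1.]] per batch row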
Example #28
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _ComplexAbsGrad(op, grad):
  """Returns the gradient of ComplexAbs."""
  # TODO(b/27786104): The cast to complex could be removed once arithmetic
  # supports mixtures of complex64 and real values.
  return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
          math_ops.sign(op.inputs[0])) 
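Here zeros_like just supplies the imaginary part needed to promote a real gradient to a complex tensor; a one-line sketch with the public API:

import tensorflow as tf

grad = tf.constant([0.5, -1.0])
complex_grad = tf.complex(grad, tf.zeros_like(grad))   # real values become complex64 with zero imaginary part
print(complex_grad.numpy())                            # [ 0.5+0.j -1. +0.j]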
Example #29
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _SelectGrad(op, grad):
  c = op.inputs[0]
  x = op.inputs[1]
  zeros = array_ops.zeros_like(x)
  return (None, array_ops.where(c, grad, zeros),
          array_ops.where(c, zeros, grad)) 
Example #30
Source File: tf_helpers.py    From Counterfactual-StoryRW with MIT License
def __init__(self, inputs, sequence_length, time_major=False, name=None):
        """Initializer.

        Args:
          inputs: A (structure of) input tensors.
          sequence_length: An int32 vector tensor.
          time_major: Python bool.  Whether the tensors in `inputs` are time major.
            If `False` (default), they are assumed to be batch major.
          name: Name scope for any created operations.

        Raises:
          ValueError: if `sequence_length` is not a 1D tensor.
        """
        with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
            inputs = ops.convert_to_tensor(inputs, name="inputs")
            self._inputs = inputs
            if not time_major:
                inputs = nest.map_structure(_transpose_batch_time, inputs)

            self._input_tas = nest.map_structure(_unstack_ta, inputs)
            self._sequence_length = ops.convert_to_tensor(
                sequence_length, name="sequence_length")
            if self._sequence_length.get_shape().ndims != 1:
                raise ValueError(
                    "Expected sequence_length to be a vector, but received shape: %s" %
                    self._sequence_length.get_shape())

            self._zero_inputs = nest.map_structure(
                lambda inp: array_ops.zeros_like(inp[0, :]), inputs)

            self._batch_size = shape_list(sequence_length)[0]