Python tensorflow.python.ops.array_ops.where() Examples

The following are 30 code examples of tensorflow.python.ops.array_ops.where(), collected from open-source projects. Each example notes the source file and project it comes from; you may also want to browse the other functions and classes of the tensorflow.python.ops.array_ops module.
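
As a quick orientation before the examples (a minimal sketch, not taken from any of them): array_ops.where is exposed publicly as tf.where. With three arguments it selects elementwise between two tensors; with a single boolean argument it returns the indices of the True entries.

import tensorflow as tf

cond = tf.constant([True, False, True])
x = tf.constant([1, 2, 3])
y = tf.constant([10, 20, 30])
tf.where(cond, x, y)   # elementwise select -> [1, 20, 3]
tf.where(cond)         # index form -> [[0], [2]] (int64)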
Example #1
Source File: dirichlet.py    From lambda-packs with MIT License
def _mode(self):
    k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
    mode = (self.concentration - 1.) / (
        self.total_concentration[..., array_ops.newaxis] - k)
    if self.allow_nan_stats:
      nan = array_ops.fill(
          array_ops.shape(mode),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(
          math_ops.reduce_all(self.concentration > 1., axis=-1),
          mode, nan)
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones([], self.dtype),
            self.concentration,
            message="Mode undefined when any concentration <= 1"),
    ], mode) 
Example #2
Source File: array_grad.py    From lambda-packs with MIT License
def _SliceGrad(op, grad):
  """Gradient for Slice op."""
  # Create an Nx2 padding where the first column represents how many
  # zeros are to be prepended for each dimension, and the second
  # column indicates how many zeros are appended.
  #
  # The number of zeros to append is the shape of the input
  # elementwise-subtracted by both the begin vector and sizes vector.
  #
  # Some more reshaping is needed to assemble this tensor with the
  # right dimensions.
  input_vec = op.inputs[0]
  begin_vec = op.inputs[1]
  input_rank = array_ops.rank(input_vec)
  slice_size = array_ops.shape(op.outputs[0])

  shape = array_ops.stack([input_rank, 1])
  before_pad = array_ops.reshape(begin_vec, shape)
  after_pad = array_ops.reshape(
      array_ops.shape(input_vec) - slice_size - begin_vec, shape)
  paddings = array_ops.concat([before_pad, after_pad], 1)
  return array_ops.pad(grad, paddings), None, None 
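
A small numeric sketch (values chosen here for illustration) of what the padding construction above does for a 1-D slice x[2:5] of a length-6 input: the gradient of the slice is padded back to the input shape with zeros.

import tensorflow as tf

grad_of_slice = tf.ones([3])      # pretend upstream gradient for x[2:5]
paddings = [[2, 6 - 3 - 2]]       # [zeros before, zeros after] per dimension
tf.pad(grad_of_slice, paddings)   # -> [0., 0., 1., 1., 1., 0.]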
Example #3
Source File: backend.py    From lambda-packs with MIT License
def random_binomial(shape, p=0.0, dtype=None, seed=None):
  """Returns a tensor with random binomial distribution of values.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      p: A float, `0. <= p <= 1`, probability of binomial distribution.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  return array_ops.where(
      random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
      array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype)) 
Example #4
Source File: tensor_forest.py    From lambda-packs with MIT License
def average_impurity(self):
    """Constructs a TF graph for evaluating the average leaf impurity of a tree.

    If in regression mode, this is the leaf variance. If in classification mode,
    this is the gini impurity.

    Returns:
      The last op in the graph.
    """
    children = array_ops.squeeze(array_ops.slice(
        self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
    is_leaf = math_ops.equal(constants.LEAF_NODE, children)
    leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                                 squeeze_dims=[1]))
    counts = array_ops.gather(self.variables.node_sums, leaves)
    gini = self._weighted_gini(counts)
    # Guard against step 1, when there often are no leaves yet.
    def impurity():
      return gini
    # Since average impurity can be used for loss, when there's no data just
    # return a big number so that loss always decreases.
    def big():
      return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
    return control_flow_ops.cond(math_ops.greater(
        array_ops.shape(leaves)[0], 0), impurity, big) 
Example #5
Source File: metrics_impl.py    From lambda-packs with MIT License
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
  """If class ID is specified, filter all other classes.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
      where N >= 1. Commonly, N=1 and `predictions_idx` has shape
      [batch size, k].
    selected_id: Int id to select.

  Returns:
    Tuple of `labels` and `predictions_idx`, possibly with classes removed.
  """
  if selected_id is None:
    return labels, predictions_idx
  return (_select_class_id(labels, selected_id),
          _select_class_id(predictions_idx, selected_id)) 
Example #6
Source File: losses_impl.py    From lambda-packs with MIT License
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name) 
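
The nested where() above is the important detail: gradients flow through both branches of where(), so the division must be well defined even for entries whose result is ultimately discarded. A minimal sketch of the difference (function names here are illustrative, not from the library):

import tensorflow as tf

def naive_safe_div(num, den):
  # Forward value is fine, but 0/0 in the untaken branch makes the gradient NaN.
  return tf.where(den > 0, num / den, tf.zeros_like(num))

def guarded_safe_div(num, den):
  # Replace zero denominators before dividing, so neither branch produces NaN.
  den_nonzero = tf.where(tf.equal(den, 0), tf.ones_like(den), den)
  return tf.where(den > 0, num / den_nonzero, tf.zeros_like(num))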
Example #7
Source File: beta.py    From lambda-packs with MIT License
def _mode(self):
    mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
    if self.allow_nan_stats:
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      is_defined = math_ops.logical_and(self.concentration1 > 1.,
                                        self.concentration0 > 1.)
      return array_ops.where(is_defined, mode, nan)
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones([], dtype=self.dtype),
            self.concentration1,
            message="Mode undefined for concentration1 <= 1."),
        check_ops.assert_less(
            array_ops.ones([], dtype=self.dtype),
            self.concentration0,
            message="Mode undefined for concentration0 <= 1.")
    ], mode) 
Example #8
Source File: metrics_impl.py    From lambda-packs with MIT License
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.truediv(numerator, denominator),
      0,
      name=name) 
Example #9
Source File: head.py    From lambda-packs with MIT License
def _create_output_alternatives(self, predictions):
    """Creates output alternative for the Head.

    Args:
      predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a
        symbolic name for an output Tensor possibly but not necessarily taken
        from `PredictionKey`, and 'Tensor' is the corresponding output Tensor
        itself.

    Returns:
      `dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where
      'submodel_name' is a submodel identifier that should be consistent across
      the pipeline (here likely taken from the head_name),
      'problem_type' is a `ProblemType`,
      'tensor_name' is a symbolic name for an output Tensor possibly but not
       necessarily taken from `PredictionKey`, and
      'Tensor' is the corresponding output Tensor itself.
    """
    return {self._head_name: (self._problem_type, predictions)}


# TODO(zakaria): use contrib losses. 
Example #10
Source File: run_summarization.py    From TransferRL with MIT License
def scheduled_sampling(self, batch_size, sampling_probability, true, estimate):
    with variable_scope.variable_scope("ScheduledEmbedding"):
      # Return -1s where we do not sample, and sample_ids elsewhere
      select_sampler = bernoulli.Bernoulli(probs=sampling_probability, dtype=tf.bool)
      select_sample = select_sampler.sample(sample_shape=batch_size)
      sample_ids = array_ops.where(
                  select_sample,
                  tf.range(batch_size),
                  gen_array_ops.fill([batch_size], -1))
      where_sampling = math_ops.cast(
          array_ops.where(sample_ids > -1), tf.int32)
      where_not_sampling = math_ops.cast(
          array_ops.where(sample_ids <= -1), tf.int32)
      _estimate = array_ops.gather_nd(estimate, where_sampling)
      _true = array_ops.gather_nd(true, where_not_sampling)

      base_shape = array_ops.shape(true)
      result1 = array_ops.scatter_nd(indices=where_sampling, updates=_estimate, shape=base_shape)
      result2 = array_ops.scatter_nd(indices=where_not_sampling, updates=_true, shape=base_shape)
      return result1 + result2 
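
A hedged sketch of the scatter_nd reassembly used above (the values are illustrative): entries gathered at a subset of indices are written back into a zero tensor of the original shape, and the two scatters are summed to form the mixed output.

import tensorflow as tf

indices = tf.constant([[0], [2]])
updates = tf.constant([10, 30])
tf.scatter_nd(indices, updates, shape=[4])   # -> [10, 0, 30, 0]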
Example #11
Source File: layers.py    From tensornets with MIT License
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
     tensor: An `int` `Tensor` to be converted to a `Sparse`.
     eos_token: An integer. It is part of the target label that signifies the
       end of a sentence.
     outputs_collections: Collection to add the outputs.
     scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs) 
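
A hedged usage sketch of the conversion above with the public ops (the dense labels are illustrative): where() supplies the indices of the non-padding entries, gather_nd supplies their values.

import tensorflow as tf

dense = tf.constant([[1, 2, 0, 0], [3, 0, 0, 0]])   # 0 acts as the eos/padding token
idx = tf.where(tf.not_equal(dense, 0))              # -> [[0, 0], [0, 1], [1, 0]]
vals = tf.gather_nd(dense, idx)                     # -> [1, 2, 3]
sp = tf.SparseTensor(idx, vals, tf.shape(dense, out_type=tf.int64))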
Example #12
Source File: math_grad.py    From lambda-packs with MIT License
def _MaximumMinimumGrad(op, grad, selector_op):
  """Factor out the code for the gradient of Maximum or Minimum."""
  x = op.inputs[0]
  y = op.inputs[1]
  gdtype = grad.dtype
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  gradshape = array_ops.shape(grad)
  zeros = array_ops.zeros(gradshape, gdtype)
  xmask = selector_op(x, y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  xgrad = array_ops.where(xmask, grad, zeros)
  ygrad = array_ops.where(math_ops.logical_not(xmask), grad, zeros)
  gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
  gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
  return (gx, gy) 
Example #13
Source File: layers.py    From tensornets with MIT License
def softmax(logits, scope=None):
  """Performs softmax on Nth dimension of N-dimensional logit tensor.

  For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
  needs to have a specified number of elements (number of classes).

  Args:
    logits: N-dimensional `Tensor` with logits, where N > 1.
    scope: Optional scope for variable_scope.

  Returns:
    A `Tensor` with same shape and type as logits.
  """
  # TODO(jrru): Add axis argument which defaults to last dimension.
  with variable_scope.variable_scope(scope, 'softmax', [logits]):
    num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
    logits_2d = array_ops.reshape(logits, [-1, num_logits])
    predictions = nn.softmax(logits_2d)
    predictions = array_ops.reshape(predictions, array_ops.shape(logits))
    if not context.executing_eagerly():
      predictions.set_shape(logits.get_shape())
    return predictions 
Example #14
Source File: student_t.py    From lambda-packs with MIT License
def _mean(self):
    mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
                                     dtype=self.dtype)
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          math_ops.greater(
              self.df,
              array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          mean,
          array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              check_ops.assert_less(
                  array_ops.ones([], dtype=self.dtype),
                  self.df,
                  message="mean not defined for components of df <= 1"),
          ],
          mean) 
Example #15
Source File: sequence_queueing_state_saver.py    From lambda-packs with MIT License
def sequences(self):
    """A dict mapping keys of `input_sequences` to split and rebatched data.

    Returns:
      A dict mapping keys of `input_sequences` to tensors.
      If we had at input:

      ```python
      sequences["name"].get_shape() == [None, d1, d2, ...]
      ```

      where `None` meant the sequence time was dynamic, then for this property:

      ```python
      sequences["name"].get_shape() == [batch_size, num_unroll, d1, d2, ...].
      ```

    """
    return self._state_saver._received_sequences 
Example #16
Source File: math_grad.py    From lambda-packs with MIT License
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy 
Example #17
Source File: math_grad.py    From lambda-packs with MIT License
def _SegmentMinOrMaxGrad(op, grad, is_sorted):
  """Gradient for SegmentMin and (unsorted) SegmentMax. They share similar code."""
  zeros = array_ops.zeros(array_ops.shape(op.inputs[0]),
                          dtype=op.inputs[0].dtype)

  # Get the number of selected (minimum or maximum) elements in each segment.
  gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
  is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
  if is_sorted:
    num_selected = math_ops.segment_sum(math_ops.cast(is_selected, grad.dtype),
                                        op.inputs[1])
  else:
    num_selected = math_ops.unsorted_segment_sum(math_ops.cast(is_selected, grad.dtype),
                                                 op.inputs[1], op.inputs[2])

  # Compute the gradient for each segment. The gradient for the ith segment is
  # divided evenly among the selected elements in that segment.
  weighted_grads = math_ops.div(grad, num_selected)
  gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])

  if is_sorted:
    return array_ops.where(is_selected, gathered_grads, zeros), None
  else:
    return array_ops.where(is_selected, gathered_grads, zeros), None, None 
Example #18
Source File: inverse_gamma.py    From lambda-packs with MIT License
def _variance(self):
    var = (math_ops.square(self.rate)
           / math_ops.square(self.concentration - 1.)
           / (self.concentration - 2.))
    if self.allow_nan_stats:
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(self.concentration > 2., var, nan)
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              constant_op.constant(2., dtype=self.dtype),
              self.concentration,
              message="variance undefined when any concentration <= 2"),
      ], var) 
Example #19
Source File: attention_wrapper.py    From lambda-packs with MIT License
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
  if memory_sequence_length is None:
    return score
  message = ("All values in memory_sequence_length must greater than zero.")
  with ops.control_dependencies(
      [check_ops.assert_positive(memory_sequence_length, message=message)]):
    score_mask = array_ops.sequence_mask(
        memory_sequence_length, maxlen=array_ops.shape(score)[1])
    score_mask_values = score_mask_value * array_ops.ones_like(score)
    return array_ops.where(score_mask, score, score_mask_values) 
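
A small sketch of the boolean mask that feeds the where() above, for a batch with illustrative sequence lengths [2, 4]: valid positions keep their score, padded positions are pushed to score_mask_value.

import tensorflow as tf

mask = tf.sequence_mask([2, 4], maxlen=4)
# [[ True,  True, False, False],
#  [ True,  True,  True,  True]]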
Example #20
Source File: head.py    From lambda-packs with MIT License
def _class_labels_streaming_mean(labels, weights, class_id):
  return metrics_lib.streaming_mean(
      array_ops.where(
          math_ops.equal(
              math_ops.to_int32(class_id), math_ops.to_int32(labels)),
          array_ops.ones_like(labels), array_ops.zeros_like(labels)),
      weights=weights) 
Example #21
Source File: sequence_queueing_state_saver.py    From lambda-packs with MIT License
def key(self):
    """The key names of the given truncated unrolled examples.

    The format of the key is:

    ```python
    "%05d_of_%05d:%s" % (sequence, sequence_count, original_key)
    ```

    where `original_key` is the unique key read in by the prefetcher.

    Returns:
      A string vector of length `batch_size`, the keys.
    """
    return self._state_saver._received_keys 
Example #22
Source File: sequence_queueing_state_saver.py    From lambda-packs with MIT License
def _deconstruct_sparse_tensor_seq(input_sequence, shared_name=None):
  """Converts `SparseTensor` values into `Tensors` of IDs and meta data.

  Given a dict of keys -> `Tensor` or `SparseTensor` transforms the
  `SparseTensor` values into `Tensor` values of IDs by calling `_store_sparse`.
  The IDs are pointers into an underlying `SparseTensorsMap` that is being
  constructed. Additional meta data is returned in order to be able to
  reconstruct `SparseTensor` values after batching and segmenting the IDs
  `Tensor`.

  Args:
    input_sequence: dictionary with `Tensor` or `SparseTensor` values.
    shared_name: The shared name for the underlying `SparseTensorsMap`
      (optional, defaults to the name of the newly created op).
  Returns:
    A tuple `(sequence, sparse_tensor_keys, tensor_list)` where `sequence` is
    dictionary with the same keys as `input_sequence` but only `Tensor` values,
    `sparse_tensor_keys` is a list of the keys of the `SparseTensor` values that
    were converted, and `tensor_list` is a list of the same length with
    `Tensor` objects.
  """
  sparse_tensor_keys = [
      k for k in sorted(input_sequence.keys())
      if (isinstance(input_sequence[k], sparse_tensor.SparseTensor) or
          isinstance(input_sequence[k], sparse_tensor.SparseTensorValue))]
  if not sparse_tensor_keys:
    return input_sequence, None, sparse_tensor_keys
  sparse_tensor_list = [input_sequence[k] for k in sparse_tensor_keys]
  tensor_list = [_store_sparse(sp_tensor, shared_name=shared_name)
                 for sp_tensor in sparse_tensor_list]
  transformed_input_seq = dict(input_sequence)
  tensor_op_list = []
  for i, k in enumerate(sparse_tensor_keys):
    transformed_input_seq[k] = tensor_list[i]
    tensor_op_list += [tensor_list[i].op]
  return transformed_input_seq, sparse_tensor_keys, tensor_op_list 
Example #23
Source File: sparse_ops.py    From lambda-packs with MIT License
def sparse_row_envelope(sparse_input, row_axis=0, col_axis=1, name=None):
  """Returns the length of each 'row' in a `SparseTensor`.

  For example, if `sparse_input` has indices `[[0,0], [2, 0], [2, 1], [2, 2]]`
  and shape `[3, 3]`, this function will return `[1, 0, 3]`.

  Args:
    sparse_input: a `SparseTensor` of rank at least 2.
    row_axis: An integer. The axis for the row of the envelope matrix. Default
      is 0.
    col_axis: An integer. The axis for the col of the envelope matrix. Default
      is 1.
    name: A name for the operation (optional).

  Returns:
    A one-dimensional `Tensor` whose entries correspond to the length of each
    row of `SparseTensor`.

  Raises:
    ValueError: If row_axis and col_axis are the same axis or they are not
      integers.
  """
  if not (isinstance(row_axis, compat.integral_types) and
          isinstance(col_axis, compat.integral_types)):
    raise ValueError("`row_axis` and `col_axis` must be integers.")

  if row_axis == col_axis:
    raise ValueError("Row and column can not be the same axis.")

  with ops.name_scope(name, "sparse_row_envelope", [sparse_input]):
    indices = sparse_input.indices
    row_indices = indices[:, row_axis]
    col_indices = indices[:, col_axis]
    num_rows = math_ops.cast(sparse_input.dense_shape[row_axis], dtypes.int32)
    row_envelope = math_ops.unsorted_segment_max(
        col_indices + 1, row_indices, num_rows, name=name)
    zeros = array_ops.zeros_like(row_envelope)
    return array_ops.where(row_envelope > zeros, row_envelope, zeros) 
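
A hedged sketch of the same computation on the docstring's example, using public ops: the column index plus one is max-reduced per row, and the final where() clamps empty rows (which unsorted_segment_max fills with the dtype's minimum) back to zero.

import tensorflow as tf

indices = tf.constant([[0, 0], [2, 0], [2, 1], [2, 2]], dtype=tf.int64)
row, col = indices[:, 0], indices[:, 1]
envelope = tf.math.unsorted_segment_max(col + 1, row, num_segments=3)
tf.where(envelope > 0, envelope, tf.zeros_like(envelope))   # -> [1, 0, 3]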
Example #24
Source File: geometric.py    From lambda-packs with MIT License
def _cdf(self, counts):
    if self.validate_args:
      # We set `check_integer=False` since the CDF is defined on whole real
      # line.
      counts = math_ops.floor(
          distribution_util.embed_check_nonnegative_discrete(
              counts, check_integer=False))
    counts *= array_ops.ones_like(self.probs)
    return array_ops.where(
        counts < 0.,
        array_ops.zeros_like(counts),
        -math_ops.expm1(
            (counts + 1) * math_ops.log1p(-self.probs))) 
Example #25
Source File: head.py    From lambda-packs with MIT License
def _class_predictions_streaming_mean(predictions, weights, class_id):
  return metrics_lib.streaming_mean(
      array_ops.where(
          math_ops.equal(
              math_ops.to_int32(class_id), math_ops.to_int32(predictions)),
          array_ops.ones_like(predictions),
          array_ops.zeros_like(predictions)),
      weights=weights) 
Example #26
Source File: tensor_forest.py    From lambda-packs with MIT License
def get_stats(self, session):
    num_nodes = self.variables.end_of_tree.eval(session=session) - 1
    num_leaves = array_ops.where(
        math_ops.equal(array_ops.squeeze(array_ops.slice(
            self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
        ).eval(session=session).shape[0]
    return TreeStats(num_nodes, num_leaves) 
Example #27
Source File: normalization.py    From lambda-packs with MIT License
def _smart_select(pred, fn_then, fn_else):
  """Selects fn_then() or fn_else() based on the value of pred.

  The purpose of this function is the same as `utils.smart_cond`. However, at
  the moment there is a bug (b/36297356) that seems to kick in only when
  `smart_cond` delegates to `tf.cond`, which sometimes results in the training
  hanging when using parameter servers. This function will output the result
  of `fn_then` or `fn_else` if `pred` is known at graph construction time.
  Otherwise, it will use `tf.where` which will result in some redundant work
  (both branches will be computed but only one selected). However, the tensors
  involved will usually be small (means and variances in batchnorm), so the
  cost will be small and will not be incurred at all if `pred` is a constant.

  Args:
    pred: A boolean scalar `Tensor`.
    fn_then: A callable to use when pred==True.
    fn_else: A callable to use when pred==False.

  Returns:
    A `Tensor` whose value is fn_then() or fn_else() based on the value of pred.
  """
  pred_value = utils.constant_value(pred)
  if pred_value:
    return fn_then()
  elif pred_value is False:
    return fn_else()
  t_then = array_ops.expand_dims(fn_then(), 0)
  t_else = array_ops.expand_dims(fn_else(), 0)
  pred = array_ops.reshape(pred, [1])
  result = array_ops.where(pred, t_then, t_else)
  return array_ops.squeeze(result, [0]) 
Example #28
Source File: feature_column.py    From lambda-packs with MIT License
def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if not input_tensor.dtype.is_integer:
      raise ValueError(
          'Invalid input, not integer. key: {} dtype: {}'.format(
              self.key, input_tensor.dtype))

    values = math_ops.to_int64(input_tensor.values, name='values')
    num_buckets = math_ops.to_int64(self.num_buckets, name='num_buckets')
    zero = math_ops.to_int64(0, name='zero')
    if self.default_value is None:
      # Fail if values are out-of-range.
      assert_less = check_ops.assert_less(
          values, num_buckets, data=(values, num_buckets),
          name='assert_less_than_num_buckets')
      assert_greater = check_ops.assert_greater_equal(
          values, zero, data=(values,),
          name='assert_greater_or_equal_0')
      with ops.control_dependencies((assert_less, assert_greater)):
        values = array_ops.identity(values)
    else:
      # Assign default for out-of-range values.
      values = array_ops.where(
          math_ops.logical_or(
              values < zero, values >= num_buckets, name='out_of_range'),
          array_ops.fill(
              dims=array_ops.shape(values),
              value=math_ops.to_int64(self.default_value),
              name='default_values'),
          values)

    return sparse_tensor_lib.SparseTensor(
        indices=input_tensor.indices,
        values=values,
        dense_shape=input_tensor.dense_shape) 
Example #29
Source File: losses_impl.py    From lambda-packs with MIT License
def _num_present(losses, weights, per_batch=False):
  """Computes the number of elements in the loss function induced by `weights`.

  A given weights tensor induces different numbers of usable elements in the
  `losses` tensor. The `weights` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,
  in effect, tiled to match the shape of `losses`. Following this effective
  tile, the total number of present elements is the number of non-zero weights.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: `Tensor` of shape `[]`, `[batch_size]` or
      `[batch_size, d1, ... dK]`, where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.

  Returns:
    The number of present (non-zero) elements in the losses tensor. If
      `per_batch` is `True`, the value is returned as a tensor of size
      `[batch_size]`. Otherwise, a single scalar tensor is returned.
  """
  with ops.name_scope(None, "num_present", (losses, weights)) as scope:
    weights = math_ops.to_float(weights)
    present = array_ops.where(
        math_ops.equal(weights, 0.0),
        array_ops.zeros_like(weights),
        array_ops.ones_like(weights))
    present = weights_broadcast_ops.broadcast_weights(present, losses)
    if per_batch:
      return math_ops.reduce_sum(
          present, axis=math_ops.range(1, array_ops.rank(present)),
          keep_dims=True, name=scope)
    return math_ops.reduce_sum(present, name=scope) 
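
A hedged numeric sketch of the counting above (shapes chosen for illustration, with tf.broadcast_to standing in for the library's weights_broadcast_ops helper): where() turns the weights into a 0/1 presence mask, which is broadcast against the losses and summed.

import tensorflow as tf

losses = tf.zeros([2, 3])
weights = tf.constant([[1.0], [0.0]])            # broadcasts across the last axis
present = tf.where(tf.equal(weights, 0.0),
                   tf.zeros_like(weights), tf.ones_like(weights))
present = tf.broadcast_to(present, tf.shape(losses))
tf.reduce_sum(present)                           # -> 3.0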
Example #30
Source File: nn_grad.py    From lambda-packs with MIT License
def _EluGradGrad(op, grad):
  x = op.inputs[1]
  return (gen_nn_ops._elu_grad(grad, op.outputs[0]),
          array_ops.where(
              x < 0., gen_nn_ops._elu_grad(grad, op.outputs[0] + 1),
              array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype)))