Python tensorflow.python.ops.array_ops.identity() Examples

The following are 30 code examples of tensorflow.python.ops.array_ops.identity(), drawn from open-source projects. The source file and license for each example are listed above it. You may also want to look at the other functions and classes available in the tensorflow.python.ops.array_ops module.
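Before the project examples, here is a minimal usage sketch of the public tf.identity API (array_ops.identity is its internal counterpart); the tensor names are illustrative only and not taken from any of the examples below.

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
# identity returns a tensor with the same contents as its input; it adds a
# new op to the graph, which is useful for renaming a tensor, copying it
# across devices, or anchoring control dependencies, as the examples show.
y = tf.identity(x, name="x_copy")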
Example #1
Source File: core.py    From lambda-packs with MIT License
def identity(labeled_tensor, name=None):
  """The identity op.

  See tf.identity.

  Args:
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    The tensor.
  """
  with ops.name_scope(name, 'lt_identity', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
    return LabeledTensor(
        array_ops.identity(
            labeled_tensor.tensor, name=scope),
        labeled_tensor.axes)


# We don't call this slice because that shadows a built-in. Instead, we alias
# this to lt.slice in __init__.py. 
Example #2
Source File: resource_variable_ops.py    From lambda-packs with MIT License
def read_value(self):
    """Constructs an op which reads the value of this variable.

    Should be used when there are multiple reads, or when it is desirable to
    read the value only after some condition is true.

    Returns:
     The value of the variable, as a tensor.
    """
    with ops.name_scope("Read"):
      with ops.device(self._handle.device):
        value = gen_resource_variable_ops.read_variable_op(
            self._handle, dtype=self._dtype)
    # Return an identity so it can get placed on whatever device the context
    # specifies instead of the device where the variable is.
    return array_ops.identity(value) 
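As a hedged illustration of the device-placement idiom in Example #2, the sketch below uses the public API rather than the internal ops; the variable and names here are assumptions, not part of the original source.

import tensorflow as tf

v = tf.Variable([1.0, 2.0])  # lives wherever the variable was placed
with tf.device("/CPU:0"):
    # tf.identity creates a new op under the current device scope, so the
    # returned copy is placed by this context rather than next to `v`.
    value_on_cpu = tf.identity(v.read_value())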
Example #3
Source File: adagrad_da.py    From lambda-packs with MIT License
def _apply_dense(self, grad, var):
    g_acc = self.get_slot(var, "gradient_accumulator")
    gg_acc = self.get_slot(var, "gradient_squared_accumulator")
    # Performance optimization so that worker creates a copy of the global step
    # to avoid overloading the parameter server holding the global step.
    with ops.device(grad[0].device):
      global_step = array_ops.identity(self._global_step) + 1
    return training_ops.apply_adagrad_da(
        var,
        g_acc,
        gg_acc,
        grad,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
        math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
        global_step,
        use_locking=self._use_locking) 
Example #4
Source File: adagrad_da.py    From lambda-packs with MIT License
def _apply_sparse(self, grad, var):
    g_acc = self.get_slot(var, "gradient_accumulator")
    gg_acc = self.get_slot(var, "gradient_squared_accumulator")
    # Performance optimization so that worker creates a copy of the global step
    # to avoid overloading the parameter server holding the global step.
    with ops.device(grad[0].device):
      global_step = array_ops.identity(self._global_step) + 1
    return training_ops.sparse_apply_adagrad_da(
        var,
        g_acc,
        gg_acc,
        grad.values,
        grad.indices,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
        math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
        global_step,
        use_locking=self._use_locking) 
Example #5
Source File: mvn_linear_operator.py    From lambda-packs with MIT License
def _mean(self):
    shape = self.batch_shape.concatenate(self.event_shape)
    has_static_shape = shape.is_fully_defined()
    if not has_static_shape:
      shape = array_ops.concat([
          self.batch_shape_tensor(),
          self.event_shape_tensor(),
      ], 0)

    if self.loc is None:
      return array_ops.zeros(shape, self.dtype)

    if has_static_shape and shape == self.loc.get_shape():
      return array_ops.identity(self.loc)

    # Add dummy tensor of zeros to broadcast.  This is only necessary if shape
    # != self.loc.shape, but we could not determine if this is the case.
    return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype) 
Example #6
Source File: vector_laplace_linear_operator.py    From lambda-packs with MIT License
def _mean(self):
    shape = self.batch_shape.concatenate(self.event_shape)
    has_static_shape = shape.is_fully_defined()
    if not has_static_shape:
      shape = array_ops.concat([
          self.batch_shape_tensor(),
          self.event_shape_tensor(),
      ], 0)

    if self.loc is None:
      return array_ops.zeros(shape, self.dtype)

    if has_static_shape and shape == self.loc.get_shape():
      return array_ops.identity(self.loc)

    # Add dummy tensor of zeros to broadcast.  This is only necessary if shape
    # != self.loc.shape, but we could not determine if this is the case.
    return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype) 
Example #7
Source File: tensor_array_ops.py    From lambda-packs with MIT License
def grad(self, source, flow=None, name=None):
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized.  This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    if flow is None:
      flow = self.flow
    with ops.name_scope(name, "TensorArrayGrad", [self._handle]):
      with ops.colocate_with(self._handle):
        g_handle, unused_flow = gen_data_flow_ops._tensor_array_grad_v3(
            handle=self._handle, source=source, flow_in=flow, name=name)
        with ops.control_dependencies([g_handle]):
          flow = array_ops.identity(flow, name="gradient_flow")
        g = TensorArray(
            dtype=self._dtype,
            handle=g_handle,
            flow=flow,
            infer_shape=self._infer_shape,
            colocate_with_first_write_call=False)
        g._element_shape = self._element_shape
        return g 
Example #8
Source File: tensor_array_ops.py    From lambda-packs with MIT License
def identity(self):
    """Returns a TensorArray with the same content and properties.

    Returns:
      A new TensorArray object with flow that ensures the control dependencies
      from the contexts will become control dependencies for writes, reads, etc.
      Use this object for all subsequent operations.
    """
    flow = array_ops.identity(self._flow)
    ta = TensorArray(
        dtype=self._dtype, handle=self._handle, flow=flow,
        infer_shape=self._infer_shape,
        colocate_with_first_write_call=self._colocate_with_first_write_call)
    ta._element_shape = self._element_shape
    ta._colocate_with = self._colocate_with
    return ta 
Example #9
Source File: adagrad_da.py    From lambda-packs with MIT License
def _resource_apply_sparse(self, grad, var, indices):
    g_acc = self.get_slot(var, "gradient_accumulator")
    gg_acc = self.get_slot(var, "gradient_squared_accumulator")
    # Performance optimization so that worker creates a copy of the global step
    # to avoid overloading the parameter server holding the global step.
    with ops.device(grad[0].device):
      global_step = array_ops.identity(self._global_step) + 1
    return training_ops.resource_sparse_apply_adagrad_da(
        var.handle,
        g_acc.handle,
        gg_acc.handle,
        grad,
        indices,
        math_ops.cast(self._learning_rate_tensor, grad.dtype),
        math_ops.cast(self._l1_regularization_strength, grad.dtype),
        math_ops.cast(self._l2_regularization_strength, grad.dtype),
        global_step,
        use_locking=self._use_locking) 
Example #10
Source File: state_saving_rnn_estimator.py    From lambda-packs with MIT License
def state_tuple_to_dict(state):
  """Returns a dict containing flattened `state`.

  Args:
    state: A `Tensor` or a nested tuple of `Tensors`. All of the `Tensor`s must
    have the same rank and agree on all dimensions except the last.

  Returns:
    A dict containing the `Tensor`s that make up `state`. The keys of the dict
    are of the form "STATE_PREFIX_i" where `i` is the place of this `Tensor`
    in a depth-first traversal of `state`.
  """
  with ops.name_scope('state_tuple_to_dict'):
    flat_state = nest.flatten(state)
    state_dict = {}
    for i, state_component in enumerate(flat_state):
      state_name = _get_state_name(i)
      state_value = (None if state_component is None else array_ops.identity(
          state_component, name=state_name))
      state_dict[state_name] = state_value
  return state_dict 
Example #11
Source File: deterministic.py    From lambda-packs with MIT License
def _prob(self, x):
    if self.validate_args:
      is_vector_check = check_ops.assert_rank_at_least(x, 1)
      right_vec_space_check = check_ops.assert_equal(
          self.event_shape_tensor(),
          array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1),
          message=
          "Argument 'x' not defined in the same space R^k as this distribution")
      with ops.control_dependencies([is_vector_check]):
        with ops.control_dependencies([right_vec_space_check]):
          x = array_ops.identity(x)
    return math_ops.cast(
        math_ops.reduce_all(math_ops.abs(x - self.loc) <= self._slack, axis=-1),
        dtype=self.dtype) 
Example #12
Source File: subscribe.py    From lambda-packs with MIT License
def _subscribe_new(tensor, side_effects, control_cache):
  """Helper method that subscribes a single tensor to a list of side_effects.

  Args:
    tensor: `tf.Tensor`
    side_effects: List of side_effect functions; see subscribe for details.
    control_cache: `_ControlOutputCache` helper to get control_outputs faster.
  Returns:
    The modified replacement to the passed in tensor which triggers the side
    effects.
  """
  update_input = []
  for consumer_op in list(tensor.consumers()):  # explicit copy
    update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))

  update_control_input = control_cache.get_control_outputs(tensor.op)

  # Trailing slash on name scope to replace the scope.
  name_scope = tensor.op.name + '/subscription/'
  with ops.name_scope(name_scope):
    outs = []
    for s in side_effects:
      outs += s(tensor)

    with ops.control_dependencies(outs):
      out = array_ops.identity(tensor)

  for consumer_op, index in update_input:
    consumer_op._update_input(index, out)  # pylint: disable=protected-access

  for consumer_op in update_control_input:
    consumer_op._control_inputs.remove(tensor.op)  # pylint: disable=protected-access
    consumer_op._control_inputs.append(out.op)  # pylint: disable=protected-access
    consumer_op._recompute_node_def()  # pylint: disable=protected-access

  return out 
Example #13
Source File: poisson.py    From lambda-packs with MIT License
def _variance(self):
    return array_ops.identity(self.rate) 
Example #14
Source File: poisson.py    From lambda-packs with MIT License
def _mean(self):
    return array_ops.identity(self.rate) 
Example #15
Source File: head.py    From lambda-packs with MIT License
def regression_head(label_name=None,
                    weight_column_name=None,
                    label_dimension=1,
                    enable_centered_bias=False,
                    head_name=None):
  """Creates a `Head` for linear regression.

  Args:
    label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    label_dimension: Number of regression labels per example. This is the size
      of the last dimension of the labels `Tensor` (typically, this has shape
      `[batch_size, label_dimension]`).
    enable_centered_bias: A bool. If True, estimator will learn a centered
      bias variable for each class. Rest of the model structure learns the
      residual after centered bias.
    head_name: name of the head. If provided, predictions, summary and metrics
      keys will be suffixed by `"/" + head_name` and the default variable scope
      will be `head_name`.

  Returns:
    An instance of `Head` for linear regression.
  """
  return _RegressionHead(
      label_name=label_name,
      weight_column_name=weight_column_name,
      label_dimension=label_dimension,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name,
      loss_fn=_mean_squared_loss,
      link_fn=array_ops.identity) 
Example #16
Source File: hybrid_model.py    From lambda-packs with MIT License
def _base_inference(self, data, data_spec=None):
    """Returns an op that performs inference without a softmax."""
    inference_result = self._do_layer_inference(self.layers[0], data)

    for layer in self.layers[1:]:
      inference_result = self._do_layer_inference(layer, inference_result)

    output_size = 1 if self.is_regression else self.params.num_classes
    output = layers.fully_connected(
        inference_result, output_size, activation_fn=array_ops.identity)

    return output 
Example #17
Source File: core.py    From lambda-packs with MIT License
def call(self, inputs, training=False):
    def dropped_inputs():
      return nn.dropout(inputs, 1 - self.rate,
                        noise_shape=self._get_noise_shape(inputs),
                        seed=self.seed)
    return utils.smart_cond(training,
                            dropped_inputs,
                            lambda: array_ops.identity(inputs)) 
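A rough sketch of the same training/inference switch in Example #17, using the public tf.cond in place of the internal utils.smart_cond helper; the function and argument names here are illustrative assumptions.

import tensorflow as tf

def maybe_dropout(inputs, training, rate=0.5, seed=None):
    # During training apply dropout; otherwise return the inputs unchanged
    # via identity, so both branches produce a tensor of the same shape.
    return tf.cond(tf.convert_to_tensor(training),
                   lambda: tf.nn.dropout(inputs, rate=rate, seed=seed),
                   lambda: tf.identity(inputs))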
Example #18
Source File: clustering_ops.py    From lambda-packs with MIT License
def _mini_batch_sync_updates_op(self, update_in_steps,
                                  cluster_centers_var, cluster_centers_updated,
                                  total_counts):
    if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
      assert update_in_steps is not None
      with ops.colocate_with(update_in_steps):
        def _f():
          # Note that there is a race condition here, so we do best-effort
          # updates here. We reset update_in_steps first so that other workers
          # don't duplicate the updates. Also we update cluster_center_vars
          # before resetting total_counts to avoid large updates to
          # cluster_centers_updated based on partially updated
          # cluster_center_vars.
          with ops.control_dependencies([state_ops.assign(
              update_in_steps,
              self._mini_batch_steps_per_iteration - 1)]):
            with ops.colocate_with(cluster_centers_updated):
              if self._distance_metric == COSINE_DISTANCE:
                cluster_centers = nn_impl.l2_normalize(cluster_centers_updated,
                                                       dim=1)
              else:
                cluster_centers = cluster_centers_updated
            with ops.colocate_with(cluster_centers_var):
              with ops.control_dependencies([state_ops.assign(
                  cluster_centers_var,
                  cluster_centers)]):
                with ops.colocate_with(cluster_centers_var):
                  with ops.control_dependencies([
                      state_ops.assign(total_counts,
                                       array_ops.zeros_like(total_counts))]):
                    return array_ops.identity(update_in_steps)
        return control_flow_ops.cond(
            update_in_steps <= 0,
            _f,
            lambda: state_ops.assign_sub(update_in_steps, 1))
    else:
      return control_flow_ops.no_op() 
Example #19
Source File: sequence_queueing_state_saver.py    From lambda-packs with MIT License
def _check_multiple_of(value, multiple_of):
  """Checks that value `value` is a non-zero multiple of `multiple_of`.

  Args:
    value: an int32 scalar Tensor.
    multiple_of: an int or int32 scalar Tensor.

  Returns:
    new_value: an int32 scalar Tensor matching `value`, but which includes an
      assertion that `value` is a multiple of `multiple_of`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.logical_and(
              math_ops.equal(math_ops.mod(value, multiple_of), 0),
              math_ops.not_equal(value, 0)), [
                  string_ops.string_join([
                      "Tensor %s should be a multiple of: " % value.name,
                      string_ops.as_string(multiple_of), ", but saw value: ",
                      string_ops.as_string(value),
                      ". Consider setting pad=True."
                  ])
              ])
  ]):
    new_value = array_ops.identity(value, name="multiple_of_checked")
    return new_value 
Example #20
Source File: sequence_queueing_state_saver.py    From lambda-packs with MIT License
def _check_rank(value, expected_rank):
  """Check the rank of Tensor `value`, via shape inference and assertions.

  Args:
    value: A Tensor, possibly with associated shape information.
    expected_rank: int32 scalar (optionally a `Tensor`).

  Returns:
    new_value: A Tensor matching `value`.  Accessing this tensor tests
      assertions on its rank.  If expected_rank is not a `Tensor`, then
      new_value's shape's rank has been set.

  Raises:
    ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
      is known and is not equal to `expected_rank`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.equal(expected_rank, array_ops.rank(value)), [
              string_ops.string_join([
                  "Rank of tensor %s should be: " % value.name,
                  string_ops.as_string(expected_rank), ", shape received:"
              ]), array_ops.shape(value)
          ])
  ]):
    new_value = array_ops.identity(value, name="rank_checked")
    if isinstance(expected_rank, ops.Tensor):
      expected_rank_value = tensor_util.constant_value(expected_rank)
      if expected_rank_value is not None:
        expected_rank = int(expected_rank_value)
    if not isinstance(expected_rank, ops.Tensor):
      try:
        new_value.set_shape(new_value.get_shape().with_rank(expected_rank))
      except ValueError as e:
        raise ValueError("Rank check failed for %s: %s" % (value.name, str(e)))
    return new_value 
Example #21
Source File: ops.py    From lambda-packs with MIT License
def foldl(fn, labeled_tensor, initial_value, name=None):
  """Left fold on the list of tensors unpacked from labeled_tensor.

  See tf.foldl.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
      Its arguments are (accumulated_value, next_value).
    labeled_tensor: The input tensor.
    initial_value: The initial value of the accumulator.
    name: Optional op name.

  Returns:
    The accumulated value.
  """
  with ops.name_scope(name, 'lt_foldl',
                      [labeled_tensor, initial_value]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    initial_value = core.convert_to_labeled_tensor(initial_value)

    @tc.returns(ops.Tensor)
    @tc.accepts(ops.Tensor, ops.Tensor)
    def tf_fn(accumulator, next_element):
      accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
      next_element_lt = core.LabeledTensor(
          next_element, list(labeled_tensor.axes.values())[1:])
      return fn(accumulator_lt, next_element_lt).tensor

    foldl_op = functional_ops.foldl(
        tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
    foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)

    return core.identity(foldl_lt, name=scope) 
Example #22
Source File: stochastic_gradient_estimators.py    From lambda-packs with MIT License
def get_mean_baseline(ema_decay=0.99, name=None):
  """ExponentialMovingAverage baseline.

  Args:
    ema_decay: decay rate for the ExponentialMovingAverage.
    name: name for variable scope of the ExponentialMovingAverage.

  Returns:
    Callable baseline function that takes the `StochasticTensor` (unused) and
    the downstream `loss`, and returns an EMA of the loss.
  """

  def mean_baseline(_, loss):
    with vs.variable_scope(name, default_name="MeanBaseline"):
      reduced_loss = math_ops.reduce_mean(loss)

      ema = training.ExponentialMovingAverage(decay=ema_decay, zero_debias=True)
      update_op = ema.apply([reduced_loss])

      with ops.control_dependencies([update_op]):
        # Using `identity` causes an op to be added in this context, which
        # triggers the update. Removing the `identity` means nothing is updated.
        baseline = array_ops.identity(ema.average(reduced_loss))

      return baseline

  return mean_baseline 
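A minimal graph-mode sketch of the control-dependency idiom described in the comment inside Example #22; the variable names are illustrative assumptions, not part of the original source.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

counter = tf.get_variable("counter", shape=[],
                          initializer=tf.zeros_initializer())
update_op = tf.assign_add(counter, 1.0)

with tf.control_dependencies([update_op]):
    # Evaluating `current` forces update_op to run first; without the
    # identity, nothing in this context would depend on the update.
    current = tf.identity(counter)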
Example #23
Source File: data_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def get(self, name=None):
    """Gets one element from this staging area.

    If the staging area is empty when this operation executes, it will block
    until there is an element to dequeue.

    The placement of the returned tensor will be determined by the current
    device scope when this function is called.

    Args:
      name: A name for the operation (optional).

    Returns:
      The tuple of tensors that was gotten.
    """
    if name is None:
      name = "%s_get" % self._name

    with ops.colocate_with(self._coloc_op):
      ret = gen_data_flow_ops.unstage(dtypes=self._dtypes,
                                      shared_name=self._name, name=name)

    curr_device_scope = control_flow_ops.no_op().device
    if curr_device_scope != self._coloc_op.device:
      for i in range(len(ret)):
        ret[i] = array_ops.identity(ret[i])

    for output, shape in zip(ret, self._shapes):
      output.set_shape(shape)

    return self._get_return_value(ret) 
Example #24
Source File: poisson.py    From lambda-packs with MIT License
def __init__(self,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="Poisson"):
    """Initialize a batch of Poisson distributions.

    Args:
      rate: Floating point tensor, the rate parameter of the
        distribution(s). `rate` must be positive.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = locals()
    with ops.name_scope(name, values=[rate]):
      with ops.control_dependencies([check_ops.assert_positive(rate)] if
                                    validate_args else []):
        self._rate = array_ops.identity(rate, name="rate")
    super(Poisson, self).__init__(
        dtype=self._rate.dtype,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._rate],
        name=name) 
Example #25
Source File: subscribe.py    From lambda-packs with MIT License
def _subscribe(tensor, side_effects, control_cache):
  """Helper method that subscribes a single tensor to a list of side_effects.

  This method will check if the given tensor has already been subscribed or if
  it's a tensor returned by a previous call to `subscribe()` and, if so, will
  reuse the existing identity op, appending the given side effects to the list
  of existing ones.

  Args:
    tensor: The `tf.Tensor` to be subscribed.
    side_effects: List of side_effect functions, see subscribe for details.
    control_cache: `_ControlOutputCache` helper to get control_outputs faster.
  Returns:
    The modified replacement to the passed in tensor which triggers the side
    effects, or the given tensor if it has already been subscribed.
  """
  # Check if the given tensor has a numpy compatible type (see dtypes.py).
  # If not, we cannot subscribe it, so we just return the original tensor.
  if not tensor.dtype.is_numpy_compatible:
    logging.debug(('Tensor {} has an un-supported {} type and cannot be '
                   'subscribed.').format(tensor.name, tensor.dtype))
    return tensor

  if _is_subscribed_identity(tensor):
    return _subscribe_extend(tensor, side_effects)

  # Check if the given tensor has already been subscribed by inspecting its
  # outputs.
  name_scope = tensor.op.name + '/subscription/Identity'
  consumers = tensor.consumers()
  matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
  assert len(matching_ops) <= 1, ('Op {} must only have one subscription '
                                  'op connected to it').format(tensor.op.name)
  if len(matching_ops) == 1:
    candidate_tensor = matching_ops[0].outputs[0]
    if _is_subscribed_identity(candidate_tensor):
      return _subscribe_extend(candidate_tensor, side_effects)

  return _subscribe_new(tensor, side_effects, control_cache) 
Example #26
Source File: attention_wrapper.py    From lambda-packs with MIT License
def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      if self._initial_cell_state is not None:
        cell_state = self._initial_cell_state
      else:
        cell_state = self._cell.zero_state(batch_size, dtype)
      error_message = (
          "When calling zero_state of AttentionWrapper %s: " % self._base_name +
          "Non-matching batch sizes between the memory "
          "(encoder output) and the requested batch size.  Are you using "
          "the BeamSearchDecoder?  If so, make sure your encoder output has "
          "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
          "the batch_size= argument passed to zero_state is "
          "batch_size * beam_width.")
      with ops.control_dependencies(
          [check_ops.assert_equal(batch_size,
                                  self._attention_mechanism.batch_size,
                                  message=error_message)]):
        cell_state = nest.map_structure(
            lambda s: array_ops.identity(s, name="checked_cell_state"),
            cell_state)
      if self._alignment_history:
        alignment_history = tensor_array_ops.TensorArray(
            dtype=dtype, size=0, dynamic_size=True)
      else:
        alignment_history = ()
      return AttentionWrapperState(
          cell_state=cell_state,
          time=array_ops.zeros([], dtype=dtypes.int32),
          attention=_zero_state_tensors(self._attention_size, batch_size,
                                        dtype),
          alignments=self._attention_mechanism.initial_alignments(
              batch_size, dtype),
          alignment_history=alignment_history) 
Example #27
Source File: input.py    From lambda-packs with MIT License
def limit_epochs(tensor, num_epochs=None, name=None):
  """Returns tensor `num_epochs` times and then raises an `OutOfRange` error.

  Note: creates local counter `epochs`. Use `local_variables_initializer()` to
  initialize local variables.

  Args:
    tensor: Any `Tensor`.
    num_epochs: A positive integer (optional).  If specified, limits the number
      of steps the output tensor may be evaluated.
    name: A name for the operations (optional).

  Returns:
    tensor or `OutOfRange`.

  Raises:
    ValueError: if `num_epochs` is invalid.
  """
  if num_epochs is None:
    return tensor
  if num_epochs <= 0:
    raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
  with ops.name_scope(name, "limit_epochs", [tensor]) as name:
    zero64 = constant_op.constant(0, dtype=dtypes.int64)
    epochs = vs.variable(
        zero64, name="epochs", trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES])
    counter = epochs.count_up_to(num_epochs)
    with ops.control_dependencies([counter]):
      return array_ops.identity(tensor, name=name) 
Example #28
Source File: feature_column.py    From lambda-packs with MIT License
def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if not input_tensor.dtype.is_integer:
      raise ValueError(
          'Invalid input, not integer. key: {} dtype: {}'.format(
              self.key, input_tensor.dtype))

    values = math_ops.to_int64(input_tensor.values, name='values')
    num_buckets = math_ops.to_int64(self.num_buckets, name='num_buckets')
    zero = math_ops.to_int64(0, name='zero')
    if self.default_value is None:
      # Fail if values are out-of-range.
      assert_less = check_ops.assert_less(
          values, num_buckets, data=(values, num_buckets),
          name='assert_less_than_num_buckets')
      assert_greater = check_ops.assert_greater_equal(
          values, zero, data=(values,),
          name='assert_greater_or_equal_0')
      with ops.control_dependencies((assert_less, assert_greater)):
        values = array_ops.identity(values)
    else:
      # Assign default for out-of-range values.
      values = array_ops.where(
          math_ops.logical_or(
              values < zero, values >= num_buckets, name='out_of_range'),
          array_ops.fill(
              dims=array_ops.shape(values),
              value=math_ops.to_int64(self.default_value),
              name='default_values'),
          values)

    return sparse_tensor_lib.SparseTensor(
        indices=input_tensor.indices,
        values=values,
        dense_shape=input_tensor.dense_shape) 
Example #29
Source File: categorical.py    From lambda-packs with MIT License
def _batch_shape_tensor(self):
    return array_ops.identity(self._batch_shape_val) 
Example #30
Source File: bernoulli.py    From lambda-packs with MIT License
def _mean(self):
    return array_ops.identity(self.probs)