Python tensorflow.python.ops.math_ops.to_int64() Examples

The following are 30 code examples of tensorflow.python.ops.math_ops.to_int64(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the module tensorflow.python.ops.math_ops.
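math_ops.to_int64(x) is a thin wrapper around a cast to int64. It was deprecated in late TensorFlow 1.x in favor of tf.cast and survives in 2.x only as tf.compat.v1.to_int64. A minimal sketch of the equivalence:

import tensorflow.compat.v1 as tf

x = tf.constant([1, 2, 3], dtype=tf.int32)
a = tf.to_int64(x)        # deprecated 1.x alias of math_ops.to_int64
b = tf.cast(x, tf.int64)  # equivalent, preferred spelling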
Example #1
Source File: feature_column.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))

    _assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))

    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_tensor` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.to_int64(input_tensor)

    return lookup_ops.index_table_from_tensor(
        vocabulary_list=tuple(self.vocabulary_list),
        default_value=self.default_value,
        num_oov_buckets=self.num_oov_buckets,
        dtype=key_dtype,
        name='{}_lookup'.format(self.key)).lookup(input_tensor) 
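For context, here is a hedged sketch of the lookup this transform builds, calling lookup_ops directly with a hypothetical two-token string vocabulary and one OOV bucket (TF 1.x graph mode assumed):

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import lookup_ops

table = lookup_ops.index_table_from_tensor(
    vocabulary_list=('a', 'b'), num_oov_buckets=1)
ids = table.lookup(tf.constant(['b', 'z']))  # 'z' falls into the OOV bucket
with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(ids))  # [1 2]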
Example #2
Source File: data_flow_ops.py    From lambda-packs with MIT License
def set_global_step(self, new_global_step, name=None):
    """Sets the global time step of the accumulator.

    The operation logs a warning if we attempt to set to a time step that is
    lower than the accumulator's own time step.

    Args:
      new_global_step: Value of new time step. Can be a variable or a constant.
      name: Optional name for the operation.

    Returns:
      Operation that sets the accumulator's time step.
    """
    return gen_data_flow_ops.accumulator_set_global_step(
        self._accumulator_ref,
        math_ops.to_int64(ops.convert_to_tensor(new_global_step)),
        name=name) 
Example #3
Source File: crf.py    From lambda-packs with MIT License
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be no greater than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks 
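The same mask can be produced with the public API; a small sketch assuming lengths [1, 3] and max_length 4:

import tensorflow.compat.v1 as tf

lengths = tf.constant([1, 3])
masks = tf.to_float(tf.sequence_mask(lengths, maxlen=4))
# masks == [[1., 0., 0., 0.],
#           [1., 1., 1., 0.]]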
Example #4
Source File: util.py    From lambda-packs with MIT License
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Floating-point `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name) 
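A simplified sketch of how such an assert op is typically wired into a graph, gating a downstream op via control dependencies (TF 1.x graph mode assumed; the round step is omitted here for brevity):

import tensorflow.compat.v1 as tf

x = tf.constant([1.0, 2.0, 3.0])
assert_op = tf.assert_equal(x, tf.cast(tf.to_int64(x), x.dtype))
with tf.control_dependencies([assert_op]):
    y = tf.identity(x)  # evaluating y fails if x has non-integer components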
Example #5
Source File: tfexample_decoder.py    From lambda-packs with MIT License
def tensors_to_item(self, keys_to_tensors):
    indices = keys_to_tensors[self._indices_key]
    values = keys_to_tensors[self._values_key]
    if self._shape_key:
      shape = keys_to_tensors[self._shape_key]
      if isinstance(shape, sparse_tensor.SparseTensor):
        shape = sparse_ops.sparse_tensor_to_dense(shape)
    elif self._shape:
      shape = self._shape
    else:
      shape = indices.dense_shape
    indices_shape = array_ops.shape(indices.indices)
    rank = indices_shape[1]
    ids = math_ops.to_int64(indices.values)
    indices_columns_to_preserve = array_ops.slice(
        indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
    new_indices = array_ops.concat(
        [indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)

    tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
    if self._densify:
      tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
    return tensor 
Example #6
Source File: feature_column.py    From lambda-packs with MIT License
def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))

    _assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))

    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_tensor` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.to_int64(input_tensor)

    return lookup_ops.index_table_from_tensor(
        vocabulary_list=tuple(self.vocabulary_list),
        default_value=self.default_value,
        dtype=key_dtype,
        name='{}_lookup'.format(self.key)).lookup(input_tensor) 
Example #7
Source File: data_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def set_global_step(self, new_global_step, name=None):
    """Sets the global time step of the accumulator.

    The operation logs a warning if we attempt to set to a time step that is
    lower than the accumulator's own time step.

    Args:
      new_global_step: Value of new time step. Can be a variable or a constant.
      name: Optional name for the operation.

    Returns:
      Operation that sets the accumulator's time step.
    """
    return gen_data_flow_ops.accumulator_set_global_step(
        self._accumulator_ref,
        math_ops.to_int64(ops.convert_to_tensor(new_global_step)),
        name=name) 
Example #8
Source File: data_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def apply_grad(self, grad, local_step=0, name=None):
    """Attempts to apply a gradient to the accumulator.

    The attempt is silently dropped if the gradient is stale, i.e., local_step
    is less than the accumulator's global time step.

    Args:
      grad: The gradient tensor to be applied.
      local_step: Time step at which the gradient was computed.
      name: Optional name for the operation.

    Returns:
      The operation that (conditionally) applies a gradient to the accumulator.

    Raises:
      ValueError: If grad is of the wrong shape
    """
    grad = ops.convert_to_tensor(grad, self._dtype)
    grad.get_shape().assert_is_compatible_with(self._shape)
    local_step = math_ops.to_int64(ops.convert_to_tensor(local_step))
    return gen_data_flow_ops.accumulator_apply_gradient(
        self._accumulator_ref, local_step=local_step, gradient=grad, name=name) 
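Both accumulator methods shown in this file are reachable through the public ConditionalAccumulator wrapper; a hedged TF 1.x sketch:

import tensorflow.compat.v1 as tf

acc = tf.ConditionalAccumulator(dtype=tf.float32, shape=[2])
apply_op = acc.apply_grad(tf.constant([1.0, 2.0]), local_step=0)
set_step_op = acc.set_global_step(1)     # grads with local_step < 1 are now stale
take_op = acc.take_grad(num_required=1)  # averages the accepted gradients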
Example #9
Source File: tfexample_decoder.py    From auto-alt-text-lambda-api with MIT License
def tensors_to_item(self, keys_to_tensors):
    indices = keys_to_tensors[self._indices_key]
    values = keys_to_tensors[self._values_key]
    if self._shape_key:
      shape = keys_to_tensors[self._shape_key]
      if isinstance(shape, sparse_tensor.SparseTensor):
        shape = sparse_ops.sparse_tensor_to_dense(shape)
    elif self._shape:
      shape = self._shape
    else:
      shape = indices.dense_shape
    indices_shape = array_ops.shape(indices.indices)
    rank = indices_shape[1]
    ids = math_ops.to_int64(indices.values)
    indices_columns_to_preserve = array_ops.slice(
        indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
    new_indices = array_ops.concat(
        [indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)

    tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
    if self._densify:
      tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
    return tensor 
Example #10
Source File: crf.py    From auto-alt-text-lambda-api with MIT License
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be no greater than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks 
Example #11
Source File: distribution_util.py    From auto-alt-text-lambda-api with MIT License
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name) 
Example #12
Source File: crf.py    From tensorflow_nlp with Apache License 2.0
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be no greater than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks

# Computes the unnormalized score of a tag sequence.
Example #13
Source File: tfexample_decoder.py    From tf-slim with Apache License 2.0
def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to a concatenated list of keypoints.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      [time, num_keypoints, 2] tensor of keypoint coordinates, in order [y, x].
          Whether the tensor is a SparseTensor or a dense Tensor is determined
          by the return_dense parameter. Empty positions in the sparse tensor
          are filled with -1.0 values.
    """
    coordinates = []
    for key in self._full_keys:
      value = keys_to_tensors[key]
      expanded_dims = array_ops.concat(
          [math_ops.to_int64(array_ops.shape(value)),
           constant_op.constant([1], dtype=dtypes.int64)], 0)
      coordinate = sparse_ops.sparse_reshape(value, expanded_dims)
      coordinates.append(coordinate)
    keypoints = sparse_ops.sparse_concat(2, coordinates)
    if self._return_dense:
      keypoints = sparse_ops.sparse_tensor_to_dense(
          keypoints, default_value=self._default_value)
    return keypoints 
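A small sketch of the shape expansion above: each parsed [time, num_keypoints] sparse tensor gains a trailing unit dimension so the per-key tensors can be concatenated along axis 2 (values here are hypothetical):

import tensorflow.compat.v1 as tf

value = tf.SparseTensor(indices=[[0, 0], [0, 1]], values=[0.1, 0.7],
                        dense_shape=[1, 2])
expanded_dims = tf.concat(
    [tf.to_int64(tf.shape(value)), tf.constant([1], dtype=tf.int64)], 0)
coordinate = tf.sparse_reshape(value, expanded_dims)  # dense_shape [1, 2, 1]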
Example #14
Source File: data_flow_ops.py    From lambda-packs with MIT License
def apply_grad(self, grad, local_step=0, name=None):
    """Attempts to apply a gradient to the accumulator.

    The attempt is silently dropped if the gradient is stale, i.e., local_step
    is less than the accumulator's global time step.

    Args:
      grad: The gradient tensor to be applied.
      local_step: Time step at which the gradient was computed.
      name: Optional name for the operation.

    Returns:
      The operation that (conditionally) applies a gradient to the accumulator.

    Raises:
      ValueError: If grad is of the wrong shape
    """
    grad = ops.convert_to_tensor(grad, self._dtype)
    grad.get_shape().assert_is_compatible_with(self._shape)
    local_step = math_ops.to_int64(ops.convert_to_tensor(local_step))
    return gen_data_flow_ops.accumulator_apply_gradient(
        self._accumulator_ref, local_step=local_step, gradient=grad, name=name) 
Example #15
Source File: data_flow_ops.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def apply_grad(self, grad, local_step=0, name=None):
    """Attempts to apply a gradient to the accumulator.

    The attempt is silently dropped if the gradient is stale, i.e., local_step
    is less than the accumulator's global time step.

    Args:
      grad: The gradient tensor to be applied.
      local_step: Time step at which the gradient was computed.
      name: Optional name for the operation.

    Returns:
      The operation that (conditionally) applies a gradient to the accumulator.

    Raises:
      ValueError: If grad is of the wrong shape
    """
    grad = ops.convert_to_tensor(grad, self._dtype)
    grad.get_shape().assert_is_compatible_with(self._shape)
    local_step = math_ops.to_int64(ops.convert_to_tensor(local_step))
    return gen_data_flow_ops.accumulator_apply_gradient(
        self._accumulator_ref, local_step=local_step, gradient=grad, name=name) 
Example #16
Source File: data_flow_ops.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def set_global_step(self, new_global_step, name=None):
    """Sets the global time step of the accumulator.

    The operation logs a warning if we attempt to set to a time step that is
    lower than the accumulator's own time step.

    Args:
      new_global_step: Value of new time step. Can be a variable or a constant.
      name: Optional name for the operation.

    Returns:
      Operation that sets the accumulator's time step.
    """
    return gen_data_flow_ops.accumulator_set_global_step(
        self._accumulator_ref,
        math_ops.to_int64(ops.convert_to_tensor(new_global_step)),
        name=name) 
Example #17
Source File: rf3.py    From deep-learning with MIT License
def _get_eval_ops(self, features, targets, metrics):
    features, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner, training=False,
        **self.construction_args)

    probabilities = graph_builder.inference_graph(features, data_spec=spec)

    # One-hot the labels.
    if not self.params.regression:
      labels = math_ops.to_int64(array_ops.one_hot(math_ops.to_int64(
          array_ops.squeeze(labels)), self.params.num_classes, 1, 0))

    if metrics is None:
      metrics = {self.accuracy_metric:
                 eval_metrics.get_metric(self.accuracy_metric)}

    result = {}
    for name, metric in six.iteritems(metrics):
      result[name] = metric(probabilities, labels)

    return result 
Example #18
Source File: penalty.py    From AMTTL with MIT License
def _lengths_to_masks(lengths, max_length):
    """Creates a binary matrix that can be used to mask away padding.
    Args:
      lengths: A vector of integers representing lengths.
      max_length: An integer indicating the maximum length. All values in
        lengths should be no greater than max_length.
    Returns:
      masks: Masks that can be used to get rid of padding.
    """
    tiled_ranges = array_ops.tile(
        array_ops.expand_dims(math_ops.range(max_length), 0),
        [array_ops.shape(lengths)[0], 1])
    lengths = array_ops.expand_dims(lengths, 1)
    masks = math_ops.to_float(
        math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
    return masks 
Example #19
Source File: distribution_util.py    From deep_image_model with Apache License 2.0
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name) 
Example #20
Source File: crf.py    From deep_image_model with Apache License 2.0
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be no greater than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks 
Example #21
Source File: tfexample_decoder.py    From deep_image_model with Apache License 2.0
def tensors_to_item(self, keys_to_tensors):
    indices = keys_to_tensors[self._indices_key]
    values = keys_to_tensors[self._values_key]
    if self._shape_key:
      shape = keys_to_tensors[self._shape_key]
      if isinstance(shape, sparse_tensor.SparseTensor):
        shape = sparse_ops.sparse_tensor_to_dense(shape)
    elif self._shape:
      shape = self._shape
    else:
      shape = indices.shape
    indices_shape = array_ops.shape(indices.indices)
    rank = indices_shape[1]
    ids = math_ops.to_int64(indices.values)
    indices_columns_to_preserve = array_ops.slice(
        indices.indices, [0, 0], array_ops.pack([-1, rank - 1]))
    new_indices = array_ops.concat(1, [indices_columns_to_preserve,
                                       array_ops.reshape(ids, [-1, 1])])

    tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
    if self._densify:
      tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
    return tensor 
Example #22
Source File: data_flow_ops.py    From deep_image_model with Apache License 2.0
def apply_grad(self, grad, local_step=0, name=None):
    """Attempts to apply a gradient to the accumulator.

    The attempt is silently dropped if the gradient is stale, i.e., local_step
    is less than the accumulator's global time step.

    Args:
      grad: The gradient tensor to be applied.
      local_step: Time step at which the gradient was computed.
      name: Optional name for the operation.

    Returns:
      The operation that (conditionally) applies a gradient to the accumulator.

    Raises:
      ValueError: If grad is of the wrong shape
    """
    grad = ops.convert_to_tensor(grad, self._dtype)
    grad.get_shape().assert_is_compatible_with(self._shape)
    local_step = math_ops.to_int64(ops.convert_to_tensor(local_step))
    return gen_data_flow_ops.accumulator_apply_gradient(
        self._accumulator_ref, local_step=local_step, gradient=grad, name=name) 
Example #23
Source File: data_flow_ops.py    From deep_image_model with Apache License 2.0
def set_global_step(self, new_global_step, name=None):
    """Sets the global time step of the accumulator.

    The operation logs a warning if we attempt to set to a time step that is
    lower than the accumulator's own time step.

    Args:
      new_global_step: Value of new time step. Can be a variable or a constant.
      name: Optional name for the operation.

    Returns:
      Operation that sets the accumulator's time step.
    """
    return gen_data_flow_ops.accumulator_set_global_step(
        self._accumulator_ref,
        math_ops.to_int64(ops.convert_to_tensor(new_global_step)),
        name=name) 
Example #24
Source File: tfexample_decoder.py    From tf-slim with Apache License 2.0
def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to a concatenated list of bboxes.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      [time, num_boxes, 4] tensor of bounding box coordinates, in order
          [y_min, x_min, y_max, x_max]. Whether the tensor is a SparseTensor
          or a dense Tensor is determined by the return_dense parameter. Empty
          positions in the sparse tensor are filled with -1.0 values.
    """
    sides = []
    for key in self._full_keys:
      value = keys_to_tensors[key]
      expanded_dims = array_ops.concat(
          [math_ops.to_int64(array_ops.shape(value)),
           constant_op.constant([1], dtype=dtypes.int64)], 0)
      side = sparse_ops.sparse_reshape(value, expanded_dims)
      sides.append(side)
    bounding_boxes = sparse_ops.sparse_concat(2, sides)
    if self._return_dense:
      bounding_boxes = sparse_ops.sparse_tensor_to_dense(
          bounding_boxes, default_value=self._default_value)
    return bounding_boxes 
Example #25
Source File: sparse_grad.py    From lambda-packs with MIT License
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(
      [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat([[0], num_added_dims], 0),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy) 
Example #26
Source File: feature_column.py    From lambda-packs with MIT License
def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    input_tensor = inputs.get(self)
    batch_size = array_ops.shape(input_tensor)[0]
    # By construction, source_column is always one-dimensional.
    source_dimension = self.source_column.shape[0]

    i1 = array_ops.reshape(
        array_ops.tile(
            array_ops.expand_dims(math_ops.range(0, batch_size), 1),
            [1, source_dimension]),
        (-1,))
    i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
    # Flatten the bucket indices and make them unique across dimensions,
    # e.g. with k buckets the 2nd dimension's indices range from k to 2*k-1.
    bucket_indices = (
        array_ops.reshape(input_tensor, (-1,)) +
        (len(self.boundaries) + 1) * i2)

    indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
    dense_shape = math_ops.to_int64(array_ops.stack(
        [batch_size, source_dimension]))
    sparse_tensor = sparse_tensor_lib.SparseTensor(
        indices=indices,
        values=bucket_indices,
        dense_shape=dense_shape)
    return _CategoricalColumn.IdWeightPair(sparse_tensor, None) 
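A numeric sketch of the bucket-index offsetting above, assuming two boundaries (so len(boundaries) + 1 == 3 buckets) and source_dimension == 2:

import numpy as np

boundaries = [0.5, 1.5]
values = np.array([[0, 2],     # bucketized input, shape [batch, dimension]
                   [1, 0]])
i2 = np.tile(np.arange(2), 2)  # dimension index for each flattened value
ids = values.reshape(-1) + (len(boundaries) + 1) * i2
# ids == [0, 5, 1, 3]: each dimension owns a disjoint range of bucket ids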
Example #27
Source File: sparse_grad.py    From deep_image_model with Apache License 2.0
def _SparseReduceSumGrad(op, out_grad):
  """Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
  sp_indices = op.inputs[0]
  sp_shape = op.inputs[2]
  output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
  out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
  scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
  # (sparse_indices, sparse_values, sparse_shape, reduction_axes)
  return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
          None, None) 
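A numeric sketch of the index scaling above: reducing a [2, 3] sparse tensor over axis 1 keeps the shape as [2, 1], so every input index [i, j] maps to output index [i, 0] by integer division:

import numpy as np

sp_shape = np.array([2, 3], dtype=np.int64)
output_shape_kept_dims = np.array([2, 1], dtype=np.int64)
scale = sp_shape // output_shape_kept_dims   # [1, 3]
sp_indices = np.array([[0, 1], [1, 2]])
gathered_at = sp_indices // scale            # [[0, 0], [1, 0]]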
Example #28
Source File: feature_column.py    From deep_image_model with Apache License 2.0
def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    if output_rank != 2:
      raise ValueError("BucketizedColumn currently only supports output_rank=2")
    return array_ops.reshape(
        array_ops.one_hot(
            math_ops.to_int64(input_tensor),
            self.length,
            1.,
            0.,
            name="one_hot"), [-1, self.length * self.source_column.dimension],
        name="reshape") 
Example #29
Source File: metric_loss_ops.py    From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License
def get_cluster_assignment(pairwise_distances, centroid_ids):
  """Assigns data points to the nearest centroids.

  TensorFlow has numerical instability and doesn't always choose
    the data point with theoretically zero distance as its nearest neighbor.
    Thus, for each centroid in centroid_ids, we explicitly assign
    the centroid itself as its own nearest centroid.
    This is done through the mask tensor and the constraint_vect tensor.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of centroid indices.

  Returns:
    y_fixed: 1-D tensor of cluster assignment.
  """
  predictions = math_ops.argmin(
      array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
  batch_size = array_ops.shape(pairwise_distances)[0]

  # Deal with numerical instability
  mask = math_ops.reduce_any(array_ops.one_hot(
      centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
                             axis=0)
  constraint_one_hot = math_ops.multiply(
      array_ops.one_hot(centroid_ids,
                        batch_size,
                        array_ops.constant(1, dtype=dtypes.int64),
                        array_ops.constant(0, dtype=dtypes.int64),
                        axis=0,
                        dtype=dtypes.int64),
      math_ops.to_int64(math_ops.range(array_ops.shape(centroid_ids)[0])))
  constraint_vect = math_ops.reduce_sum(
      array_ops.transpose(constraint_one_hot), axis=0)

  y_fixed = array_ops.where(mask, constraint_vect, predictions)
  return y_fixed 
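A small numeric sketch of the masking trick above, assuming batch_size == 4 and centroid_ids == [1, 3]: rows 1 and 3 are forced to their own cluster indices regardless of the argmin result:

import numpy as np

centroid_ids = np.array([1, 3])
batch_size = 4
mask = np.zeros(batch_size, dtype=bool)
mask[centroid_ids] = True                    # [False, True, False, True]
constraint_vect = np.zeros(batch_size, dtype=np.int64)
constraint_vect[centroid_ids] = np.arange(len(centroid_ids))
predictions = np.array([0, 0, 1, 0])         # hypothetical argmin output
y_fixed = np.where(mask, constraint_vect, predictions)  # [0, 0, 1, 1]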
Example #30
Source File: feature_column.py    From lambda-packs with MIT License
def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))

    _assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))

    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_file` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.to_int64(input_tensor)

    return lookup_ops.index_table_from_file(
        vocabulary_file=self.vocabulary_file,
        num_oov_buckets=self.num_oov_buckets,
        vocab_size=self.vocabulary_size,
        default_value=self.default_value,
        key_dtype=key_dtype,
        name='{}_lookup'.format(self.key)).lookup(input_tensor)
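Finally, a hedged sketch of the file-backed lookup built above; 'vocab.txt' is a hypothetical one-token-per-line vocabulary file:

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import lookup_ops

table = lookup_ops.index_table_from_file(
    vocabulary_file='vocab.txt', num_oov_buckets=1)
ids = table.lookup(tf.constant(['hello', '<unseen>']))
with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(ids))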