Python tensorflow.dynamic_stitch() Examples

The following are 30 code examples of tensorflow.dynamic_stitch(). You can go to the original project or source file by following the project and source-file references above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
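Before the project examples, here is a minimal, self-contained sketch (using the TF 1.x session API that the examples below assume) of what tf.dynamic_stitch does: it merges several data tensors into one, placing data[i][j] at position indices[i][j] of the result.

import tensorflow as tf

# Interleave two value tensors back into a single tensor: data[i][j] lands
# at position indices[i][j] of the merged result.
indices = [tf.constant([0, 2, 4]), tf.constant([1, 3])]
data = [tf.constant([10, 30, 50]), tf.constant([20, 40])]
merged = tf.dynamic_stitch(indices, data)

with tf.Session() as sess:
    print(sess.run(merged))  # [10 20 30 40 50]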
Example #1
Source File: prediction_model.py    From Gun-Detector with Apache License 2.0
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
  """Sample batch with specified mix of ground truth and generated data points.

  Args:
    ground_truth_x: tensor of ground-truth data points.
    generated_x: tensor of generated data points.
    batch_size: batch size
    num_ground_truth: number of ground-truth examples to include in batch.
  Returns:
    New batch with num_ground_truth sampled from ground_truth_x and the rest
    from generated_x.
  """
  idx = tf.random_shuffle(tf.range(int(batch_size)))
  ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
  generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))

  ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
  generated_examps = tf.gather(generated_x, generated_idx)
  return tf.dynamic_stitch([ground_truth_idx, generated_idx],
                           [ground_truth_examps, generated_examps]) 
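A sketch of how this helper is typically driven from a training loop; the inverse-sigmoid decay schedule and the placeholder shapes below are assumptions for illustration, not taken from the project.

import tensorflow as tf

ground_truth_frames = tf.placeholder(tf.float32, [32, 64, 64, 3])
predicted_frames = tf.placeholder(tf.float32, [32, 64, 64, 3])
iter_num = tf.placeholder(tf.float32, [])

# Hypothetical inverse-sigmoid decay: early in training most of the batch is
# ground truth; later, most of it is model output.
k = 900.0
num_ground_truth = tf.to_int32(tf.round(32.0 * k / (k + tf.exp(iter_num / k))))

mixed_batch = scheduled_sample(ground_truth_frames, predicted_frames,
                               32, num_ground_truth)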
Example #2
Source File: reinforce_simple_example.py    From deep_image_model with Apache License 2.0
def split_apply_merge(inp, partitions, fns):
  """Split input according to partitions.  Pass results through fns and merge.

  Args:
    inp: the input vector
    partitions: tensor of same length as input vector, having values 0, 1
    fns: the two functions.

  Returns:
    the vector routed, where routed[i] = fns[partitions[i]](inp[i])
  """
  new_inputs = tf.dynamic_partition(inp, partitions, len(fns))
  new_outputs = [fns[i](x) for i, x in enumerate(new_inputs)]
  new_indices = tf.dynamic_partition(
      tf.range(0, inp.get_shape()[0]), partitions, len(fns))
  return tf.dynamic_stitch(new_indices, new_outputs) 
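A quick sketch of calling split_apply_merge on a toy vector; the two lambdas are arbitrary stand-ins for the real per-partition functions.

import tensorflow as tf

inp = tf.constant([1.0, 2.0, 3.0, 4.0])
partitions = tf.constant([0, 1, 0, 1])

# Elements with partition 0 go through the first function, partition 1
# through the second; dynamic_stitch restores the original element order.
routed = split_apply_merge(inp, partitions,
                           [lambda x: x * 10.0, lambda x: x + 0.5])

with tf.Session() as sess:
    print(sess.run(routed))  # [10.   2.5  30.   4.5]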
Example #3
Source File: tf_utils.py    From rasa_core with Apache License 2.0
def _arrange_back_fn(list_tensor_1d_mask_1d):
        """Arranges back tensor_1d to restore original order
            modified by `_rearrange_fn` according to mask_1d:
            - number of 0s in mask_1d values on the left are set to
              their corresponding places where mask_1d=0,
            - number of 1s in mask_1d values on the right are set to
              their corresponding places where mask_1d=1"""
        tensor_1d, mask_1d = list_tensor_1d_mask_1d

        mask_indices = tf.dynamic_partition(tf.range(tf.shape(tensor_1d)[0]),
                                            mask_1d, 2)

        mask_sum = tf.reduce_sum(mask_1d, axis=0)
        partitioned_tensor = [tf.zeros_like(tensor_1d[:-mask_sum]),
                              tensor_1d[-mask_sum:]]

        return tf.dynamic_stitch(mask_indices, partitioned_tensor) 
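A sketch of what this function computes, using a hand-built input in which the two surviving values were previously compacted to the right end (an assumed toy input, not taken from rasa_core):

import tensorflow as tf

tensor_1d = tf.constant([0, 0, 7, 9])  # values 7, 9 were moved to the right
mask_1d = tf.constant([1, 0, 0, 1])    # they originally sat at positions 0, 3

# Scatters the right-compacted values back to the mask_1d == 1 positions and
# zero-fills the mask_1d == 0 positions.
restored = _arrange_back_fn([tensor_1d, mask_1d])

with tf.Session() as sess:
    print(sess.run(restored))  # [7 0 0 9]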
Example #4
Source File: prediction_model.py    From yolo_v2 with Apache License 2.0
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
  """Sample batch with specified mix of ground truth and generated data points.

  Args:
    ground_truth_x: tensor of ground-truth data points.
    generated_x: tensor of generated data points.
    batch_size: batch size
    num_ground_truth: number of ground-truth examples to include in batch.
  Returns:
    New batch with num_ground_truth sampled from ground_truth_x and the rest
    from generated_x.
  """
  idx = tf.random_shuffle(tf.range(int(batch_size)))
  ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
  generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))

  ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
  generated_examps = tf.gather(generated_x, generated_idx)
  return tf.dynamic_stitch([ground_truth_idx, generated_idx],
                           [ground_truth_examps, generated_examps]) 
Example #5
Source File: dynamic_stitch_op_test.py    From deep_image_model with Apache License 2.0
def testHigherRank(self):
    with self.test_session() as sess:
      indices = [tf.constant(6), tf.constant([4, 1]),
                 tf.constant([[5, 2], [0, 3]])]
      data = [tf.constant([61, 62]), tf.constant([[41, 42], [11, 12]]),
              tf.constant([[[51, 52], [21, 22]], [[1, 2], [31, 32]]])]
      stitched_t = tf.dynamic_stitch(indices, data)
      stitched_val = stitched_t.eval()
      correct = 10 * np.arange(7)[:, None] + [1, 2]
      self.assertAllEqual(correct, stitched_val)
      self.assertEqual([None, 2], stitched_t.get_shape().as_list())
      # Test gradients
      stitched_grad = 7 * stitched_val
      grads = tf.gradients(stitched_t, indices + data, stitched_grad)
      self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
      for datum, grad in zip(data, sess.run(grads[3:])):
        self.assertAllEqual(7 * datum.eval(), grad) 
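The gradient behavior the test asserts follows from dynamic_stitch being a scatter: the gradient with respect to each data tensor is the output gradient gathered back at that tensor's indices, and the integer indices get no gradient at all. A minimal sketch:

import tensorflow as tf

indices = [tf.constant([0, 2]), tf.constant([1, 3])]
data = [tf.constant([1.0, 3.0]), tf.constant([2.0, 4.0])]
stitched = tf.dynamic_stitch(indices, data)  # [1. 2. 3. 4.]

# The upstream gradient is routed back to each data tensor via its indices.
grads = tf.gradients(stitched, data,
                     grad_ys=[tf.constant([10.0, 20.0, 30.0, 40.0])])

with tf.Session() as sess:
    print(sess.run(grads))  # [[10. 30.], [20. 40.]]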
Example #6
Source File: dynamic_stitch_op_test.py    From deep_image_model with Apache License 2.0
def testSimpleTwoDimensional(self):
    with self.test_session():
      indices = [tf.constant([0, 4, 7]),
                 tf.constant([1, 6]),
                 tf.constant([2, 3, 5])]
      data = [tf.constant([[0, 1], [40, 41], [70, 71]]),
              tf.constant([[10, 11], [60, 61]]),
              tf.constant([[20, 21], [30, 31], [50, 51]])]
      stitched_t = tf.dynamic_stitch(indices, data)
      stitched_val = stitched_t.eval()
      self.assertAllEqual(
          [[0, 1], [10, 11], [20, 21], [30, 31],
           [40, 41], [50, 51], [60, 61], [70, 71]], stitched_val)
      # Dimension 0 is determined by the max index in indices, so we
      # can only infer that the output is a matrix with 2 columns and
      # some unknown number of rows.
      self.assertEqual([None, 2], stitched_t.get_shape().as_list()) 
Example #7
Source File: dnc_cell.py    From ADNC with Apache License 2.0
def _update_alloc_and_usage_vectors(self, pre_write_weightings, pre_read_weightings, pre_usage_vector, free_gates):

        retention_vector = tf.reduce_prod(1 - free_gates * pre_read_weightings, axis=1, keepdims=False,
                                          name='retention_prod')
        usage_vector = (pre_usage_vector + pre_write_weightings
                        - pre_usage_vector * pre_write_weightings) * retention_vector

        sorted_usage, free_list = tf.nn.top_k(-1 * usage_vector, self.h_N)
        sorted_usage = -1 * sorted_usage

        cumprod_sorted_usage = tf.cumprod(sorted_usage, axis=1, exclusive=True)
        corrected_free_list = free_list + self.const_batch_memory_range

        cumprod_sorted_usage_re = [tf.reshape(cumprod_sorted_usage, [-1, ]), ]
        corrected_free_list_re = [tf.reshape(corrected_free_list, [-1]), ]

        stitched_usage = tf.dynamic_stitch(corrected_free_list_re, cumprod_sorted_usage_re, name=None)

        stitched_usage = tf.reshape(stitched_usage, [self.h_B, self.h_N])

        alloc_weighting = (1 - usage_vector) * stitched_usage

        return alloc_weighting, usage_vector 
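tf.dynamic_stitch only understands flat indices, so the free list for each batch row has to be offset into a flattened [h_B * h_N] address space. const_batch_memory_range is not shown in this snippet; a plausible construction (an assumption based on how it is used above) is:

import tensorflow as tf

h_B, h_N = 4, 8  # hypothetical batch size and number of memory slots

# Per-row offsets [[0], [h_N], [2*h_N], [3*h_N]]: adding them to per-row
# indices in [0, h_N) yields unique positions in the flattened tensor, so a
# single dynamic_stitch can un-sort every batch row at once.
const_batch_memory_range = tf.reshape(
    tf.range(h_B, dtype=tf.int32) * h_N, [h_B, 1])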
Example #8
Source File: expert_utils.py    From NMT_GAN with Apache License 2.0
def Combine(self, x_tensors):
    """Reshuffles per-expert `Tensor`s to produce per-datashard `Tensor`s.

    Dispatch must have been called at least once first.

    The dimensions of all input and output `Tensor`s match, except for
    dimension 0.  In dimension 0, the input `Tensor`s match the corresponding
    outputs of `Dispatch`, and the output `Tensor`s match the corresponding
    `gates` `Tensor`s which were passed to the constructor.

    Args:
      x_tensors: a list of `Tensor`s, one per expert.

    Returns:
      a list of `Tensor`s, one per datashard.
    """
    parts = self._model_parallelism(tf.split, x_tensors,
                                    self._part_sizes_by_expert)
    d_tensors = self._data_parallelism(tf.dynamic_stitch, self._stitch_indices,
                                       TransposeListOfLists(parts))
    return d_tensors 
Example #9
Source File: sna_model.py    From video_prediction with MIT License
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
    """Sample batch with specified mix of ground truth and generated data_files points.

    Args:
      ground_truth_x: tensor of ground-truth data_files points.
      generated_x: tensor of generated data_files points.
      batch_size: batch size
      num_ground_truth: number of ground-truth examples to include in batch.
    Returns:
      New batch with num_ground_truth sampled from ground_truth_x and the rest
      from generated_x.
    """
    idx = tf.random_shuffle(tf.range(int(batch_size)))
    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
    generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))

    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
    generated_examps = tf.gather(generated_x, generated_idx)
    return tf.dynamic_stitch([ground_truth_idx, generated_idx],
                             [ground_truth_examps, generated_examps]) 
Example #10
Source File: sv2p_model.py    From video_prediction with MIT License
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
    """Sample batch with specified mix of ground truth and generated data points.

    Args:
        ground_truth_x: tensor of ground-truth data points.
        generated_x: tensor of generated data points.
        batch_size: batch size
        num_ground_truth: number of ground-truth examples to include in batch.
    Returns:
        New batch with num_ground_truth sampled from ground_truth_x and the rest
        from generated_x.
    """
    idx = tf.random_shuffle(tf.range(int(batch_size)))
    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
    generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))

    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
    generated_examps = tf.gather(generated_x, generated_idx)
    return tf.dynamic_stitch([ground_truth_idx, generated_idx],
                             [ground_truth_examps, generated_examps]) 
Example #11
Source File: next_frame.py    From fine-lm with MIT License
def scheduled_sample(self,
                       ground_truth_x,
                       generated_x,
                       batch_size,
                       num_ground_truth):
    """Sample batch with specified mix of groundtruth and generated data points.

    Args:
      ground_truth_x: tensor of ground-truth data points.
      generated_x: tensor of generated data points.
      batch_size: batch size
      num_ground_truth: number of ground-truth examples to include in batch.
    Returns:
      New batch with num_ground_truth sampled from ground_truth_x and the rest
      from generated_x.
    """
    idx = tf.random_shuffle(tf.range(batch_size))
    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
    generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))

    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
    generated_examps = tf.gather(generated_x, generated_idx)
    return tf.dynamic_stitch([ground_truth_idx, generated_idx],
                             [ground_truth_examps, generated_examps]) 
Example #12
Source File: dna_model.py    From video_prediction with MIT License
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
    """Sample batch with specified mix of ground truth and generated data points.

    Args:
        ground_truth_x: tensor of ground-truth data points.
        generated_x: tensor of generated data points.
        batch_size: batch size
        num_ground_truth: number of ground-truth examples to include in batch.
    Returns:
        New batch with num_ground_truth sampled from ground_truth_x and the rest
        from generated_x.
    """
    idx = tf.random_shuffle(tf.range(int(batch_size)))
    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
    generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))

    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
    generated_examps = tf.gather(generated_x, generated_idx)
    return tf.dynamic_stitch([ground_truth_idx, generated_idx],
                             [ground_truth_examps, generated_examps]) 
Example #13
Source File: prediction_model.py    From DOTA_models with Apache License 2.0
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
  """Sample batch with specified mix of ground truth and generated data points.

  Args:
    ground_truth_x: tensor of ground-truth data points.
    generated_x: tensor of generated data points.
    batch_size: batch size
    num_ground_truth: number of ground-truth examples to include in batch.
  Returns:
    New batch with num_ground_truth sampled from ground_truth_x and the rest
    from generated_x.
  """
  idx = tf.random_shuffle(tf.range(int(batch_size)))
  ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
  generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))

  ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
  generated_examps = tf.gather(generated_x, generated_idx)
  return tf.dynamic_stitch([ground_truth_idx, generated_idx],
                           [ground_truth_examps, generated_examps]) 
Example #14
Source File: multi_write_dnc_cell.py    From ADNC with Apache License 2.0
def _update_alloc_and_usage_vectors(self, pre_write_weightings, pre_read_weightings, pre_usage_vector, free_gates,
                                        write_gates):

        # usage update after write from last time step
        pre_write_weighting = 1 - tf.reduce_prod(1 - pre_write_weightings, [1], keepdims=False)
        usage_vector = pre_usage_vector + pre_write_weighting - pre_usage_vector * pre_write_weighting

        # usage update after read
        retention_vector = tf.reduce_prod(1 - free_gates * pre_read_weightings, axis=1, keepdims=False,
                                          name='retention_prod')
        usage_vector = usage_vector * retention_vector

        usage_vector_cp = tf.identity(usage_vector)

        alloc_list = []
        for w in range(self.h_WH):
            sorted_usage, free_list = tf.nn.top_k(-1 * usage_vector_cp, self.h_N)
            sorted_usage = -1 * sorted_usage

            cumprod_sorted_usage = tf.cumprod(sorted_usage, axis=1, exclusive=True)
            corrected_free_list = free_list + self.const_batch_memory_range

            corrected_free_list_un = [tf.reshape(corrected_free_list, [-1, ]), ]
            cumprod_sorted_usage_un = [tf.reshape(cumprod_sorted_usage, [-1, ]), ]

            stitched_usage = tf.dynamic_stitch(corrected_free_list_un, cumprod_sorted_usage_un, name=None)
            stitched_usage = tf.reshape(stitched_usage, [self.h_B, self.h_N])

            alloc_weighting = (1 - usage_vector_cp) * stitched_usage

            alloc_list.append(alloc_weighting)
            usage_vector_cp = usage_vector_cp + ((1 - usage_vector_cp) * write_gates[:, w, :] * alloc_weighting)

        alloc_weighting = tf.stack(alloc_list, 1)

        return alloc_weighting, usage_vector 
Example #15
Source File: ops.py    From BMW-TensorFlow-Training-GUI with Apache License 2.0
def indices_to_dense_vector(indices,
                            size,
                            indices_value=1.,
                            default_value=0,
                            dtype=tf.float32):
  """Creates dense vector with indices set to specific value and rest to zeros.

  This function exists because it is unclear if it is safe to use
    tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
  with indices which are not ordered.
  This function accepts a dynamic size (e.g. tf.shape(tensor)[0])

  Args:
    indices: 1d Tensor with integer indices which are to be set to
        indices_values.
    size: scalar with size (integer) of output Tensor.
    indices_value: values of elements specified by indices in the output vector
    default_value: values of other elements in the output vector.
    dtype: data type.

  Returns:
    dense 1D Tensor of shape [size] with indices set to indices_values and the
        rest set to default_value.
  """
  size = tf.to_int32(size)
  zeros = tf.ones([size], dtype=dtype) * default_value
  values = tf.ones_like(indices, dtype=dtype) * indices_value

  return tf.dynamic_stitch([tf.range(size), tf.to_int32(indices)],
                           [zeros, values]) 
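A quick sketch of calling this helper. Note that it leans on dynamic_stitch's merge-in-order rule: both tf.range(size) and indices cover positions 1 and 3, and the later values list wins at those duplicates.

import tensorflow as tf

indices = tf.constant([1, 3])
dense = indices_to_dense_vector(indices, size=5)

with tf.Session() as sess:
    print(sess.run(dense))  # [0. 1. 0. 1. 0.]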
Example #16
Source File: ops.py    From TLNet with Apache License 2.0
def indices_to_dense_vector(indices,
                            size,
                            indices_value=1.,
                            default_value=0,
                            dtype=tf.float32):
    """Creates dense vector with indices set to specific value
       and rest to zeros.

      This function exists because it is unclear if it is safe to use
      tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
      with indices which are not ordered. This function accepts a
      dynamic size (e.g. tf.shape(tensor)[0])

    Args:
      indices: 1d Tensor with integer indices which are to be set to
               indices_values.
      size: scalar with size (integer) of output Tensor.
      indices_value: values of elements specified by indices in the output
                     vector
      default_value: values of other elements in the output vector.
      dtype: data type.

    Returns:
      dense 1D Tensor of shape [size] with indices set to indices_values and the
          rest set to default_value.
    """
    size = tf.to_int32(size)
    zeros = tf.ones([size], dtype=dtype) * default_value
    values = tf.ones_like(indices, dtype=dtype) * indices_value

    return tf.dynamic_stitch([tf.range(size), tf.to_int32(indices)],
                             [zeros, values]) 
Example #17
Source File: test_bernoulli.py    From PlaneNet with MIT License
def split_apply_merge(inp, partitions, fns):
  """Split input according to partitions.  Pass results through fns and merge.
  Args:
    inp: the input vector
    partitions: tensor of same length as input vector, having values 0, 1
    fns: the two functions.
  Returns:
    the vector routed, where routed[i] = fns[partitions[i]](inp[i])
  """
  new_inputs = tf.dynamic_partition(inp, partitions, len(fns))
  new_outputs = [fns[i](x) for i, x in enumerate(new_inputs)]
  new_indices = tf.dynamic_partition(
      tf.range(0, inp.get_shape()[0]), partitions, len(fns))
  return tf.dynamic_stitch(new_indices, new_outputs) 
Example #18
Source File: dynamic_stitch_op_test.py    From deep_image_model with Apache License 2.0
def testSimpleOneDimensional(self):
    with self.test_session():
      indices = [tf.constant([0, 4, 7]),
                 tf.constant([1, 6, 2, 3, 5])]
      data = [tf.constant([0, 40, 70]),
              tf.constant([10, 60, 20, 30, 50])]
      stitched_t = tf.dynamic_stitch(indices, data)
      stitched_val = stitched_t.eval()
      self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
      # Dimension 0 is determined by the max index in indices, so we
      # can only infer that the output is a vector of some unknown
      # length.
      self.assertEqual([None], stitched_t.get_shape().as_list()) 
Example #19
Source File: dynamic_stitch_op_test.py    From deep_image_model with Apache License 2.0
def testErrorDataAndIndicesSizeMismatch(self):
    indices = [tf.constant([0, 4, 7]),
               tf.constant([1, 6, 2, 3, 5])]
    data = [tf.constant([0, 40, 70]),
            tf.constant([10, 60, 20, 30])]
    with self.assertRaises(ValueError):
      tf.dynamic_stitch(indices, data) 
Example #20
Source File: dynamic_stitch_op_test.py    From deep_image_model with Apache License 2.0
def testScalar(self):
    with self.test_session():
      indices = [tf.constant(0), tf.constant(1)]
      data = [tf.constant(40), tf.constant(60)]
      for step in -1, 1:
        stitched_t = tf.dynamic_stitch(indices[::step], data)
        stitched_val = stitched_t.eval()
        self.assertAllEqual([40, 60][::step], stitched_val)
        # Dimension 0 is determined by the max index in indices, so we
        # can only infer that the output is a vector of some unknown
        # length.
        self.assertEqual([None], stitched_t.get_shape().as_list()) 
Example #21
Source File: embedding_ops_test.py    From deep_image_model with Apache License 2.0
def testSumGradArgs(self):
    with self.test_session(use_gpu=False):
      indices = [tf.convert_to_tensor([0, 1, 2, 3]),
                 tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([2, 3, 5, 7]), tf.convert_to_tensor([1, 1])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])

  # We expect that the values are merged in order. 
Example #22
Source File: embedding_ops_test.py    From deep_image_model with Apache License 2.0
def testInt32Gpu(self):
    with self.test_session(use_gpu=True):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2]) 
Example #23
Source File: embedding_ops_test.py    From deep_image_model with Apache License 2.0
def testInt32Cpu(self):
    with self.test_session(use_gpu=False):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2]) 
Example #24
Source File: embedding_ops_test.py    From deep_image_model with Apache License 2.0
def testCint32Gpu(self):
    with self.test_session(use_gpu=True):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2]) 
Example #25
Source File: embedding_ops_test.py    From deep_image_model with Apache License 2.0
def testCint32Cpu(self):
    with self.test_session(use_gpu=False):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2]) 
Example #26
Source File: ops.py    From tpu_models with Apache License 2.0
def indices_to_dense_vector(indices,
                            size,
                            indices_value=1.,
                            default_value=0,
                            dtype=tf.float32):
  """Creates dense vector with indices set to specific value and rest to zeros.

  This function exists because it is unclear if it is safe to use
    tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
  with indices which are not ordered.
  This function accepts a dynamic size (e.g. tf.shape(tensor)[0])

  Args:
    indices: 1d Tensor with integer indices which are to be set to
        indices_values.
    size: scalar with size (integer) of output Tensor.
    indices_value: values of elements specified by indices in the output vector
    default_value: values of other elements in the output vector.
    dtype: data type.

  Returns:
    dense 1D Tensor of shape [size] with indices set to indices_values and the
        rest set to default_value.
  """
  size = tf.to_int32(size)
  zeros = tf.ones([size], dtype=dtype) * default_value
  values = tf.ones_like(indices, dtype=dtype) * indices_value

  return tf.dynamic_stitch([tf.range(size), tf.to_int32(indices)],
                           [zeros, values]) 
Example #27
Source File: ops.py    From tpu_models with Apache License 2.0
def indices_to_dense_vector(indices,
                            size,
                            indices_value=1.,
                            default_value=0,
                            dtype=tf.float32):
  """Creates dense vector with indices set to specific value and rest to zeros.

  This function exists because it is unclear if it is safe to use
    tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
  with indices which are not ordered.
  This function accepts a dynamic size (e.g. tf.shape(tensor)[0])

  Args:
    indices: 1d Tensor with integer indices which are to be set to
        indices_values.
    size: scalar with size (integer) of output Tensor.
    indices_value: values of elements specified by indices in the output vector
    default_value: values of other elements in the output vector.
    dtype: data type.

  Returns:
    dense 1D Tensor of shape [size] with indices set to indices_values and the
        rest set to default_value.
  """
  size = tf.to_int32(size)
  zeros = tf.ones([size], dtype=dtype) * default_value
  values = tf.ones_like(indices, dtype=dtype) * indices_value

  return tf.dynamic_stitch([tf.range(size), tf.to_int32(indices)],
                           [zeros, values]) 
Example #28
Source File: ops.py    From Gun-Detector with Apache License 2.0
def indices_to_dense_vector(indices,
                            size,
                            indices_value=1.,
                            default_value=0,
                            dtype=tf.float32):
  """Creates dense vector with indices set to specific value and rest to zeros.

  This function exists because it is unclear if it is safe to use
    tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
  with indices which are not ordered.
  This function accepts a dynamic size (e.g. tf.shape(tensor)[0])

  Args:
    indices: 1d Tensor with integer indices which are to be set to
        indices_values.
    size: scalar with size (integer) of output Tensor.
    indices_value: values of elements specified by indices in the output vector
    default_value: values of other elements in the output vector.
    dtype: data type.

  Returns:
    dense 1D Tensor of shape [size] with indices set to indices_values and the
        rest set to default_value.
  """
  size = tf.to_int32(size)
  zeros = tf.ones([size], dtype=dtype) * default_value
  values = tf.ones_like(indices, dtype=dtype) * indices_value

  return tf.dynamic_stitch([tf.range(size), tf.to_int32(indices)],
                           [zeros, values]) 
Example #29
Source File: tf_ops.py    From RetinaNet_Tensorflow_Rotation with MIT License
def indices_to_dense_vector(indices,
                            size,
                            indices_value=1.,
                            default_value=0,
                            dtype=tf.float32):
  """Creates dense vector with indices set to specific (the para "indices_value" ) and rest to zeros.

  This function exists because it is unclear if it is safe to use
    tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
  with indices which are not ordered.
  This function accepts a dynamic size (e.g. tf.shape(tensor)[0])

  Args:
    indices: 1d Tensor with integer indices which are to be set to
        indices_values.
    size: scalar with size (integer) of output Tensor.
    indices_value: values of elements specified by indices in the output vector
    default_value: values of other elements in the output vector.
    dtype: data type.

  Returns:
    dense 1D Tensor of shape [size] with indices set to indices_values and the
        rest set to default_value.
  """
  size = tf.to_int32(size)
  zeros = tf.ones([size], dtype=dtype) * default_value
  values = tf.ones_like(indices, dtype=dtype) * indices_value

  return tf.dynamic_stitch([tf.range(size), tf.to_int32(indices)],
                           [zeros, values]) 
Example #30
Source File: target_assigner.py    From tensorflow with BSD 2-Clause "Simplified" License
def _create_classification_targets(self, groundtruth_labels, match):
    """Create classification targets for each anchor.

    Assign a classification target for each anchor to the matching
    groundtruth label that is provided by match.  Anchors that are not matched
    to anything are given the target self._unmatched_cls_target.

    Args:
      groundtruth_labels:  a tensor of shape [num_gt_boxes, d_1, ... d_k]
        with labels for each of the ground_truth boxes. The subshape
        [d_1, ... d_k] can be empty (corresponding to scalar labels).
      match: a matcher.Match object that provides a matching between anchors
        and groundtruth boxes.

    Returns:
      cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
        where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
        which has shape [num_gt_boxes, d_1, d_2, ... d_k].
    """
    matched_anchor_indices = match.matched_column_indices()
    unmatched_ignored_anchor_indices = (match.
                                        unmatched_or_ignored_column_indices())
    matched_gt_indices = match.matched_row_indices()
    matched_cls_targets = tf.gather(groundtruth_labels, matched_gt_indices)

    ones = self._unmatched_cls_target.shape.ndims * [1]
    unmatched_ignored_cls_targets = tf.tile(
        tf.expand_dims(self._unmatched_cls_target, 0),
        tf.stack([tf.size(unmatched_ignored_anchor_indices)] + ones))

    cls_targets = tf.dynamic_stitch(
        [matched_anchor_indices, unmatched_ignored_anchor_indices],
        [matched_cls_targets, unmatched_ignored_cls_targets])
    return cls_targets
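The final stitch is the usual "partition, process, recombine" pattern: matched and unmatched anchors are handled separately, then interleaved back into anchor order. A simplified standalone sketch with hypothetical scalar labels and hand-picked match indices:

import tensorflow as tf

# Anchors 0 and 3 matched groundtruth boxes 1 and 0; anchors 1 and 2 did not.
matched_anchor_indices = tf.constant([0, 3])
unmatched_anchor_indices = tf.constant([1, 2])
groundtruth_labels = tf.constant([7.0, 9.0])  # one scalar label per gt box
matched_gt_indices = tf.constant([1, 0])

matched_cls_targets = tf.gather(groundtruth_labels, matched_gt_indices)
unmatched_cls_targets = tf.zeros([2])  # unmatched anchors get a zero target

cls_targets = tf.dynamic_stitch(
    [matched_anchor_indices, unmatched_anchor_indices],
    [matched_cls_targets, unmatched_cls_targets])

with tf.Session() as sess:
    print(sess.run(cls_targets))  # [9. 0. 0. 7.]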