Python tensorflow.segment_min() Examples
The following are 11 code examples of tensorflow.segment_min().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
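Before the examples, here is a minimal sketch of what tf.segment_min computes, using made-up values purely for illustration (the examples below use the TensorFlow 1.x API; in TensorFlow 2.x the op is available as tf.math.segment_min):

import tensorflow as tf

data = tf.constant([5, 1, 7, 2, 3, 4])
segment_ids = tf.constant([0, 0, 1, 1, 2, 2])  # sorted, one id per element
result = tf.segment_min(data, segment_ids)     # per-segment minimum -> [1, 2, 3]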
Example #1
Source File: segment_reduction_ops_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def testGradient(self):
    shape = [4, 4]
    indices = [0, 1, 2, 2]
    for tf_op in [tf.segment_sum, tf.segment_mean, tf.segment_min, tf.segment_max]:
        with self.test_session():
            tf_x, np_x = self._input(shape, dtype=tf.float64)
            s = tf_op(data=tf_x, segment_ids=indices)
            jacob_t, jacob_n = tf.test.compute_gradient(
                tf_x, shape, s, [3, 4], x_init_value=np_x.astype(np.double), delta=1)
            self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
Example #2
Source File: metrics_utils.py From listen-attend-and-spell with Apache License 2.0 | 6 votes |
def dense_to_sparse(tensor, eos_id, merge_repeated=True):
    if merge_repeated:
        added_values = tf.cast(
            tf.fill((tf.shape(tensor)[0], 1), eos_id), tensor.dtype)

        # merge consecutive values
        concat_tensor = tf.concat((tensor, added_values), axis=-1)
        diff = tf.cast(concat_tensor[:, 1:] - concat_tensor[:, :-1], tf.bool)

        # trim after first eos token
        eos_indices = tf.where(tf.equal(concat_tensor, eos_id))
        first_eos = tf.segment_min(eos_indices[:, 1], eos_indices[:, 0])
        mask = tf.sequence_mask(first_eos, maxlen=tf.shape(tensor)[1])

        indices = tf.where(diff & mask & tf.not_equal(tensor, -1))
        values = tf.gather_nd(tensor, indices)
        shape = tf.shape(tensor, out_type=tf.int64)

        return tf.SparseTensor(indices, values, shape)
    else:
        return tf.contrib.layers.dense_to_sparse(tensor, eos_id)
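The pattern worth noting above: tf.where collects the (row, column) index of every eos token, and tf.segment_min then keeps the smallest column per row, i.e. the position of the first eos. A minimal sketch with made-up values (eos_id = 2 is assumed here purely for illustration):

import tensorflow as tf

tensor = tf.constant([[4, 7, 2, 2],
                      [9, 2, 5, 2]])                              # assume eos_id == 2
eos_indices = tf.where(tf.equal(tensor, 2))                       # (row, col) of every eos
first_eos = tf.segment_min(eos_indices[:, 1], eos_indices[:, 0])  # first eos column per row -> [2, 1]
mask = tf.sequence_mask(first_eos, maxlen=tf.shape(tensor)[1])    # True before the first eos

Appending an eos column beforehand, as the function above does, guarantees that every row contributes at least one index to the segment reduction.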
Example #3
Source File: bert_seq_tpu_utils.py From BERT with Apache License 2.0 | 5 votes |
def get_finised_pos(token_seq, finished_index, max_length):
    tmp_indices = tf.where(tf.equal(token_seq, int(finished_index)))
    finished_pos = tf.segment_min(tmp_indices[:, 1], tmp_indices[:, 0])
    sequence_mask = tf.sequence_mask(finished_pos + 1, maxlen=max_length)
    return tf.cast(sequence_mask, tf.int32)
Example #4
Source File: bert_seq_utils.py From BERT with Apache License 2.0 | 5 votes |
def get_finised_pos(token_seq, finished_index, max_length):
    tmp_indices = tf.where(tf.equal(token_seq, int(finished_index)))
    finished_pos = tf.segment_min(tmp_indices[:, 1], tmp_indices[:, 0])
    sequence_mask = tf.sequence_mask(finished_pos + 1, maxlen=max_length)
    return tf.cast(sequence_mask, tf.int32)
Example #5
Source File: bert_seq_sample_utils.py From BERT with Apache License 2.0 | 5 votes |
def get_finised_pos(token_seq, finished_index, max_length):
    tmp_indices = tf.where(tf.equal(token_seq, int(finished_index)))
    finished_pos = tf.segment_min(tmp_indices[:, 1], tmp_indices[:, 0])
    sequence_mask = tf.sequence_mask(finished_pos + 1, maxlen=max_length)
    return tf.cast(sequence_mask, tf.int32)
Example #6
Source File: ops.py From tfdeploy with MIT License | 5 votes |
def test_SegmentMin(self):
    t = tf.segment_min(self.random(4, 2, 3), np.array([0, 1, 1, 2]))
    self.check(t)
Example #7
Source File: segment_reduction_ops_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testValues(self):
    dtypes = [tf.float32, tf.float64, tf.int64, tf.int32, tf.complex64, tf.complex128]

    # Each item is np_op1, np_op2, tf_op
    ops_list = [(np.add, None, tf.segment_sum),
                (self._mean_cum_op, self._mean_reduce_op, tf.segment_mean),
                (np.ndarray.__mul__, None, tf.segment_prod),
                (np.minimum, None, tf.segment_min),
                (np.maximum, None, tf.segment_max)]

    # A subset of ops has been enabled for complex numbers
    complex_ops_list = [(np.add, None, tf.segment_sum),
                        (np.ndarray.__mul__, None, tf.segment_prod)]

    n = 10
    shape = [n, 2]
    indices = [i // 3 for i in range(n)]
    for dtype in dtypes:
        if dtype in (tf.complex64, tf.complex128):
            curr_ops_list = complex_ops_list
        else:
            curr_ops_list = ops_list

        with self.test_session(use_gpu=False):
            tf_x, np_x = self._input(shape, dtype=dtype)
            for np_op1, np_op2, tf_op in curr_ops_list:
                np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
                s = tf_op(data=tf_x, segment_ids=indices)
                tf_ans = s.eval()
                self._assertAllClose(indices, np_ans, tf_ans)
                # NOTE(mrry): The static shape inference that computes
                # `tf_ans.shape` can only infer that sizes from dimension 1
                # onwards, because the size of dimension 0 is data-dependent
                # and may therefore vary dynamically.
                self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
Example #8
Source File: math_grad_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testSegmentMinGradient(self):
    data = tf.constant([1.0, 2.0, 3.0], dtype=tf.float32)
    segment_ids = tf.constant([0, 0, 1], dtype=tf.int64)
    segment_min = tf.segment_min(data, segment_ids)
    with self.test_session():
        error = tf.test.compute_gradient_error(data, [3], segment_min, [2])
        self.assertLess(error, 1e-4)
Example #9
Source File: math_grad_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testSegmentMinGradientWithTies(self):
    inputs = tf.constant([1.0], dtype=tf.float32)
    data = tf.concat(0, [inputs, inputs])
    segment_ids = tf.constant([0, 0], dtype=tf.int64)
    segment_min = tf.segment_min(data, segment_ids)
    with self.test_session():
        error = tf.test.compute_gradient_error(inputs, [1], segment_min, [1])
        self.assertLess(error, 1e-4)
Example #10
Source File: mrt_utils.py From transformer-aan with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_len(sen, eos):
    indices = tf.where(tf.equal(sen, eos))
    result = tf.segment_min(indices[:, 1], indices[:, 0])
    return result
Example #11
Source File: segments.py From moonlight with Apache License 2.0 | 4 votes |
def _segments_1d(values, mode, name=None):
  """Labels consecutive runs of the same value.

  Args:
    values: 1D tensor of any type.
    mode: The SegmentsMode. Returns the start of each segment (STARTS), or the
        rounded center of each segment (CENTERS).
    name: Optional name for the op.

  Returns:
    run_centers: int32 tensor; the centers of each run with the same
        consecutive values.
    run_lengths: int32 tensor; the lengths of each run.

  Raises:
    ValueError: if mode is not recognized.
  """
  with tf.name_scope(name, "segments", [values]):

    def do_segments(values):
      """Actually does segmentation.

      Args:
        values: 1D tensor of any type. Non-empty.

      Returns:
        run_centers: int32 tensor
        run_lengths: int32 tensor

      Raises:
        ValueError: if mode is not recognized.
      """
      length = tf.shape(values)[0]
      values = tf.convert_to_tensor(values)
      # The first run has id 0, so we don't increment the id.
      # Otherwise, the id is incremented when the value changes.
      run_start_bool = tf.concat(
          [[False], tf.not_equal(values[1:], values[:-1])], axis=0)
      # Cumulative sum the run starts to get the run ids.
      segment_ids = tf.cumsum(tf.cast(run_start_bool, tf.int32))
      if mode is SegmentsMode.STARTS:
        run_centers = tf.segment_min(tf.range(length), segment_ids)
      elif mode is SegmentsMode.CENTERS:
        run_centers = tf.segment_mean(
            tf.cast(tf.range(length), tf.float32), segment_ids)
        run_centers = tf.cast(tf.floor(run_centers), tf.int32)
      else:
        raise ValueError("Unexpected mode: %s" % mode)
      run_lengths = tf.segment_sum(tf.ones([length], tf.int32), segment_ids)
      return run_centers, run_lengths

    def empty_segments():
      return (tf.zeros([0], tf.int32), tf.zeros([0], tf.int32))

    return tf.cond(
        tf.greater(tf.shape(values)[0], 0),
        lambda: do_segments(values),
        empty_segments)
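As a rough illustration of the run-labelling idea in the STARTS branch above (the values below are hypothetical): changes between neighbouring elements become run ids via a cumulative sum, and tf.segment_min over the element positions then yields each run's starting index.

import tensorflow as tf

values = tf.constant([3, 3, 5, 5, 5, 2])
run_start_bool = tf.concat([[False], tf.not_equal(values[1:], values[:-1])], axis=0)
segment_ids = tf.cumsum(tf.cast(run_start_bool, tf.int32))      # [0, 0, 1, 1, 1, 2]
starts = tf.segment_min(tf.range(6), segment_ids)               # run starts -> [0, 2, 5]
lengths = tf.segment_sum(tf.ones([6], tf.int32), segment_ids)   # run lengths -> [2, 3, 1]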