Python tensorflow.reduce_all() Examples
The following are 30 code examples of tensorflow.reduce_all(), drawn from open-source projects. The source file, project, and license are noted above each example.
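For reference, tf.reduce_all() computes the logical AND of a boolean tensor's elements, either over the whole tensor or along the given axes. A minimal sketch of the behavior the examples below rely on (TF 2.x eager execution assumed):

import tensorflow as tf

x = tf.constant([[True, True],
                 [False, True]])

tf.reduce_all(x)          # False: AND over every element
tf.reduce_all(x, axis=0)  # [False, True]: AND down each column
tf.reduce_all(x, axis=1)  # [True, False]: AND across each row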
Example #1
Source File: utils.py From zhusuan with MIT License
def is_same_dynamic_shape(x, y):
    """
    Whether `x` and `y` have the same dynamic shape.

    :param x: A Tensor.
    :param y: A Tensor.

    :return: A scalar Tensor of `bool`.
    """
    # There is a bug in TensorFlow that prevents correct static shape
    # inference in nested tf.cond()'s, so instead of comparing x's and y's
    # shapes directly we work with their concatenations.
    return tf.cond(
        tf.equal(tf.rank(x), tf.rank(y)),
        lambda: tf.reduce_all(tf.equal(
            tf.concat([tf.shape(x), tf.shape(y)], 0),
            tf.concat([tf.shape(y), tf.shape(x)], 0))),
        lambda: tf.convert_to_tensor(False, tf.bool))
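A quick sketch of how this helper might be called; the tensors are illustrative and TF 2.x eager execution is assumed:

x = tf.ones([2, 3])
y = tf.zeros([2, 3])
z = tf.ones([4])

is_same_dynamic_shape(x, y)  # scalar bool Tensor: True (same rank, same shape)
is_same_dynamic_shape(x, z)  # scalar bool Tensor: False (ranks differ)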
Example #2
Source File: helpers.py From Tacotron-2 with MIT License
def next_inputs(self, time, outputs, state, sample_ids, stop_token_prediction, name=None):
    '''Stop on EOS. Otherwise, pass the last output as the next input and pass through state.'''
    with tf.name_scope('TacoTestHelper'):
        # A sequence is finished when the output probability is > 0.5.
        finished = tf.cast(tf.round(stop_token_prediction), tf.bool)

        # Since we predict r frames at each step, two modes are possible:
        #   - Stop when the model outputs p > 0.5 for any of the r frames (recommended).
        #   - Stop when the model outputs p > 0.5 for all r frames (safer).
        # Note:
        #   With enough training steps, the model should learn to predict when to
        #   stop correctly, and stop_at_any=True is recommended. If the model has
        #   not yet learned to stop correctly (i.e. it stops too soon), the safer
        #   option can be used to get a correct synthesis.
        if self.stop_at_any:
            finished = tf.reduce_any(tf.reduce_all(finished, axis=0))  # Recommended
        else:
            finished = tf.reduce_all(tf.reduce_all(finished, axis=0))  # Safer option

        # Feed the last output frame as the next input. outputs is [N, output_dim * r].
        next_inputs = outputs[:, -self._output_dim:]
        next_state = state
        return (finished, next_inputs, next_state)
Example #3
Source File: tensor_utils.py From federated with Apache License 2.0
def zero_all_if_any_non_finite(structure):
    """Zeroes out all entries in input if any are not finite.

    Args:
      structure: A structure supported by tf.nest.

    Returns:
      A tuple (input, 0) if all entries are finite or the structure is empty, or
      a tuple (zeros, 1) if any non-finite entries were found.
    """
    flat = tf.nest.flatten(structure)
    if not flat:
        return (structure, tf.constant(0))
    flat_bools = [tf.reduce_all(tf.math.is_finite(t)) for t in flat]
    all_finite = functools.reduce(tf.logical_and, flat_bools)
    if all_finite:
        return (structure, tf.constant(0))
    else:
        return (tf.nest.map_structure(tf.zeros_like, structure), tf.constant(1))
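For intuition, an illustrative call under eager execution (the inputs here are made up; the function also relies on the module's functools import):

import numpy as np

good = {'w': tf.constant([1.0, 2.0])}
bad = {'w': tf.constant([1.0, np.nan])}

zero_all_if_any_non_finite(good)  # (structure unchanged, tf.constant(0))
zero_all_if_any_non_finite(bad)   # (all-zeros structure, tf.constant(1))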
Example #4
Source File: metrics.py From keras-image-captioning with MIT License
def categorical_accuracy_with_variable_timestep(y_true, y_pred):
    # Actually discarding is not needed if the dummy is an all-zeros array
    # (it is indeed encoded in an all-zeros array by
    # CaptionPreprocessing.preprocess_batch).
    y_true = y_true[:, :-1, :]  # Discard the last timestep/word (dummy)
    y_pred = y_pred[:, :-1, :]  # Discard the last timestep/word (dummy)

    # Flatten the timestep dimension.
    shape = tf.shape(y_true)
    y_true = tf.reshape(y_true, [-1, shape[-1]])
    y_pred = tf.reshape(y_pred, [-1, shape[-1]])

    # Discard rows that are all zeros as they represent dummy or padding words.
    is_zero_y_true = tf.equal(y_true, 0)
    is_zero_row_y_true = tf.reduce_all(is_zero_y_true, axis=-1)
    y_true = tf.boolean_mask(y_true, ~is_zero_row_y_true)
    y_pred = tf.boolean_mask(y_pred, ~is_zero_row_y_true)

    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y_true, axis=1),
                                               tf.argmax(y_pred, axis=1)),
                                      dtype=tf.float32))
    return accuracy

# As Keras stores a function's name as its metric's name
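The key step is the tf.reduce_all over the last axis, which flags rows that are entirely zero. In isolation, with made-up values:

y = tf.constant([[0, 1, 0],
                 [0, 0, 0]])

tf.reduce_all(tf.equal(y, 0), axis=-1)  # [False, True]: only the second row is all zeros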
Example #5
Source File: asserts.py From graphics with Apache License 2.0
def assert_binary(tensor, name=None):
    """Asserts that all the values in the tensor are zeros or ones.

    Args:
      tensor: A tensor of shape `[A1, ..., An]` containing the values we want
        to check.
      name: A name for this op. Defaults to "assert_binary".

    Returns:
      The input tensor, with dependence on the assertion operator in the graph.

    Raises:
      tf.errors.InvalidArgumentError: If any of the values in the tensor is not
        zero or one.
    """
    if not FLAGS[tfg_flags.TFG_ADD_ASSERTS_TO_GRAPH].value:
        return tensor

    with tf.compat.v1.name_scope(name, 'assert_binary', [tensor]):
        tensor = tf.convert_to_tensor(value=tensor)
        condition = tf.reduce_all(
            input_tensor=tf.logical_or(tf.equal(tensor, 0), tf.equal(tensor, 1)))
        with tf.control_dependencies([tf.Assert(condition, data=[tensor])]):
            return tf.identity(tensor)
Example #6
Source File: sequence_insert.py From onnx-tensorflow with Apache License 2.0
def chk_pos_in_bounds(cls, input_seq, pos):
    """
    Check the position is in-bounds with respect to the sequence.
    Accepted range for 'position' is in [-n, n], where n is the
    number of tensors in 'input_sequence'.

    :param input_seq: input sequence
    :param pos: position to insert the tensor

    :return: True if position is in-bounds.
    """
    seq_length = tf.shape(input_seq.to_sparse(), out_type=pos.dtype)[0]

    cond1 = tf.greater_equal(pos, tf.negative(seq_length))
    cond2 = tf.less_equal(pos, seq_length)

    # pos >= -n and pos <= n
    return tf.reduce_all(tf.logical_and(cond1, cond2))
Example #7
Source File: sequence_erase.py From onnx-tensorflow with Apache License 2.0
def chk_pos_in_bounds(cls, input_seq, pos):
    """
    Check the position is in-bounds with respect to the sequence.
    Accepted range for 'position' is in [-n, n - 1], where n is the
    number of tensors in 'input_sequence'.

    :param input_seq: input sequence
    :param pos: position of the output tensor

    :return: True if position is in-bounds
    """
    seq_length = tf.shape(input_seq.to_sparse(), out_type=pos.dtype)[0]

    cond1 = tf.greater_equal(pos, tf.negative(seq_length))
    cond2 = tf.less_equal(pos, seq_length - 1)

    # pos >= -n and pos < n
    return tf.reduce_all(tf.logical_and(cond1, cond2))
Example #8
Source File: sequence_at.py From onnx-tensorflow with Apache License 2.0
def chk_pos_in_bounds(cls, input_seq, pos):
    """
    Check the position is in-bounds with respect to the sequence.
    Accepted range for 'position' is in [-n, n - 1], where n is the
    number of tensors in 'input_sequence'.

    :param input_seq: input sequence
    :param pos: position of the output tensor

    :return: True if position is in-bounds or input length is dynamic.
    """
    seq_length = input_seq.shape[0]
    if seq_length is None:
        return True

    seq_length = tf.cast(seq_length, pos.dtype)

    cond1 = tf.greater_equal(pos, tf.negative(seq_length))
    cond2 = tf.less_equal(pos, seq_length - 1)

    # pos >= -n and pos < n
    return tf.reduce_all(tf.logical_and(cond1, cond2))
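The bounds check in these three handlers is the same reduce_all-over-conditions pattern. Written out directly for a length-3 sequence (a standalone sketch, not onnx-tensorflow code):

n = tf.constant(3)
pos = tf.constant(-3)

in_bounds = tf.reduce_all(tf.logical_and(
    tf.greater_equal(pos, tf.negative(n)),  # pos >= -3
    tf.less_equal(pos, n - 1)))             # pos <= 2
# in_bounds evaluates to True: -3 lies within [-3, 2]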
Example #9
Source File: helpers.py From vae_tacotron2 with MIT License
def next_inputs(self, time, outputs, state, sample_ids, stop_token_prediction, name=None):
    '''Stop on EOS. Otherwise, pass the last output as the next input and pass through state.'''
    with tf.name_scope('TacoTestHelper'):
        # A sequence is finished when the output probability is > 0.5.
        finished = tf.cast(tf.round(stop_token_prediction), tf.bool)

        # Since we predict r frames at each step, two modes are possible:
        #   - Stop when the model outputs p > 0.5 for any of the r frames (recommended).
        #   - Stop when the model outputs p > 0.5 for all r frames (safer).
        # Note:
        #   With enough training steps, the model should learn to predict when to
        #   stop correctly, and stop_at_any=True is recommended. If the model has
        #   not yet learned to stop correctly (i.e. it stops too soon), the safer
        #   option can be used to get a correct synthesis.
        if hparams.stop_at_any:
            finished = tf.reduce_any(finished)  # Recommended
        else:
            finished = tf.reduce_all(finished)  # Safer option

        # Feed the last output frame as the next input. outputs is [N, output_dim * r].
        next_inputs = outputs[:, -self._output_dim:]
        next_state = state
        return (finished, next_inputs, next_state)
Example #10
Source File: Util.py From MOTSFusion with MIT License
def random_crop_image(img, size, offset=None):
    # Adapted from code from tf.random_crop.
    shape = tf.shape(img)
    # Remove the assertion for now since it makes the queue filling slow for some reason:
    # check = tf.Assert(
    #     tf.reduce_all(shape[:2] >= size),
    #     ["Need value.shape >= size, got ", shape, size])
    # with tf.control_dependencies([check]):
    #     img = tf.identity(img)
    limit = shape[:2] - size + 1
    dtype = tf.int32
    if offset is None:
        offset = tf.random_uniform(shape=(2,), dtype=dtype, maxval=dtype.max, seed=None) % limit
        offset = tf.stack([offset[0], offset[1], 0])
    size0 = size[0] if isinstance(size[0], int) else None
    size1 = size[1] if isinstance(size[1], int) else None
    size_im = tf.stack([size[0], size[1], img.get_shape().as_list()[2]])
    img_cropped = tf.slice(img, offset, size_im)
    out_shape_img = [size0, size1, img.get_shape()[2]]
    img_cropped.set_shape(out_shape_img)
    return img_cropped, offset
Example #11
Source File: sampler.py From addons with Apache License 2.0
def next_inputs(self, time, outputs, state, sample_ids):
    (finished, base_next_inputs, state) = super().next_inputs(
        time=time, outputs=outputs, state=state, sample_ids=sample_ids
    )

    def maybe_sample():
        """Perform scheduled sampling."""
        where_sampling = tf.cast(tf.where(sample_ids > -1), tf.int32)
        where_not_sampling = tf.cast(tf.where(sample_ids <= -1), tf.int32)
        sample_ids_sampling = tf.gather_nd(sample_ids, where_sampling)
        inputs_not_sampling = tf.gather_nd(base_next_inputs, where_not_sampling)
        sampled_next_inputs = self.embedding_fn(sample_ids_sampling)
        base_shape = tf.shape(base_next_inputs)
        return tf.scatter_nd(
            indices=where_sampling, updates=sampled_next_inputs, shape=base_shape
        ) + tf.scatter_nd(
            indices=where_not_sampling,
            updates=inputs_not_sampling,
            shape=base_shape,
        )

    all_finished = tf.reduce_all(finished)
    next_inputs = tf.cond(all_finished, lambda: base_next_inputs, maybe_sample)
    return (finished, next_inputs, state)
Example #12
Source File: utils.py From SPADE-Tensorflow with MIT License
def convert_from_color_segmentation(color_value_dict, arr_3d, tensor_type=False):
    if tensor_type:
        arr_2d = tf.zeros(shape=[tf.shape(arr_3d)[0], tf.shape(arr_3d)[1]], dtype=tf.uint8)

        for c, i in color_value_dict.items():
            color_array = tf.reshape(np.asarray(c, dtype=np.uint8), shape=[1, 1, -1])
            condition = tf.reduce_all(tf.equal(arr_3d, color_array), axis=-1)
            arr_2d = tf.where(condition, tf.cast(tf.fill(tf.shape(arr_2d), i), tf.uint8), arr_2d)

        return arr_2d
    else:
        arr_2d = np.zeros((np.shape(arr_3d)[0], np.shape(arr_3d)[1]), dtype=np.uint8)

        for c, i in color_value_dict.items():
            color_array = np.asarray(c, np.float32).reshape([1, 1, -1])
            m = np.all(arr_3d == color_array, axis=-1)
            arr_2d[m] = i

        return arr_2d
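The per-color test reduces over the channel axis, so a pixel matches a color only when every channel matches. A toy sketch with a 1x2 RGB image (values are illustrative):

img = tf.constant([[[255, 0, 0], [0, 255, 0]]], dtype=tf.uint8)
red = tf.constant([[[255, 0, 0]]], dtype=tf.uint8)

tf.reduce_all(tf.equal(img, red), axis=-1)  # [[True, False]]: only the first pixel is red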
Example #13
Source File: inputter.py From OpenNMT-tf with MIT License
def keep_for_training(self, features, maximum_length=None):
    if not isinstance(maximum_length, list):
        maximum_length = [maximum_length]
    # Unset maximum lengths are set to None (i.e. no constraint).
    maximum_length += [None] * (len(self.inputters) - len(maximum_length))
    constraints = []
    for i, inputter in enumerate(self.inputters):
        keep = inputter.keep_for_training(
            self._index_features(features, i), maximum_length=maximum_length[i])
        if isinstance(keep, bool):
            if not keep:
                return False
            continue
        constraints.append(keep)
    if not constraints:
        return True
    return tf.reduce_all(constraints)
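The final line works because tf.reduce_all also accepts a Python list of scalar boolean tensors, converting it to a 1-D tensor before reducing. For example:

constraints = [tf.constant(True), tf.constant(False)]
tf.reduce_all(constraints)  # False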
Example #14
Source File: losses.py From DOTA_models with Apache License 2.0
def log_quaternion_loss_batch(predictions, labels, params):
    """A helper function to compute the error between quaternions.

    Args:
      predictions: A Tensor of size [batch_size, 4].
      labels: A Tensor of size [batch_size, 4].
      params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'.

    Returns:
      A Tensor of size [batch_size], denoting the error between the quaternions.
    """
    use_logging = params['use_logging']
    assertions = []
    if use_logging:
        assertions.append(
            tf.Assert(
                tf.reduce_all(
                    tf.less(
                        tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1),
                        1e-4)),
                ['The l2 norm of each prediction quaternion vector should be 1.']))
        assertions.append(
            tf.Assert(
                tf.reduce_all(
                    tf.less(
                        tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1),
                        1e-4)),
                ['The l2 norm of each label quaternion vector should be 1.']))
    with tf.control_dependencies(assertions):
        product = tf.multiply(predictions, labels)
    internal_dot_products = tf.reduce_sum(product, [1])
    if use_logging:
        internal_dot_products = tf.Print(
            internal_dot_products,
            [internal_dot_products, tf.shape(internal_dot_products)],
            'internal_dot_products:')
    logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))
    return logcost
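The assertion guards that each quaternion has approximately unit L2 norm. The check in isolation, with an illustrative unit quaternion:

q = tf.constant([[0.0, 0.0, 0.0, 1.0]])
tf.reduce_all(
    tf.less(tf.abs(tf.reduce_sum(tf.square(q), [1]) - 1), 1e-4))  # True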
Example #15
Source File: rnn_test.py From deep_image_model with Apache License 2.0
def testLoopState(self):
    with self.test_session(graph=tf.Graph()):
        max_time = 10
        batch_size = 16
        input_depth = 4
        num_units = 3

        inputs = np.random.randn(max_time, batch_size, input_depth)
        inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
        inputs_ta = inputs_ta.unpack(inputs)

        cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)

        def loop_fn(time_, cell_output, cell_state, loop_state):
            if cell_output is None:
                loop_state = tf.constant([0])
                next_state = cell.zero_state(batch_size, tf.float32)
            else:
                loop_state = tf.stack([tf.squeeze(loop_state) + 1])
                next_state = cell_state
            emit_output = cell_output  # == None for time == 0
            elements_finished = tf.tile([time_ >= max_time], [batch_size])
            finished = tf.reduce_all(elements_finished)
            # For the very final iteration, we must emit a dummy input.
            next_input = tf.cond(
                finished,
                lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
                lambda: inputs_ta.read(time_))
            return (elements_finished, next_input, next_state, emit_output,
                    loop_state)

        r = tf.nn.raw_rnn(cell, loop_fn)
        loop_state = r[-1]
        self.assertEqual([10], loop_state.eval())
Example #16
Source File: staffline_distance.py From moonlight with Apache License 2.0
def _single_peak(values, relative_cutoff, minval, invalidate_distance):
    """Takes a single peak if it is high enough compared to all other peaks.

    Args:
      values: 1D tensor of values to take the peaks on.
      relative_cutoff: The fraction of the highest peak which all other peaks
        should be below.
      minval: The peak should have at least this value.
      invalidate_distance: Exclude values that are up to invalidate_distance
        away from the peak.

    Returns:
      The index of the single peak in `values`, or -1 if there is not a single
      peak that satisfies `relative_cutoff`.
    """
    relative_cutoff = tf.convert_to_tensor(relative_cutoff, tf.float32)

    # argmax is safe because the histogram is always non-empty.
    peak = tf.to_int32(tf.argmax(values))
    # Take values > minval away from the peak.
    other_values = tf.boolean_mask(
        values,
        tf.greater(tf.abs(tf.range(tf.shape(values)[0]) - peak),
                   invalidate_distance))
    should_take_peak = tf.logical_and(
        tf.greater_equal(values[peak], minval),
        # values[peak] * relative_cutoff must be >= other_values.
        tf.reduce_all(
            tf.greater_equal(
                tf.to_float(values[peak]) * relative_cutoff,
                tf.to_float(other_values))))
    return tf.cond(should_take_peak, lambda: peak, lambda: -1)
Example #17
Source File: util.py From coref-ee with Apache License 2.0
def compute_b3_lost(p_m_entity, x_gold_class_cluster_ids_supgen, k, beta=2.0):
    # Remove singleton entities.
    gold_entities = tf.reduce_sum(x_gold_class_cluster_ids_supgen, 0) > 1.2
    sys_m_e = tf.one_hot(tf.argmax(p_m_entity, 1), k)
    sys_entities = tf.reduce_sum(sys_m_e, 0) > 1.2

    gold_entity_filter = tf.reshape(tf.where(gold_entities), [-1])
    gold_cluster = tf.gather(tf.transpose(x_gold_class_cluster_ids_supgen),
                             gold_entity_filter)

    sys_entity_filter, merge = tf.cond(
        pred=tf.reduce_any(sys_entities & gold_entities),
        fn1=lambda: (tf.reshape(tf.where(sys_entities), [-1]), tf.constant(0)),
        fn2=lambda: (tf.reshape(tf.where(sys_entities | gold_entities), [-1]),
                     tf.constant(1)))
    system_cluster = tf.gather(tf.transpose(p_m_entity), sys_entity_filter)

    # Compute intersections.
    gold_sys_intersect = tf.pow(
        tf.matmul(gold_cluster, system_cluster, transpose_b=True), 2)
    r_num = tf.reduce_sum(
        tf.reduce_sum(gold_sys_intersect, 1) / tf.reduce_sum(gold_cluster, 1))
    r_den = tf.reduce_sum(gold_cluster)
    recall = tf.reshape(r_num / r_den, [])

    sys_gold_intersection = tf.transpose(gold_sys_intersect)
    p_num = tf.reduce_sum(
        tf.reduce_sum(sys_gold_intersection, 1) / tf.reduce_sum(system_cluster, 1))
    p_den = tf.reduce_sum(system_cluster)
    prec = tf.reshape(p_num / p_den, [])

    beta_2 = beta ** 2
    f_beta = (1 + beta_2) * prec * recall / (beta_2 * prec + recall)

    lost = -f_beta
    # lost = tf.Print(lost, [merge,
    #                        r_num, r_den, p_num, p_den,
    #                        gold_entity_filter, sys_entity_filter,
    #                        tf.reduce_sum(p_m_entity, 0),
    #                        beta, recall, prec, f_beta], summarize=1000)

    return tf.cond(
        pred=tf.reduce_all([r_num > .1, p_num > .1, r_den > .1, p_den > .1]),
        fn1=lambda: lost,
        fn2=lambda: tf.stop_gradient(tf.constant(0.)))
Example #18
Source File: rnn_test.py From deep_image_model with Apache License 2.0
def testRawRNNScope(self):
    max_time = 10
    batch_size = 16
    input_depth = 4
    num_units = 3

    def factory(scope):
        inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
                                dtype=tf.float32)
        sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
        inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
        inputs_ta = inputs_ta.unpack(inputs)

        cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)

        def loop_fn(time_, cell_output, cell_state, unused_loop_state):
            emit_output = cell_output  # == None for time == 0
            if cell_output is None:  # time == 0
                next_state = cell.zero_state(batch_size, tf.float32)
            else:
                next_state = cell_state

            elements_finished = (time_ >= sequence_length)
            finished = tf.reduce_all(elements_finished)
            # For the very final iteration, we must emit a dummy input.
            next_input = tf.cond(
                finished,
                lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
                lambda: inputs_ta.read(time_))
            return (elements_finished, next_input, next_state, emit_output, None)

        return tf.nn.raw_rnn(cell, loop_fn, scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
Example #19
Source File: rnn_test.py From deep_image_model with Apache License 2.0
def testEmitDifferentStructureThanCellOutput(self):
    with self.test_session(graph=tf.Graph()) as sess:
        max_time = 10
        batch_size = 16
        input_depth = 4
        num_units = 3

        inputs = np.random.randn(max_time, batch_size, input_depth)
        inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
        inputs_ta = inputs_ta.unpack(inputs)

        cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)

        def loop_fn(time_, cell_output, cell_state, _):
            if cell_output is None:
                emit_output = (tf.zeros([2, 3], dtype=tf.int32),
                               tf.zeros([1], dtype=tf.int64))
                next_state = cell.zero_state(batch_size, tf.float32)
            else:
                emit_output = (tf.ones([batch_size, 2, 3], dtype=tf.int32),
                               tf.ones([batch_size, 1], dtype=tf.int64))
                next_state = cell_state
            elements_finished = tf.tile([time_ >= max_time], [batch_size])
            finished = tf.reduce_all(elements_finished)
            # For the very final iteration, we must emit a dummy input.
            next_input = tf.cond(
                finished,
                lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
                lambda: inputs_ta.read(time_))
            return (elements_finished, next_input, next_state, emit_output, None)

        r = tf.nn.raw_rnn(cell, loop_fn)
        output_ta = r[0]
        self.assertEqual(2, len(output_ta))
        self.assertEqual([tf.int32, tf.int64], [ta.dtype for ta in output_ta])
        output = [ta.pack() for ta in output_ta]
        output_vals = sess.run(output)
        self.assertAllEqual(
            np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])
        self.assertAllEqual(
            np.ones((max_time, batch_size, 1), np.int64), output_vals[1])
Example #20
Source File: rnn_test.py From deep_image_model with Apache License 2.0
def testLoopStateWithTensorArray(self):
    with self.test_session(graph=tf.Graph()):
        max_time = 4
        batch_size = 16
        input_depth = 4
        num_units = 3

        inputs = np.random.randn(max_time, batch_size, input_depth)
        inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
        inputs_ta = inputs_ta.unpack(inputs)

        cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)

        def loop_fn(time_, cell_output, cell_state, loop_state):
            if cell_output is None:
                loop_state = tf.TensorArray(
                    dynamic_size=True, size=0, dtype=tf.int32,
                    clear_after_read=False)
                loop_state = loop_state.write(0, 1)
                next_state = cell.zero_state(batch_size, tf.float32)
            else:
                loop_state = loop_state.write(
                    time_, loop_state.read(time_ - 1) + time_)
                next_state = cell_state
            emit_output = cell_output  # == None for time == 0
            elements_finished = tf.tile([time_ >= max_time], [batch_size])
            finished = tf.reduce_all(elements_finished)
            # For the very final iteration, we must emit a dummy input.
            next_input = tf.cond(
                finished,
                lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
                lambda: inputs_ta.read(time_))
            return (elements_finished, next_input, next_state, emit_output,
                    loop_state)

        r = tf.nn.raw_rnn(cell, loop_fn)
        loop_state = r[-1]
        loop_state = loop_state.pack()
        self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())
Example #21
Source File: uniform_test.py From deep_image_model with Apache License 2.0
def testUniformSamplePdf(self):
    with self.test_session():
        a = 10.0
        b = [11.0, 100.0]
        uniform = tf.contrib.distributions.Uniform(a, b)
        self.assertTrue(
            tf.reduce_all(uniform.pdf(uniform.sample(10)) > 0).eval())
Example #22
Source File: variable_mgr.py From training_results_v0.5 with Apache License 2.0
def preprocess_device_grads(self, device_grads):
    compact_grads = (self.benchmark_cnn.params.use_fp16 and
                     self.benchmark_cnn.params.compact_gradient_transfer)
    defer_grads = (self.benchmark_cnn.params.variable_consistency == 'relaxed')

    grads_to_reduce = [[g for g, _ in grad_vars] for grad_vars in device_grads]
    algorithm = batch_allreduce.algorithm_from_params(self.benchmark_cnn.params)
    reduced_grads, self._warmup_ops = algorithm.batch_all_reduce(
        grads_to_reduce, self.benchmark_cnn.params.gradient_repacking,
        compact_grads, defer_grads)
    assert not self._warmup_ops

    if (self.benchmark_cnn.params.use_fp16 and
            self.benchmark_cnn.enable_auto_loss_scale):
        # Check for infs or nans.
        is_finite_list = []
        with tf.name_scope('check_for_inf_and_nan'):
            for tower_grads in reduced_grads:
                with tf.colocate_with(tower_grads[0]):
                    # TODO(tanmingxing): Create fused op that takes in a list of
                    # tensors as input and returns scalar boolean True if there
                    # are any infs/nans.
                    is_finite_list.append(tf.reduce_all(
                        [tf.reduce_all(tf.is_finite(g)) for g in tower_grads]))
            self.grad_has_inf_nan = tf.logical_not(tf.reduce_all(is_finite_list))

    reduced_device_grads = [[
        (g, v) for g, (_, v) in zip(grads, grad_vars)
    ] for grads, grad_vars in zip(reduced_grads, device_grads)]
    return self.benchmark_cnn.devices, reduced_device_grads
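The nested reduce_all collapses per-gradient finiteness checks into a single scalar used for automatic loss scaling. The core pattern, in isolation (using tf.math.is_finite, the non-deprecated spelling of tf.is_finite):

grads = [tf.constant([1.0, 2.0]), tf.constant([float('inf')])]
all_finite = tf.reduce_all(
    [tf.reduce_all(tf.math.is_finite(g)) for g in grads])
has_inf_nan = tf.logical_not(all_finite)  # True: the second gradient is not finite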
Example #23
Source File: sampler.py From addons with Apache License 2.0
def next_inputs(self, time, outputs, state, sample_ids):
    """next_inputs_fn for GreedyEmbeddingHelper."""
    del time, outputs  # unused by next_inputs_fn
    finished = tf.equal(sample_ids, self.end_token)
    all_finished = tf.reduce_all(finished)
    next_inputs = tf.cond(
        all_finished,
        # If we're finished, the next_inputs value doesn't matter
        lambda: self.start_inputs,
        lambda: self.embedding_fn(sample_ids),
    )
    return (finished, next_inputs, state)
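Decoding halts only once every sequence in the batch has emitted the end token, which is exactly what the reduce_all expresses. A minimal sketch with an assumed end token id of 2:

end_token = 2
sample_ids = tf.constant([2, 5, 2])

finished = tf.equal(sample_ids, end_token)  # [True, False, True]
tf.reduce_all(finished)                     # False: one sequence is still decoding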
Example #24
Source File: pixelda_losses.py From yolo_v2 with Apache License 2.0
def log_quaternion_loss_batch(predictions, labels, params):
    """A helper function to compute the error between quaternions.

    Args:
      predictions: A Tensor of size [batch_size, 4].
      labels: A Tensor of size [batch_size, 4].
      params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'.

    Returns:
      A Tensor of size [batch_size], denoting the error between the quaternions.
    """
    use_logging = params['use_logging']
    assertions = []
    if use_logging:
        assertions.append(
            tf.Assert(
                tf.reduce_all(
                    tf.less(
                        tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1),
                        1e-4)),
                ['The l2 norm of each prediction quaternion vector should be 1.']))
        assertions.append(
            tf.Assert(
                tf.reduce_all(
                    tf.less(
                        tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1),
                        1e-4)),
                ['The l2 norm of each label quaternion vector should be 1.']))
    with tf.control_dependencies(assertions):
        product = tf.multiply(predictions, labels)
    internal_dot_products = tf.reduce_sum(product, [1])
    if use_logging:
        internal_dot_products = tf.Print(internal_dot_products, [
            internal_dot_products, tf.shape(internal_dot_products)
        ], 'internal_dot_products:')
    logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))
    return logcost
Example #25
Source File: segmentation_metrics.py From multi_object_datasets with Apache License 2.0
def _all_equal(values):
    """Whether values are all equal along the final axis."""
    return tf.reduce_all(tf.equal(values, values[..., :1]), axis=-1)
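A quick illustration of this one-liner with made-up values:

vals = tf.constant([[3, 3, 3],
                    [1, 2, 1]])

_all_equal(vals)  # [True, False]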
Example #26
Source File: test_restorers.py From ashpy with Apache License 2.0
def _check_models_weights(trained: tf.keras.Model, restored: tf.keras.Model, i=0):
    """Test that the first layers of the restored and trained model have the same weights."""
    try:
        for i, element in enumerate(trained.weights):
            assert tf.reduce_all(tf.equal(element, restored.weights[i]))
    except AssertionError:
        raise ModelNotConstructedError
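The same element-wise comparison works outside a test harness to check that two models carry identical weights. A sketch, assuming two structurally identical Keras models m1 and m2 (both names are hypothetical):

weights_match = all(
    bool(tf.reduce_all(tf.equal(w1, w2)))
    for w1, w2 in zip(m1.weights, m2.weights))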
Example #27
Source File: losses.py From yolo_v2 with Apache License 2.0
def log_quaternion_loss_batch(predictions, labels, params):
    """A helper function to compute the error between quaternions.

    Args:
      predictions: A Tensor of size [batch_size, 4].
      labels: A Tensor of size [batch_size, 4].
      params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'.

    Returns:
      A Tensor of size [batch_size], denoting the error between the quaternions.
    """
    use_logging = params['use_logging']
    assertions = []
    if use_logging:
        assertions.append(
            tf.Assert(
                tf.reduce_all(
                    tf.less(
                        tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1),
                        1e-4)),
                ['The l2 norm of each prediction quaternion vector should be 1.']))
        assertions.append(
            tf.Assert(
                tf.reduce_all(
                    tf.less(
                        tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1),
                        1e-4)),
                ['The l2 norm of each label quaternion vector should be 1.']))
    with tf.control_dependencies(assertions):
        product = tf.multiply(predictions, labels)
    internal_dot_products = tf.reduce_sum(product, [1])
    if use_logging:
        internal_dot_products = tf.Print(
            internal_dot_products,
            [internal_dot_products, tf.shape(internal_dot_products)],
            'internal_dot_products:')
    logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))
    return logcost
Example #28
Source File: test_tensorflow.py From training_results_v0.6 with Apache License 2.0
def test_horovod_allreduce_cpu_fused(self):
    """Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors
    with Tensor Fusion."""
    hvd.init()
    size = hvd.size()
    with self.test_session(config=self.config) as session:
        dtypes = [tf.int32, tf.int64, tf.float32, tf.float64]
        dims = [1, 2, 3]
        tests = []
        for dtype, dim in itertools.product(dtypes, dims):
            with tf.device("/cpu:0"):
                tf.set_random_seed(1234)
                tensor = tf.random_uniform(
                    [17] * dim, -100, 100, dtype=dtype)
                summed = hvd.allreduce(tensor, average=False)
            multiplied = tensor * size
            max_difference = tf.reduce_max(tf.abs(summed - multiplied))

            # Threshold for floating point equality depends on number of
            # ranks, since we're comparing against precise multiplication.
            if size <= 3 or dtype in [tf.int32, tf.int64]:
                threshold = 0
            elif size < 10:
                threshold = 1e-4
            elif size < 15:
                threshold = 5e-4
            else:
                break

            test = max_difference <= threshold
            tests.append(test)
        self.assertTrue(session.run(tf.reduce_all(tests)),
                        "hvd.allreduce produces incorrect results")
Example #29
Source File: test_tensorflow.py From training_results_v0.6 with Apache License 2.0
def test_horovod_allgather(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    with self.test_session(config=self.config) as session:
        dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
                  tf.int32, tf.int64, tf.float16, tf.float32,
                  tf.float64, tf.bool]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            tensor = tf.ones([17] * dim) * rank
            if dtype == tf.bool:
                tensor = tensor % 2
            tensor = tf.cast(tensor, dtype=dtype)
            gathered = hvd.allgather(tensor)

            gathered_tensor = session.run(gathered)
            self.assertEqual(list(gathered_tensor.shape),
                             [17 * size] + [17] * (dim - 1))

            for i in range(size):
                rank_tensor = tf.slice(gathered_tensor,
                                       [i * 17] + [0] * (dim - 1),
                                       [17] + [-1] * (dim - 1))
                self.assertEqual(list(rank_tensor.shape), [17] * dim)
                # tf.equal() does not support tf.uint16 as of TensorFlow 1.2,
                # so need to cast rank_tensor to tf.int32.
                if dtype != tf.bool:
                    value = i
                else:
                    value = i % 2
                self.assertTrue(
                    session.run(tf.reduce_all(
                        tf.equal(tf.cast(rank_tensor, tf.int32), value))),
                    "hvd.allgather produces incorrect gathered tensor")
Example #30
Source File: test_tensorflow.py From training_results_v0.6 with Apache License 2.0
def test_horovod_broadcast(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        return

    with self.test_session(config=self.config) as session:
        dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
                  tf.int32, tf.int64, tf.float16, tf.float32,
                  tf.float64, tf.bool]
        dims = [1, 2, 3]
        root_ranks = list(range(size))
        for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
            tensor = tf.ones([17] * dim) * rank
            root_tensor = tf.ones([17] * dim) * root_rank
            if dtype == tf.bool:
                tensor = tensor % 2
                root_tensor = root_tensor % 2
            tensor = tf.cast(tensor, dtype=dtype)
            root_tensor = tf.cast(root_tensor, dtype=dtype)
            broadcasted_tensor = hvd.broadcast(tensor, root_rank)
            self.assertTrue(
                session.run(tf.reduce_all(tf.equal(
                    tf.cast(root_tensor, tf.int32),
                    tf.cast(broadcasted_tensor, tf.int32)))),
                "hvd.broadcast produces incorrect broadcasted tensor")