Python tensorflow.assert_greater() Examples
The following are 15 code examples of tensorflow.assert_greater(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the tensorflow module.
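Before the examples, a quick orientation: tf.assert_greater(x, y) returns an op that fails with an InvalidArgumentError if the condition x > y does not hold element-wise. In TensorFlow 1.x graph mode the returned op has no effect unless something executes it, so the common idiom, which recurs throughout the examples below, is to attach it as a control dependency of a tensor that is actually fetched. A minimal sketch (assuming TensorFlow 1.x; the tensor values are illustrative):

```python
import tensorflow as tf

x = tf.constant([2.0, 3.0])
y = tf.constant([1.0, 1.0])

# The assertion only runs when a fetched tensor depends on it.
with tf.control_dependencies(
        [tf.assert_greater(x, y, message='x must exceed y')]):
    out = tf.identity(x)

with tf.Session() as sess:
    print(sess.run(out))  # [2. 3.]; fails if any element of x <= y
```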
Example #1
Source File: custom_optimizer.py From dreamer with Apache License 2.0
```python
def maybe_minimize(self, condition, loss):
  with tf.name_scope('optimizer_{}'.format(self._name)):
    # loss = tf.cond(condition, lambda: loss, float)
    update_op, grad_norm = tf.cond(
        condition,
        lambda: self.minimize(loss),
        lambda: (tf.no_op(), 0.0))
    with tf.control_dependencies([update_op]):
      summary = tf.cond(
          tf.logical_and(condition, self._log),
          lambda: self.summarize(grad_norm), str)
    if self._debug:
      # print_op = tf.print('{}_grad_norm='.format(self._name), grad_norm)
      message = 'Zero gradient norm in {} optimizer.'.format(self._name)
      assertion = lambda: tf.assert_greater(grad_norm, 0.0, message=message)
      assert_op = tf.cond(condition, assertion, tf.no_op)
      with tf.control_dependencies([assert_op]):
        summary = tf.identity(summary)
    return summary, grad_norm
```
Example #2
Source File: utils.py From neuron with GNU General Public License v3.0
```python
def logistic_fixed_ends(x, start=-1., end=1., L=1., **kwargs):
    """
    f is logistic with fixed ends, so that f(start) = 0, and f(end) = L.

    this is currently done a bit heuristically: it's a sigmoid, with a
    linear function added to correct the ends.
    """
    assert end > start, 'End of fixed points should be greater than start'
    # tf.assert_greater(end, start, message='assert')

    # clip to start and end
    x = tf.clip_by_value(x, start, end)

    # logistic function
    xv = logistic(x, L=L, **kwargs)

    # ends of linear corrective function
    sv = logistic(start, L=L, **kwargs)
    ev = logistic(end, L=L, **kwargs)

    # corrective function
    df = end - start
    linear_corr = (end-x)/df * (- sv) + (x-start)/df * (-ev + L)

    # return fixed logistic
    return xv + linear_corr
```
Example #3
Source File: custom_optimizer.py From planet with Apache License 2.0
```python
def maybe_minimize(self, condition, loss):
  # loss = tf.cond(condition, lambda: loss, float)
  update_op, grad_norm = tf.cond(
      condition,
      lambda: self.minimize(loss),
      lambda: (tf.no_op(), 0.0))
  with tf.control_dependencies([update_op]):
    summary = tf.cond(
        tf.logical_and(condition, self._log),
        lambda: self.summarize(grad_norm), str)
  if self._debug:
    # print_op = tf.print('{}_grad_norm='.format(self._name), grad_norm)
    message = 'Zero gradient norm in {} optimizer.'.format(self._name)
    assertion = lambda: tf.assert_greater(grad_norm, 0.0, message=message)
    assert_op = tf.cond(condition, assertion, tf.no_op)
    with tf.control_dependencies([assert_op]):
      summary = tf.identity(summary)
  return summary, grad_norm
```
Example #4
Source File: check_ops_test.py From deep_image_model with Apache License 2.0
```python
def test_doesnt_raise_when_greater(self):
  with self.test_session():
    small = tf.constant([3, 1], name="small")
    big = tf.constant([4, 2], name="big")
    with tf.control_dependencies([tf.assert_greater(big, small)]):
      out = tf.identity(small)
    out.eval()
```
Example #5
Source File: ppo.py From batch-ppo with Apache License 2.0
```python
def _training(self):
  """Perform multiple training iterations of both policy and value baseline.

  Training on the episodes collected in the memory. Reset the memory
  afterwards. Always returns a summary string.

  Returns:
    Summary tensor.
  """
  with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
    with tf.name_scope('training'):
      assert_full = tf.assert_equal(
          self._num_finished_episodes, self._config.update_every)
      with tf.control_dependencies([assert_full]):
        data = self._finished_episodes.data()
      (observ, action, old_policy_params, reward), length = data
      # We set padding frames of the parameters to ones to prevent Gaussians
      # with zero variance. This would result in an infinite KL divergence,
      # which, even if masked out, would result in NaN gradients.
      old_policy_params = tools.nested.map(
          lambda param: self._mask(param, length, 1), old_policy_params)
      with tf.control_dependencies([tf.assert_greater(length, 0)]):
        length = tf.identity(length)
      observ = self._observ_filter.transform(observ)
      reward = self._reward_filter.transform(reward)
      update_summary = self._perform_update_steps(
          observ, action, old_policy_params, reward, length)
      with tf.control_dependencies([update_summary]):
        penalty_summary = self._adjust_penalty(
            observ, old_policy_params, length)
      with tf.control_dependencies([penalty_summary]):
        clear_memory = tf.group(
            self._finished_episodes.clear(),
            self._num_finished_episodes.assign(0))
      with tf.control_dependencies([clear_memory]):
        weight_summary = utility.variable_summaries(
            tf.trainable_variables(), self._config.weight_summaries)
      return tf.summary.merge([
          update_summary, penalty_summary, weight_summary])
```
Example #6
Source File: pooling.py From neuralmonkey with BSD 3-Clause "New" or "Revised" License
```python
def output(self) -> tf.Tensor:
    # Pad the sequence with a large negative value, but make sure it has
    # non-zero length.
    length = tf.reduce_sum(self._input_mask)
    with tf.control_dependencies([tf.assert_greater(length, 0.5)]):
        padded_input = self._masked_input + 1e-15 * (1 - self._input_mask)
    return tf.reduce_max(padded_input, axis=1)
```
Example #7
Source File: check_ops_test.py From deep_image_model with Apache License 2.0
```python
def test_raises_when_greater_but_non_broadcastable_shapes(self):
  with self.test_session():
    small = tf.constant([1, 1, 1], name="small")
    big = tf.constant([3, 2], name="big")
    with self.assertRaisesRegexp(ValueError, "must be"):
      with tf.control_dependencies([tf.assert_greater(big, small)]):
        out = tf.identity(small)
      out.eval()
```
Example #8
Source File: check_ops_test.py From deep_image_model with Apache License 2.0
```python
def test_doesnt_raise_when_greater_and_broadcastable_shapes(self):
  with self.test_session():
    small = tf.constant([1], name="small")
    big = tf.constant([3, 2], name="big")
    with tf.control_dependencies([tf.assert_greater(big, small)]):
      out = tf.identity(small)
    out.eval()
```
Example #9
Source File: algorithm.py From soccer-matlab with BSD 2-Clause "Simplified" License
```python
def _training(self):
  """Perform multiple training iterations of both policy and value baseline.

  Training on the episodes collected in the memory. Reset the memory
  afterwards. Always returns a summary string.

  Returns:
    Summary tensor.
  """
  with tf.name_scope('training'):
    assert_full = tf.assert_equal(
        self._memory_index, self._config.update_every)
    with tf.control_dependencies([assert_full]):
      data = self._memory.data()
    (observ, action, old_mean, old_logstd, reward), length = data
    with tf.control_dependencies([tf.assert_greater(length, 0)]):
      length = tf.identity(length)
    observ = self._observ_filter.transform(observ)
    reward = self._reward_filter.transform(reward)
    policy_summary = self._update_policy(
        observ, action, old_mean, old_logstd, reward, length)
    with tf.control_dependencies([policy_summary]):
      value_summary = self._update_value(observ, reward, length)
    with tf.control_dependencies([value_summary]):
      penalty_summary = self._adjust_penalty(
          observ, old_mean, old_logstd, length)
    with tf.control_dependencies([penalty_summary]):
      clear_memory = tf.group(
          self._memory.clear(), self._memory_index.assign(0))
    with tf.control_dependencies([clear_memory]):
      weight_summary = utility.variable_summaries(
          tf.trainable_variables(), self._config.weight_summaries)
    return tf.summary.merge([
        policy_summary, value_summary, penalty_summary, weight_summary])
```
Example #10
Source File: check_ops_test.py From deep_image_model with Apache License 2.0
```python
def test_raises_when_less(self):
  with self.test_session():
    small = tf.constant([1, 2], name="small")
    big = tf.constant([3, 4], name="big")
    with tf.control_dependencies([tf.assert_greater(small, big)]):
      out = tf.identity(big)
    with self.assertRaisesOpError("small.*big"):
      out.eval()
```
Example #11
Source File: check_ops_test.py From deep_image_model with Apache License 2.0
```python
def test_raises_when_equal(self):
  with self.test_session():
    small = tf.constant([1, 2], name="small")
    with tf.control_dependencies(
        [tf.assert_greater(small, small, message="fail")]):
      out = tf.identity(small)
    with self.assertRaisesOpError("fail.*small.*small"):
      out.eval()
```
Example #12
Source File: univariate.py From zhusuan with MIT License
```python
def __init__(self, logits, n_experiments, dtype=tf.int32,
             group_ndims=0, check_numerics=False, **kwargs):
    self._logits = tf.convert_to_tensor(logits)
    param_dtype = assert_same_float_dtype(
        [(self._logits, 'Binomial.logits')])

    assert_dtype_is_int_or_float(dtype)

    sign_err_msg = "n_experiments must be positive"
    if isinstance(n_experiments, int):
        if n_experiments <= 0:
            raise ValueError(sign_err_msg)
        self._n_experiments = n_experiments
    else:
        try:
            n_experiments = tf.convert_to_tensor(n_experiments, tf.int32)
        except ValueError:
            raise TypeError('n_experiments must be int32')
        _assert_rank_op = tf.assert_rank(
            n_experiments, 0,
            message="n_experiments should be a scalar (0-D Tensor).")
        _assert_positive_op = tf.assert_greater(
            n_experiments, 0, message=sign_err_msg)
        with tf.control_dependencies([_assert_rank_op,
                                      _assert_positive_op]):
            self._n_experiments = tf.identity(n_experiments)

    self._check_numerics = check_numerics
    super(Binomial, self).__init__(
        dtype=dtype,
        param_dtype=param_dtype,
        is_continuous=False,
        is_reparameterized=False,
        group_ndims=group_ndims,
        **kwargs)
```
Example #13
Source File: utils.py From zhusuan with MIT License
```python
def assert_positive_int32_scalar(value, name):
    """
    Whether `value` is an integer (or 0-D `tf.int32` tensor) and positive.
    If `value` is an instance of a built-in type, it will be checked
    directly. Otherwise, it will be converted to a `tf.int32` tensor and
    checked.

    :param value: The value to be checked.
    :param name: The name of `value` used in error message.

    :return: The checked value.
    """
    if isinstance(value, (int, float)):
        if isinstance(value, int) and value > 0:
            return value
        elif isinstance(value, float):
            raise TypeError(name + " must be integer")
        elif value <= 0:
            raise ValueError(name + " must be positive")
    else:
        try:
            tensor = tf.convert_to_tensor(value, tf.int32)
        except (TypeError, ValueError):
            raise TypeError(name + ' must be (convertible to) tf.int32')
        _assert_rank_op = tf.assert_rank(
            tensor, 0,
            message=name + " should be a scalar (0-D Tensor).")
        _assert_positive_op = tf.assert_greater(
            tensor, tf.constant(0, tf.int32),
            message=name + " must be positive")
        with tf.control_dependencies([_assert_rank_op,
                                      _assert_positive_op]):
            tensor = tf.identity(tensor)
        return tensor
```
Example #14
Source File: algorithm.py From soccer-matlab with BSD 2-Clause "Simplified" License
```python
def _training(self):
  """Perform multiple training iterations of both policy and value baseline.

  Training on the episodes collected in the memory. Reset the memory
  afterwards. Always returns a summary string.

  Returns:
    Summary tensor.
  """
  with tf.name_scope('training'):
    assert_full = tf.assert_equal(
        self._memory_index, self._config.update_every)
    with tf.control_dependencies([assert_full]):
      data = self._memory.data()
    (observ, action, old_mean, old_logstd, reward), length = data
    with tf.control_dependencies([tf.assert_greater(length, 0)]):
      length = tf.identity(length)
    observ = self._observ_filter.transform(observ)
    reward = self._reward_filter.transform(reward)
    update_summary = self._perform_update_steps(
        observ, action, old_mean, old_logstd, reward, length)
    with tf.control_dependencies([update_summary]):
      penalty_summary = self._adjust_penalty(
          observ, old_mean, old_logstd, length)
    with tf.control_dependencies([penalty_summary]):
      clear_memory = tf.group(
          self._memory.clear(), self._memory_index.assign(0))
    with tf.control_dependencies([clear_memory]):
      weight_summary = utility.variable_summaries(
          tf.trainable_variables(), self._config.weight_summaries)
    return tf.summary.merge([
        update_summary, penalty_summary, weight_summary])
```
Example #15
Source File: camera_utils.py From tf_mesh_renderer with Apache License 2.0
```python
def look_at(eye, center, world_up):
  """Computes camera viewing matrices.

  Functionality mimes gluLookAt (third_party/GL/glu/include/GLU/glu.h).

  Args:
    eye: 2-D float32 tensor with shape [batch_size, 3] containing the XYZ
        world space position of the camera.
    center: 2-D float32 tensor with shape [batch_size, 3] containing a
        position along the center of the camera's gaze.
    world_up: 2-D float32 tensor with shape [batch_size, 3] specifying the
        world's up direction; the output camera will have no tilt with
        respect to this direction.

  Returns:
    A [batch_size, 4, 4] float tensor containing a right-handed camera
    extrinsics matrix that maps points from world space to points in eye
    space.
  """
  batch_size = center.shape[0].value
  vector_degeneracy_cutoff = 1e-6
  forward = center - eye
  forward_norm = tf.norm(forward, ord='euclidean', axis=1, keepdims=True)
  tf.assert_greater(
      forward_norm,
      vector_degeneracy_cutoff,
      message='Camera matrix is degenerate because eye and center are close.')
  forward = tf.divide(forward, forward_norm)

  to_side = tf.cross(forward, world_up)
  to_side_norm = tf.norm(to_side, ord='euclidean', axis=1, keepdims=True)
  tf.assert_greater(
      to_side_norm,
      vector_degeneracy_cutoff,
      message='Camera matrix is degenerate because up and gaze are close or '
      'because up is degenerate.')
  to_side = tf.divide(to_side, to_side_norm)
  cam_up = tf.cross(to_side, forward)

  w_column = tf.constant(
      batch_size * [[0., 0., 0., 1.]], dtype=tf.float32)  # [batch_size, 4]
  w_column = tf.reshape(w_column, [batch_size, 4, 1])
  view_rotation = tf.stack(
      [to_side, cam_up, -forward,
       tf.zeros_like(to_side, dtype=tf.float32)],
      axis=1)  # [batch_size, 4, 3] matrix
  view_rotation = tf.concat(
      [view_rotation, w_column], axis=2)  # [batch_size, 4, 4]

  identity_batch = tf.tile(tf.expand_dims(tf.eye(3), 0), [batch_size, 1, 1])
  view_translation = tf.concat([identity_batch, tf.expand_dims(-eye, 2)], 2)
  view_translation = tf.concat(
      [view_translation,
       tf.reshape(w_column, [batch_size, 1, 4])], 1)
  camera_matrices = tf.matmul(view_rotation, view_translation)
  return camera_matrices
```
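Most of the examples above target the TensorFlow 1.x API, where the assertion op must be wired in via tf.control_dependencies (or executed explicitly) to take effect. In TensorFlow 2.x the same check is available as tf.debugging.assert_greater, and under eager execution it fires immediately. A minimal sketch, assuming TF 2.x with eager execution enabled:

```python
import tensorflow as tf  # TensorFlow 2.x

x = tf.constant([2.0, 3.0])

# Under eager execution the check runs right away; no control
# dependencies are needed.
tf.debugging.assert_greater(x, 0.0, message='x must be positive')
```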