Python tensorflow.python.ops.control_flow_ops.with_dependencies() Examples
The following are 30 code examples of tensorflow.python.ops.control_flow_ops.with_dependencies().
You may also want to check out all available functions/classes of the module tensorflow.python.ops.control_flow_ops.
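As a quick orientation before the examples: with_dependencies(dependencies, output_tensor) returns a tensor equal to output_tensor, but evaluating it first runs every op in dependencies (typically assertion ops). A minimal sketch in TensorFlow 1.x graph mode, with illustrative tensor names:

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

x = tf.placeholder(tf.float32, shape=[None], name="x")
# The assertion must succeed before checked_x can be produced.
checked_x = control_flow_ops.with_dependencies(
    [tf.assert_non_negative(x)], x)

with tf.Session() as sess:
  print(sess.run(checked_x, feed_dict={x: [1., 2.]}))  # [1. 2.]
  # Feeding a negative value instead would raise InvalidArgumentError.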
Example #1
Source File: shape.py From lambda-packs with MIT License
def _assert_non_negative_int32_scalar(self, x):
  """Helper which ensures that input is a non-negative, int32, scalar."""
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.base_dtype != dtypes.int32.base_dtype:
    raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
  x_value_static = tensor_util.constant_value(x)
  if x.get_shape().ndims is not None and x_value_static is not None:
    if x.get_shape().ndims != 0:
      raise ValueError("%s.ndims=%d is not 0 (scalar)" %
                       (x.name, x.get_shape().ndims))
    if x_value_static < 0:
      raise ValueError("%s.value=%d cannot be negative" %
                       (x.name, x_value_static))
    return x
  if self.validate_args:
    x = control_flow_ops.with_dependencies([
        check_ops.assert_rank(x, 0),
        check_ops.assert_non_negative(x)], x)
  return x
Example #2
Source File: image_ops_impl.py From lambda-packs with MIT License
def flip_left_right(image):
  """Flip an image horizontally (left to right).

  Outputs the contents of `image` flipped along the second dimension, which is
  `width`.

  See also `reverse()`.

  Args:
    image: A 3-D tensor of shape `[height, width, channels]`.

  Returns:
    A 3-D tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` is not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  return fix_image_flip_shape(image, array_ops.reverse(image, [1]))
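This function is exposed publicly as tf.image.flip_left_right; a usage sketch in which the 3-D shape check runs before the reverse op executes:

import tensorflow as tf

image = tf.constant([[[1.], [2.]],
                     [[3.], [4.]]])        # shape [2, 2, 1]
flipped = tf.image.flip_left_right(image)  # reverses the width axis

with tf.Session() as sess:
  print(sess.run(flipped)[..., 0])         # [[2. 1.] [4. 3.]]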
Example #3
Source File: multinomial.py From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
  if self.n.get_shape().ndims is not None:
    if self.n.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape()[0]
  # Flatten batch dims so logits has shape [B, k],
  # where B = reduce_prod(self.batch_shape()).
  logits = array_ops.reshape(self.logits, [-1, k])
  draws = random_ops.multinomial(logits=logits,
                                 num_samples=n * n_draws,
                                 seed=seed)
  draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                          reduction_indices=-2)  # shape: [B, n, k]
  x = array_ops.transpose(x, perm=[1, 0, 2])
  final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
  return array_ops.reshape(x, final_shape)
Example #4
Source File: utils.py From FastMaskRCNN with Apache License 2.0
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding_box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
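A short usage sketch for the helper above, assuming the example's imports (tf and control_flow_ops) and the _crop definition are in scope; both assertions run before the slice executes:

import tensorflow as tf

image = tf.random_uniform([8, 8, 3])
patch = _crop(image, 2, 2, 4, 4)  # offsets (2, 2), crop size 4x4

with tf.Session() as sess:
  print(sess.run(patch).shape)    # (4, 4, 3)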
Example #5
Source File: image_ops_impl.py From lambda-packs with MIT License
def flip_up_down(image):
  """Flip an image vertically (upside down).

  Outputs the contents of `image` flipped along the first dimension, which is
  `height`.

  See also `reverse()`.

  Args:
    image: A 3-D tensor of shape `[height, width, channels]`.

  Returns:
    A 3-D tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` is not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  return fix_image_flip_shape(image, array_ops.reverse(image, [0]))
Example #6
Source File: numerics.py From lambda-packs with MIT License
def verify_tensor_all_finite(t, msg, name=None):
  """Assert that the tensor does not contain any NaN's or Inf's.

  Args:
    t: Tensor to check.
    msg: Message to log on failure.
    name: A name for this operation (optional).

  Returns:
    Same tensor as `t`.
  """
  with ops.name_scope(name, "VerifyFinite", [t]) as name:
    t = ops.convert_to_tensor(t, name="t")
    with ops.colocate_with(t):
      verify_input = array_ops.check_numerics(t, message=msg)
      out = control_flow_ops.with_dependencies([verify_input], t)
  return out
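This function ships as tf.verify_tensor_all_finite in TensorFlow 1.x; a quick sketch of the check tripping on a NaN:

import numpy as np
import tensorflow as tf

t = tf.constant([1.0, np.nan])
checked = tf.verify_tensor_all_finite(t, msg="t has NaN/Inf")

with tf.Session() as sess:
  sess.run(checked)  # raises InvalidArgumentError mentioning "t has NaN/Inf"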
Example #7
Source File: graph_builder.py From DOTA_models with Apache License 2.0
def _AddLearningRate(self, initial_learning_rate, decay_steps):
  """Returns a learning rate that decays by 0.96 every decay_steps.

  Args:
    initial_learning_rate: initial value of the learning rate
    decay_steps: decay by 0.96 every this many steps

  Returns:
    learning rate variable.
  """
  step = self.GetStep()
  return cf.with_dependencies(
      [self._IncrementCounter(step)],
      tf.train.exponential_decay(initial_learning_rate,
                                 step,
                                 decay_steps,
                                 0.96,
                                 staircase=True))
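For reference, exponential_decay with staircase=True computes initial_learning_rate * 0.96 ** (step // decay_steps); a small sketch of just that piece, with illustrative numbers:

import tensorflow as tf

step = tf.Variable(0, trainable=False)
lr = tf.train.exponential_decay(0.1, step, decay_steps=1000,
                                decay_rate=0.96, staircase=True)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(lr))              # 0.1
  sess.run(tf.assign(step, 2000))
  print(sess.run(lr))              # 0.1 * 0.96**2 ~= 0.09216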
Example #8
Source File: multinomial.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  if self.total_count.get_shape().ndims is not None:
    if self.total_count.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape_tensor()[0]
  # Flatten batch dims so logits has shape [B, k],
  # where B = reduce_prod(self.batch_shape_tensor()).
  draws = random_ops.multinomial(
      logits=array_ops.reshape(self.logits, [-1, k]),
      num_samples=n * n_draws,
      seed=seed)
  draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                          axis=-2)  # shape: [B, n, k]
  x = array_ops.transpose(x, perm=[1, 0, 2])
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  return array_ops.reshape(x, final_shape)
Example #9
Source File: operator_pd_identity.py From auto-alt-text-lambda-api with MIT License
def _check_shape(self, shape):
  """Check that the init arg `shape` defines a valid operator."""
  shape = ops.convert_to_tensor(shape, name="shape")
  if not self._verify_pd:
    return shape

  # Further checks are equivalent to verification that this is positive
  # definite.  Why?  Because the further checks simply check that this is a
  # square matrix, and combining the fact that this is square (and thus maps
  # a vector space R^k onto itself), with the behavior of .matmul(), this must
  # be the identity operator.
  rank = array_ops.size(shape)
  assert_matrix = check_ops.assert_less_equal(2, rank)
  with ops.control_dependencies([assert_matrix]):
    last_dim = array_ops.gather(shape, rank - 1)
    second_to_last_dim = array_ops.gather(shape, rank - 2)
    assert_square = check_ops.assert_equal(last_dim, second_to_last_dim)
  return control_flow_ops.with_dependencies([assert_matrix, assert_square],
                                            shape)
Example #10
Source File: numerics.py From auto-alt-text-lambda-api with MIT License
def verify_tensor_all_finite(t, msg, name=None):
  """Assert that the tensor does not contain any NaN's or Inf's.

  Args:
    t: Tensor to check.
    msg: Message to log on failure.
    name: A name for this operation (optional).

  Returns:
    Same tensor as `t`.
  """
  with ops.name_scope(name, "VerifyFinite", [t]) as name:
    t = ops.convert_to_tensor(t, name="t")
    with ops.colocate_with(t):
      verify_input = array_ops.check_numerics(t, message=msg)
      out = control_flow_ops.with_dependencies([verify_input], t)
  return out
Example #11
Source File: beta.py From lambda-packs with MIT License
def _mode(self):
  mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
  if self.allow_nan_stats:
    nan = array_ops.fill(
        self.batch_shape_tensor(),
        np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
        name="nan")
    is_defined = math_ops.logical_and(self.concentration1 > 1.,
                                      self.concentration0 > 1.)
    return array_ops.where(is_defined, mode, nan)
  return control_flow_ops.with_dependencies([
      check_ops.assert_less(
          array_ops.ones([], dtype=self.dtype),
          self.concentration1,
          message="Mode undefined for concentration1 <= 1."),
      check_ops.assert_less(
          array_ops.ones([], dtype=self.dtype),
          self.concentration0,
          message="Mode undefined for concentration0 <= 1.")
  ], mode)
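The closed form used above is mode = (concentration1 - 1) / (total_concentration - 2) for a Beta distribution; a numeric sketch through the public API, assuming a TensorFlow 1.x build that exposes tf.distributions.Beta:

import tensorflow as tf

beta = tf.distributions.Beta(concentration1=3., concentration0=2.)

with tf.Session() as sess:
  print(sess.run(beta.mode()))  # (3 - 1) / (3 + 2 - 2) = 0.6666...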
Example #12
Source File: student_t.py From lambda-packs with MIT License
def _mean(self):
  mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
                                   dtype=self.dtype)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        math_ops.greater(
            self.df,
            array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
        mean,
        array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies(
        [
            check_ops.assert_less(
                array_ops.ones([], dtype=self.dtype),
                self.df,
                message="mean not defined for components of df <= 1"),
        ],
        mean)
Example #13
Source File: dirichlet.py From lambda-packs with MIT License
def _mode(self):
  k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
  mode = (self.concentration - 1.) / (
      self.total_concentration[..., array_ops.newaxis] - k)
  if self.allow_nan_stats:
    nan = array_ops.fill(
        array_ops.shape(mode),
        np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
        name="nan")
    return array_ops.where(
        math_ops.reduce_all(self.concentration > 1., axis=-1),
        mode, nan)
  return control_flow_ops.with_dependencies([
      check_ops.assert_less(
          array_ops.ones([], self.dtype),
          self.concentration,
          message="Mode undefined when any concentration <= 1"),
  ], mode)
Example #14
Source File: gmm.py From lambda-packs with MIT License
def _model_builder(self):
  """Creates a model function."""

  def _model_fn(features, labels, mode):
    """Model function."""
    assert labels is None, labels
    (all_scores, model_predictions, losses, training_op) = gmm_ops.gmm(
        self._parse_tensor_or_dict(features),
        self._training_initial_clusters, self._num_clusters,
        self._random_seed, self._covariance_type, self._params)
    incr_step = state_ops.assign_add(variables.get_global_step(), 1)
    loss = math_ops.reduce_sum(losses)
    training_op = with_dependencies([training_op, incr_step], loss)
    predictions = {
        GMM.ALL_SCORES: all_scores[0],
        GMM.ASSIGNMENTS: model_predictions[0][0],
    }
    eval_metric_ops = {
        GMM.SCORES: _streaming_sum(loss),
    }
    return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
                                   eval_metric_ops=eval_metric_ops,
                                   loss=loss, train_op=training_op)

  return _model_fn
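The idiom with_dependencies([training_op, incr_step], loss) above makes fetching the loss also run the training op and the step increment. A minimal sketch of the same grouping, with illustrative names:

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

step = tf.Variable(0, trainable=False)
incr_step = tf.assign_add(step, 1)
loss = tf.constant(3.0)
fetch_loss = control_flow_ops.with_dependencies([incr_step], loss)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(fetch_loss))  # 3.0, and the increment ran as a side effect
  print(sess.run(step))        # 1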
Example #15
Source File: softplus_impl.py From lambda-packs with MIT License
def __init__(self,
             event_ndims=0,
             hinge_softness=None,
             validate_args=False,
             name="softplus"):
  with ops.name_scope(name, values=[hinge_softness]):
    if hinge_softness is not None:
      self._hinge_softness = ops.convert_to_tensor(
          hinge_softness, name="hinge_softness")
    else:
      self._hinge_softness = None
    if validate_args:
      nonzero_check = check_ops.assert_none_equal(
          ops.convert_to_tensor(
              0, dtype=self.hinge_softness.dtype),
          self.hinge_softness,
          message="hinge_softness must be non-zero")
      self._hinge_softness = control_flow_ops.with_dependencies(
          [nonzero_check], self.hinge_softness)

  super(Softplus, self).__init__(
      event_ndims=event_ndims,
      validate_args=validate_args,
      name=name)
Example #16
Source File: operator_pd_identity.py From lambda-packs with MIT License
def _check_shape(self, shape):
  """Check that the init arg `shape` defines a valid operator."""
  shape = ops.convert_to_tensor(shape, name="shape")
  if not self._verify_pd:
    return shape

  # Further checks are equivalent to verification that this is positive
  # definite.  Why?  Because the further checks simply check that this is a
  # square matrix, and combining the fact that this is square (and thus maps
  # a vector space R^k onto itself), with the behavior of .matmul(), this must
  # be the identity operator.
  rank = array_ops.size(shape)
  assert_matrix = check_ops.assert_less_equal(2, rank)
  with ops.control_dependencies([assert_matrix]):
    last_dim = array_ops.gather(shape, rank - 1)
    second_to_last_dim = array_ops.gather(shape, rank - 2)
    assert_square = check_ops.assert_equal(last_dim, second_to_last_dim)
  return control_flow_ops.with_dependencies([assert_matrix, assert_square],
                                            shape)
Example #17
Source File: inverse_gamma.py From lambda-packs with MIT License
def _variance(self):
  var = (math_ops.square(self.rate)
         / math_ops.square(self.concentration - 1.)
         / (self.concentration - 2.))
  if self.allow_nan_stats:
    nan = array_ops.fill(
        self.batch_shape_tensor(),
        np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
        name="nan")
    return array_ops.where(self.concentration > 2., var, nan)
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            constant_op.constant(2., dtype=self.dtype),
            self.concentration,
            message="variance undefined when any concentration <= 2"),
    ], var)
Example #18
Source File: operator_pd_cholesky.py From lambda-packs with MIT License
def _check_chol(self, chol):
  """Verify that `chol` is proper."""
  chol = ops.convert_to_tensor(chol, name="chol")
  if not self.verify_pd:
    return chol

  shape = array_ops.shape(chol)
  rank = array_ops.rank(chol)

  is_matrix = check_ops.assert_rank_at_least(chol, 2)
  is_square = check_ops.assert_equal(
      array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))

  deps = [is_matrix, is_square]
  diag = array_ops.matrix_diag_part(chol)
  deps.append(check_ops.assert_positive(diag))

  return control_flow_ops.with_dependencies(deps, chol)
Example #19
Source File: graph_builder.py From DOTA_models with Apache License 2.0
def AddEvaluation(self,
                  task_context,
                  batch_size,
                  evaluation_max_steps=300,
                  corpus_name='documents'):
  """Builds the forward network only without the training operation.

  Args:
    task_context: file path from which to read the task context.
    batch_size: batch size to request from reader op.
    evaluation_max_steps: max number of parsing actions during evaluation,
        only used in beam parsing.
    corpus_name: name of the task input to read parses from.

  Returns:
    Dictionary of named eval nodes.
  """
  def _AssignTransitionScores():
    return tf.assign(nodes['transition_scores'],
                     nodes['logits'], validate_shape=False)

  def _Pass():
    return tf.constant(-1.0)

  unused_evaluation_max_steps = evaluation_max_steps
  with tf.name_scope('evaluation'):
    nodes = self.evaluation
    nodes['transition_scores'] = self._AddVariable(
        [batch_size, self._num_actions], tf.float32, 'transition_scores',
        tf.constant_initializer(-1.0))
    nodes.update(self._AddDecodedReader(task_context, batch_size,
                                        nodes['transition_scores'],
                                        corpus_name))
    nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
                                    return_average=self._use_averaging))
    nodes['eval_metrics'] = cf.with_dependencies(
        [tf.cond(tf.greater(tf.size(nodes['logits']), 0),
                 _AssignTransitionScores, _Pass)],
        nodes['eval_metrics'], name='eval_metrics')
  return nodes
Example #20
Source File: cholesky_outer_product_impl.py From lambda-packs with MIT License
def _forward(self, x):
  if self._static_event_ndims == 0:
    return math_ops.square(x)
  if self.validate_args:
    is_matrix = check_ops.assert_rank_at_least(x, 2)
    shape = array_ops.shape(x)
    is_square = check_ops.assert_equal(shape[-2], shape[-1])
    x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
  # For safety, explicitly zero-out the upper triangular part.
  x = array_ops.matrix_band_part(x, -1, 0)
  return math_ops.matmul(x, x, adjoint_b=True)
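The forward map above is y = x @ x^T with x forced lower triangular; a tiny numeric sketch of the core product:

import tensorflow as tf

x = tf.constant([[2., 0.],
                 [1., 3.]])
y = tf.matmul(x, x, adjoint_b=True)

with tf.Session() as sess:
  print(sess.run(y))  # [[ 4.  2.] [ 2. 10.]]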
Example #21
Source File: operator_pd_diag.py From lambda-packs with MIT License
def _check_diag(self, diag):
  """Verify that `diag` is positive."""
  diag = ops.convert_to_tensor(diag, name="diag")
  if not self.verify_pd:
    return diag
  deps = [check_ops.assert_positive(diag)]
  return control_flow_ops.with_dependencies(deps, diag)
Example #22
Source File: softmax_centered_impl.py From lambda-packs with MIT License
def _inverse_event_shape_tensor(self, output_shape):
  ndims = array_ops.shape(output_shape)[0]
  if self.validate_args:
    # It is not possible for a negative shape so we need only check <= 1.
    is_one = check_ops.assert_equal(
        ndims, 1, message="event_ndims must be 1")
    ndims = control_flow_ops.with_dependencies([is_one], ndims)
  if self._static_event_ndims == 0:
    return ops.convert_to_tensor(
        [], dtype=dtypes.int32, name="output_shape")
  return array_ops.expand_dims(output_shape[0] - 1, dim=0)
Example #23
Source File: inverse_gamma.py From auto-alt-text-lambda-api with MIT License
def _cdf(self, x):
  x = control_flow_ops.with_dependencies(
      [check_ops.assert_positive(x)] if self.validate_args else [], x)
  # Note that igammac returns the upper regularized incomplete gamma
  # function Q(a, x), which is what we want for the CDF.
  return math_ops.igammac(self.alpha, self.beta / x)
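The identity behind the comment above: for InverseGamma(alpha, beta), CDF(x) = Q(alpha, beta / x), where Q is the upper regularized incomplete gamma function. A numeric sketch, using scipy only as an independent reference:

import tensorflow as tf
from scipy import stats

alpha, beta, x = 3., 2., 1.5
cdf_tf = tf.igammac(tf.constant(alpha), tf.constant(beta / x))

with tf.Session() as sess:
  print(sess.run(cdf_tf))                        # ~0.8494
print(stats.invgamma.cdf(x, alpha, scale=beta))  # same value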
Example #24
Source File: operator_pd_identity.py From auto-alt-text-lambda-api with MIT License
def _check_scale(self, scale, dtype):
  """Check that the init arg `scale` defines a valid operator."""
  if scale is None:
    return constant_op.constant(1.0, dtype=dtype)
  scale = ops.convert_to_tensor(scale, dtype=dtype, name="scale")
  if not self._verify_pd:
    return scale

  # Further check that this is a rank 0, positive tensor.
  scale = contrib_tensor_util.assert_scalar(scale)
  return control_flow_ops.with_dependencies(
      [check_ops.assert_positive(scale)], scale)
Example #25
Source File: affine_impl.py From lambda-packs with MIT License
def _maybe_validate_identity_multiplier(self, identity_multiplier,
                                        validate_args):
  """Check that the init arg `identity_multiplier` is valid."""
  if identity_multiplier is None or not validate_args:
    return identity_multiplier
  if validate_args:
    identity_multiplier = control_flow_ops.with_dependencies(
        [check_ops.assert_positive(identity_multiplier)],
        identity_multiplier)
  return identity_multiplier
Example #26
Source File: inverse_gamma.py From auto-alt-text-lambda-api with MIT License
def _log_prob(self, x):
  x = control_flow_ops.with_dependencies(
      [check_ops.assert_positive(x)] if self.validate_args else [], x)
  return (self.alpha * math_ops.log(self.beta) -
          math_ops.lgamma(self.alpha) -
          (self.alpha + 1.) * math_ops.log(x) -
          self.beta / x)
Example #27
Source File: power_transform_impl.py From lambda-packs with MIT License
def _maybe_assert_valid_x(self, x):
  if not self.validate_args or self.power == 0.:
    return x
  is_valid = check_ops.assert_non_negative(
      1. + self.power * x,
      message="Forward transformation input must be at least {}.".format(
          -1. / self.power))
  return control_flow_ops.with_dependencies([is_valid], x)
Example #28
Source File: power_transform_impl.py From lambda-packs with MIT License
def _maybe_assert_valid_y(self, y):
  if not self.validate_args:
    return y
  is_valid = check_ops.assert_positive(
      y, message="Inverse transformation input must be greater than 0.")
  return control_flow_ops.with_dependencies([is_valid], y)
Example #29
Source File: quantized_distribution.py From lambda-packs with MIT License
def _check_integer(self, value):
  with ops.name_scope("check_integer", values=[value]):
    value = ops.convert_to_tensor(value, name="value")
    if not self.validate_args:
      return value
    dependencies = [distribution_util.assert_integer_form(
        value, message="value has non-integer components.")]
    return control_flow_ops.with_dependencies(dependencies, value)
Example #30
Source File: relaxed_onehot_categorical.py From lambda-packs with MIT License
def _assert_valid_sample(self, x):
  if not self.validate_args:
    return x
  return control_flow_ops.with_dependencies([
      check_ops.assert_non_positive(x),
      distribution_util.assert_close(
          array_ops.zeros([], dtype=self.dtype),
          math_ops.reduce_logsumexp(x, axis=[-1])),
  ], x)
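The two assertions above say a valid sample is a vector of log-probabilities: every entry is <= 0 and logsumexp over the last axis is 0 (so exp(x) sums to 1). A tiny sketch of a sample that passes:

import tensorflow as tf

x = tf.log(tf.constant([0.2, 0.3, 0.5]))  # log-probabilities
lse = tf.reduce_logsumexp(x, axis=[-1])

with tf.Session() as sess:
  print(sess.run(lse))  # ~0.0, so both checks would pass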