Python tensorflow.python.framework.tensor_util.constant_value() Examples
The following are 30 code examples of tensorflow.python.framework.tensor_util.constant_value(), collected from open-source projects; the source file and project for each example are noted above it. You may also want to check out all available functions/classes of the module tensorflow.python.framework.tensor_util.
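Before the examples, a minimal sketch (my own, assuming the TF 1.x graph-mode environment these projects target) of what constant_value() does: it attempts to fold a tensor to its statically known value, returning a numpy value on success and None when the value is only known at run time.

import tensorflow as tf
from tensorflow.python.framework import tensor_util

static_t = tf.constant([1, 2, 3])
dynamic_t = tf.placeholder(tf.int32, shape=[3])

# A tf.constant's value is known at graph-construction time, so it folds.
print(tensor_util.constant_value(static_t))   # [1 2 3]  (a numpy array)

# A placeholder's value is only known when the session runs, so: None.
print(tensor_util.constant_value(dynamic_t))  # None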
Example #1
Source File: array_ops.py From deep_image_model with Apache License 2.0
def _FillShape(op):
  """Shape function for the Fill op.

  This op takes a vector of dimensions and a scalar, and produces a
  tensor with the given dimensions.

  Args:
    op: A Fill Operation.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the shapes or arguments are known to be invalid.
  """
  op.inputs[0].get_shape().assert_has_rank(1)
  op.inputs[1].get_shape().assert_has_rank(0)
  fill_dims = tensor_util.constant_value(op.inputs[0])
  if fill_dims is not None and any(d < 0 for d in fill_dims):
    raise ValueError("Fill dimensions must be >= 0")
  return [tensor_util.constant_value_as_shape(op.inputs[0])]
Example #2
Source File: beam_search_decoder_from_tensorflow.py From tensorflow_end2end_speech_recognition with MIT License
def _merge_batch_beams(self, t, s=None):
    """Merges the tensor from a batch of beams into a batch by beams.

    More exactly, t is a tensor of dimension [batch_size, beam_width, s].
    We reshape this into [batch_size*beam_width, s].

    Args:
        t: Tensor of dimension [batch_size, beam_width, s]
        s: (Possibly known) depth shape.

    Returns:
        A reshaped version of t with dimension [batch_size * beam_width, s].
    """
    if isinstance(s, ops.Tensor):
        s = tensor_shape.as_shape(tensor_util.constant_value(s))
    else:
        s = tensor_shape.TensorShape(s)
    t_shape = tf.shape(t)
    static_batch_size = tensor_util.constant_value(self._batch_size)
    batch_size_beam_width = (
        None if static_batch_size is None
        else static_batch_size * self._beam_width)
    reshaped_t = tf.reshape(
        t, tf.concat(
            ([self._batch_size * self._beam_width], t_shape[2:]), 0))
    reshaped_t.set_shape(
        (tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
    return reshaped_t
Example #3
Source File: utils.py From tensornets with MIT License
def smart_cond(pred, fn1, fn2, name=None):
  """Return either fn1() or fn2() based on the boolean predicate/value `pred`.

  If `pred` is a bool or has a constant value, `static_cond` is used;
  otherwise the branch is chosen at run time with `tf.cond`.

  Args:
    pred: A scalar determining whether to return the result of `fn1` or `fn2`.
    fn1: The callable to be performed if pred is true.
    fn2: The callable to be performed if pred is false.
    name: Optional name prefix when using tf.cond.

  Returns:
    Tensors returned by the call to either `fn1` or `fn2`.
  """
  pred_value = constant_value(pred)
  if pred_value is not None:
    # Use static_cond if pred has a constant value.
    return static_cond(pred_value, fn1, fn2)
  else:
    # Use dynamic cond otherwise.
    return control_flow_ops.cond(pred, fn1, fn2, name)
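A short usage sketch (my example, not from tensornets; `static_cond` is the module-level helper this function delegates to): with a Python bool the branch is chosen at graph-construction time, so only one callable ever runs; with a tensor predicate a `tf.cond` op is emitted instead.

# Hypothetical usage, assuming TF 1.x graph mode and the module's
# static_cond helper being available alongside smart_cond.
import tensorflow as tf

x = tf.constant(2.0)

# Python bool predicate: resolved statically, no cond op in the graph.
doubled = smart_cond(True, lambda: x * 2.0, lambda: x / 2.0)

# Tensor predicate: resolved at run time via tf.cond.
flag = tf.placeholder(tf.bool, shape=[])
routed = smart_cond(flag, lambda: x * 2.0, lambda: x / 2.0)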
Example #4
Source File: softmax_centered_impl.py From lambda-packs with MIT License
def __init__(self, event_ndims=0, validate_args=False,
             name="softmax_centered"):
  self._graph_parents = []
  self._name = name
  with self._name_scope("init", values=[event_ndims]):
    event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
    event_ndims = tensor_util.constant_value(event_ndims)
    if event_ndims is None or event_ndims not in [0, 1]:
      raise ValueError("`event_ndims` must be a TF constant which is 0 or 1")
  self._static_event_ndims = event_ndims
  super(SoftmaxCentered, self).__init__(
      event_ndims=event_ndims,
      validate_args=validate_args,
      name=name)
Example #5
Source File: bijector_impl.py From lambda-packs with MIT License
def _event_dims_tensor(self, sample):
  """Return a 1D `int32` tensor: `range(rank(sample))[-event_ndims:]`."""
  if self.event_ndims is None:
    raise ValueError("Jacobian cannot be computed with unknown event_ndims")
  static_event_ndims = tensor_util.constant_value(self.event_ndims)
  static_rank = sample.get_shape().ndims
  if static_event_ndims is not None and static_rank is not None:
    return ops.convert_to_tensor(
        static_rank + np.arange(-static_event_ndims, 0).astype(np.int32))
  if static_event_ndims is not None:
    event_range = np.arange(-static_event_ndims, 0).astype(np.int32)
  else:
    event_range = math_ops.range(-self.event_ndims, 0, dtype=dtypes.int32)
  if static_rank is not None:
    return event_range + static_rank
  else:
    return event_range + array_ops.rank(sample)
Example #6
Source File: utils.py From lambda-packs with MIT License
def constant_value(pred):
  """Return the bool value for `pred`, or None if `pred` had a dynamic value.

  Arguments:
    pred: A scalar, either a Python bool or a TensorFlow boolean variable
      or tensor.

  Returns:
    True or False if `pred` has a constant boolean value, None otherwise.

  Raises:
    TypeError: if `pred` is not a Variable, Tensor or bool.
  """
  if isinstance(pred, bool):
    pred_value = pred
  elif isinstance(pred, variables.Variable):
    pred_value = None
  elif isinstance(pred, ops.Tensor):
    pred_value = tensor_util.constant_value(pred)
  else:
    raise TypeError('`pred` must be a Tensor, a Variable, or a Python bool.')
  return pred_value
Example #7
Source File: beam_search_decoder.py From lambda-packs with MIT License
def _length_penalty(sequence_lengths, penalty_factor):
  """Calculates the length penalty. See https://arxiv.org/abs/1609.08144.

  Args:
    sequence_lengths: The sequence length of all hypotheses, a tensor
      of shape [beam_size, vocab_size].
    penalty_factor: A scalar that weights the length penalty.

  Returns:
    The length penalty factor, a tensor of shape [beam_size].
  """
  penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
  penalty_factor.set_shape(())  # penalty should be a scalar.
  static_penalty = tensor_util.constant_value(penalty_factor)
  if static_penalty is not None and static_penalty == 0:
    return 1.0
  return math_ops.div((5. + math_ops.to_float(sequence_lengths))
                      **penalty_factor, (5. + 1.)**penalty_factor)
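A worked instance of the formula (my numbers, not from the source), with penalty_factor = 0.6, the value used in the GNMT paper, and a hypothesis of length 7:

# ((5. + 7.) / (5. + 1.)) ** 0.6  ==  2.0 ** 0.6  ≈  1.516
# so a length-7 hypothesis has its log-probability divided by ~1.516,
# while the static_penalty == 0 shortcut above leaves scores unscaled.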
Example #8
Source File: utils.py From lambda-packs with MIT License
def smart_cond(pred, fn1, fn2, name=None):
  """Return either fn1() or fn2() based on the boolean predicate/value `pred`.

  If `pred` is a bool or has a constant value, `static_cond` is used;
  otherwise the branch is chosen at run time with `tf.cond`.

  Args:
    pred: A scalar determining whether to return the result of `fn1` or `fn2`.
    fn1: The callable to be performed if pred is true.
    fn2: The callable to be performed if pred is false.
    name: Optional name prefix when using tf.cond.

  Returns:
    Tensors returned by the call to either `fn1` or `fn2`.
  """
  pred_value = constant_value(pred)
  if pred_value is not None:
    # Use static_cond if pred has a constant value.
    return static_cond(pred_value, fn1, fn2)
  else:
    # Use dynamic cond otherwise.
    return control_flow_ops.cond(pred, fn1, fn2, name)
Example #9
Source File: distribution_util.py From lambda-packs with MIT License
def prefer_static_broadcast_shape(
    shape1, shape2, name="prefer_static_broadcast_shape"):
  """Convenience function which statically broadcasts shape when possible.

  Args:
    shape1: `1-D` integer `Tensor`.  Already converted to tensor!
    shape2: `1-D` integer `Tensor`.  Already converted to tensor!
    name: A string name to prepend to created ops.

  Returns:
    The broadcast shape, either as `TensorShape` (if broadcast can be done
    statically), or as a `Tensor`.
  """
  with ops.name_scope(name, values=[shape1, shape2]):
    if (tensor_util.constant_value(shape1) is not None and
        tensor_util.constant_value(shape2) is not None):
      return array_ops.broadcast_static_shape(
          tensor_shape.TensorShape(tensor_util.constant_value(shape1)),
          tensor_shape.TensorShape(tensor_util.constant_value(shape2)))
    return array_ops.broadcast_dynamic_shape(shape1, shape2)
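A brief usage sketch (my construction, assuming TF 1.x graph mode): when both shape vectors fold to constants the broadcast is computed statically and a TensorShape comes back; otherwise a Tensor is produced.

import tensorflow as tf

s1 = tf.constant([2, 1], dtype=tf.int32)
s2 = tf.constant([1, 3], dtype=tf.int32)
# Both fold statically, so this returns TensorShape([2, 3]).
static_result = prefer_static_broadcast_shape(s1, s2)

# A placeholder's value is unknown, so a Tensor is returned instead.
s3 = tf.placeholder(tf.int32, shape=[2])
dynamic_result = prefer_static_broadcast_shape(s1, s3)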
Example #10
Source File: shape.py From lambda-packs with MIT License
def _assert_non_negative_int32_scalar(self, x):
  """Helper which ensures that input is a non-negative, int32, scalar."""
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.base_dtype != dtypes.int32.base_dtype:
    raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
  x_value_static = tensor_util.constant_value(x)
  if x.get_shape().ndims is not None and x_value_static is not None:
    if x.get_shape().ndims != 0:
      raise ValueError("%s.ndims=%d is not 0 (scalar)" %
                       (x.name, x.get_shape().ndims))
    if x_value_static < 0:
      raise ValueError("%s.value=%d cannot be negative" %
                       (x.name, x_value_static))
    return x
  if self.validate_args:
    x = control_flow_ops.with_dependencies([
        check_ops.assert_rank(x, 0),
        check_ops.assert_non_negative(x)], x)
  return x
Example #11
Source File: utils.py From auto-alt-text-lambda-api with MIT License
def constant_value(pred):
  """Return the bool value for `pred`, or None if `pred` had a dynamic value.

  Arguments:
    pred: A scalar, either a Python bool or a TensorFlow boolean variable
      or tensor.

  Returns:
    True or False if `pred` has a constant boolean value, None otherwise.

  Raises:
    TypeError: if `pred` is not a Variable, Tensor or bool.
  """
  if isinstance(pred, bool):
    pred_value = pred
  elif isinstance(pred, variables.Variable):
    pred_value = None
  elif isinstance(pred, ops.Tensor):
    pred_value = tensor_util.constant_value(pred)
  else:
    raise TypeError('`pred` must be a Tensor, a Variable, or a Python bool.')
  return pred_value
Example #12
Source File: utils.py From auto-alt-text-lambda-api with MIT License
def smart_cond(pred, fn1, fn2, name=None):
  """Return either fn1() or fn2() based on the boolean predicate/value `pred`.

  If `pred` is a bool or has a constant value, `static_cond` is used;
  otherwise the branch is chosen at run time with `tf.cond`.

  Args:
    pred: A scalar determining whether to return the result of `fn1` or `fn2`.
    fn1: The callable to be performed if pred is true.
    fn2: The callable to be performed if pred is false.
    name: Optional name prefix when using tf.cond.

  Returns:
    Tensors returned by the call to either `fn1` or `fn2`.
  """
  pred_value = constant_value(pred)
  if pred_value is not None:
    # Use static_cond if pred has a constant value.
    return static_cond(pred_value, fn1, fn2)
  else:
    # Use dynamic cond otherwise.
    return control_flow_ops.cond(pred, fn1, fn2, name)
Example #13
Source File: shape.py From auto-alt-text-lambda-api with MIT License
def _assert_non_negative_int32_scalar(self, x):
  """Helper which ensures that input is a non-negative, int32, scalar."""
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.base_dtype != dtypes.int32.base_dtype:
    raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
  x_value_static = tensor_util.constant_value(x)
  if x.get_shape().ndims is not None and x_value_static is not None:
    if x.get_shape().ndims != 0:
      raise ValueError("%s.ndims=%d is not 0 (scalar)" %
                       (x.name, x.get_shape().ndims))
    if x_value_static < 0:
      raise ValueError("%s.value=%d cannot be negative" %
                       (x.name, x_value_static))
    return x
  if self.validate_args:
    x = control_flow_ops.with_dependencies([
        check_ops.assert_rank(x, 0),
        check_ops.assert_non_negative(x)], x)
  return x
Example #14
Source File: bijector.py From auto-alt-text-lambda-api with MIT License
def __init__(self, event_ndims=0, validate_args=False,
             name="softmax_centered"):
  self._graph_parents = []
  self._name = name
  with self._name_scope("init", values=[event_ndims]):
    event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
    event_ndims = tensor_util.constant_value(event_ndims)
    if event_ndims is None or event_ndims not in [0, 1]:
      raise ValueError("`event_ndims` must be a TF constant which is 0 or 1")
  self._static_event_ndims = event_ndims
  super(SoftmaxCentered, self).__init__(
      batch_ndims=0,  # We'll regard all non-event dims as sample dims.
      event_ndims=event_ndims,
      validate_args=validate_args,
      name=name)
Example #15
Source File: utils.py From tf-slim with Apache License 2.0
def smart_cond(pred, fn1, fn2, name=None):
  """Return either fn1() or fn2() based on the boolean predicate/value `pred`.

  If `pred` is a bool or has a constant value, `static_cond` is used;
  otherwise the branch is chosen at run time with `tf.cond`.

  Args:
    pred: A scalar determining whether to return the result of `fn1` or `fn2`.
    fn1: The callable to be performed if pred is true.
    fn2: The callable to be performed if pred is false.
    name: Optional name prefix when using tf.cond.

  Returns:
    Tensors returned by the call to either `fn1` or `fn2`.
  """
  pred_value = constant_value(pred)
  if pred_value is not None:
    # Use static_cond if pred has a constant value.
    return static_cond(pred_value, fn1, fn2)
  else:
    # Use dynamic cond otherwise.
    return control_flow_ops.cond(pred, fn1, fn2, name)
Example #16
Source File: transformed_distribution.py From auto-alt-text-lambda-api with MIT License
def _static_value(x):
  """Returns the static value of a `Tensor` or `None`."""
  return tensor_util.constant_value(ops.convert_to_tensor(x))
Example #17
Source File: shape.py From auto-alt-text-lambda-api with MIT License
def _introspect_ndims(self, ndims):
  """Helper to establish some properties of input ndims args."""
  if self._is_all_constant_helper(ndims):
    return (tensor_util.constant_value(ndims),
            tensor_util.constant_value(ndims) == 0)
  return None, math_ops.equal(ndims, 0)
Example #18
Source File: beam_search_decoder_from_tensorflow.py From tensorflow_end2end_speech_recognition with MIT License
def _tensor_gather_helper(gather_indices, gather_from, batch_size,
                          range_size, gather_shape):
    """Helper for gathering the right indices from the tensor.

    This works by reshaping gather_from to gather_shape (e.g. [-1]) and
    then gathering from that according to the gather_indices, which are
    offset by the right amounts in order to preserve the batch order.

    Args:
        gather_indices: The tensor indices that we use to gather.
        gather_from: The tensor that we are gathering from.
        batch_size: The input batch size.
        range_size: The number of values in each range. Likely equal to
            beam_width.
        gather_shape: What we should reshape gather_from to in order to
            preserve the correct values. An example is when gather_from is
            the attention from an AttentionWrapperState with shape
            [batch_size, beam_width, attention_size]. There, we want to
            preserve the attention_size elements, so gather_shape is
            [batch_size * beam_width, -1]. Then, upon reshape, we still
            have the attention_size as desired.

    Returns:
        output: Gathered tensor of shape
            tf.shape(gather_from)[:1+len(gather_shape)]
    """
    range_ = tf.expand_dims(tf.range(batch_size) * range_size, 1)
    gather_indices = tf.reshape(gather_indices + range_, [-1])
    output = tf.gather(
        tf.reshape(gather_from, gather_shape), gather_indices)
    final_shape = tf.shape(gather_from)[:1 + len(gather_shape)]
    static_batch_size = tensor_util.constant_value(batch_size)
    final_static_shape = (
        tensor_shape.TensorShape([static_batch_size]).concatenate(
            gather_from.shape[1:1 + len(gather_shape)]))
    output = tf.reshape(output, final_shape)
    output.set_shape(final_static_shape)
    return output
Example #19
Source File: shape.py From auto-alt-text-lambda-api with MIT License
def _is_all_constant_helper(self, *args):
  """Helper which returns True if all inputs are constant_value."""
  return all(tensor_util.constant_value(x) is not None for x in args)
Example #20
Source File: utils.py From tf-slim with Apache License 2.0
def constant_value(value_or_tensor_or_var, dtype=None):
  """Returns value if value_or_tensor_or_var has a constant value.

  Args:
    value_or_tensor_or_var: A value, a `Tensor` or a `Variable`.
    dtype: Optional `tf.dtype`, if set it would check it has the right dtype.

  Returns:
    The constant value or None if it is not constant.

  Raises:
    ValueError: if value_or_tensor_or_var is None or the tensor_variable has
      the wrong dtype.
  """
  if value_or_tensor_or_var is None:
    raise ValueError('value_or_tensor_or_var cannot be None')
  value = value_or_tensor_or_var
  if isinstance(value_or_tensor_or_var, (ops.Tensor, variables.Variable)):
    if dtype and value_or_tensor_or_var.dtype != dtype:
      raise ValueError('It has the wrong type %s instead of %s' %
                       (value_or_tensor_or_var.dtype, dtype))
    if isinstance(value_or_tensor_or_var, variables.Variable):
      value = None
    else:
      value = tensor_util.constant_value(value_or_tensor_or_var)
  return value
Example #21
Source File: shape.py From auto-alt-text-lambda-api with MIT License
def get_sample_ndims(self, x, name="get_sample_ndims"):
  """Returns number of dimensions corresponding to iid draws ("sample").

  Args:
    x: `Tensor`.
    name: `String`. The name to give this op.

  Returns:
    sample_ndims: `Tensor` (0D, `int32`).

  Raises:
    ValueError: if `sample_ndims` is calculated to be negative.
  """
  with self._name_scope(name, values=[x]):
    ndims = self.get_ndims(x, name=name)
    if self._is_all_constant_helper(ndims, self.batch_ndims,
                                    self.event_ndims):
      ndims = tensor_util.constant_value(ndims)
      sample_ndims = (ndims - self._batch_ndims_static -
                      self._event_ndims_static)
      if sample_ndims < 0:
        raise ValueError(
            "expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
            (self._batch_ndims_static, self._event_ndims_static, ndims))
      return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
    else:
      with ops.name_scope(name="sample_ndims"):
        sample_ndims = ndims - self.batch_ndims - self.event_ndims
        if self.validate_args:
          sample_ndims = control_flow_ops.with_dependencies(
              [check_ops.assert_non_negative(sample_ndims)], sample_ndims)
      return sample_ndims
Example #22
Source File: operator_pd_identity.py From auto-alt-text-lambda-api with MIT License
def __init__(self, shape, dtype, scale=None, verify_pd=True,
             name="OperatorPDIdentity"):
  """Initialize an `OperatorPDIdentity`.

  Args:
    shape: `int32` rank 1 `Tensor` of length at least 2, and with the last
      two entries equal (since this is a square matrix).
    dtype: Data type of the matrix that this operator represents.
    scale: floating point rank 0 `Tensor` representing a scalar to multiply
      the identity matrix by. This will default to a scale of 1. This will
      be converted to the dtype `dtype`.
    verify_pd: `Boolean`, if `True`, asserts are added to the initialization
      args to ensure they define this operator as a square (batch) matrix.
    name: Name to prepend to `Ops`.
  """
  # Grab static shape if available now.
  with ops.name_scope(name):
    with ops.name_scope("init", values=[shape, scale]):
      self._dtype = dtypes.as_dtype(dtype)
      self._verify_pd = verify_pd
      self._name = name

      # Store the static shape (if possible) right now before adding the
      # asserts, since the asserts prevent .constant_value from working.
      shape = ops.convert_to_tensor(shape, name="shape")
      self._get_shape = tensor_shape.TensorShape(
          tensor_util.constant_value(shape))
      self._shape_arg = self._check_shape(shape)
      self._scale = self._check_scale(scale, self._dtype)
Example #23
Source File: data_flow_ops.py From lambda-packs with MIT License
def dequeue_many(self, n, name=None):
  """Dequeues and concatenates `n` elements from this queue.

  This operation concatenates queue-element component tensors along the
  0th dimension to make a single component tensor.  All of the components
  in the dequeued tuple will have size `n` in the 0th dimension.

  If the queue is closed and there are less than `n` elements left, then
  an `OutOfRange` exception is raised.

  At runtime, this operation may raise an error if the queue is
  @{tf.QueueBase.close} before or during its execution.  If the queue is
  closed, the queue contains fewer than `n` elements, and there are no
  pending enqueue operations that can fulfill this request,
  `tf.errors.OutOfRangeError` will be raised.  If the session is
  @{tf.Session.close}, `tf.errors.CancelledError` will be raised.

  Args:
    n: A scalar `Tensor` containing the number of elements to dequeue.
    name: A name for the operation (optional).

  Returns:
    The tuple of concatenated tensors that was dequeued.
  """
  if name is None:
    name = "%s_DequeueMany" % self._name

  ret = gen_data_flow_ops._queue_dequeue_many_v2(
      self._queue_ref, n=n, component_types=self._dtypes, name=name)

  # NOTE(mrry): Not using a shape function because we need access to
  # the Queue object.
  op = ret[0].op
  batch_dim = tensor_shape.Dimension(tensor_util.constant_value(op.inputs[1]))
  for output, shape in zip(op.values(), self._shapes):
    output.set_shape(tensor_shape.TensorShape([batch_dim]).concatenate(shape))

  return self._dequeue_return_value(ret)
Example #24
Source File: sequence_queueing_state_saver.py From auto-alt-text-lambda-api with MIT License
def _check_rank(value, expected_rank):
  """Check the rank of Tensor `value`, via shape inference and assertions.

  Args:
    value: A Tensor, possibly with associated shape information.
    expected_rank: int32 scalar (optionally a `Tensor`).

  Returns:
    new_value: A Tensor matching `value`.  Accessing this tensor tests
      assertions on its rank.  If expected_rank is not a `Tensor`, then
      new_value's shape's rank has been set.

  Raises:
    ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
      is known and is not equal to `expected_rank`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.equal(expected_rank, array_ops.rank(value)), [
              string_ops.string_join([
                  "Rank of tensor %s should be: " % value.name,
                  string_ops.as_string(expected_rank), ", shape received:"
              ]), array_ops.shape(value)
          ])
  ]):
    new_value = array_ops.identity(value, name="rank_checked")
    if isinstance(expected_rank, ops.Tensor):
      expected_rank_value = tensor_util.constant_value(expected_rank)
      if expected_rank_value is not None:
        expected_rank = int(expected_rank_value)
    if not isinstance(expected_rank, ops.Tensor):
      try:
        new_value.set_shape(new_value.get_shape().with_rank(expected_rank))
      except ValueError as e:
        raise ValueError("Rank check failed for %s: %s" %
                         (value.name, str(e)))
  return new_value
Example #25
Source File: linear.py From auto-alt-text-lambda-api with MIT License
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
                     columns_to_variables):
  """Adds a fake bias feature column filled with all 1s."""
  # TODO(b/31008490): Move definition to a common constants place.
  bias_column_name = "tf_virtual_bias_column"
  # Use == rather than `is`: string identity is not a reliable comparison.
  if any(col.name == bias_column_name for col in feature_columns):
    raise ValueError("%s is a reserved column name." % bias_column_name)
  if not feature_columns:
    raise ValueError("feature_columns can't be empty.")

  # Loop through input tensors until we can figure out batch_size.
  batch_size = None
  for column in columns_to_tensors.values():
    if isinstance(column, tuple):
      column = column[0]
    if isinstance(column, sparse_tensor.SparseTensor):
      shape = tensor_util.constant_value(column.dense_shape)
      if shape is not None:
        batch_size = shape[0]
        break
    else:
      batch_size = array_ops.shape(column)[0]
      break
  if batch_size is None:
    raise ValueError("Could not infer batch size from input features.")

  bias_column = layers.real_valued_column(bias_column_name)
  columns_to_tensors[bias_column] = array_ops.ones([batch_size, 1],
                                                   dtype=dtypes.float32)
  columns_to_variables[bias_column] = [bias_variable]
Example #26
Source File: utils.py From auto-alt-text-lambda-api with MIT License
def smart_cond(pred, fn1, fn2, name=None):
  """Return either `fn1()` or `fn2()` based on the boolean predicate `pred`.

  If `pred` is a bool or has a constant value, we return either `fn1()`
  or `fn2()`, otherwise we use `tf.cond` to dynamically route to both.

  Arguments:
    pred: A scalar determining whether to return the result of `fn1` or `fn2`.
    fn1: The callable to be performed if pred is true.
    fn2: The callable to be performed if pred is false.
    name: Optional name prefix when using `tf.cond`.

  Returns:
    Tensors returned by the call to either `fn1` or `fn2`.

  Raises:
    TypeError: if `fn1` or `fn2` is not callable.
  """
  if not callable(fn1):
    raise TypeError('`fn1` must be callable.')
  if not callable(fn2):
    raise TypeError('`fn2` must be callable.')
  pred_value = constant_value(pred)
  if pred_value is not None:
    if pred_value:
      return fn1()
    else:
      return fn2()
  else:
    return control_flow_ops.cond(pred, fn1, fn2, name)
Example #27
Source File: gradients_impl.py From auto-alt-text-lambda-api with MIT License
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
  """Converts an IndexedSlices object `value` to a Tensor.

  NOTE(mrry): This function is potentially expensive.

  Args:
    value: An ops.IndexedSlices object.
    dtype: The dtype of the Tensor to be returned.
    name: Optional name to use for the returned Tensor.
    as_ref: True if a ref is requested.

  Returns:
    A dense Tensor representing the values in the given IndexedSlices.

  Raises:
    ValueError: If the IndexedSlices does not have the same dtype.
  """
  _ = as_ref
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s"
        % (dtype.name, value.dtype.name))
  if value.dense_shape is None:
    raise ValueError(
        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
        % str(value))
  # TODO(mrry): Consider adding static shape information to
  # IndexedSlices, to avoid using numpy here.
  dense_shape_value = tensor_util.constant_value(value.dense_shape)
  if dense_shape_value is not None:
    num_elements = np.prod(dense_shape_value)
    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
      warnings.warn(
          "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
          "This may consume a large amount of memory." % num_elements)
  else:
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
        "This may consume a large amount of memory.")
  return math_ops.unsorted_segment_sum(
      value.values, value.indices, value.dense_shape[0], name=name)
Example #28
Source File: data_flow_ops.py From auto-alt-text-lambda-api with MIT License
def dequeue_many(self, n, name=None):
  """Dequeues and concatenates `n` elements from this queue.

  This operation concatenates queue-element component tensors along the
  0th dimension to make a single component tensor.  All of the components
  in the dequeued tuple will have size `n` in the 0th dimension.

  If the queue is closed and there are less than `n` elements left, then
  an `OutOfRange` exception is raised.

  At runtime, this operation may raise an error if the queue is
  [closed](#QueueBase.close) before or during its execution.  If the queue
  is closed, the queue contains fewer than `n` elements, and there are no
  pending enqueue operations that can fulfill this request,
  `tf.errors.OutOfRangeError` will be raised.  If the session is
  [closed](../../api_docs/python/client.md#Session.close),
  `tf.errors.CancelledError` will be raised.

  Args:
    n: A scalar `Tensor` containing the number of elements to dequeue.
    name: A name for the operation (optional).

  Returns:
    The tuple of concatenated tensors that was dequeued.
  """
  if name is None:
    name = "%s_DequeueMany" % self._name

  ret = gen_data_flow_ops._queue_dequeue_many_v2(
      self._queue_ref, n=n, component_types=self._dtypes, name=name)

  # NOTE(mrry): Not using a shape function because we need access to
  # the Queue object.
  op = ret[0].op
  batch_dim = tensor_shape.Dimension(tensor_util.constant_value(op.inputs[1]))
  for output, shape in zip(op.values(), self._shapes):
    output.set_shape(tensor_shape.TensorShape([batch_dim]).concatenate(shape))

  return self._dequeue_return_value(ret)
Example #29
Source File: tensor_array_ops.py From auto-alt-text-lambda-api with MIT License
def split(self, value, lengths, name=None):
  """Split the values of a `Tensor` into the TensorArray.

  Args:
    value: (N+1)-D.  Tensor of type `dtype`.  The Tensor to split.
    lengths: 1-D.  int32 vector with the lengths to use when splitting
      `value` along its first dimension.
    name: A name for the operation (optional).

  Returns:
    A new TensorArray object with flow that ensures the split occurs.
    Use this object for all subsequent operations.

  Raises:
    ValueError: if the shape inference fails.
  """
  with ops.name_scope(name, "TensorArraySplit",
                      [self._handle, value, lengths]):
    value = ops.convert_to_tensor(value, name="value")
    _maybe_set_device(self._handle.op, value)
    lengths_64 = math_ops.to_int64(lengths)
    with ops.colocate_with(self._handle):
      flow_out = gen_data_flow_ops._tensor_array_split_v3(
          handle=self._handle, value=value, lengths=lengths_64,
          flow_in=self._flow, name=name)
    ta = TensorArray(dtype=self._dtype, handle=self._handle, flow=flow_out)
    ta._infer_shape = self._infer_shape
    ta._element_shape = self._element_shape
    if ta._infer_shape:
      val_shape = flow_out.op.inputs[1].get_shape()
      clengths = tensor_util.constant_value(flow_out.op.inputs[2])
      element_shape = tensor_shape.unknown_shape()
      if val_shape.dims is not None:
        if clengths is not None and clengths.max() == clengths.min():
          element_shape = tensor_shape.TensorShape([clengths[0]] +
                                                   val_shape.dims[1:])
      ta._merge_element_shape(element_shape)
    return ta
Example #30
Source File: power_transform_impl.py From lambda-packs with MIT License
def __init__(self, power=0., event_ndims=0, validate_args=False,
             name="power_transform"):
  """Instantiates the `PowerTransform` bijector.

  Args:
    power: Python `float` scalar indicating the transform power, i.e.,
      `Y = g(X) = (1 + X * c)**(1 / c)` where `c` is the `power`.
    event_ndims: Python scalar indicating the number of dimensions associated
      with a particular draw from the distribution.
    validate_args: Python `bool` indicating whether arguments should be
      checked for correctness.
    name: Python `str` name given to ops managed by this object.

  Raises:
    ValueError: if `power < 0` or is not known statically.
  """
  self._graph_parents = []
  self._name = name
  self._validate_args = validate_args
  with self._name_scope("init", values=[power]):
    power = tensor_util.constant_value(
        ops.convert_to_tensor(power, name="power"))
  if power is None or power < 0:
    raise ValueError("`power` must be a non-negative TF constant.")
  self._power = power
  super(PowerTransform, self).__init__(
      event_ndims=event_ndims,
      validate_args=validate_args,
      name=name)
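A usage sketch (my example; the import path assumes the TF 1.x contrib layout these files come from). Note the boundary case the check above permits: as `power` c approaches 0, `(1 + X*c)**(1/c)` approaches `exp(X)`, so `power=0.` acts as the exponential map.

# Hypothetical usage, assuming TF 1.x with tf.contrib.distributions.
from tensorflow.contrib.distributions import bijectors

exp_like = bijectors.PowerTransform(power=0.)   # Y = exp(X), the c -> 0 limit
half_pow = bijectors.PowerTransform(power=0.5)  # Y = (1 + X/2)**2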