Python tensorflow.python.framework.tensor_shape.TensorShape() Examples
The following are 30 code examples of tensorflow.python.framework.tensor_shape.TensorShape(), collected from open-source projects. The originating project and source file are noted above each example. You may also want to check out all other available functions and classes of the tensorflow.python.framework.tensor_shape module.
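Before the examples, here is a minimal standalone sketch (not from any of the projects below) of the core TensorShape behaviors these snippets rely on: construction from lists, partially known dimensions, and compatibility checks.

from tensorflow.python.framework import tensor_shape

s = tensor_shape.TensorShape([2, None, 3])   # rank 3, second dim unknown
print(s.ndims)                # 3
print(s.as_list())            # [2, None, 3]
print(s.is_fully_defined())   # False -- one dimension is unknown

unknown = tensor_shape.TensorShape(None)     # completely unknown shape
print(unknown.ndims)          # None

# Compatibility: unknown dimensions match anything.
print(s.is_compatible_with([2, 7, 3]))       # True
print(s.is_compatible_with([2, 7, 4]))       # False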
Example #1
Source File: tensor_array_ops.py From lambda-packs with MIT License

def _merge_element_shape(self, shape):
  """Changes the element shape of the array given a shape to merge with.

  Args:
    shape: A `TensorShape` object to merge with.

  Raises:
    ValueError: if the provided shape is incompatible with the current
      element shape of the `TensorArray`.
  """
  if self._element_shape:
    if not shape.is_compatible_with(self._element_shape[0]):
      raise ValueError(
          "Inconsistent shapes: saw %s but expected %s "
          "(and infer_shape=True)" % (shape, self._element_shape[0]))
    self._element_shape[0] = self._element_shape[0].merge_with(shape)
  else:
    self._element_shape.append(shape)
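The compatibility and merge calls above are plain TensorShape operations; a small standalone sketch of what merge_with does:

from tensorflow.python.framework import tensor_shape

a = tensor_shape.TensorShape([None, 3])
b = tensor_shape.TensorShape([5, None])
print(a.is_compatible_with(b))    # True -- unknown dims act as wildcards
print(a.merge_with(b).as_list())  # [5, 3] -- merging fills in the unknowns

# Merging incompatible shapes raises, which is what produces the
# "Inconsistent shapes" error above:
# a.merge_with(tensor_shape.TensorShape([5, 4]))  # ValueError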
Example #2
Source File: tensor_util.py From lambda-packs with MIT License

def ShapeEquals(tensor_proto, shape):
  """Returns True if "tensor_proto" has the given "shape".

  Args:
    tensor_proto: A TensorProto.
    shape: A tensor shape, expressed as a TensorShape, list, or tuple.

  Returns:
    True if "tensor_proto" has the given "shape", otherwise False.

  Raises:
    TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
      TensorShape, list, or tuple.
  """
  if not isinstance(tensor_proto, tensor_pb2.TensorProto):
    raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
  if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
    shape = [d.size for d in shape.dim]
  elif not isinstance(shape, (list, tuple)):
    raise TypeError("shape is not a list or tuple")
  tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
  return all(x == y for x, y in zip(tensor_shape_list, shape))
Example #3
Source File: pooling.py From lambda-packs with MIT License

def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_first':
    len_dim1 = input_shape[2]
    len_dim2 = input_shape[3]
    len_dim3 = input_shape[4]
  else:
    len_dim1 = input_shape[1]
    len_dim2 = input_shape[2]
    len_dim3 = input_shape[3]
  len_dim1 = utils.conv_output_length(len_dim1, self.pool_size[0],
                                      self.padding, self.strides[0])
  len_dim2 = utils.conv_output_length(len_dim2, self.pool_size[1],
                                      self.padding, self.strides[1])
  len_dim3 = utils.conv_output_length(len_dim3, self.pool_size[2],
                                      self.padding, self.strides[2])
  if self.data_format == 'channels_first':
    return tensor_shape.TensorShape(
        [input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3])
  else:
    return tensor_shape.TensorShape(
        [input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]])
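conv_output_length is a Keras-style utility; below is a hypothetical re-derivation of the arithmetic it performs for the two common padding modes (my own sketch matching the standard convolution-length formulas, not the exact library source):

def conv_output_length_sketch(input_length, filter_size, padding, stride):
  # Hypothetical reimplementation for illustration only.
  if input_length is None:
    return None
  if padding == 'same':
    length = input_length
  elif padding == 'valid':
    length = input_length - filter_size + 1
  else:
    raise ValueError('unsupported padding: %r' % padding)
  return (length + stride - 1) // stride  # ceiling division by the stride

print(conv_output_length_sketch(224, 3, 'same', 2))   # 112
print(conv_output_length_sketch(224, 3, 'valid', 1))  # 222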
Example #4
Source File: tf_helpers.py From Counterfactual-StoryRW with MIT License

def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
             sample_ids_shape=None, sample_ids_dtype=None):
  """Initializer.

  Args:
    initialize_fn: callable that returns `(finished, next_inputs)` for the
      first iteration.
    sample_fn: callable that takes `(time, outputs, state)` and emits tensor
      `sample_ids`.
    next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
      and emits `(finished, next_inputs, next_state)`.
    sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
      `int32`, the shape of each value in the `sample_ids` batch. Defaults
      to a scalar.
    sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to
      int32.
  """
  self._initialize_fn = initialize_fn
  self._sample_fn = sample_fn
  self._next_inputs_fn = next_inputs_fn
  self._batch_size = None
  self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
  self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
Example #5
Source File: convolutional.py From lambda-packs with MIT License

def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  if self.data_format == 'channels_first':
    rows = input_shape[2]
    cols = input_shape[3]
  else:
    rows = input_shape[1]
    cols = input_shape[2]
  rows = utils.conv_output_length(rows, self.kernel_size[0],
                                  self.padding, self.strides[0])
  cols = utils.conv_output_length(cols, self.kernel_size[1],
                                  self.padding, self.strides[1])
  if self.data_format == 'channels_first':
    return tensor_shape.TensorShape(
        [input_shape[0], self.filters, rows, cols])
  else:
    return tensor_shape.TensorShape(
        [input_shape[0], rows, cols, self.filters])
Example #6
Source File: convolutional.py From lambda-packs with MIT License

def _compute_output_shape(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  output_shape = list(input_shape)
  if self.data_format == 'channels_first':
    c_axis, h_axis, w_axis = 1, 2, 3
  else:
    c_axis, h_axis, w_axis = 3, 1, 2
  kernel_h, kernel_w = self.kernel_size
  stride_h, stride_w = self.strides
  output_shape[c_axis] = self.filters
  output_shape[h_axis] = utils.deconv_output_length(
      output_shape[h_axis], kernel_h, self.padding, stride_h)
  output_shape[w_axis] = utils.deconv_output_length(
      output_shape[w_axis], kernel_w, self.padding, stride_w)
  return tensor_shape.TensorShape(output_shape)
Example #7
Source File: tf_helpers.py From Counterfactual-StoryRW with MIT License

def __init__(self, sample_fn, sample_shape, sample_dtype, start_inputs,
             end_fn, next_inputs_fn=None):
  """Initializer.

  Args:
    sample_fn: A callable that takes `outputs` and emits tensor
      `sample_ids`.
    sample_shape: Either a list of integers, or a 1-D Tensor of type
      `int32`, the shape of each sample in the batch returned by
      `sample_fn`.
    sample_dtype: the dtype of the sample returned by `sample_fn`.
    start_inputs: The initial batch of inputs.
    end_fn: A callable that takes `sample_ids` and emits a `bool` vector
      shaped `[batch_size]` indicating whether each sample is an end token.
    next_inputs_fn: (Optional) A callable that takes `sample_ids` and
      returns the next batch of inputs. If not provided, `sample_ids` is
      used as the next batch of inputs.
  """
  self._sample_fn = sample_fn
  self._end_fn = end_fn
  self._sample_shape = tensor_shape.TensorShape(sample_shape)
  self._sample_dtype = sample_dtype
  self._next_inputs_fn = next_inputs_fn
  self._batch_size = array_ops.shape(start_inputs)[0]
  self._start_inputs = ops.convert_to_tensor(
      start_inputs, name="start_inputs")
Example #8
Source File: constant_op.py From lambda-packs with MIT License

def _tensor_shape_tensor_conversion_function(s, dtype=None, name=None,
                                             as_ref=False):
  _ = as_ref
  if not s.is_fully_defined():
    raise ValueError(
        "Cannot convert a partially known TensorShape to a Tensor: %s" % s)
  s_list = s.as_list()
  int64_value = 0
  for dim in s_list:
    if dim >= 2**31:
      int64_value = dim
      break
  if dtype is not None:
    if dtype not in (dtypes.int32, dtypes.int64):
      raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
    if dtype == dtypes.int32 and int64_value:
      raise ValueError("Cannot convert a TensorShape to dtype int32; "
                       "a dimension is too large (%s)" % int64_value)
  else:
    dtype = dtypes.int64 if int64_value else dtypes.int32
  if name is None:
    name = "shape_as_tensor"
  return constant(s_list, dtype=dtype, name=name)
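This converter is what fires when a fully defined TensorShape is passed where a Tensor is expected. A quick sketch of the observable behavior, assuming a TF 1.x graph-mode session:

import tensorflow as tf
from tensorflow.python.framework import ops, tensor_shape

shape = tensor_shape.TensorShape([2, 3, 4])
t = ops.convert_to_tensor(shape)   # dispatched to the converter above
print(t.dtype)                     # int32, since every dim fits in 32 bits
with tf.Session() as sess:
  print(sess.run(t))               # [2 3 4]

# A partially known shape cannot be converted:
# ops.convert_to_tensor(tensor_shape.TensorShape([2, None]))  # ValueError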
Example #9
Source File: utils.py From tensornets with MIT License

def channel_dimension(shape, data_format, min_rank=1):
  """Returns the channel dimension of shape, while checking it has min_rank.

  Args:
    shape: A `TensorShape`.
    data_format: `channels_first` or `channels_last`.
    min_rank: Integer, minimum rank of shape.

  Returns:
    The value of the channel dimension.

  Raises:
    ValueError: if inputs don't have at least min_rank dimensions, or if the
      channel dimension value is not defined.
  """
  return _get_dimension(shape, 1 if data_format == 'channels_first' else -1,
                        min_rank=min_rank)
Example #10
Source File: topology.py From lambda-packs with MIT License

def _compute_output_shape(self, input_shape):
  """Computes the output shape of the layer.

  Assumes that the layer will be built to match the input shape provided.

  Arguments:
    input_shape: Shape tuple (tuple of integers) or list of shape tuples
      (one per output tensor of the layer). Shape tuples can include None
      for free dimensions, instead of an integer.

  Returns:
    An output shape tuple.
  """
  if isinstance(input_shape, list):
    return [tensor_shape.TensorShape(shape) for shape in input_shape]
  else:
    return tensor_shape.TensorShape(input_shape)
Example #11
Source File: dynamic_decoder.py From tensorflow_end2end_speech_recognition with MIT License

def _transpose_batch_time(x):
  """Transpose the batch and time dimensions of a Tensor.

  Retains as much of the static shape information as possible.

  Args:
    x: A tensor of rank 2 or higher.

  Returns:
    x transposed along the first two dimensions.

  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s"
        % (x, x_static_shape))
  x_rank = array_ops.rank(x)
  x_t = array_ops.transpose(
      x, array_ops.concat(([1, 0], math_ops.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tensor_shape.TensorShape([
          x_static_shape[1].value, x_static_shape[0].value
      ]).concatenate(x_static_shape[2:]))
  return x_t
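The set_shape call is pure TensorShape bookkeeping: swap the first two static dimensions and keep the rest. The same computation in isolation (my own illustration):

from tensorflow.python.framework import tensor_shape

x_static_shape = tensor_shape.TensorShape([None, 50, 256])  # [batch, time, units]
transposed = tensor_shape.TensorShape(
    [x_static_shape[1].value, x_static_shape[0].value]
).concatenate(x_static_shape[2:])
print(transposed.as_list())  # [50, None, 256] -- batch and time swapped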
Example #12
Source File: beam_search_decoder_from_tensorflow.py From tensorflow_end2end_speech_recognition with MIT License

def _tile_batch(t, multiplier):
  """Core single-tensor implementation of tile_batch."""
  t = ops.convert_to_tensor(t, name="t")
  shape_t = tf.shape(t)
  if t.shape.ndims is None or t.shape.ndims < 1:
    raise ValueError("t must have statically known rank")
  tiling = [1] * (t.shape.ndims + 1)
  tiling[1] = multiplier
  tiled_static_batch_size = (
      t.shape[0].value * multiplier if t.shape[0].value is not None else None)
  tiled = tf.tile(tf.expand_dims(t, 1), tiling)
  tiled = tf.reshape(
      tiled, tf.concat(([shape_t[0] * multiplier], shape_t[1:]), 0))
  tiled.set_shape(
      tensor_shape.TensorShape(
          [tiled_static_batch_size]).concatenate(t.shape[1:]))
  return tiled
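For beam search this turns a [batch, ...] tensor into a [batch * beam_width, ...] tensor in which each row is repeated beam_width times. The static-shape half of that computation in isolation (my own sketch):

from tensorflow.python.framework import tensor_shape

batch, beam_width = 8, 4
t_shape = tensor_shape.TensorShape([batch, None, 256])
tiled_static_batch_size = (
    t_shape[0].value * beam_width if t_shape[0].value is not None else None)
tiled_shape = tensor_shape.TensorShape(
    [tiled_static_batch_size]).concatenate(t_shape[1:])
print(tiled_shape.as_list())  # [32, None, 256]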
Example #13
Source File: beam_search_decoder_from_tensorflow.py From tensorflow_end2end_speech_recognition with MIT License

def _rnn_output_size(self):
  size = self._cell.output_size
  if self._output_layer is None:
    return size
  else:
    # To use layer's compute_output_shape, we need to convert the RNNCell's
    # output_size entries into shapes with an unknown batch size. We then
    # pass this through the layer's compute_output_shape and read off all
    # but the first (batch) dimensions to get the output size of the rnn
    # with the layer applied to the top.
    output_shape_with_unknown_batch = nest.map_structure(
        lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
    layer_output_shape = self._output_layer._compute_output_shape(  # pylint: disable=protected-access
        output_shape_with_unknown_batch)
    return nest.map_structure(lambda s: s[1:], layer_output_shape)
Example #14
Source File: beam_search_decoder_from_tensorflow.py From tensorflow_end2end_speech_recognition with MIT License

def _maybe_split_batch_beams(self, t, s):
  """Maybe splits the tensor from a batch by beams into a batch of beams.

  We do this so that we can use nest and not run into problems with shapes.

  Args:
    t: Tensor of dimension [batch_size*beam_width, s]
    s: Tensor, Python int, or TensorShape.

  Returns:
    Either a reshaped version of t with dimension [batch_size, beam_width, s]
    if t's first dimension is of size batch_size*beam_width, or t if not.

  Raises:
    TypeError: If t is an instance of TensorArray.
    ValueError: If the rank of t is not statically known.
  """
  _check_maybe(t)
  if t.shape.ndims >= 1:
    return self._split_batch_beams(t, s)
  else:
    return t
Example #15
Source File: variable_mgr_util.py From benchmarks with Apache License 2.0

def __call__(self, getter, *args, **kwargs):
  size = tf.TensorShape(kwargs['shape']).num_elements()
  if size < self.small_variable_size_threshold:
    device_name = self.device_for_small_variables
  else:
    device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
    device_name = self.devices[device_index]
    self.sizes[device_index] += size
  kwargs['caching_device'] = device_name
  var = getter(*args, **kwargs)
  return var


# To be used with custom_getter on tf.get_variable. Ensures the created
# variable is in the LOCAL_VARIABLES and not the GLOBAL_VARIABLES collection.
Example #16
Source File: data_flow_ops.py From lambda-packs with MIT License

def __init__(self, dtype, shape, accumulator_ref):
  """Creates a new ConditionalAccumulator.

  Args:
    dtype: Datatype of the accumulated gradients.
    shape: Shape of the accumulated gradients.
    accumulator_ref: A handle to the conditional accumulator, created by
      subclasses.
  """
  self._dtype = dtype
  if shape is not None:
    self._shape = tensor_shape.TensorShape(shape)
  else:
    self._shape = tensor_shape.unknown_shape()
  self._accumulator_ref = accumulator_ref
  self._name = self._accumulator_ref.op.name.split("/")[-1]
Example #17
Source File: resource_variable_ops.py From lambda-packs with MIT License

def _GatherGrad(op, grad):
  """Gradient for gather op."""
  # Build appropriately shaped IndexedSlices.
  # Walk graph back until the original handle is found.
  # TODO(apassos): more robust way of getting the shape.
  handle = op.inputs[0]
  while handle.op.type != "VarHandleOp":
    handle = handle.op.inputs[0]
  params_shape = ops.convert_to_tensor(
      tensor_shape.TensorShape(handle.op.get_attr("shape")))
  indices = op.inputs[1]
  size = array_ops.expand_dims(array_ops.size(indices), 0)
  values_shape = array_ops.concat([size, params_shape[1:]], 0)
  values = array_ops.reshape(grad, values_shape)
  indices = array_ops.reshape(indices, size)
  return [ops.IndexedSlices(values, indices, params_shape), None]
Example #18
Source File: core.py From lambda-packs with MIT License

def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape)
  if input_shape[-1].value is None:
    raise ValueError('The last dimension of the inputs to `Dense` '
                     'should be defined. Found `None`.')
  self.input_spec = base.InputSpec(min_ndim=2,
                                   axes={-1: input_shape[-1].value})
  self.kernel = self.add_variable('kernel',
                                  shape=[input_shape[-1].value, self.units],
                                  initializer=self.kernel_initializer,
                                  regularizer=self.kernel_regularizer,
                                  dtype=self.dtype,
                                  trainable=True)
  if self.use_bias:
    self.bias = self.add_variable('bias',
                                  shape=[self.units],
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  dtype=self.dtype,
                                  trainable=True)
  else:
    self.bias = None
  self.built = True
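Why build insists on a defined last dimension: the kernel's shape is derived from it statically, before any data flows. A small sketch of the check:

from tensorflow.python.framework import tensor_shape

input_shape = tensor_shape.TensorShape([None, 128])  # unknown batch is fine
print(input_shape[-1].value)   # 128 -- enough to size a [128, units] kernel

bad_shape = tensor_shape.TensorShape([None, None])
print(bad_shape[-1].value)     # None -- this is what triggers the ValueError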
Example #19
Source File: custom_decoder.py From linguistic-style-transfer with Apache License 2.0

def _rnn_output_size(self):
  size = self._cell.output_size
  if self._output_layer is None:
    return size
  else:
    # To use the layer's compute_output_shape, we need to convert the
    # RNNCell's output_size entries into shapes with an unknown batch size.
    # We then pass this through the layer's compute_output_shape and read
    # off all but the first (batch) dimensions to get the output size of
    # the rnn with the layer applied to the top.
    output_shape_with_unknown_batch = nest.map_structure(
        lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
    layer_output_shape = self._output_layer.compute_output_shape(
        output_shape_with_unknown_batch)
    return nest.map_structure(lambda s: s[1:], layer_output_shape)
Example #20
Source File: beam_search_decoder_from_tensorflow.py From tensorflow_end2end_speech_recognition with MIT License

def _maybe_merge_batch_beams(self, t, s):
  """Merges the tensor from a batch of beams into a batch by beams.

  More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
  reshape this into [batch_size*beam_width, s].

  Args:
    t: Tensor of dimension [batch_size, beam_width, s].
    s: Tensor, Python int, or TensorShape.

  Returns:
    A reshaped version of t with dimension [batch_size*beam_width, s].

  Raises:
    TypeError: If t is an instance of TensorArray.
    ValueError: If the rank of t is not statically known.
  """
  _check_maybe(t)
  if t.shape.ndims >= 2:
    return self._merge_batch_beams(t, s)
  else:
    return t
Example #21
Source File: array_ops.py From lambda-packs with MIT License

def _TileGradShape(op):
  """Shape function for the TileGrad op."""
  multiples_shape = op.inputs[1].get_shape().with_rank(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i) it is a
  # vector of non-negative integers, and (ii) doing so allows us to handle
  # partially-known multiples.
  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
      input_shape.ndims)
  if multiples.ndims is None:
    return [tensor_shape.unknown_shape()]
  else:
    output_dims = []
    for dim, multiple in zip(input_shape.dims, multiples.dims):
      output_dims.append(dim // multiple)
    return [tensor_shape.TensorShape(output_dims)]
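The dim // multiple line works because Dimension supports arithmetic that propagates unknowns; a standalone sketch:

from tensorflow.python.framework import tensor_shape

d = tensor_shape.Dimension(12)
print(d // tensor_shape.Dimension(3))     # Dimension(4)
print(d // tensor_shape.Dimension(None))  # Dimension(None) -- unknown wins

shape = tensor_shape.TensorShape([12, None])
multiples = tensor_shape.TensorShape([3, 2])
print([dim // m for dim, m in zip(shape.dims, multiples.dims)])
# [Dimension(4), Dimension(None)]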
Example #22
Source File: constant_op.py From lambda-packs with MIT License

def _dimension_tensor_conversion_function(d, dtype=None, name=None,
                                          as_ref=False):
  _ = as_ref
  if d.value is None:
    raise ValueError(
        "Cannot convert an unknown Dimension to a Tensor: %s" % d)
  if dtype is not None:
    if dtype not in (dtypes.int32, dtypes.int64):
      raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
  else:
    dtype = dtypes.int32
  if name is None:
    name = "shape_as_tensor"
  return constant(d.value, dtype=dtype, name=name)
Example #23
Source File: control_flow_ops.py From lambda-packs with MIT License

def _resource_safe_shape(t):
  """Returns the shape of t or the variable it points to."""
  if t.dtype == dtypes.resource:
    while t.op.inputs:
      t = t.op.inputs[0]
    return tensor_shape.TensorShape(t.op.get_attr("shape"))
  return array_ops.shape_internal(t, optimize=False)


# TODO(yuanbyu): Consider having a unified notion of context for
# not only conditionals and loops but also control dependency and
# subgraphs.
Example #24
Source File: tensor_array_ops.py From lambda-packs with MIT License

def scatter(self, indices, value, name=None):
  """Scatter the values of a `Tensor` in specific indices of a `TensorArray`.

  Args:
    indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If the
      `TensorArray` is not dynamic, `max_value=size()`.
    value: (N+1)-D. Tensor of type `dtype`. The Tensor to unpack.
    name: A name for the operation (optional).

  Returns:
    A new TensorArray object with flow that ensures the scatter occurs. Use
    this object for all subsequent operations.

  Raises:
    ValueError: if the shape inference fails.
  """
  with ops.name_scope(name, "TensorArrayScatter",
                      [self._handle, value, indices]):
    value = ops.convert_to_tensor(value, name="value")
    with self._maybe_colocate_with(value):
      flow_out = gen_data_flow_ops._tensor_array_scatter_v3(
          handle=self._handle,
          indices=indices,
          value=value,
          flow_in=self._flow,
          name=name)
    ta = TensorArray(
        dtype=self._dtype, handle=self._handle, flow=flow_out,
        colocate_with_first_write_call=self._colocate_with_first_write_call)
    ta._infer_shape = self._infer_shape
    ta._element_shape = self._element_shape
    ta._colocate_with = self._colocate_with
    if ta._infer_shape:
      val_shape = flow_out.op.inputs[2].get_shape()
      element_shape = tensor_shape.unknown_shape()
      if val_shape.dims is not None:
        element_shape = tensor_shape.TensorShape(val_shape.dims[1:])
      ta._merge_element_shape(element_shape)
    return ta
Example #25
Source File: tensor_array_ops.py From lambda-packs with MIT License

def concat(self, name=None):
  """Return the values in the TensorArray as a concatenated `Tensor`.

  All of the values must have been written, their ranks must match, and
  their shapes must all match for all dimensions except the first.

  Args:
    name: A name for the operation (optional).

  Returns:
    All the tensors in the TensorArray concatenated into one tensor.
  """
  if self._element_shape and self._element_shape[0].dims is not None:
    element_shape_except0 = (
        tensor_shape.TensorShape(self._element_shape[0].dims[1:]))
  else:
    element_shape_except0 = tensor_shape.TensorShape(None)
  value, _ = gen_data_flow_ops._tensor_array_concat_v3(
      handle=self._handle,
      flow_in=self._flow,
      dtype=self._dtype,
      name=name,
      element_shape_except0=element_shape_except0)
  if self._element_shape and self._element_shape[0].dims is not None:
    value.set_shape([None] + self._element_shape[0].dims[1:])
  return value
Example #26
Source File: transformed_distribution.py From lambda-packs with MIT License

def _event_shape(self):
  # If there's a chance that the event_shape has been overridden, we return
  # what we statically know about the `event_shape_override`. This works
  # because: `_is_maybe_event_override` means `static_override` is `None` or
  # a non-empty list, i.e., we don't statically know the `event_shape` or we
  # do.
  #
  # Since the `bijector` may change the `event_shape`, we then forward what
  # we know to the bijector. This allows the `bijector` to have final say in
  # the `event_shape`.
  static_override = tensor_util.constant_value(self._override_event_shape)
  return self.bijector.forward_event_shape(
      tensor_shape.TensorShape(static_override)
      if self._is_maybe_event_override
      else self.distribution.event_shape)
Example #27
Source File: distribution.py From lambda-packs with MIT License

def param_static_shapes(cls, sample_shape):
  """param_shapes with static (i.e. `TensorShape`) shapes.

  This is a class method that describes what key/value arguments are
  required to instantiate the given `Distribution` so that a particular
  shape is returned for that instance's call to `sample()`. Assumes that
  the sample's shape is known statically.

  Subclasses should override class method `_param_shapes` to return
  constant-valued tensors when constant values are fed.

  Args:
    sample_shape: `TensorShape` or python list/tuple. Desired shape of a
      call to `sample()`.

  Returns:
    `dict` of parameter name to `TensorShape`.

  Raises:
    ValueError: if `sample_shape` is a `TensorShape` and is not fully
      defined.
  """
  if isinstance(sample_shape, tensor_shape.TensorShape):
    if not sample_shape.is_fully_defined():
      raise ValueError("TensorShape sample_shape must be fully defined")
    sample_shape = sample_shape.as_list()

  params = cls.param_shapes(sample_shape)

  static_params = {}
  for name, shape in params.items():
    static_shape = tensor_util.constant_value(shape)
    if static_shape is None:
      raise ValueError(
          "sample_shape must be a fully-defined TensorShape or list/tuple")
    static_params[name] = tensor_shape.TensorShape(static_shape)

  return static_params
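Assuming the TF 1.x tf.distributions.Normal (not part of this listing, and with 'loc'/'scale' as its parameter names), the method can be exercised like this:

import tensorflow as tf

shapes = tf.distributions.Normal.param_static_shapes([100])
for name, shape in sorted(shapes.items()):
  print(name, shape.as_list())  # loc [100], then scale [100]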
Example #28
Source File: bijector_impl.py From lambda-packs with MIT License

def forward_event_shape(self, input_shape):
  """Shape of a single sample from a single batch as a `TensorShape`.

  Same meaning as `forward_event_shape_tensor`. May be only partially
  defined.

  Args:
    input_shape: `TensorShape` indicating event-portion shape passed into
      `forward` function.

  Returns:
    forward_event_shape_tensor: `TensorShape` indicating event-portion shape
      after applying `forward`. Possibly unknown.
  """
  return self._forward_event_shape(tensor_shape.TensorShape(input_shape))
Example #29
Source File: distribution.py From lambda-packs with MIT License

def _batch_shape(self):
  return tensor_shape.TensorShape(None)
Example #30
Source File: distribution.py From lambda-packs with MIT License

def batch_shape(self):
  """Shape of a single sample from a single event index as a `TensorShape`.

  May be partially defined or unknown.

  The batch dimensions are indexes into independent, non-identical
  parameterizations of this distribution.

  Returns:
    batch_shape: `TensorShape`, possibly unknown.
  """
  return self._batch_shape()