Python tensorflow.python.framework.tensor_shape.unknown_shape() Examples
The following are 30 code examples of tensorflow.python.framework.tensor_shape.unknown_shape(), extracted from open source projects. Each example notes its original project and source file, together with the number of user votes it received. You may also want to check out all the other available functions and classes of the module tensorflow.python.framework.tensor_shape.
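As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of what unknown_shape() returns. It assumes a TF 1.x-era tensor_shape module, where the optional rank argument is named ndims, as in the examples on this page:

from tensorflow.python.framework import tensor_shape

# A completely unknown shape: neither the rank nor any dimension is specified.
s = tensor_shape.unknown_shape()
print(s.ndims)            # None

# An unknown shape with a known rank: three dimensions, each of size None.
s3 = tensor_shape.unknown_shape(ndims=3)
print(s3.as_list())       # [None, None, None]

# Merging with a more specific shape fills in whatever becomes known.
merged = s3.merge_with(tensor_shape.TensorShape([None, 224, 3]))
print(merged.as_list())   # [None, 224, 3]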
Example #1
Source File: stack_ops_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def _testStackWhileSwap(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      n = tf.constant(0)
      h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")

      def c(x):
        return tf.less(x, 10)

      def b(x):
        with tf.control_dependencies([x]):
          a = tf.constant(np.ones(2000), dtype=tf.float32)
          v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
        with tf.control_dependencies([v]):
          return tf.add(x, 1)

      r = tf.while_loop(c, b, [n])

      v = tf.constant(np.zeros(2000), dtype=tf.float32)

      def c1(x, y):
        return tf.greater(x, 0)

      def b1(x, y):
        nx = tf.sub(x, 1)
        ny = y + gen_data_flow_ops._stack_pop(h, tf.float32)
        return [nx, ny]

      rx, ry = tf.while_loop(c1, b1, [r, v],
                             [r.get_shape(), tensor_shape.unknown_shape()])
      self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
Example #2
Source File: image_ops_impl.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def fix_image_flip_shape(image, result):
  """Set the shape to 3 dimensional if we don't know anything else.

  Args:
    image: original image size
    result: flipped or transformed image

  Returns:
    An image whose shape is at least None,None,None.
  """
  image_shape = image.get_shape()
  if image_shape == tensor_shape.unknown_shape():
    result.set_shape([None, None, None])
  else:
    result.set_shape(image_shape)
  return result
Example #3
Source File: tensor_array_ops_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def testSplitShape(self):
    with self.test_session():
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo",
          size=0, dynamic_size=True, infer_shape=True)
      value = tf.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
      w0 = ta.split(value, [1, 1, 1])
      r0 = w0.read(0)
      self.assertAllEqual((1, 2), r0.get_shape())

      ta1 = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo1",
          size=0, dynamic_size=True, infer_shape=True)
      w0 = ta1.split(value, [1, 2])
      r0 = w0.read(0)
      self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
Example #4
Source File: array_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def _TileGradShape(op):
  """Shape function for the TileGrad op."""
  multiples_shape = op.inputs[1].get_shape().with_rank(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
  # it is a vector of non-negative integers, and (ii) doing so allows
  # us to handle partially-known multiples.
  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
      input_shape.ndims)
  if multiples.ndims is None:
    return [tensor_shape.unknown_shape()]
  else:
    output_dims = []
    for dim, multiple in zip(input_shape.dims, multiples.dims):
      output_dims.append(dim // multiple)
    return [tensor_shape.TensorShape(output_dims)]
Example #5
Source File: control_flow_ops_py_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def testWhile_5(self):
    with self.test_session():

      def compute(i, c, o):
        c = tf.slice(x, tf.expand_dims(i, 0), [1])
        o = tf.concat(0, [o, c])
        i = tf.add(i, 1)
        return [i, c, o]

      i = tf.convert_to_tensor(0)
      c = tf.convert_to_tensor([0])
      o = tf.convert_to_tensor([0])
      x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
      s = tf.size(x)
      r = tf.while_loop(
          lambda i, c, o: tf.less(i, s), compute, [i, c, o],
          [i.get_shape(), c.get_shape(), tensor_shape.unknown_shape()])
      result = r[2].eval()
      self.assertTrue(check_op_order(i.graph))
      self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
Example #6
Source File: data_flow_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def __init__(self, dtype, shape, accumulator_ref):
    """Creates a new ConditionalAccumulator.

    Args:
      dtype: Datatype of the accumulated gradients.
      shape: Shape of the accumulated gradients.
      accumulator_ref: A handle to the conditional accumulator, created by sub-
        classes
    """
    self._dtype = dtype
    if shape is not None:
      self._shape = tensor_shape.TensorShape(shape)
    else:
      self._shape = tensor_shape.unknown_shape()
    self._accumulator_ref = accumulator_ref
    self._name = self._accumulator_ref.op.name.split("/")[-1]
Example #7
Source File: data_flow_ops.py From deep_image_model with Apache License 2.0 | 6 votes |
def _DynamicPartitionShape(op):
  """Shape function for data_flow_ops.dynamic_partition."""
  data_shape = op.inputs[0].get_shape()
  partitions_shape = op.inputs[1].get_shape()
  # If we don't know the rank of partitions, we don't know anything
  mid = partitions_shape.ndims
  if mid is None:
    result_shape = tensor_shape.unknown_shape()
  else:
    # data_shape must start with partitions_shape
    partitions_shape.assert_is_compatible_with(data_shape[:mid])
    # The partition shape is dynamic in the 0th dimension, and matches
    # data_shape in the remaining dimensions.
    result_shape = tensor_shape.TensorShape([None]).concatenate(
        data_shape[mid:])
  return [result_shape] * op.get_attr("num_partitions")
Example #8
Source File: ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def __init__(self, op, value_index, dtype):
    """Creates a new `Tensor`.

    Args:
      op: An `Operation`. `Operation` that computes this tensor.
      value_index: An `int`. Index of the operation's endpoint that produces
        this tensor.
      dtype: A `DType`. Type of elements stored in this tensor.

    Raises:
      TypeError: If the op is not an `Operation`.
    """
    if not isinstance(op, Operation):
      raise TypeError("op needs to be an Operation: %s" % op)
    self._op = op
    self._value_index = value_index
    self._dtype = dtypes.as_dtype(dtype)
    self._shape = tensor_shape.unknown_shape()
    # List of operations that use this Tensor as input. We maintain this list
    # to easily navigate a computation graph.
    self._consumers = []

    # Attributes used for C++ shape inference. Not inspected, only forwarded.
    self._handle_shape = tensor_shape_pb2.TensorShapeProto()
    self._handle_dtype = types_pb2.DT_INVALID
Example #9
Source File: ops.py From lambda-packs with MIT License | 6 votes |
def __init__(self, op, value_index, dtype):
    """Creates a new `Tensor`.

    Args:
      op: An `Operation`. `Operation` that computes this tensor.
      value_index: An `int`. Index of the operation's endpoint that produces
        this tensor.
      dtype: A `DType`. Type of elements stored in this tensor.

    Raises:
      TypeError: If the op is not an `Operation`.
    """
    if not isinstance(op, Operation):
      raise TypeError("op needs to be an Operation: %s" % op)
    self._op = op
    self._value_index = value_index
    self._dtype = dtypes.as_dtype(dtype)
    self._shape = tensor_shape.unknown_shape()
    # List of operations that use this Tensor as input. We maintain this list
    # to easily navigate a computation graph.
    self._consumers = []

    # Attributes used for C++ shape inference. Not inspected, only forwarded.
    self._handle_shape = tensor_shape_pb2.TensorShapeProto()
    self._handle_dtype = types_pb2.DT_INVALID
Example #10
Source File: control_flow_ops_py_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def testWhileFuncBasic(self):

    @function.Defun(tf.float32)
    def func(x):
      return tf.square(tf.square(x))

    with self.test_session():
      x = tf.constant(2.0, tf.float32)
      r = tf.while_loop(
          lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
          [tf.constant(0), x],
          [tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
      self.assertEqual(r[1].eval(), 65536.0)
      r = tf.gradients(r, x)[0]
      self.assertEqual(r.eval(), 524288.0)
      self.assertEqual(
          len([op for op in x.graph.get_operations() if op.type == "Stack"]),
          1)
Example #11
Source File: image_ops_impl.py From lambda-packs with MIT License | 6 votes |
def fix_image_flip_shape(image, result):
  """Set the shape to 3 dimensional if we don't know anything else.

  Args:
    image: original image size
    result: flipped or transformed image

  Returns:
    An image whose shape is at least None,None,None.
  """
  image_shape = image.get_shape()
  if image_shape == tensor_shape.unknown_shape():
    result.set_shape([None, None, None])
  else:
    result.set_shape(image_shape)
  return result
Example #12
Source File: tf_image.py From MobileNet with Apache License 2.0 | 6 votes |
def fix_image_flip_shape(image, result):
  """Set the shape to 3 dimensional if we don't know anything else.

  Args:
    image: original image size
    result: flipped or transformed image

  Returns:
    An image whose shape is at least None,None,None.
  """
  image_shape = image.get_shape()
  if image_shape == tensor_shape.unknown_shape():
    result.set_shape([None, None, None])
  else:
    result.set_shape(image_shape)
  return result


# =========================================================================== #
# Image + BBoxes methods: cropping, resizing, flipping, ...
# =========================================================================== #
Example #13
Source File: tf_image.py From pixel_link with MIT License | 6 votes |
def fix_image_flip_shape(image, result):
  """Set the shape to 3 dimensional if we don't know anything else.

  Args:
    image: original image size
    result: flipped or transformed image

  Returns:
    An image whose shape is at least None,None,None.
  """
  image_shape = image.get_shape()
  if image_shape == tensor_shape.unknown_shape():
    result.set_shape([None, None, None])
  else:
    result.set_shape(image_shape)
  return result


# =========================================================================== #
# Image + BBoxes methods: cropping, resizing, flipping, ...
# =========================================================================== #
Example #14
Source File: array_ops.py From lambda-packs with MIT License | 6 votes |
def _TileGradShape(op):
  """Shape function for the TileGrad op."""
  multiples_shape = op.inputs[1].get_shape().with_rank(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
  # it is a vector of non-negative integers, and (ii) doing so allows
  # us to handle partially-known multiples.
  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
      input_shape.ndims)
  if multiples.ndims is None:
    return [tensor_shape.unknown_shape()]
  else:
    output_dims = []
    for dim, multiple in zip(input_shape.dims, multiples.dims):
      output_dims.append(dim // multiple)
    return [tensor_shape.TensorShape(output_dims)]
Example #15
Source File: tf_image.py From SSD_tensorflow_VOC with Apache License 2.0 | 6 votes |
def fix_image_flip_shape(image, result):
  """Set the shape to 3 dimensional if we don't know anything else.

  Args:
    image: original image size
    result: flipped or transformed image

  Returns:
    An image whose shape is at least None,None,None.
  """
  image_shape = image.get_shape()
  if image_shape == tensor_shape.unknown_shape():
    result.set_shape([None, None, None])
  else:
    result.set_shape(image_shape)
  return result


# =========================================================================== #
# Image + BBoxes methods: cropping, resizing, flipping, ...
# =========================================================================== #
Example #16
Source File: tf_image.py From seglink with GNU General Public License v3.0 | 6 votes |
def fix_image_flip_shape(image, result):
  """Set the shape to 3 dimensional if we don't know anything else.

  Args:
    image: original image size
    result: flipped or transformed image

  Returns:
    An image whose shape is at least None,None,None.
  """
  image_shape = image.get_shape()
  if image_shape == tensor_shape.unknown_shape():
    result.set_shape([None, None, None])
  else:
    result.set_shape(image_shape)
  return result


# =========================================================================== #
# Image + BBoxes methods: cropping, resizing, flipping, ...
# =========================================================================== #
Example #17
Source File: data_flow_ops.py From lambda-packs with MIT License | 6 votes |
def __init__(self, dtype, shape, accumulator_ref):
    """Creates a new ConditionalAccumulator.

    Args:
      dtype: Datatype of the accumulated gradients.
      shape: Shape of the accumulated gradients.
      accumulator_ref: A handle to the conditional accumulator, created by sub-
        classes
    """
    self._dtype = dtype
    if shape is not None:
      self._shape = tensor_shape.TensorShape(shape)
    else:
      self._shape = tensor_shape.unknown_shape()
    self._accumulator_ref = accumulator_ref
    self._name = self._accumulator_ref.op.name.split("/")[-1]
Example #18
Source File: array_ops.py From deep_image_model with Apache License 2.0 | 6 votes |
def _TileGradShape(op):
  """Shape function for the TileGrad op."""
  multiples_shape = op.inputs[1].get_shape().with_rank(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
  # it is a vector of non-negative integers, and (ii) doing so allows
  # us to handle partially-known multiples.
  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
      input_shape.ndims)
  if multiples.ndims is None:
    return [tensor_shape.unknown_shape()]
  else:
    output_dims = []
    for dim, multiple in zip(input_shape.dims, multiples.dims):
      output_dims.append(dim // multiple)
    return [tensor_shape.TensorShape(output_dims)]
Example #19
Source File: data_flow_ops.py From deep_image_model with Apache License 2.0 | 5 votes |
def __init__(self, dtypes, shapes, names, queue_ref):
    """Constructs a queue object from a queue reference.

    The two optional lists, `shapes` and `names`, must be of the same length
    as `dtypes` if provided. The values at a given index `i` indicate the
    shape and name to use for the corresponding queue component in `dtypes`.

    Args:
      dtypes: A list of types. The length of dtypes must equal the number
        of tensors in each element.
      shapes: Constraints on the shapes of tensors in an element:
        A list of shape tuples or None. This list is the same length as dtypes.
        If the shape of any tensors in the element are constrained, all must
        be; shapes can be None if the shapes should not be constrained.
      names: Optional list of names. If provided, the `enqueue()` and
        `dequeue()` methods will use dictionaries with these names as keys.
        Must be None or a list or tuple of the same length as `dtypes`.
      queue_ref: The queue reference, i.e. the output of the queue op.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    self._dtypes = dtypes
    if shapes is not None:
      if len(shapes) != len(dtypes):
        raise ValueError("Queue shapes must have the same length as dtypes")
      self._shapes = [tensor_shape.TensorShape(s) for s in shapes]
    else:
      self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes]

    if names is not None:
      if len(names) != len(dtypes):
        raise ValueError("Queue names must have the same length as dtypes")
      self._names = names
    else:
      self._names = None
    self._queue_ref = queue_ref
    self._name = self._queue_ref.op.name.split("/")[-1]
Example #20
Source File: array_ops.py From deep_image_model with Apache License 2.0 | 5 votes |
def _TileShape(op):
  """Shape function for the Tile op.

  This op has two inputs:

  * input: A rank-N tensor.
  * multiples: A length-N vector, in which the i^th element contains
    the factor by which `input` will be tiled in the i^th dimension.

  It has one output, which has the same rank as input, and additional
  elements according to the values in multiples

  Args:
    op: A Tile Operation.

  Returns:
    A single-element list containing the shape of the output.
  """
  multiples_shape = op.inputs[1].get_shape().with_rank(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0].value)
  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
  # it is a vector of non-negative integers, and (ii) doing so allows
  # us to handle partially-known multiples.
  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
      input_shape.ndims)
  if multiples.ndims is None:
    return [tensor_shape.unknown_shape()]
  else:
    output_dims = []
    for dim, multiple in zip(input_shape.dims, multiples.dims):
      output_dims.append(dim * multiple)
    return [tensor_shape.TensorShape(output_dims)]
Example #21
Source File: array_ops.py From deep_image_model with Apache License 2.0 | 5 votes |
def _SliceShape(op):
  """Shape function for array_ops.slice."""
  input_shape = op.inputs[0].get_shape()
  begin_shape = op.inputs[1].get_shape().with_rank(1)
  sizes_shape = op.inputs[2].get_shape().with_rank(1)
  ndims = begin_shape.merge_with(sizes_shape)[0].value
  if ndims is not None:
    input_shape.assert_has_rank(ndims)
  # NOTE(mrry): Use `constant_value_as_shape()` to handle
  # partially-known values.
  begin_value = tensor_util.constant_value_as_shape(
      op.inputs[1]).with_rank(ndims)
  # NOTE(mrry): We can't use `constant_value_as_shape()` for `sizes`
  # because it might contain -1, which can't be represented as a
  # `TensorShape`.
  sizes_value = tensor_util.constant_value(op.inputs[2])
  if sizes_value is not None:
    returned_dims = []
    for i, (slice_size, begin_dim) in enumerate(zip(sizes_value.ravel(),
                                                    begin_value.dims)):
      if slice_size != -1:
        returned_dims.append(slice_size)
      else:
        returned_dims.append(input_shape[i] - begin_dim)
    return [tensor_shape.TensorShape(returned_dims)]
  else:
    if input_shape.ndims is not None:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    elif ndims is not None:
      return [tensor_shape.unknown_shape(ndims=ndims)]
    else:
      return [tensor_shape.unknown_shape()]
Example #22
Source File: mocks.py From deep_image_model with Apache License 2.0 | 5 votes |
def get_shape(self):
    return tensor_shape.unknown_shape()
Example #23
Source File: rnn.py From MIMN with MIT License | 5 votes |
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
      or nested tuples of tensors.
    lengths: A `Tensor` of dimension batch_size, containing lengths for each
      sequence in the batch. If "None" is specified, simply reverses the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)

  flat_results = [[] for _ in range(len(input_seq))]
  for sequence in zip(*flat_input_seq):
    input_shape = tensor_shape.unknown_shape(
        ndims=sequence[0].get_shape().ndims)
    for input_ in sequence:
      input_shape.merge_with(input_.get_shape())
      input_.set_shape(input_shape)

    # Join into (time, batch_size, depth)
    s_joined = array_ops.stack(sequence)

    # Reverse along dimension 0
    s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
    # Split again into list
    result = array_ops.unstack(s_reversed)
    for r, flat_result in zip(result, flat_results):
      r.set_shape(input_shape)
      flat_result.append(r)

  results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
             for input_, flat_result in zip(input_seq, flat_results)]
  return results
Example #24
Source File: gradients_impl.py From deep_image_model with Apache License 2.0 | 5 votes |
def _AccumulatorShape(inputs):
  shape = tensor_shape.unknown_shape()
  for i in inputs:
    if isinstance(i, ops.Tensor):
      shape = shape.merge_with(i.get_shape())
  return shape
Example #25
Source File: data_flow_ops.py From deep_image_model with Apache License 2.0 | 5 votes |
def _DynamicStitchShape(op):
  """Shape function for data_flow_ops.dynamic_stitch."""
  num_partitions = op.get_attr("N")
  indices_shapes = [t.get_shape() for t in op.inputs[0:num_partitions]]
  data_shapes = [t.get_shape() for t in op.inputs[num_partitions:]]
  output_shape = tensor_shape.unknown_shape()
  extra_shape = tensor_shape.TensorShape(None)
  for indices_shape, data_shape in zip(indices_shapes, data_shapes):
    indices_ndims = indices_shape.ndims
    if indices_ndims is not None:
      # Assert that data_shape starts with indices_shape
      indices_shape.merge_with(data_shape[:indices_ndims])
      # The rest belongs to output
      extra_shape = extra_shape.merge_with(data_shape[indices_ndims:])
  return [tensor_shape.TensorShape([None]).concatenate(extra_shape)]
Example #26
Source File: tensor_shape_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testAsProto(self):
    self.assertTrue(tensor_shape.unknown_shape().as_proto().unknown_rank)
    self.assertFalse(
        tensor_shape.unknown_shape(ndims=3).as_proto().unknown_rank)
    self.assertFalse(
        tensor_shape.TensorShape([1, 2, 3]).as_proto().unknown_rank)
    self.assertFalse(
        tensor_shape.TensorShape([1, None, 3]).as_proto().unknown_rank)
Example #27
Source File: data_flow_ops.py From deep_image_model with Apache License 2.0 | 5 votes |
def _shape_common(s1, s2):
  """The greatest lower bound (ordered by specificity) TensorShape."""
  s1 = tensor_shape.TensorShape(s1)
  s2 = tensor_shape.TensorShape(s2)
  if s1.ndims is None or s2.ndims is None or s1.ndims != s2.ndims:
    return tensor_shape.unknown_shape()
  d = [
      d1 if d1 is not None and d1 == d2 else None
      for (d1, d2) in zip(s1.as_list(), s2.as_list())]
  return tensor_shape.TensorShape(d)


# pylint: disable=protected-access
Example #28
Source File: state_ops.py From deep_image_model with Apache License 2.0 | 5 votes |
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
                shared_name=""):
  """Create a variable Operation.

  See also variables.Variable.

  Args:
    shape: The shape of the tensor managed by this variable
    dtype: The underlying type of the tensor values.
    name: optional name to use for the variable op.
    set_shape: If True, set the shape property of the returned Tensor to
      the shape argument.
    container: An optional string. Defaults to "".
      If non-empty, this variable is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional string. Defaults to "".
      If non-empty, this variable is named in the given bucket
      with this shared_name. Otherwise, the node name is used instead.

  Returns:
    A variable tensor.
  """
  if not set_shape:
    shape = tensor_shape.unknown_shape()
  ret = gen_state_ops._variable(shape=shape, dtype=dtype, name=name,
                                container=container, shared_name=shared_name)
  # TODO(mrry): Move this to where it is used, so we can get rid of this op
  # wrapper?
  if set_shape:
    ret.set_shape(shape)
  return ret


# NOTE(mrry): Shapes are conditionally set in the Python wrapper.
Example #29
Source File: variable_ops_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testAssignNoShapeNoValidateShape(self):
    with self.test_session():
      value = self._NewShapelessTensor()
      var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
      self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
      self.assertEqual(
          tensor_shape.unknown_shape(),
          tf.assign(var, value, validate_shape=False).get_shape())
Example #30
Source File: variable_ops_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testAssignNoShape(self):
    with self.test_session():
      value = self._NewShapelessTensor()
      var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
      self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
      self.assertEqual(tensor_shape.unknown_shape(),
                       tf.assign(var, value).get_shape())