Python tensorflow.python.framework.dtypes.as_dtype() Examples
The following are 30 code examples of tensorflow.python.framework.dtypes.as_dtype(), collected from several open-source projects. The originating project, source file, and license are noted above each example.
You may also want to check out the other available functions and classes of the tensorflow.python.framework.dtypes module.
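Before diving into the examples, it helps to know what as_dtype() does: it converts a type-name string, a NumPy dtype or type object, or an existing DType into a tf.DType. The following is a minimal, hedged sketch of that behavior (not taken from any of the projects below):

import numpy as np
from tensorflow.python.framework import dtypes

print(dtypes.as_dtype("float32"))       # type-name string  -> tf.float32
print(dtypes.as_dtype(np.int64))        # NumPy type object -> tf.int64
print(dtypes.as_dtype(dtypes.float16))  # an existing DType is returned unchanged

# Values that cannot be interpreted as a dtype raise TypeError.
try:
    dtypes.as_dtype("not_a_real_dtype")
except TypeError as err:
    print("TypeError:", err)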
Example #1
Source File: linear_operator_test_util.py From lambda-packs with MIT License
def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False):
  """[batch] positive definite matrix.

  Args:
    shape: `TensorShape` or Python list.  Shape of the returned matrix.
    dtype: `TensorFlow` `dtype` or Python dtype.
    force_well_conditioned: Python bool.  If `True`, returned matrix has
      eigenvalues with modulus in `(1, 4)`.  Otherwise, eigenvalues are
      chi-squared random variables.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  if not contrib_tensor_util.is_tensor(shape):
    shape = tensor_shape.TensorShape(shape)
    # Matrix must be square.
    shape[-1].assert_is_compatible_with(shape[-2])

  with ops.name_scope("random_positive_definite_matrix"):
    tril = random_tril_matrix(
        shape, dtype, force_well_conditioned=force_well_conditioned)
    return math_ops.matmul(tril, tril, adjoint_b=True)
Example #2
Source File: init_ops.py From lambda-packs with MIT License
def __init__(self, scale=1.0,
             mode="fan_in",
             distribution="normal",
             seed=None,
             dtype=dtypes.float32):
  if scale <= 0.:
    raise ValueError("`scale` must be positive float.")
  if mode not in {"fan_in", "fan_out", "fan_avg"}:
    raise ValueError("Invalid `mode` argument:", mode)
  distribution = distribution.lower()
  if distribution not in {"normal", "uniform"}:
    raise ValueError("Invalid `distribution` argument:", distribution)
  self.scale = scale
  self.mode = mode
  self.distribution = distribution
  self.seed = seed
  self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
Example #3
Source File: ops.py From auto-alt-text-lambda-api with MIT License
def __init__(self, op, value_index, dtype):
  """Creates a new `Tensor`.

  Args:
    op: An `Operation`. `Operation` that computes this tensor.
    value_index: An `int`. Index of the operation's endpoint that produces
      this tensor.
    dtype: A `DType`. Type of elements stored in this tensor.

  Raises:
    TypeError: If the op is not an `Operation`.
  """
  if not isinstance(op, Operation):
    raise TypeError("op needs to be an Operation: %s" % op)
  self._op = op
  self._value_index = value_index
  self._dtype = dtypes.as_dtype(dtype)
  self._shape = tensor_shape.unknown_shape()
  # List of operations that use this Tensor as input.  We maintain this list
  # to easily navigate a computation graph.
  self._consumers = []

  # Attributes used for C++ shape inference. Not inspected, only forwarded.
  self._handle_shape = tensor_shape_pb2.TensorShapeProto()
  self._handle_dtype = types_pb2.DT_INVALID
Example #4
Source File: ops.py From lambda-packs with MIT License
def __init__(self, op, value_index, dtype):
  """Creates a new `Tensor`.

  Args:
    op: An `Operation`. `Operation` that computes this tensor.
    value_index: An `int`. Index of the operation's endpoint that produces
      this tensor.
    dtype: A `DType`. Type of elements stored in this tensor.

  Raises:
    TypeError: If the op is not an `Operation`.
  """
  if not isinstance(op, Operation):
    raise TypeError("op needs to be an Operation: %s" % op)
  self._op = op
  self._value_index = value_index
  self._dtype = dtypes.as_dtype(dtype)
  self._shape = tensor_shape.unknown_shape()
  # List of operations that use this Tensor as input.  We maintain this list
  # to easily navigate a computation graph.
  self._consumers = []

  # Attributes used for C++ shape inference. Not inspected, only forwarded.
  self._handle_shape = tensor_shape_pb2.TensorShapeProto()
  self._handle_dtype = types_pb2.DT_INVALID
Example #5
Source File: linear_operator_test_util.py From auto-alt-text-lambda-api with MIT License
def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False):
  """[batch] positive definite matrix.

  Args:
    shape: `TensorShape` or Python list.  Shape of the returned matrix.
    dtype: `TensorFlow` `dtype` or Python dtype.
    force_well_conditioned: Python bool.  If `True`, returned matrix has
      eigenvalues with modulus in `(1, 4)`.  Otherwise, eigenvalues are
      chi-squared random variables.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  if not contrib_tensor_util.is_tensor(shape):
    shape = tensor_shape.TensorShape(shape)
    # Matrix must be square.
    shape[-1].assert_is_compatible_with(shape[-2])

  with ops.name_scope("random_positive_definite_matrix"):
    tril = random_tril_matrix(
        shape, dtype, force_well_conditioned=force_well_conditioned)
    return math_ops.matmul(tril, tril, adjoint_b=True)
Example #6
Source File: io_ops.py From auto-alt-text-lambda-api with MIT License
def _restore_slice(file_pattern, tensor_name, shape_and_slice, tensor_type,
                   name="restore_slice", preferred_shard=-1):
  """Restore a tensor slice from a set of files with a given pattern.

  Example usage:
    RestoreSlice("/foo/bar-?????-of-?????", "w", "10 10 0,2:-", DT_FLOAT)

  Args:
    file_pattern: the file pattern used to match a set of checkpoint files.
    tensor_name: the name of the tensor to restore.
    shape_and_slice: the shape-and-slice spec of the slice.
    tensor_type: the type of the tensor to restore.
    name: string.  Optional name for the op.
    preferred_shard: Int. Optional shard to open first in the checkpoint file.

  Returns:
    A tensor of type "tensor_type".
  """
  base_type = dtypes.as_dtype(tensor_type).base_dtype
  return gen_io_ops._restore_slice(
      file_pattern,
      tensor_name,
      shape_and_slice,
      base_type,
      preferred_shard,
      name=name)
Example #7
Source File: io_ops.py From lambda-packs with MIT License
def _restore_slice(file_pattern, tensor_name, shape_and_slice, tensor_type,
                   name="restore_slice", preferred_shard=-1):
  """Restore a tensor slice from a set of files with a given pattern.

  Example usage:
    RestoreSlice("/foo/bar-?????-of-?????", "w", "10 10 0,2:-", DT_FLOAT)

  Args:
    file_pattern: the file pattern used to match a set of checkpoint files.
    tensor_name: the name of the tensor to restore.
    shape_and_slice: the shape-and-slice spec of the slice.
    tensor_type: the type of the tensor to restore.
    name: string.  Optional name for the op.
    preferred_shard: Int. Optional shard to open first in the checkpoint file.

  Returns:
    A tensor of type "tensor_type".
  """
  base_type = dtypes.as_dtype(tensor_type).base_dtype
  return gen_io_ops._restore_slice(
      file_pattern,
      tensor_name,
      shape_and_slice,
      base_type,
      preferred_shard,
      name=name)
Example #8
Source File: gradients_impl.py From auto-alt-text-lambda-api with MIT License
def _VerifyGeneratedGradients(grads, op):
  """Verify that gradients are valid in number and type.

  Args:
    grads: List of generated gradients.
    op: Operation for which the gradients were generated.

  Raises:
    ValueError: if the gradients are invalid.
  """
  if len(grads) != len(op.inputs):
    raise ValueError("Num gradients %d generated for op %s do not match num "
                     "inputs %d" % (len(grads), op.node_def, len(op.inputs)))
  for i in xrange(len(grads)):
    grad = grads[i]
    inp = op.inputs[i]
    if grad is not None:
      if not grad.dtype.is_compatible_with(inp.dtype):
        raise ValueError("Gradient type %s generated for op %s does "
                         "not match input type %s" %
                         (dtypes.as_dtype(grad.dtype).name, op.node_def,
                          dtypes.as_dtype(inp.dtype).name))
Example #9
Source File: predict_utils.py From model_server with Apache License 2.0
def _prepare_output_as_AppendArrayToTensorProto(
        inference_output,
        model_available_outputs):
    response = predict_pb2.PredictResponse()
    for response_output_name, model_output_name in \
            model_available_outputs.items():
        if model_output_name in inference_output:
            dtype = dtypes.as_dtype(inference_output[model_output_name].dtype)
            output_tensor = tensor_pb2.TensorProto(
                dtype=dtype.as_datatype_enum,
                tensor_shape=tensor_shape.as_shape(
                    inference_output[model_output_name].shape).as_proto())
            result = inference_output[model_output_name].flatten()
            tensor_util._NP_TO_APPEND_FN[dtype.as_numpy_dtype](output_tensor,
                                                               result)
            response.outputs[response_output_name].CopyFrom(output_tensor)
    return response
Example #10
Source File: gradients_impl.py From deep_image_model with Apache License 2.0
def _VerifyGeneratedGradients(grads, op):
  """Verify that gradients are valid in number and type.

  Args:
    grads: List of generated gradients.
    op: Operation for which the gradients were generated.

  Raises:
    ValueError: if the gradients are invalid.
  """
  if len(grads) != len(op.inputs):
    raise ValueError("Num gradients %d generated for op %s do not match num "
                     "inputs %d" % (len(grads), op.node_def, len(op.inputs)))
  for i in xrange(len(grads)):
    grad = grads[i]
    inp = op.inputs[i]
    if grad is not None:
      if not grad.dtype.is_compatible_with(inp.dtype):
        raise ValueError("Gradient type %s generated for op %s does "
                         "not match input type %s" %
                         (dtypes.as_dtype(grad.dtype).name, op.node_def,
                          dtypes.as_dtype(inp.dtype).name))
Example #11
Source File: array_ops.py From deep_image_model with Apache License 2.0
def zeros(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to zero.

  This operation returns a tensor of type `dtype` with shape `shape` and
  all elements set to zero.

  For example:

  ```python
  tf.zeros([3, 4], tf.int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "zeros", [shape]) as name:
    zero = False if dtype == dtypes.bool else 0
    try:
      shape = tensor_shape.as_shape(shape)
      output = constant(zero, shape=shape, dtype=dtype, name=name)
    except (TypeError, ValueError):
      shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
      output = fill(shape, constant(zero, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output
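The zeros() implementation above normalizes its dtype argument with as_dtype(...).base_dtype, so strings, NumPy dtypes, and DType objects are all accepted, and reference ("_ref") dtypes collapse to their value type. A small hedged illustration of that normalization (illustrative inputs only):

import numpy as np
from tensorflow.python.framework import dtypes

for d in ("int32", np.float64, dtypes.bool):
    # base_dtype is a no-op for ordinary dtypes; it only strips the _ref variant.
    print(d, "->", dtypes.as_dtype(d).base_dtype)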
Example #12
Source File: gradients_impl.py From auto-alt-text-lambda-api with MIT License
def _IsTrainable(tensor):
  dtype = dtypes.as_dtype(tensor.dtype)
  return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
                              dtypes.complex64, dtypes.complex128)
Example #13
Source File: test_get_model_metadata_utils.py From model_server with Apache License 2.0
def test_prepare_signature(layers, tensor_key, np_type):
    dtype_model = dtypes.as_dtype(np_type)
    output = _prepare_signature(
        layers=layers, model_keys=tensor_key)

    for key, value in tensor_key.items():
        assert key in output
        assert value in output[key].name
        shape = [d.size for d in output[key].tensor_shape.dim]
        assert list(layers[value].shape) == shape
        tensor_dtype = dtypes.as_dtype(output[key].dtype)
        assert dtype_model == tensor_dtype
Example #14
Source File: strcuture.py From BERT with Apache License 2.0
def __init__(self, dtype, element_shape, dynamic_size, infer_shape):
  self._dtype = dtypes.as_dtype(dtype)
  self._element_shape = tensor_shape.as_shape(element_shape)
  self._dynamic_size = dynamic_size
  self._infer_shape = infer_shape
Example #15
Source File: strcuture.py From BERT with Apache License 2.0
def __init__(self, dtype, dense_shape):
  self._dtype = dtypes.as_dtype(dtype)
  self._dense_shape = tensor_shape.as_shape(dense_shape)
Example #16
Source File: strcuture.py From BERT with Apache License 2.0
def __init__(self, dtype, shape):
  self._dtype = dtypes.as_dtype(dtype)
  self._shape = tensor_shape.as_shape(shape)
Example #17
Source File: gradients_impl.py From deep_image_model with Apache License 2.0
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
  """Fill in default values for grad_ys.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If one of the grad_ys is invalid.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    if grad_y is None:
      with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
        grad_ys[i] = array_ops.fill(
            array_ops.shape(y), constant_op.constant(1, dtype=y.dtype))
    else:
      if grad_y.dtype != y.dtype:
        raise ValueError("Y and ys_grad must be of the same type, "
                         "not y: %s, ys_grad: %s " %
                         (dtypes.as_dtype(y.dtype).name,
                          dtypes.as_dtype(grad_y.dtype).name))
  return grad_ys
Example #18
Source File: data_feeder.py From auto-alt-text-lambda-api with MIT License
def _check_dtype(dtype):
  if dtypes.as_dtype(dtype) == dtypes.float64:
    logging.warn(
        'float64 is not supported by many models, consider casting to float32.')
  return dtype
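The check above works because as_dtype() makes NumPy and TensorFlow type objects directly comparable. A minimal hedged sketch with hypothetical input data (not from the original project):

import numpy as np
from tensorflow.python.framework import dtypes

features = np.zeros((4, 3))  # NumPy arrays default to float64
if dtypes.as_dtype(features.dtype) == dtypes.float64:
    print("float64 input; consider casting to float32")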
Example #19
Source File: ops.py From auto-alt-text-lambda-api with MIT License
def internal_convert_to_tensor_or_indexed_slices(value, dtype=None, name=None,
                                                 as_ref=False):
  """Converts the given object to a `Tensor` or an `IndexedSlices`.

  If `value` is an `IndexedSlices` or `SparseTensor` it is returned
  unmodified. Otherwise, it is converted to a `Tensor` using
  `convert_to_tensor()`.

  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  if isinstance(value, _TensorLike):
    if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
      raise ValueError(
          "Tensor conversion requested dtype %s for Tensor with dtype %s: %r"
          % (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
    return value
  else:
    return internal_convert_to_tensor(value,
                                      dtype=dtype,
                                      name=name,
                                      as_ref=as_ref)
Example #20
Source File: linear_operator_test_util.py From lambda-packs with MIT License
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32,
                  seed=None):
  """Tensor with (possibly complex) Gaussian entries.

  Samples are distributed like

  ```
  N(mean, stddev^2), if dtype is real,
  X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```

  Args:
    shape: `TensorShape` or Python list.  Shape of the returned tensor.
    mean: `Tensor` giving mean of normal to sample from.
    stddev: `Tensor` giving stdev of normal to sample from.
    dtype: `TensorFlow` `dtype` or numpy dtype
    seed: Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_normal"):
    samples = random_ops.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 1234
      more_samples = random_ops.random_normal(
          shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples
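The complex-number handling in random_normal() relies on two DType properties, is_complex and real_dtype, both available on the result of as_dtype(). A quick hedged illustration:

from tensorflow.python.framework import dtypes

c = dtypes.as_dtype("complex64")
print(c.is_complex, c.real_dtype)  # True float32
f = dtypes.as_dtype("float32")
print(f.is_complex, f.real_dtype)  # False float32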
Example #21
Source File: importer.py From auto-alt-text-lambda-api with MIT License
def _SingleArgToTypes(node_def, arg_def):
  types = _ArgToTypesNoRef(node_def, arg_def)
  if arg_def.is_ref:
    return [dtypes.as_dtype(dt)._as_ref.as_datatype_enum for dt in types]  # pylint: disable=protected-access
  return types
Example #22
Source File: op_def_library.py From auto-alt-text-lambda-api with MIT License
def _MakeType(v, attr_def):
  try:
    v = dtypes.as_dtype(v).base_dtype
  except TypeError:
    raise TypeError("Expected DataType for argument '%s' not %s." %
                    (attr_def.name, repr(v)))
  i = v.as_datatype_enum
  _SatisfiesTypeConstraint(i, attr_def, param_name=attr_def.name)
  return i
Example #23
Source File: sparse_tensor.py From auto-alt-text-lambda-api with MIT License
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
  """Converts value to a `SparseTensor` or `Tensor`.

  Args:
    value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
      registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
      type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.

  Returns:
    A `SparseTensor` or `Tensor` based on `value`.

  Raises:
    RuntimeError: If result type is incompatible with `dtype`.
  """
  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  if isinstance(value, SparseTensorValue):
    value = SparseTensor.from_value(value)
  if isinstance(value, SparseTensor):
    if dtype and not dtype.is_compatible_with(value.dtype):
      raise RuntimeError(
          "Sparse dtype: requested = %s, actual = %s" % (
              dtype.name, value.dtype.name))
    return value
  return ops.internal_convert_to_tensor(
      value, dtype=dtype, name=name)
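Note that the optional dtype is normalized with as_dtype() before is_compatible_with() is called on it. A hedged illustration of that compatibility check (illustrative dtypes only):

from tensorflow.python.framework import dtypes

requested = dtypes.as_dtype("float32")
print(requested.is_compatible_with(dtypes.float32))  # True
print(requested.is_compatible_with(dtypes.int32))    # False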
Example #24
Source File: gradient_checker.py From auto-alt-text-lambda-api with MIT License
def _compute_gradient(x,
                      x_shape,
                      dx,
                      y,
                      y_shape,
                      dy,
                      x_init_value=None,
                      delta=1e-3,
                      extra_feed_dict=None):
  """Computes the theoretical and numerical jacobian."""
  t = dtypes.as_dtype(x.dtype)
  allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64,
                   dtypes.complex64, dtypes.complex128]
  assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
  assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name

  if x_init_value is not None:
    i_shape = list(x_init_value.shape)
    assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
        x_shape, i_shape)
    x_data = x_init_value
  else:
    if t == dtypes.float16:
      dtype = np.float16
    elif t == dtypes.float32:
      dtype = np.float32
    else:
      dtype = np.float64
    x_data = np.asfarray(np.random.random_sample(x_shape), dtype=dtype)

  jacob_t = _compute_theoretical_jacobian(
      x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict)
  jacob_n = _compute_numeric_jacobian(
      x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict)
  return jacob_t, jacob_n
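The explicit float16/float32/float64 branching above picks a matching NumPy dtype by hand; the DType returned by as_dtype() also exposes this mapping directly through as_numpy_dtype. A hedged sketch of that alternative (hypothetical shape, not the original code):

import numpy as np
from tensorflow.python.framework import dtypes

t = dtypes.as_dtype("float32")
x_data = np.asfarray(np.random.random_sample((2, 3)), dtype=t.as_numpy_dtype)
print(x_data.dtype)  # float32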
Example #25
Source File: linear_operator_test_util.py From lambda-packs with MIT License
def random_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32,
                   seed=None):
  """Tensor with (possibly complex) Uniform entries.

  Samples are distributed like

  ```
  Uniform[minval, maxval], if dtype is real,
  X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```

  Args:
    shape: `TensorShape` or Python list.  Shape of the returned tensor.
    minval: `0-D` `Tensor` giving the minimum values.
    maxval: `0-D` `Tensor` giving the maximum values.
    dtype: `TensorFlow` `dtype` or Python dtype
    seed: Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_uniform"):
    samples = random_ops.random_uniform(
        shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 12345
      more_samples = random_ops.random_uniform(
          shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval,
          seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples
Example #26
Source File: math_ops.py From auto-alt-text-lambda-api with MIT License
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling.  If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    if value.dtype.min < dtype.min:
      value = gen_math_ops.maximum(value,
                                   ops.convert_to_tensor(
                                       dtype.min, dtype=value.dtype,
                                       name="min"))
    if value.dtype.max > dtype.max:
      value = gen_math_ops.minimum(value,
                                   ops.convert_to_tensor(
                                       dtype.max, dtype=value.dtype,
                                       name="max"))
    return cast(value, dtype, name=name)
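The same saturating behavior is exposed through the public tf.saturate_cast API. A hedged usage sketch with illustrative values (assumes eager execution, e.g. TensorFlow 2.x):

import tensorflow as tf

x = tf.constant([-20.0, 130.0, 300.0])
# Values are clamped to the representable range of uint8 ([0, 255]) before the cast.
print(tf.saturate_cast(x, tf.uint8))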
Example #27
Source File: math_ops.py From auto-alt-text-lambda-api with MIT License
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor`) to `dtype`.

  For example:

  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      values_cast = cast(x.values, base_type, name=name)
      return sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    else:
      # TODO(touts): Handle what Josh said.
      #
      # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype.base_dtype == base_type:
        return x
      return gen_math_ops.cast(x, base_type, name=name)
Example #28
Source File: utils_impl.py From auto-alt-text-lambda-api with MIT License
def build_tensor_info(tensor):
  """Utility function to build TensorInfo proto.

  Args:
    tensor: Tensor whose name, dtype and shape are used to build the TensorInfo.

  Returns:
    A TensorInfo protocol buffer constructed based on the supplied argument.
  """
  dtype_enum = dtypes.as_dtype(tensor.dtype).as_datatype_enum
  return meta_graph_pb2.TensorInfo(
      name=tensor.name,
      dtype=dtype_enum,
      tensor_shape=tensor.get_shape().as_proto())
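Here as_dtype() bridges a runtime dtype and the DataType enum used in TensorFlow protocol buffers via as_datatype_enum. A hedged illustration (enum values as defined in types.proto):

from tensorflow.python.framework import dtypes

enum_value = dtypes.as_dtype("float32").as_datatype_enum
print(enum_value)                   # 1, i.e. DT_FLOAT
print(dtypes.as_dtype(enum_value))  # as_dtype also accepts the enum and maps it back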
Example #29
Source File: gradients_impl.py From auto-alt-text-lambda-api with MIT License
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
  """Fill in default values for grad_ys.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If one of the grad_ys is invalid.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    if grad_y is None:
      with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
        grad_ys[i] = array_ops.fill(
            array_ops.shape(y), constant_op.constant(1, dtype=y.dtype))
    else:
      if grad_y.dtype != y.dtype:
        raise ValueError("Y and ys_grad must be of the same type, "
                         "not y: %s, ys_grad: %s " %
                         (dtypes.as_dtype(y.dtype).name,
                          dtypes.as_dtype(grad_y.dtype).name))
  return grad_ys
Example #30
Source File: array_ops.py From auto-alt-text-lambda-api with MIT License
def zeros(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to zero.

  This operation returns a tensor of type `dtype` with shape `shape` and
  all elements set to zero.

  For example:

  ```python
  tf.zeros([3, 4], tf.int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "zeros", [shape]) as name:
    zero = False if dtype == dtypes.bool else 0
    try:
      shape = tensor_shape.as_shape(shape)
      output = constant(zero, shape=shape, dtype=dtype, name=name)
    except (TypeError, ValueError):
      shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
      output = fill(shape, constant(zero, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output