Python tensorflow.python.framework.dtypes.float32() Examples
The following are 30 code examples of tensorflow.python.framework.dtypes.float32().
You may also want to check out all available functions/classes of the module tensorflow.python.framework.dtypes, or try the search function.
Example #1
Source File: exporter_test.py From vehicle_counting_tensorflow with MIT License
def _save_checkpoint_from_mock_model(self,
                                     checkpoint_path,
                                     use_moving_averages,
                                     enable_quantization=False):
  g = tf.Graph()
  with g.as_default():
    mock_model = FakeModel()
    preprocessed_inputs, true_image_shapes = mock_model.preprocess(
        tf.placeholder(tf.float32, shape=[None, None, None, 3]))
    predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
    mock_model.postprocess(predictions, true_image_shapes)
    if use_moving_averages:
      tf.train.ExponentialMovingAverage(0.0).apply()
    tf.train.get_or_create_global_step()
    if enable_quantization:
      graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
      graph_rewriter_config.quantization.delay = 500000
      graph_rewriter_fn = graph_rewriter_builder.build(
          graph_rewriter_config, is_training=False)
      graph_rewriter_fn()
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      saver.save(sess, checkpoint_path)
Example #2
Source File: metrics_impl.py From lambda-packs with MIT License
def _create_local(name, shape, collections=None, validate_shape=True,
                  dtype=dtypes.float32):
  """Creates a new local variable.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    collections: A list of collection names to which the Variable will be
      added.
    validate_shape: Whether to validate the shape of the variable.
    dtype: Data type of the variables.

  Returns:
    The created variable.
  """
  # Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES
  collections = list(collections or [])
  collections += [ops.GraphKeys.LOCAL_VARIABLES]
  return variable_scope.variable(
      array_ops.zeros(shape, dtype=dtype),
      name=name,
      trainable=False,
      collections=collections,
      validate_shape=validate_shape)
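For orientation, here is a minimal sketch of how a helper like this is used by streaming metrics. It is illustrative only and assumes the same internal imports (`ops`, `array_ops`, `variable_scope`) as the file above:

# Accumulators for a streaming mean; both default to dtypes.float32.
total = _create_local('total', shape=[])
count = _create_local('count', shape=[])
# Local variables land in ops.GraphKeys.LOCAL_VARIABLES and are initialized
# by tf.local_variables_initializer(), not the global initializer.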
Example #3
Source File: math_ops.py From lambda-packs with MIT License
def log_sigmoid(x, name=None):
  """Computes log sigmoid of `x` element-wise.

  Specifically, `y = log(1 / (1 + exp(-x)))`.  For numerical stability,
  we use `y = -tf.nn.softplus(-x)`.

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.
  """
  with ops.name_scope(name, "LogSigmoid", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._neg(gen_nn_ops.softplus(-x), name=name)
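The softplus form matters for large negative inputs, where the naive formula underflows. A quick sketch, assuming a TF 1.x session (values illustrative):

import tensorflow as tf

x = tf.constant([-200.0, 0.0, 200.0])
naive = tf.log(tf.sigmoid(x))   # -inf at x = -200: sigmoid underflows to 0
stable = -tf.nn.softplus(-x)    # approx. [-200.0, -0.693, 0.0]
with tf.Session() as sess:
    print(sess.run([naive, stable]))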
Example #4
Source File: session_debug_testlib.py From lambda-packs with MIT License
def testGraphStructureLookupGivesNodesAndAttributes(self):
  u_name, _, _, dump = self._session_run_for_graph_structure_lookup()

  u_read_name = u_name + "/read"

  # Test node name list lookup of the DebugDumpDir object.
  node_names = dump.nodes()
  self.assertTrue(u_name in node_names)
  self.assertTrue(u_read_name in node_names)

  # Test querying node attributes.
  u_attr = dump.node_attributes(u_name)
  self.assertEqual(dtypes.float32, u_attr["dtype"].type)
  self.assertEqual(1, len(u_attr["shape"].shape.dim))
  self.assertEqual(2, u_attr["shape"].shape.dim[0].size)

  with self.assertRaisesRegexp(ValueError, "No node named \"foo\" exists"):
    dump.node_attributes("foo")
Example #5
Source File: math_ops.py From lambda-packs with MIT License
def round(x, name=None):
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even.  Also known as bankers rounding. If you want to round
  according to the current system rounding mode use tf::cint.
  For example:

  ```python
  # 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
  tf.round(a) ==> [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.is_integer:
    return x
  else:
    return gen_math_ops.round(x, name=name)
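A quick check of the half-to-even ("bankers") behaviour, assuming a TF 1.x session:

import tensorflow as tf

a = tf.constant([0.5, 1.5, 2.5, 3.5])
with tf.Session() as sess:
    # Halves round to the even neighbour, unlike Python 2's round().
    print(sess.run(tf.round(a)))  # [0. 2. 2. 4.]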
Example #6
Source File: omniglot.py From glas with Apache License 2.0
def dataset(directory, subset, num_folds, fold, holdout):
  """ Return the omniglot dataset """
  local_file = learn.datasets.base.maybe_download(
      'omniglot.mat', directory, _DOWNLOAD_URL)
  data = scipy.io.loadmat(local_file)

  images = data[_SUBSET_TO_GROUP[subset]].astype(np.float32)
  images = images.transpose([1, 0]).reshape([-1] + IMAGE_SHAPE)

  if subset == 'train':
    images = images[:-_VALIDATION_SIZE]
  elif subset == 'validate':
    images = images[-_VALIDATION_SIZE:]
  images = get_folds(images, num_folds, fold, holdout)

  return slim.dataset.Dataset(
      images, None, None, images.shape[0], _ITEMS_TO_DESCRIPTIONS,
      data_shape=IMAGE_SHAPE)
Example #7
Source File: variable_scope.py From lambda-packs with MIT License
def __init__(self,
             reuse,
             name="",
             initializer=None,
             regularizer=None,
             caching_device=None,
             partitioner=None,
             custom_getter=None,
             name_scope="",
             dtype=dtypes.float32,
             use_resource=None):
  """Creates a new VariableScope with the given properties."""
  self._name = name
  self._initializer = initializer
  self._regularizer = regularizer
  self._reuse = reuse
  self._caching_device = caching_device
  self._partitioner = partitioner
  self._custom_getter = custom_getter
  self._name_scope = name_scope
  self._dtype = dtype
  self._use_resource = use_resource
Example #8
Source File: nn_ops.py From lambda-packs with MIT License
def softmax(logits, dim=-1, name=None):
  """Computes softmax activations.

  For each batch `i` and class `j` we have

      softmax = exp(logits) / reduce_sum(exp(logits), dim)

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    dim: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
      dimension of `logits`.
  """
  return _softmax(logits, gen_nn_ops._softmax, dim, name)
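A short demonstration of the formula via the public wrapper, assuming a TF 1.x session:

import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
probs = tf.nn.softmax(logits)  # exp(logits) / reduce_sum(exp(logits), -1)
with tf.Session() as sess:
    print(sess.run(probs))                     # approx. [[0.090 0.245 0.665]]
    print(sess.run(tf.reduce_sum(probs, -1)))  # each row sums to 1.0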
Example #9
Source File: nn_ops.py From lambda-packs with MIT License
def log_softmax(logits, dim=-1, name=None):
  """Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax = logits - log(reduce_sum(exp(logits), dim))

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    dim: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
      dimension of `logits`.
  """
  return _softmax(logits, gen_nn_ops._log_softmax, dim, name)
Example #10
Source File: numerics.py From lambda-packs with MIT License
def add_check_numerics_ops():
  """Connect a `check_numerics` to every floating point tensor.

  `check_numerics` operations themselves are added for each `half`, `float`,
  or `double` tensor in the graph. For all ops in the graph, the
  `check_numerics` op for all of its (`half`, `float`, or `double`) inputs
  is guaranteed to run before the `check_numerics` op on any of its outputs.

  Returns:
    A `group` op depending on all `check_numerics` ops added.
  """
  check_op = []
  # This code relies on the ordering of ops in get_operations().
  # The producer of a tensor always comes before that tensor's consumer in
  # this list. This is true because get_operations() returns ops in the order
  # added, and an op can only be added after its inputs are added.
  for op in ops.get_default_graph().get_operations():
    for output in op.outputs:
      if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
        message = op.name + ":" + str(output.value_index)
        with ops.control_dependencies(check_op):
          check_op = [array_ops.check_numerics(output, message=message)]
  return control_flow_ops.group(*check_op)
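A minimal usage sketch via the public alias `tf.add_check_numerics_ops`, assuming a TF 1.x graph-mode session; the log example is illustrative:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None])
y = tf.log(x)  # produces -inf/NaN for non-positive inputs

check = tf.add_check_numerics_ops()  # call after the graph is built
with tf.Session() as sess:
    # Running the check alongside the computation raises
    # InvalidArgumentError naming the offending op if y has Inf or NaN.
    sess.run([y, check], feed_dict={x: [1.0, 2.0]})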
Example #11
Source File: kitti_seg_input.py From KittiSeg with MIT License
def shuffle_join(tensor_list_list, capacity, min_ad, phase):
  name = 'shuffel_input'
  types = _dtypes(tensor_list_list)
  queue = data_flow_ops.RandomShuffleQueue(
      capacity=capacity, min_after_dequeue=min_ad, dtypes=types)

  # Build enqueue operations
  _enqueue_join(queue, tensor_list_list)

  full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_ad),
                        dtypes.float32) * (1. / (capacity - min_ad)))
  # Note that name contains a '/' at the end so we intentionally do not place
  # a '/' after %s below.
  summary_name = (
      "queue/%s/fraction_over_%d_of_%d_full" %
      (name + '_' + phase, min_ad, capacity - min_ad))
  tf.summary.scalar(summary_name, full)

  dequeued = queue.dequeue(name='shuffel_deqeue')
  # dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
  return dequeued
Example #12
Source File: math_ops.py From lambda-packs with MIT License
def erf(x, name=None):
  """Computes the Gauss error function of `x` element-wise.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Erf", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      x_erf = gen_math_ops.erf(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=x_erf, dense_shape=x.dense_shape)
    else:
      return gen_math_ops.erf(x, name=name)
Example #13
Source File: math_ops.py From lambda-packs with MIT License
def sqrt(x, name=None):
  r"""Computes square root of x element-wise.

  I.e., \\(y = \sqrt{x} = x^{1/2}\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Sqrt", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      x_sqrt = gen_math_ops.sqrt(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=x_sqrt, dense_shape=x.dense_shape)
    else:
      return gen_math_ops.sqrt(x, name=name)
Example #14
Source File: math_ops.py From lambda-packs with MIT License
def square(x, name=None):
  r"""Computes square of x element-wise.

  I.e., \\(y = x * x = x^2\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `int32`, `int64`, `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Square", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      x_square = gen_math_ops.square(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=x_square, dense_shape=x.dense_shape)
    else:
      return gen_math_ops.square(x, name=name)
Example #15
Source File: train_crnn.py From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License
def distort_color(image, color_ordering=0, scope=None):
  """Randomly augment the image (brightness and contrast).

  :param image: input image
  :param color_ordering: augmentation mode
  :param scope: name scope
  :return: the augmented image
  """
  with tf.name_scope(scope, 'distort_color', [image]):
    if color_ordering == 0:
      # Mode 0: adjust brightness first, then contrast.
      rand_temp = random_ops.random_uniform(
          [], -55, 20, seed=None)  # [-70, 30] for generate img, [-50, 20] for true img
      image = math_ops.add(image, math_ops.cast(rand_temp, dtypes.float32))
      image = tf.image.random_contrast(
          image, lower=0.45, upper=1.5)  # [0.3, 1.75] for generate img, [0.45, 1.5] for true img
    else:
      image = tf.image.random_contrast(image, lower=0.45, upper=1.5)
      rand_temp = random_ops.random_uniform([], -55, 30, seed=None)
      image = math_ops.add(image, math_ops.cast(rand_temp, dtypes.float32))

    # The random_* ops do not necessarily clamp.
    print(color_ordering)
    return tf.clip_by_value(image, 0.0, 255.0)  # clamp to [0, 255]

##########################################################################
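A hypothetical pipeline step showing where this augmentation slots in; `image_tensor` and its [0, 255] float range are assumptions, not taken from the project:

import tensorflow as tf

# `image_tensor`: float32 image in [0, 255], e.g. a decoded training crop.
image_tensor = tf.random_uniform([32, 100, 3], minval=0.0, maxval=255.0)
augmented = distort_color(image_tensor, color_ordering=0)
# Output stays clipped to [0, 255], so downstream normalization is unchanged.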
Example #16
Source File: exporter_test.py From vehicle_counting_tensorflow with MIT License
def test_rewrite_nn_resize_op(self):
  g = tf.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
    y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8))
    s = ops.nearest_neighbor_upsampling(x, 2)
    t = s + y
    exporter.rewrite_nn_resize_op()

  resize_op_found = False
  for op in g.get_operations():
    if op.type == 'ResizeNearestNeighbor':
      resize_op_found = True
      self.assertEqual(op.inputs[0], x)
      self.assertEqual(op.outputs[0].consumers()[0], t.op)
      break

  self.assertTrue(resize_op_found)
Example #17
Source File: exporter_test.py From vehicle_counting_tensorflow with MIT License
def postprocess(self, prediction_dict, true_image_shapes):
  with tf.control_dependencies(prediction_dict.values()):
    postprocessed_tensors = {
        'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5],
                                         [0.5, 0.5, 0.8, 0.8]],
                                        [[0.5, 0.5, 1.0, 1.0],
                                         [0.0, 0.0, 0.0, 0.0]]], tf.float32),
        'detection_scores': tf.constant([[0.7, 0.6],
                                         [0.9, 0.0]], tf.float32),
        'detection_classes': tf.constant([[0, 1],
                                          [1, 0]], tf.float32),
        'num_detections': tf.constant([2, 1], tf.float32)
    }
    if self._add_detection_keypoints:
      postprocessed_tensors['detection_keypoints'] = tf.constant(
          np.arange(48).reshape([2, 2, 6, 2]), tf.float32)
    if self._add_detection_masks:
      postprocessed_tensors['detection_masks'] = tf.constant(
          np.arange(64).reshape([2, 2, 4, 4]), tf.float32)
  return postprocessed_tensors
Example #18
Source File: spectral_grad.py From lambda-packs with MIT License
def _IFFT3DGrad(_, grad):
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
  return spectral_ops.fft3d(grad) * math_ops.complex(rsize, 0.)
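Why the gradient of an inverse FFT is a forward FFT scaled by 1/N: the inverse transform carries a 1/N factor, so its adjoint is the forward transform divided by N. A NumPy check of the adjoint identity (illustrative, not from the file above):

import numpy as np

x = np.random.randn(2, 2, 2) + 1j * np.random.randn(2, 2, 2)
g = np.random.randn(2, 2, 2) + 1j * np.random.randn(2, 2, 2)
n = x.size
# <ifftn(x), g> == <x, fftn(g) / n>, up to float error.
lhs = np.vdot(np.fft.ifftn(x), g)
rhs = np.vdot(x, np.fft.fftn(g) / n)
print(np.allclose(lhs, rhs))  # True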
Example #19
Source File: math_ops.py From lambda-packs with MIT License
def pow(x, y, name=None):
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
      or `complex128`.
    y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
      or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    return gen_math_ops._pow(x, y, name=name)

# pylint: disable=redefined-builtin,redefined-outer-name
Example #20
Source File: init_ops.py From lambda-packs with MIT License
def __init__(self, dtype=dtypes.float32):
  self.dtype = dtypes.as_dtype(dtype)
Example #21
Source File: linalg_ops.py From lambda-packs with MIT License
def cholesky_solve(chol, rhs, name=None):
  """Solves systems of linear eqns `A X = RHS`, given Cholesky factorizations.

  ```python
  # Solve 10 separate 2x2 linear systems:
  A = ...  # shape 10 x 2 x 2
  RHS = ...  # shape 10 x 2 x 1
  chol = tf.cholesky(A)  # shape 10 x 2 x 2
  X = tf.cholesky_solve(chol, RHS)  # shape 10 x 2 x 1
  # tf.matmul(A, X) ~ RHS
  X[3, :, 0]  # Solution to the linear system A[3, :, :] x = RHS[3, :, 0]

  # Solve five linear systems (K = 5) for every member of the length 10 batch.
  A = ...  # shape 10 x 2 x 2
  RHS = ...  # shape 10 x 2 x 5
  ...
  X[3, :, 2]  # Solution to the linear system A[3, :, :] x = RHS[3, :, 2]
  ```

  Args:
    chol: A `Tensor`. Must be `float32` or `float64`, shape is `[..., M, M]`.
      Cholesky factorization of `A`, e.g. `chol = tf.cholesky(A)`.
      For that reason, only the lower triangular parts (including the diagonal)
      of the last two dimensions of `chol` are used. The strictly upper part is
      assumed to be zero and not accessed.
    rhs: A `Tensor`, same type as `chol`, shape is `[..., M, K]`.
    name: A name to give this `Op`. Defaults to `cholesky_solve`.

  Returns:
    Solution to `A x = rhs`, shape `[..., M, K]`.
  """
  # To solve C C^* x = rhs, we
  # 1. Solve C y = rhs for y, thus y = C^* x
  # 2. Solve C^* x = y for x
  with ops.name_scope(name, 'cholesky_solve', [chol, rhs]):
    y = gen_linalg_ops.matrix_triangular_solve(
        chol, rhs, adjoint=False, lower=True)
    x = gen_linalg_ops.matrix_triangular_solve(
        chol, y, adjoint=True, lower=True)
    return x
Example #22
Source File: math_grad.py From lambda-packs with MIT License
def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.

  input_shape = array_ops.shape(op.inputs[0])
  # Reshape reduction indices for the case where the parameter is a scalar
  reduction_indices = array_ops.reshape(op.inputs[1], [-1])

  # Expand grad to full input shape
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)

  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here. We put all the shape-related ops on CPU to avoid
  # copying back and forth, and since listdiff is CPU only.
  with ops.device("/cpu:0"):
    reduced = math_ops.cast(reduction_indices, dtypes.int32)
    idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
    other, _ = array_ops.setdiff1d(idx, reduced)
    perm = array_ops.concat([reduced, other], 0)
    reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
    other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))

  # Calculate product, leaving out the current entry
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  y = array_ops.reshape(left * right, permuted_shape)

  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None
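The core trick — the product of all entries except the current one, built from two exclusive cumprods so zeros are handled — in plain NumPy (illustrative):

import numpy as np

x = np.array([2., 3., 0., 5.])
left = np.concatenate(([1.], np.cumprod(x[:-1])))              # prod of x[:i]
right = np.concatenate((np.cumprod(x[::-1])[::-1][1:], [1.]))  # prod of x[i+1:]
# left * right == product of all entries except x[i], even with zeros present:
print(left * right)  # [ 0.  0. 30.  0.]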
Example #23
Source File: math_ops.py From lambda-packs with MIT License
def sign(x, name=None):
  """Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1` if `x < 0`; 0 if `x == 0` or `tf.is_nan(x)`; 1 if `x > 0`.

  Zero is returned for NaN inputs.

  For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `int32`, `int64`, `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to numpy.sign except for the behaviour for input values of NaN.
  @end_compatibility
  """
  with ops.name_scope(name, "Sign", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      x_sign = gen_math_ops.sign(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=x_sign, dense_shape=x.dense_shape)
    else:
      return gen_math_ops.sign(x, name=name)
Example #24
Source File: image_ops_impl.py From lambda-packs with MIT License
def per_image_standardization(image):
  """Linearly scales `image` to have zero mean and unit norm.

  This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
  of all values in image, and
  `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.

  `stddev` is the standard deviation of all values in `image`. It is capped
  away from zero to protect against division by 0 when handling uniform
  images.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`.

  Returns:
    The standardized image with same shape as `image`.

  Raises:
    ValueError: if the shape of 'image' is incompatible with this function.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  num_pixels = math_ops.reduce_prod(array_ops.shape(image))

  image = math_ops.cast(image, dtype=dtypes.float32)
  image_mean = math_ops.reduce_mean(image)

  variance = (math_ops.reduce_mean(math_ops.square(image)) -
              math_ops.square(image_mean))
  variance = gen_nn_ops.relu(variance)
  stddev = math_ops.sqrt(variance)

  # Apply a minimum normalization that protects us against uniform images.
  min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
  pixel_value_scale = math_ops.maximum(stddev, min_stddev)
  pixel_value_offset = image_mean

  image = math_ops.subtract(image, pixel_value_offset)
  image = math_ops.div(image, pixel_value_scale)
  return image
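Usage via the public endpoint, assuming a TF 1.x session:

import tensorflow as tf

image = tf.random_uniform([64, 64, 3], maxval=255.0, dtype=tf.float32)
standardized = tf.image.per_image_standardization(image)
with tf.Session() as sess:
    out = sess.run(standardized)
    print(out.mean(), out.std())  # approx. 0.0 and 1.0 for non-uniform images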
Example #25
Source File: gradient_checker.py From lambda-packs with MIT License
def _compute_gradient(x,
                      x_shape,
                      dx,
                      y,
                      y_shape,
                      dy,
                      x_init_value=None,
                      delta=1e-3,
                      extra_feed_dict=None):
  """Computes the theoretical and numerical jacobian."""
  t = dtypes.as_dtype(x.dtype)
  allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64,
                   dtypes.complex64, dtypes.complex128]
  assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
  assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name

  if x_init_value is not None:
    i_shape = list(x_init_value.shape)
    assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
        x_shape, i_shape)
    x_data = x_init_value
  else:
    x_data = np.random.random_sample(x_shape).astype(t.as_numpy_dtype)
    if t.is_complex:
      x_data.imag = np.random.random_sample(x_shape)

  jacob_t = _compute_theoretical_jacobian(
      x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict)
  jacob_n = _compute_numeric_jacobian(
      x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict)
  return jacob_t, jacob_n
Example #26
Source File: math_ops.py From lambda-packs with MIT License
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics where all integer
  arguments are cast to floating types first.  This op is generated by normal
  `x / y` division in Python 3 and in Python 2.7 with
  `from __future__ import division`.  If you want integer division that rounds
  down, use `x // y` or `tf.floordiv`.

  `x` and `y` must have the same numeric type.  If the inputs are floating
  point, the output will have the same type.  If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for
  `int32` and `int64` (matching the behavior of Numpy).

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name)
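A quick check of the integer-to-float casting rule, assuming a TF 1.x session:

import tensorflow as tf

a = tf.constant([3, 4], dtype=tf.int32)
b = tf.constant([2, 3], dtype=tf.int32)
q = tf.truediv(a, b)  # int32 inputs are cast to float64, per the docstring
with tf.Session() as sess:
    print(sess.run(q))  # [1.5        1.33333333]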
Example #27
Source File: math_ops.py From lambda-packs with MIT License
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.
  """
  return cast(x, dtypes.float32, name=name)
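A one-line sketch of the common use — casting integer labels before averaging, assuming a TF 1.x session:

import tensorflow as tf

labels = tf.constant([0, 1, 1, 0])           # int32
ratio = tf.reduce_mean(tf.to_float(labels))  # cast first, then average
with tf.Session() as sess:
    print(sess.run(ratio))  # 0.5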
Example #28
Source File: init_ops.py From lambda-packs with MIT License
def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
  self.mean = mean
  self.stddev = stddev
  self.seed = seed
  self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
Example #29
Source File: math_ops.py From lambda-packs with MIT License
def imag(input, name=None):
  r"""Returns the imaginary part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor
  of type `float32` or `float64` that is the imaginary part of each element
  in `input`. All elements in `input` must be complex numbers of the form
  \\(a + bj\\), where *a* is the real part and *b* is the imaginary part
  returned by this operation.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.imag(input) ==> [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)

# pylint: enable=redefined-outer-name,redefined-builtin
Example #30
Source File: math_ops.py From lambda-packs with MIT License
def real(input, name=None):
  r"""Returns the real part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor
  of type `float32` or `float64` that is the real part of each element in
  `input`. All elements in `input` must be complex numbers of the form
  \\(a + bj\\), where *a* is the real part returned by this operation and *b*
  is the imaginary part.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.real(input) ==> [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    real_dtype = input.dtype.real_dtype
    if input.dtype.base_dtype == real_dtype:
      return input
    return gen_math_ops.real(input, Tout=real_dtype, name=name)