Python tensorflow.is_tensor() Examples
The following are 30 code examples of tensorflow.is_tensor(), collected from open-source projects. Each example lists the source file and project it was taken from. You may also want to check out all available functions and classes of the module tensorflow.
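Before the examples, here is a minimal sketch (not taken from any of the projects below) of what tf.is_tensor() returns for a few common input types:

import numpy as np
import tensorflow as tf

print(tf.is_tensor(tf.constant([1.0, 2.0])))  # True: a tf.Tensor
print(tf.is_tensor(np.array([1.0, 2.0])))     # False: NumPy arrays are not TF tensors
print(tf.is_tensor([1.0, 2.0]))               # False: neither are plain Python lists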
Example #1
Source File: quantization.py from model-optimization with Apache License 2.0
def __init__(self, bits=8, stochastic=True):
    """Initializer for the PerChannelUniformQuantizationEncodingStage.

    Args:
        bits: The number of bits to quantize to. Must be an integer between 1
            and 16. Can be either a TensorFlow or a Python value.
        stochastic: A Python bool, whether to use stochastic or deterministic
            rounding. If `True`, the encoding is randomized and on expectation
            unbiased. If `False`, the encoding is deterministic.

    Raises:
        ValueError: The inputs do not satisfy the above constraints.
    """
    if (not tf.is_tensor(bits) and bits not in self._ALLOWED_BITS_ARG):
        raise ValueError('The bits argument must be an integer between 1 and 16.')
    self._bits = bits
    if not isinstance(stochastic, bool):
        raise TypeError('The stochastic argument must be a bool.')
    self._stochastic = stochastic
Example #2
Source File: core_encoder.py from model-optimization with Apache License 2.0
def _decode_after_sum_impl(self, encoded_tensors, decode_params, num_summands, shape):
    """Implementation for the `decode_after_sum` method."""
    if not self.stage.commutes_with_sum:
        # This should have been decoded earlier in the decode_before_sum method.
        assert tf.is_tensor(encoded_tensors)
        return encoded_tensors

    temp_encoded_tensors = {}
    for key, value in six.iteritems(encoded_tensors):
        if key in self.children:
            with tf.compat.v1.name_scope(None, '/'.join([self.stage.name, key])):
                temp_encoded_tensors[key] = self.children[key]._decode_after_sum_impl(  # pylint: disable=protected-access
                    value, decode_params[EncoderKeys.CHILDREN][key],
                    num_summands, shape[EncoderKeys.CHILDREN][key])
        else:
            temp_encoded_tensors[key] = value
    return self.stage.decode(temp_encoded_tensors,
                             decode_params[EncoderKeys.PARAMS],
                             num_summands=num_summands,
                             shape=shape[EncoderKeys.SHAPE])
Example #3
Source File: test_utils.py from model-optimization with Apache License 2.0
def maybe_evaluate(self, fetches, session=None):
    """Evaluates `fetches`, if containing any `Tensor` objects.

    Args:
        fetches: Any nested structure compatible with `tf.nest`.
        session: Optional. A `tf.Session` object in the context of which the
            evaluation is to happen.

    Returns:
        `fetches` with any `Tensor` objects replaced by numpy values.
    """
    if any((tf.is_tensor(t) for t in tf.nest.flatten(fetches))):
        if session:
            fetches = session.run(fetches)
        else:
            fetches = self.evaluate(fetches)
    return fetches
Example #4
Source File: saving.py from alibi-detect with Apache License 2.0
def state_aegmm(od: OutlierAEGMM) -> Dict:
    """
    OutlierAEGMM parameters to save.

    Parameters
    ----------
    od
        Outlier detector object.
    """
    if not all(tf.is_tensor(_) for _ in [od.phi, od.mu, od.cov, od.L, od.log_det_cov]):
        logger.warning('Saving AEGMM detector that has not been fit.')

    state_dict = {'threshold': od.threshold,
                  'n_gmm': od.aegmm.n_gmm,
                  'recon_features': od.aegmm.recon_features,
                  'phi': od.phi,
                  'mu': od.mu,
                  'cov': od.cov,
                  'L': od.L,
                  'log_det_cov': od.log_det_cov}
    return state_dict
Example #5
Source File: saving.py from alibi-detect with Apache License 2.0
def state_vaegmm(od: OutlierVAEGMM) -> Dict:
    """
    OutlierVAEGMM parameters to save.

    Parameters
    ----------
    od
        Outlier detector object.
    """
    if not all(tf.is_tensor(_) for _ in [od.phi, od.mu, od.cov, od.L, od.log_det_cov]):
        logger.warning('Saving VAEGMM detector that has not been fit.')

    state_dict = {'threshold': od.threshold,
                  'samples': od.samples,
                  'n_gmm': od.vaegmm.n_gmm,
                  'latent_dim': od.vaegmm.latent_dim,
                  'beta': od.vaegmm.beta,
                  'recon_features': od.vaegmm.recon_features,
                  'phi': od.phi,
                  'mu': od.mu,
                  'cov': od.cov,
                  'L': od.L,
                  'log_det_cov': od.log_det_cov}
    return state_dict
Example #6
Source File: py_utils.py from model-optimization with Apache License 2.0
def static_or_dynamic_shape(value):
    """Returns shape of the input `Tensor` or a `np.ndarray`.

    If `value` is a `np.ndarray` or a `Tensor` with statically known shape, it
    returns a Python object. Otherwise, returns result of `tf.shape(value)`.

    Args:
        value: A `Tensor` or a `np.ndarray` object.

    Returns:
        Static or dynamic shape of `value`.

    Raises:
        TypeError: If the input is not a `Tensor` or a `np.ndarray` object.
    """
    if tf.is_tensor(value):
        return value.shape if value.shape.is_fully_defined() else tf.shape(value)
    elif isinstance(value, np.ndarray):
        return value.shape
    else:
        raise TypeError('The provided input is not a Tensor or numpy array.')
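As a rough usage sketch (assuming static_or_dynamic_shape above is in scope), the function returns a plain Python shape whenever it is fully known and only falls back to the dynamic tf.shape() otherwise:

import numpy as np
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
print(static_or_dynamic_shape(x))            # static TensorShape (2, 2)
print(static_or_dynamic_shape(np.zeros(3)))  # plain numpy shape tuple (3,)
# Inside a tf.function traced with a partially unknown input signature, the
# static shape is not fully defined, so the dynamic tf.shape(value) tensor is
# returned instead.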
Example #7
Source File: tensor_utils.py from federated with Apache License 2.0
def is_scalar(tensor):
    """Returns True iff the given tensor is a scalar.

    Args:
        tensor: The tensor to test for being a scalar.

    Returns:
        True if 'tensor' is a scalar, i.e. all dims are 1, False otherwise.

    Raises:
        TypeError: when the argument is not a tensor.
    """
    if not tf.is_tensor(tensor):
        raise TypeError('Expected a tensor, found "{}".'.format(
            py_typecheck.type_string(type(tensor))))
    return (hasattr(tensor, 'get_shape') and
            all(dim == 1 for dim in tensor.get_shape()))
Example #8
Source File: tokenizer.py from OpenNMT-tf with MIT License
def _detokenize(self, tokens, sequence_length):
    if isinstance(tokens, tf.RaggedTensor):
        rank = len(tokens.shape)
        if rank == 1:
            return self._detokenize_tensor(tokens.values)
        elif rank == 2:
            return self._detokenize_ragged_tensor(tokens)
        else:
            raise ValueError("Unsupported RaggedTensor rank %d for detokenization" % rank)
    elif tf.is_tensor(tokens):
        rank = len(tokens.shape)
        if rank == 1:
            return self._detokenize_tensor(tokens)
        elif rank == 2:
            if sequence_length is None:
                raise ValueError("sequence_length is required for Tensor detokenization")
            return self._detokenize_batch_tensor(tokens, sequence_length)
        else:
            raise ValueError("Unsupported tensor rank %d for detokenization" % rank)
    elif isinstance(tokens, list) and tokens and isinstance(tokens[0], list):
        return list(map(self.detokenize, tokens))
    else:
        tokens = [tf.compat.as_text(token) for token in tokens]
        return self._detokenize_string(tokens)
Example #9
Source File: deferred_tensor.py from tensorflow_constrained_optimization with Apache License 2.0
def __eq__(self, other):
    if not isinstance(other, _StaticExplicitDeferredTensorState):
        return False
    if self.auto_cast != other.auto_cast:
        return False

    # If at least one of the objects is a Tensor, then we check that they're
    # the same object, instead of calling __eq__.
    #
    # In eager mode, we could potentially check for value-equality, by using
    # the np.array_equal() code below after *explicitly* casting the Tensors
    # to numpy arrays by calling Tensor.numpy(). This would probably be a bad
    # idea, though, since if the Tensor is actually a tf.Variable, its value
    # could change in the future.
    if tf.is_tensor(self.value) or tf.is_tensor(other.value):
        return self.value is other.value

    # Every other allowed type can be handled by numpy.
    #
    # We can hope that in most cases, this will be quick (e.g. same object -->
    # equal, different shapes --> unequal), but if we're unlucky, this has the
    # potential to be slow.
    return np.array_equal(self.value, other.value)
Example #10
Source File: deferred_tensor.py from tensorflow_constrained_optimization with Apache License 2.0
def __init__(self, value, auto_cast):
    """Creates a new `_StaticExplicitDeferredTensorState`.

    Args:
        value: `Tensor`-like, the value of the `DeferredTensor`.
        auto_cast: `Boolean`, whether the value should be automatically
            type-promoted, if necessary. Only applies if "value" is a
            `Tensor`: non-`Tensor` types are always auto-castable.
    """
    assert not callable(value)
    self._value = value
    self._auto_cast = auto_cast

    # For non-Tensor types, we make a deep copy to make extra-certain that it
    # is immutable (since we'll hash it).
    if not tf.is_tensor(self._value):
        self._value = copy.deepcopy(self._value)

    # We memoize the hash, since it can be expensive to compute.
    self._hash = None
Example #11
Source File: bridge.py from OpenNMT-tf with MIT License
def assert_state_is_compatible(expected_state, state):
    """Asserts that states are compatible.

    Args:
        expected_state: The reference state.
        state: The state that must be compatible with :obj:`expected_state`.

    Raises:
        ValueError: if the states are incompatible.
    """
    # Check structure compatibility.
    tf.nest.assert_same_structure(expected_state, state)

    # Check shape compatibility.
    expected_state_flat = tf.nest.flatten(expected_state)
    state_flat = tf.nest.flatten(state)
    for x, y in zip(expected_state_flat, state_flat):
        if tf.is_tensor(x):
            expected_depth = x.shape[-1]
            depth = y.shape[-1]
            if depth != expected_depth:
                raise ValueError("Tensor in state has shape %s which is incompatible "
                                 "with the target shape %s" % (y.shape, x.shape))
Example #12
Source File: loss.py from tensorflow_constrained_optimization with Apache License 2.0
def _convert_to_binary_classification_predictions(predictions):
    """Converts a `Tensor` into a set of binary classification predictions.

    This function checks that the given `Tensor` is floating-point, and that it
    is trivially convertible to rank-1, i.e. has only one "nontrivial"
    dimension (e.g. the shapes [1000] and [1, 1, None, 1] are allowed, but
    [None, 1, None] and [50, 10] are not). If it satisfies these conditions,
    then it is reshaped to be rank-1 (if necessary) and returned.

    Args:
        predictions: a rank-1 floating-point `Tensor` of predictions.

    Returns:
        The predictions `Tensor`, reshaped to be rank-1, if necessary.

    Raises:
        TypeError: if "predictions" is not a floating-point `Tensor`.
        ValueError: if "predictions" is not trivially convertible to rank-1.
    """
    if not tf.is_tensor(predictions):
        raise TypeError("predictions must be a Tensor")
    if not predictions.dtype.is_floating:
        raise TypeError("predictions must be floating-point")

    return helpers.convert_to_1d_tensor(predictions, name="predictions")
Example #13
Source File: audio_featurizer.py from athena with Apache License 2.0
def __call__(self, audio=None, sr=None, speed=1.0):
    """extract feature from audio data

    :param audio: audio data or audio file
    :param sr: sample rate
    :return: feature
    """
    if audio is not None and not tf.is_tensor(audio):
        audio = tf.convert_to_tensor(audio)
    if sr is not None and not tf.is_tensor(sr):
        sr = tf.convert_to_tensor(sr)
    return self.__impl(audio, sr, speed)
Example #14
Source File: keras_utils.py from addons with Apache License 2.0
def is_tensor_or_variable(x):
    return tf.is_tensor(x) or isinstance(x, tf.Variable)
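A quick sketch of how this helper might be used (assuming is_tensor_or_variable above is in scope; whether tf.is_tensor() alone already covers tf.Variable has differed across TensorFlow releases, which is presumably why the explicit isinstance check is kept):

import tensorflow as tf

v = tf.Variable(1.0)
print(is_tensor_or_variable(v))               # True (variable)
print(is_tensor_or_variable(tf.constant(1)))  # True (tensor)
print(is_tensor_or_variable("not a tensor"))  # False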
Example #15
Source File: tf_computation_utils.py from federated with Apache License 2.0
def identity(source):
    """Applies `tf.identity` pointwise to `source`.

    This utility function provides the exact same behavior as `tf.identity`,
    but it generalizes to a wider class of objects, including ordinary tensors,
    variables, as well as various types of nested structures. It would
    typically be used together with `tf.control_dependencies` in non-eager
    TensorFlow.

    Args:
        source: A nested structure composed of tensors or variables embedded in
            containers that are compatible with `tf.nest`, or instances of
            `anonymous_tuple.AnonymousTuple`. Elements that represent variables
            have their content extracted prior to identity mapping by first
            invoking `tf.Variable.read_value`.

    Returns:
        The result of applying `tf.identity` to read all elements of the
        `source` pointwise, with the same structure as `source`.

    Raises:
        TypeError: If types mismatch.
    """

    def _mapping_fn(x):
        if not tf.is_tensor(x):
            raise TypeError('Expected a tensor, found {}.'.format(
                py_typecheck.type_string(type(x))))
        if hasattr(x, 'read_value'):
            x = x.read_value()
        return tf.identity(x)

    # TODO(b/113112108): Extend this to containers of mixed types.
    if isinstance(source, anonymous_tuple.AnonymousTuple):
        return anonymous_tuple.map_structure(_mapping_fn, source)
    else:
        return tf.nest.map_structure(_mapping_fn, source)
Example #16
Source File: canonical_form_utils_test.py from federated with Apache License 2.0
def evaluate(self, value):
    if tf.is_tensor(value):
        return super().evaluate(value)
    elif isinstance(value, (np.ndarray, np.number)):
        return value
    else:
        raise TypeError('Cannot evaluate value of type `{!s}`.'.format(
            type(value)))
Example #17
Source File: base_head.py from estimator with Apache License 2.0
def create_eval_metrics_tuple(fn, kwargs):
    """Creates TPU eval metrics tuple.

    Helper function to make eval_metric tuple (eval_metric_fn, fn_kwargs) used
    by `TPUEstimator`. TPUEstimator requires that `eval_metric_fn` take
    exclusively Tensor arguments. This helper can help create such a function
    from a more generic function that can take both Tensor and non-Tensor
    arguments.

    Args:
        fn: A eval_metric_fn that takes both Tensor and non-Tensor arguments.
            This function must return a dict of form
            {'metric name': (metric_tensor, eval_op)}
        kwargs: Dict of arguments for `fn`.

    Returns:
        `eval_metric` tuple that can be passed to a `model_fn._TPUEstimatorSpec`.
    """
    tensor_kwargs = {}
    nontensor_kwargs = {}
    for k, v in six.iteritems(kwargs):
        if tf.is_tensor(v):
            tensor_kwargs[k] = v
        else:
            nontensor_kwargs[k] = v

    def _fn(**tensors):
        return fn(**dict(nontensor_kwargs, **tensors))

    return (_fn, tensor_kwargs)
Example #18
Source File: tensorflow_deserialization_test.py from federated with Apache License 2.0
def test_deserialize_and_call_tf_computation_with_add_one(self):
    identity_type = computation_types.TensorType(tf.int32)
    identity_fn = building_block_factory.create_compiled_identity(identity_type)
    init_op, result = tensorflow_deserialization.deserialize_and_call_tf_computation(
        identity_fn.proto, tf.constant(10), tf.compat.v1.get_default_graph())
    self.assertTrue(tf.is_tensor(result))
    with tf.compat.v1.Session() as sess:
        if init_op:
            sess.run(init_op)
        result_val = sess.run(result)
    self.assertEqual(result_val, 10)
Example #19
Source File: tensorflow_utils_test.py from federated with Apache License 2.0
def _assert_binding_matches_type_and_value(self, binding, type_spec, val, graph):
    """Asserts that 'bindings' matches the given type, value, and graph."""
    self.assertIsInstance(binding, pb.TensorFlow.Binding)
    self.assertIsInstance(type_spec, computation_types.Type)
    binding_oneof = binding.WhichOneof('binding')
    if binding_oneof == 'tensor':
        self.assertTrue(tf.is_tensor(val))
        if not isinstance(val, tf.Variable):
            # We insert a read_value() op for Variables, which produces
            # a name we don't control. Otherwise, names should match:
            self.assertEqual(binding.tensor.tensor_name, val.name)
        self.assertIsInstance(type_spec, computation_types.TensorType)
        self.assertEqual(type_spec.dtype, val.dtype.base_dtype)
        self.assertEqual(repr(type_spec.shape), repr(val.shape))
    elif binding_oneof == 'sequence':
        self.assertIsInstance(val,
                              type_conversions.TF_DATASET_REPRESENTATION_TYPES)
        sequence_oneof = binding.sequence.WhichOneof('binding')
        self.assertEqual(sequence_oneof, 'variant_tensor_name')
        variant_tensor = graph.get_tensor_by_name(
            binding.sequence.variant_tensor_name)
        op = str(variant_tensor.op.type)
        self.assertTrue((op == 'Placeholder') or ('Dataset' in op))
        self.assertEqual(variant_tensor.dtype, tf.variant)
        self.assertIsInstance(type_spec, computation_types.SequenceType)
        self.assertEqual(
            computation_types.to_type(val.element_spec), type_spec.element)
    elif binding_oneof == 'tuple':
        self.assertIsInstance(type_spec, computation_types.NamedTupleType)
        if not isinstance(val, (list, tuple, anonymous_tuple.AnonymousTuple)):
            self.assertIsInstance(val, dict)
            if isinstance(val, collections.OrderedDict):
                val = list(val.values())
            else:
                val = [v for _, v in sorted(val.items())]
        for idx, e in enumerate(anonymous_tuple.to_elements(type_spec)):
            self._assert_binding_matches_type_and_value(binding.tuple.element[idx],
                                                        e[1], val[idx], graph)
    else:
        self.fail('Unknown binding.')
Example #20
Source File: saving.py from alibi-detect with Apache License 2.0
def init_od_aegmm(state_dict: Dict, aegmm: tf.keras.Model) -> OutlierAEGMM:
    """
    Initialize OutlierAEGMM.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    aegmm
        Loaded AEGMM.

    Returns
    -------
    Initialized OutlierAEGMM instance.
    """
    od = OutlierAEGMM(threshold=state_dict['threshold'],
                      aegmm=aegmm)
    od.phi = state_dict['phi']
    od.mu = state_dict['mu']
    od.cov = state_dict['cov']
    od.L = state_dict['L']
    od.log_det_cov = state_dict['log_det_cov']

    if not all(tf.is_tensor(_) for _ in [od.phi, od.mu, od.cov, od.L, od.log_det_cov]):
        logger.warning('Loaded AEGMM detector has not been fit.')

    return od
Example #21
Source File: saving.py from alibi-detect with Apache License 2.0
def init_od_vaegmm(state_dict: Dict, vaegmm: tf.keras.Model) -> OutlierVAEGMM:
    """
    Initialize OutlierVAEGMM.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    vaegmm
        Loaded VAEGMM.

    Returns
    -------
    Initialized OutlierVAEGMM instance.
    """
    od = OutlierVAEGMM(threshold=state_dict['threshold'],
                       vaegmm=vaegmm,
                       samples=state_dict['samples'])
    od.phi = state_dict['phi']
    od.mu = state_dict['mu']
    od.cov = state_dict['cov']
    od.L = state_dict['L']
    od.log_det_cov = state_dict['log_det_cov']

    if not all(tf.is_tensor(_) for _ in [od.phi, od.mu, od.cov, od.L, od.log_det_cov]):
        logger.warning('Loaded VAEGMM detector has not been fit.')

    return od
Example #22
Source File: audio_featurizer.py from athena with Apache License 2.0
def __call__(self, audio=None, sr=None, speed=1.0):
    """extract feature from audio data

    :param audio: audio data or audio file
    :param sr: sample rate
    :return: feature
    """
    if audio is not None and not tf.is_tensor(audio):
        audio = tf.convert_to_tensor(audio)
    if sr is not None and not tf.is_tensor(sr):
        sr = tf.convert_to_tensor(sr)
    return self.__impl(audio, sr, speed)
Example #23
Source File: head.py from estimator with Apache License 2.0
def _create_eval_metrics_tuple(fn, kwargs):
    """Creates TPU eval metrics tuple.

    Helper function to make eval_metric tuple (eval_metric_fn, fn_kwargs) used
    by `TPUEstimator`. TPUEstimator requires that `eval_metric_fn` take
    exclusively Tensor arguments. This helper can help create such a function
    from a more generic function that can take both Tensor and non-Tensor
    arguments.

    Args:
        fn: A eval_metric_fn that takes both Tensor and non-Tensor arguments.
            This function must return a dict of form
            {'metric name': (metric_tensor, eval_op)}
        kwargs: Dict of arguments for `fn`.

    Returns:
        `eval_metric` tuple that can be passed to a `model_fn._TPUEstimatorSpec`.
    """
    tensor_kwargs = {}
    nontensor_kwargs = {}
    for k, v in six.iteritems(kwargs):
        if tf.is_tensor(v):
            tensor_kwargs[k] = v
        else:
            nontensor_kwargs[k] = v

    def _fn(**tensors):
        return fn(**dict(nontensor_kwargs, **tensors))

    return (_fn, tensor_kwargs)
Example #24
Source File: keras.py from estimator with Apache License 2.0
def _convert_tensor(x):
    """Create or cast tensor if needed."""
    if not tf.is_tensor(x):
        # x is a numpy array
        x = tf.compat.v1.convert_to_tensor_or_sparse_tensor(x)
    return x
Example #25
Source File: graph_and_eager_test_case.py from tensorflow_constrained_optimization with Apache License 2.0
def run(self, tensor, feed_dict=None):
    """Evaluates the given `Tensor`.

    Unlike `tf.Session.run()`, this method expects a single `Tensor` argument
    (i.e. not a list of `Tensor`s or any other more complex structure).

    Args:
        tensor: the `Tensor` to evaluate, or a nullary function returning a
            `Tensor`.
        feed_dict: dict mapping placeholder `Tensor`s to their desired values.

    Returns:
        The value of the given `Tensor`.

    Raises:
        TypeError: if (i) the given tensor is neither a `Tensor` nor a nullary
            function returning a `Tensor`, or (ii) we're given a feed_dict but
            the tensor argument is not callable.
    """
    if callable(tensor):
        tensor = tensor()
    elif feed_dict:
        raise TypeError("if a feed_dict is provided to run(), then the tensor "
                        "argument must be a nullary function returning a Tensor")
    if not tf.is_tensor(tensor):
        raise TypeError("_GraphWrappedSession.run expects a Tensor argument, "
                        "or a nullary function returning a Tensor")
    return self._session.run(tensor, feed_dict=feed_dict)
Example #26
Source File: tokenizer.py from OpenNMT-tf with MIT License
def _tokenize(self, text):
    if tf.is_tensor(text):
        rank = len(text.shape)
        if rank == 0:
            return self._tokenize_tensor(text)
        elif rank == 1:
            return self._tokenize_batch_tensor(text)
        else:
            raise ValueError("Unsupported tensor rank %d for tokenization" % rank)
    elif isinstance(text, list):
        return list(map(self.tokenize, text))
    else:
        text = tf.compat.as_text(text)
        return self._tokenize_string(text)
Example #27
Source File: knowledge_distillation.py from zoo with Apache License 2.0
def _process_inputs(call_inputs):
    _basic_usage = (
        "OutputDistributionMatchingLossLayer should be called with a list of length 3, where:\n "
        " - the first element contains a tensor that will be returned as is,\n"
        " - the second argument contains the logits of the teacher,\n"
        " - and the third the logits of the student.\n\n Instead, it was called with: "
    )
    if isinstance(call_inputs, list):
        if len(call_inputs) == 3 and all([tf.is_tensor(ci) for ci in call_inputs]):
            return tuple(call_inputs)
        raise ValueError(_basic_usage + str([type(ci) for ci in call_inputs]))
    else:
        raise ValueError(_basic_usage + str(type(call_inputs)))
Example #28
Source File: training.py from OpenNMT-tf with MIT License
def _apply_gradients(self, gradients, scale=1):
    """Applies the gradients."""
    gradient_scale = scale * self.num_replicas
    if tf.is_tensor(gradient_scale) or gradient_scale != 1:
        gradients = [
            self._all_reduce_sum(gradient / tf.cast(gradient_scale, gradient.dtype))
            for gradient in gradients]
    self._optimizer.apply_gradients(list(zip(gradients, self._model.trainable_variables)))
Example #29
Source File: helpers.py from tensorflow_constrained_optimization with Apache License 2.0
def get_num_columns_of_2d_tensor(tensor, name="tensor"):
    """Gets the number of columns of a rank-two `Tensor`.

    Args:
        tensor: a rank-2 `Tensor` with a known number of columns.
        name: str, how to refer to the tensor in error messages.

    Returns:
        The number of columns in the tensor.

    Raises:
        TypeError: if "tensor" is not a `Tensor`.
        ValueError: if "tensor" is not a rank-2 `Tensor` with a known number of
            columns.
    """
    if not tf.is_tensor(tensor):
        raise TypeError("%s must be a Tensor" % name)

    dims = tensor.shape.dims
    if dims is None:
        raise ValueError("%s must have a known rank" % name)
    if len(dims) != 2:
        raise ValueError("%s must be rank 2 (it is rank %d)" % (name, len(dims)))

    columns = dims[1].value
    if columns is None:
        raise ValueError("%s must have a known number of columns" % name)

    return columns
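For context, a minimal usage sketch (assuming get_num_columns_of_2d_tensor above is importable) showing the successful path and one of the error paths:

import tensorflow as tf

m = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(get_num_columns_of_2d_tensor(m))  # 3

try:
    get_num_columns_of_2d_tensor(tf.constant([1.0, 2.0]))
except ValueError as err:
    print(err)  # tensor must be rank 2 (it is rank 1)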
Example #30
Source File: layers_base.py from larq with Apache License 2.0
def __init__(self, *args, pad_values=0.0, **kwargs):
    self.pad_values = pad_values
    super().__init__(*args, **kwargs)
    self._is_native_padding = self.padding != "same" or (
        not tf.is_tensor(self.pad_values) and self.pad_values == 0.0
    )