Python tensorflow.compat.v2.uint8() Examples
The following are 11 code examples of tensorflow.compat.v2.uint8().
You may also want to check out all available functions/classes of the module tensorflow.compat.v2, or try the search function.
Example #1
Source File: resisc45_densenet121_univpatch_and_univperturbation_adversarial_224x224.py From armory with MIT License
```python
def _info(self):
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict(
            {
                "images": {
                    "clean": tfds.features.Image(
                        shape=[224, 224, 3], dtype=tf.uint8, encoding_format="png"
                    ),
                    "adversarial_univperturbation": tfds.features.Image(
                        shape=[224, 224, 3], dtype=tf.uint8, encoding_format="png"
                    ),
                    "adversarial_univpatch": tfds.features.Image(
                        shape=[224, 224, 3], dtype=tf.uint8, encoding_format="png"
                    ),
                },
                "label": tfds.features.ClassLabel(names=_LABELS),
                "imagename": tfds.features.Text(),
            }
        ),
        supervised_keys=("images", "label"),
        homepage=_URL,
        citation=_CITATION,
    )
```
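Because the feature above uses encoding_format="png", the underlying pixel data has to be uint8 (or uint16). A minimal, standalone sketch of the round trip this implies, independent of the armory builder:

```python
import tensorflow.compat.v2 as tf

# PNG encoding expects uint8 (or uint16) pixel data; decoding restores the
# uint8 array unchanged.
image = tf.cast(
    tf.random.uniform([224, 224, 3], maxval=256, dtype=tf.int32), tf.uint8)
encoded = tf.io.encode_png(image)                # scalar string tensor
decoded = tf.io.decode_png(encoded, channels=3)
print(decoded.dtype, decoded.shape)              # uint8 (224, 224, 3)
```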
Example #2
Source File: imagenet_adversarial.py From armory with MIT License
```python
def _info(self):
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict(
            {
                "images": {
                    "clean": tfds.features.Tensor(
                        shape=[224, 224, 3], dtype=tf.uint8
                    ),
                    "adversarial": tfds.features.Tensor(
                        shape=[224, 224, 3], dtype=tf.uint8
                    ),
                },
                "label": tfds.features.Tensor(shape=(), dtype=tf.int64),
            }
        ),
        supervised_keys=("images", "label"),
    )
```
Example #3
Source File: wider_face.py From datasets with Apache License 2.0
```python
def _info(self):
    features = {
        'image': tfds.features.Image(encoding_format='jpeg'),
        'image/filename': tfds.features.Text(),
        'faces': tfds.features.Sequence({
            'bbox': tfds.features.BBoxFeature(),
            'blur': tf.uint8,
            'expression': tf.bool,
            'illumination': tf.bool,
            'occlusion': tf.uint8,
            'pose': tf.bool,
            'invalid': tf.bool,
        }),
    }
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict(features),
        homepage=_PROJECT_URL,
        citation=_CITATION,
    )
```
Example #4
Source File: exporter_lib_v2.py From models with Apache License 2.0
```python
def _run_inference_on_images(self, image):
    """Cast image to float and run inference.

    Args:
      image: uint8 Tensor of shape [1, None, None, 3]

    Returns:
      Tensor dictionary holding detections.
    """
    label_id_offset = 1
    image = tf.cast(image, tf.float32)
    image, shapes = self._model.preprocess(image)
    prediction_dict = self._model.predict(image, shapes)
    detections = self._model.postprocess(prediction_dict, shapes)
    classes_field = fields.DetectionResultFields.detection_classes
    detections[classes_field] = (
        tf.cast(detections[classes_field], tf.float32) + label_id_offset)
    for key, val in detections.items():
        detections[key] = tf.cast(val, tf.float32)
    return detections
```
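The pattern here is to accept a uint8 image batch, cast it to float32 before handing it to the model, and shift detection classes by a label offset of one. A rough, hypothetical stand-in that exercises only the dtype handling (the run_inference_on_images function and the fake detections dict below are not part of the exporter):

```python
import tensorflow.compat.v2 as tf

# Hypothetical stand-in: no real detection model, just the uint8 -> float32
# cast and the +1 label_id_offset applied to detection classes.
def run_inference_on_images(image, label_id_offset=1):
    image = tf.cast(image, tf.float32)  # uint8 input cast to float for the model
    detections = {"detection_classes": tf.constant([[0, 2, 5]], tf.int32)}
    detections["detection_classes"] = (
        tf.cast(detections["detection_classes"], tf.float32) + label_id_offset)
    return detections

out = run_inference_on_images(tf.zeros([1, 64, 64, 3], dtype=tf.uint8))
print(out["detection_classes"].numpy())  # [[1. 3. 6.]]
```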
Example #5
Source File: imagenet_adversarial.py From armory with MIT License
```python
def _generate_examples(self, path):
    """Yields examples."""
    clean_key = "clean"
    adversarial_key = "adversarial"

    def _parse(serialized_example):
        ds_features = {
            "height": tf.io.FixedLenFeature([], tf.int64),
            "width": tf.io.FixedLenFeature([], tf.int64),
            "label": tf.io.FixedLenFeature([], tf.int64),
            "adv-image": tf.io.FixedLenFeature([], tf.string),
            "clean-image": tf.io.FixedLenFeature([], tf.string),
        }
        example = tf.io.parse_single_example(serialized_example, ds_features)

        img_clean = tf.io.decode_raw(example["clean-image"], tf.float32)
        img_adv = tf.io.decode_raw(example["adv-image"], tf.float32)
        # float values are integers in [0.0, 255.0] for clean and adversarial
        img_clean = tf.cast(img_clean, tf.uint8)
        img_clean = tf.reshape(img_clean, (example["height"], example["width"], 3))
        img_adv = tf.cast(img_adv, tf.uint8)
        img_adv = tf.reshape(img_adv, (example["height"], example["width"], 3))
        return {clean_key: img_clean, adversarial_key: img_adv}, example["label"]

    ds = tf.data.TFRecordDataset(filenames=[path])
    ds = ds.map(lambda x: _parse(x))
    default_graph = tf.compat.v1.keras.backend.get_session().graph
    ds = tfds.as_numpy(ds, graph=default_graph)

    for i, (img, label) in enumerate(ds):
        yield str(i), {
            "images": img,
            "label": label,
        }
```
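The uint8-specific step is decoding the raw float32 bytes, casting them to uint8, and reshaping with the stored height and width. A self-contained sketch of that parse path on a synthetic record (the feature names and values below are made up to mirror the layout above):

```python
import numpy as np
import tensorflow.compat.v2 as tf

# Synthetic record whose image bytes are float32 values in [0.0, 255.0],
# mirroring the "clean-image" layout assumed above.
height, width = 4, 4
img = np.random.randint(0, 256, size=(height, width, 3)).astype(np.float32)
serialized = tf.train.Example(features=tf.train.Features(feature={
    "height": tf.train.Feature(int64_list=tf.train.Int64List(value=[height])),
    "width": tf.train.Feature(int64_list=tf.train.Int64List(value=[width])),
    "clean-image": tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[img.tobytes()])),
})).SerializeToString()

# Parse, decode the raw float bytes, cast to uint8, restore the image shape.
example = tf.io.parse_single_example(serialized, {
    "height": tf.io.FixedLenFeature([], tf.int64),
    "width": tf.io.FixedLenFeature([], tf.int64),
    "clean-image": tf.io.FixedLenFeature([], tf.string),
})
img_clean = tf.io.decode_raw(example["clean-image"], tf.float32)
img_clean = tf.cast(img_clean, tf.uint8)
img_clean = tf.reshape(img_clean, (example["height"], example["width"], 3))
print(img_clean.dtype, img_clean.shape)  # uint8 (4, 4, 3)
```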
Example #6
Source File: extensions.py From trax with Apache License 2.0
```python
def _canonicalize_jit_arg(x):
    if isinstance(x, tf_np.ndarray):
        return x.data
    else:
        try:
            # We need to convert `int` to the most precise dtype, otherwise the dtype
            # of the result may be different from numpy's. For example, when a binary
            # op takes in a Python integer 5 and an array of uint32, numpy will pick
            # uint32 as 5's dtype, while tf.convert_to_tensor will choose int32 which
            # will cause the two arguments to be promoted to int64. We pick uint8
            # here, which will be promoted to uint32 by the binary op.
            # Note that we prefer unsigned int to signed int when both are equally
            # precise. For example, for 5, we pick uint8 instead of int8. There is no
            # reason to prefer one to the other, because for each there is a case
            # where the behavior diverges from numpy. If we prefer signed int,
            # consider the case where the first operand is 5 and the second is
            # 2**64-1. Numpy picks uint64 as the result dtype, but because we choose a
            # signed type for 5 such as int8, the result type will be float64. On the
            # other hand, if we prefer unsigned int, consider the case where the first
            # operand is 2**31-1 and the second is -1. Numpy will pick int32, but
            # because we choose uint32 for 2**31-1, the result will be int64. The root
            # of the problem is that `jit` converts `int` to tensors (hence committing
            # to a dtype) too early, when we don't have enough information about the
            # jitted function (e.g. which subset of the arguments should be promoted
            # together using np.result_type). tf.function doesn't have this problem
            # because it doesn't convert `int` to tensors. jax.jit doesn't have this
            # problem because it converts `int` to "int tracer" which doesn't commit
            # to a dtype.
            # TODO(wangpeng): Revisit this design and see whether we can improve `jit`
            # and tf.function.
            dtype = most_precise_int_dtype(x)
            if dtype is None and isinstance(x, float):
                dtype = tf_np.default_float_type()
            return tf.convert_to_tensor(value=x, dtype=dtype)
        except (TypeError, ValueError):
            return x
```
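The long comment above is about value-dependent integer promotion. A small, standalone illustration of the mismatch it guards against, using only plain NumPy and TensorFlow (most_precise_int_dtype itself is a trax helper not shown here):

```python
import numpy as np
import tensorflow.compat.v2 as tf

# NumPy keeps uint32 when a Python int meets a uint32 array, while
# tf.convert_to_tensor commits the int to int32 up front.
arr = np.arange(4, dtype=np.uint32)
print((arr + 5).dtype)                 # uint32
print(tf.convert_to_tensor(5).dtype)   # <dtype: 'int32'>

# Converting the int with the most precise unsigned dtype that holds it
# (uint8 for 5) lets a later binary op promote back to uint32, whereas a
# signed choice like int32 pushes the result to int64.
print(np.result_type(np.uint8, np.uint32))   # uint32
print(np.result_type(np.int32, np.uint32))   # int64
```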
Example #7
Source File: learner.py From valan with Apache License 2.0
```python
def _convert_uint8_to_bfloat16(ts: Any):
    """Casts uint8 to bfloat16 if input is uint8.

    Args:
      ts: any tensor or nested tensor structure, such as EnvOutput.

    Returns:
      Converted structure.
    """
    return tf.nest.map_structure(
        lambda t: tf.cast(t, tf.bfloat16) if t.dtype == tf.uint8 else t, ts)
```
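A quick usage sketch of the conversion above on a nested structure (the dict below is a hypothetical stand-in for EnvOutput):

```python
import tensorflow.compat.v2 as tf

# uint8 tensors are cast to bfloat16; everything else passes through unchanged.
env_output = {
    "frame": tf.zeros([2, 84, 84, 3], dtype=tf.uint8),
    "reward": tf.zeros([2], dtype=tf.float32),
}
converted = tf.nest.map_structure(
    lambda t: tf.cast(t, tf.bfloat16) if t.dtype == tf.uint8 else t, env_output)
print(converted["frame"].dtype)   # <dtype: 'bfloat16'>
print(converted["reward"].dtype)  # <dtype: 'float32'>
```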
Example #8
Source File: search_test.py From hub with Apache License 2.0
```python
def fake_image_dataset(*args, **kwargs):
    num_examples = 30
    return tf.data.Dataset.from_generator(
        lambda: ({
            "image": np.ones(shape=(32, 32, 3), dtype=np.uint8),
            "label": i % 10,
        } for i in range(num_examples)),
        output_types={"image": tf.uint8, "label": tf.int64},
        output_shapes={"image": (32, 32, 3), "label": ()},
    )
```
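When consumed, such a generator-backed dataset reports the declared uint8 spec and batches like any other tf.data pipeline. A short sketch (a standalone variant of the helper above, shrunk to four elements):

```python
import numpy as np
import tensorflow.compat.v2 as tf

# Standalone variant of the fake dataset above, just to inspect its spec.
ds = tf.data.Dataset.from_generator(
    lambda: ({"image": np.ones((32, 32, 3), dtype=np.uint8), "label": i % 10}
             for i in range(4)),
    output_types={"image": tf.uint8, "label": tf.int64},
    output_shapes={"image": (32, 32, 3), "label": ()},
)
print(ds.element_spec["image"])  # TensorSpec(shape=(32, 32, 3), dtype=tf.uint8, ...)
for batch in ds.batch(2).take(1):
    print(batch["image"].dtype, batch["image"].shape)  # <dtype: 'uint8'> (2, 32, 32, 3)
```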
Example #9
Source File: exporter_lib_tf2_test.py From models with Apache License 2.0
```python
def get_dummy_input(self, input_type):
    """Get dummy input for the given input type."""
    if input_type == 'image_tensor':
        return np.zeros(shape=(1, 20, 20, 3), dtype=np.uint8)
    if input_type == 'float_image_tensor':
        return np.zeros(shape=(1, 20, 20, 3), dtype=np.float32)
    elif input_type == 'encoded_image_string_tensor':
        image = Image.new('RGB', (20, 20))
        byte_io = io.BytesIO()
        image.save(byte_io, 'PNG')
        return [byte_io.getvalue()]
    elif input_type == 'tf_example':
        image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8)
        encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
        example = tf.train.Example(
            features=tf.train.Features(
                feature={
                    'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
                    'image/format': dataset_util.bytes_feature(six.b('jpeg')),
                    'image/source_id': dataset_util.bytes_feature(six.b('image_id')),
                })).SerializeToString()
        return [example]
```
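For the 'encoded_image_string_tensor' branch, the PNG bytes produced with PIL decode back into a uint8 tensor on the model side. A minimal sketch of that round trip (assuming Pillow is available, as in the test above):

```python
import io
import tensorflow.compat.v2 as tf
from PIL import Image

# A blank 20x20 RGB image saved as PNG, then decoded into a uint8 tensor.
image = Image.new('RGB', (20, 20))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
decoded = tf.io.decode_image(byte_io.getvalue(), channels=3)
print(decoded.dtype, decoded.shape)  # <dtype: 'uint8'> (20, 20, 3)
```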
Example #10
Source File: exporter_lib_v2.py From models with Apache License 2.0
```python
def __call__(self, input_tensor):
    with tf.device('cpu:0'):
        image = tf.map_fn(
            _decode_image,
            elems=input_tensor,
            dtype=tf.uint8,
            parallel_iterations=32,
            back_prop=False)
    return self._run_inference_on_images(image)
```
Example #11
Source File: exporter_lib_v2.py From models with Apache License 2.0
```python
def __call__(self, input_tensor):
    with tf.device('cpu:0'):
        image = tf.map_fn(
            _decode_tf_example,
            elems=input_tensor,
            dtype=tf.uint8,
            parallel_iterations=32,
            back_prop=False)
    return self._run_inference_on_images(image)
```
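In Examples #10 and #11, the dtype=tf.uint8 argument tells tf.map_fn the element dtype produced by the decode function (newer TF versions spell this fn_output_signature). A standalone sketch of the same pattern without the exporter's _decode_image/_decode_tf_example helpers, decoding a small batch of PNG strings:

```python
import tensorflow.compat.v2 as tf

# Encode a uint8 batch to PNG strings, then map a decoder back over the batch;
# the output signature is declared as uint8, matching the examples above.
images = tf.zeros([3, 20, 20, 3], dtype=tf.uint8)
encoded = tf.map_fn(tf.io.encode_png, images, fn_output_signature=tf.string)
decoded = tf.map_fn(
    lambda s: tf.io.decode_png(s, channels=3),
    encoded,
    fn_output_signature=tf.uint8)
print(decoded.dtype, decoded.shape)  # <dtype: 'uint8'> (3, 20, 20, 3)
```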