Python tensorflow.numpy_function() Examples
The following are 8 code examples of tensorflow.numpy_function(), taken from open-source projects.
The originating project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the tensorflow module.
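Before the project examples, here is a minimal sketch of the basic pattern they all share: tf.numpy_function wraps a plain Python/NumPy function as a graph op, takes a list of input tensors plus the output dtype(s), and returns tensors whose static shape is unknown, so the shape is usually restored afterwards. The function name and values below are illustrative only, not taken from any of the projects.

import numpy as np
import tensorflow as tf

def _np_square_plus_one(x):
    # Ordinary NumPy code; runs eagerly inside the wrapped op.
    return (np.square(x) + 1.0).astype(np.float32)

x = tf.constant([1.0, 2.0, 3.0])
y = tf.numpy_function(_np_square_plus_one, [x], tf.float32)
# tf.numpy_function outputs have unknown static shape; restore it explicitly.
y = tf.ensure_shape(y, x.shape)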
Example #1
Source File: dataset.py From CVPR2019-DeepTreeLearningForZeroShotFaceAntispoofing with MIT License
def parse_fn(self, file):
    config = self.config
    image_size = config.IMAGE_SIZE
    dmap_size = config.MAP_SIZE
    label_size = 1

    def _parse_function(_file):
        _file = _file.decode('UTF-8')
        image_bytes = image_size * image_size * 3
        dmap_bytes = dmap_size * dmap_size
        bin = np.fromfile(_file, dtype='uint8')
        image = np.transpose(
            bin[0:image_bytes].reshape((3, image_size, image_size)) / 255, (1, 2, 0))
        dmap = np.transpose(
            bin[image_bytes:image_bytes + dmap_bytes].reshape((1, dmap_size, dmap_size)) / 255,
            (1, 2, 0))
        label = bin[image_bytes + dmap_bytes:image_bytes + dmap_bytes + label_size] / 1
        dmap1 = dmap * (1 - label)
        dmap2 = np.ones_like(dmap) * label
        dmap = np.concatenate([dmap1, dmap2], axis=2)
        return image.astype(np.float32), dmap.astype(np.float32), label.astype(np.float32)

    image_ts, dmap_ts, label_ts = tf.numpy_function(
        _parse_function, [file], [tf.float32, tf.float32, tf.float32])
    image_ts = tf.ensure_shape(image_ts, [config.IMAGE_SIZE, config.IMAGE_SIZE, 3])
    dmap_ts = tf.ensure_shape(dmap_ts, [config.MAP_SIZE, config.MAP_SIZE, 2])
    label_ts = tf.ensure_shape(label_ts, [1])
    return image_ts, dmap_ts, label_ts
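A hedged sketch of how a parse_fn like this is typically consumed in a tf.data pipeline; the loader object and file_list below are placeholders and are not part of the original source:

# Hypothetical usage (loader and file_list are placeholders):
dataset = tf.data.Dataset.from_tensor_slices(file_list)
dataset = dataset.map(loader.parse_fn, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(8).prefetch(1)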
Example #2
Source File: sequence_tagger.py From OpenNMT-tf with MIT License
def update_metrics(self, metrics, predictions, labels):
    weights = tf.sequence_mask(
        labels["length"], maxlen=tf.shape(labels["tags"])[1], dtype=tf.float32)

    metrics["accuracy"].update_state(
        labels["tags_id"], predictions["tags_id"], sample_weight=weights)

    if self.tagging_scheme in ("bioes",):
        flag_fn = None
        if self.tagging_scheme == "bioes":
            flag_fn = flag_bioes_tags

        gold_flags, predicted_flags = tf.numpy_function(
            flag_fn,
            [labels["tags"], predictions["tags"], labels["length"]],
            [tf.bool, tf.bool])
        metrics["f1"].update_state(gold_flags, predicted_flags)
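flag_bioes_tags is OpenNMT-tf's own helper. Purely to illustrate the contract a function wrapped by tf.numpy_function must satisfy here, namely receiving NumPy arrays and returning values matching the declared [tf.bool, tf.bool] outputs, a dummy stand-in could look like this (it is not the real implementation):

def _dummy_flag_fn(gold_tags, predicted_tags, lengths):
    # Illustrative placeholder: flags every in-sequence position, unlike the
    # real flag_bioes_tags, which decodes and compares BIOES spans.
    flags = np.zeros(gold_tags.shape, dtype=np.bool_)
    for i, length in enumerate(lengths):
        flags[i, :length] = True
    return flags, flags.copy()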
Example #3
Source File: in_graph_batch_env.py From rex-gym with Apache License 2.0
def simulate(self, action):
    """Step the batch of environments.

    The results of the step can be accessed from the variables defined below.

    Args:
      action: Tensor holding the batch of actions to apply.

    Returns:
      Operation.
    """
    with tf.name_scope('environment/simulate'):
        if action.dtype in (tf.float16, tf.float32, tf.float64):
            action = tf.debugging.check_numerics(action, 'action')
        observ_dtype = self._parse_dtype(self._batch_env.observation_space)
        observ, reward, done = tf.numpy_function(
            lambda a: self._batch_env.step(a)[:3], [action],
            [observ_dtype, tf.float32, tf.bool], name='step')
        observ = tf.debugging.check_numerics(observ, 'observ')
        reward = tf.debugging.check_numerics(reward, 'reward')
        return tf.group(self._observ.assign(observ), self._action.assign(action),
                        self._reward.assign(reward), self._done.assign(done))
Example #4
Source File: in_graph_batch_env.py From rex-gym with Apache License 2.0
def reset(self, indices=None):
    """Reset the batch of environments.

    Args:
      indices: The batch indices of the environments to reset; defaults to all.

    Returns:
      Batch tensor of the new observations.
    """
    if indices is None:
        indices = tf.range(len(self._batch_env))
    observ_dtype = self._parse_dtype(self._batch_env.observation_space)
    observ = tf.numpy_function(
        self._batch_env.reset, [indices], observ_dtype, name='reset')
    observ = tf.debugging.check_numerics(observ, 'observ')
    reward = tf.zeros_like(indices, tf.float32)
    done = tf.zeros_like(indices, tf.bool)
    with tf.control_dependencies([
            tf.compat.v1.scatter_update(self._observ, indices, observ),
            tf.compat.v1.scatter_update(self._reward, indices, reward),
            tf.compat.v1.scatter_update(self._done, indices, done)]):
        return tf.identity(observ)
Example #5
Source File: base_dataset.py From pyslam with GNU General Public License v3.0
def _get_data(self, files):
    def _read_image(img_path):
        channels = 1 if self.read_gray else 3
        if 'all_jpeg' in self.config and self.config['all_jpeg']:
            img = tf.image.decode_jpeg(tf.io.read_file(img_path),
                                       channels=channels, dct_method='INTEGER_ACCURATE')
        else:
            img = tf.image.decode_image(tf.io.read_file(img_path), channels=channels)
        img.set_shape((None, None, channels))
        return tf.cast(img, tf.float32)

    def _read_dump(path):
        f = h5py.File(path, 'r')
        return (f['reg_feat'][()].astype(np.float32),
                f['loc_info'][()].astype(np.float32))

    def _read_gen_train(path):
        f = h5py.File(path, 'r')
        return (f['aug_feat'][()].astype(np.float32),
                f['loc_info'][()][:, 0:2].astype(np.float32),
                f['loc_info'][()][:, 4].astype(np.float32))

    image_paths = tf.data.Dataset.from_tensor_slices(files['image_paths'])
    dump_paths = tf.data.Dataset.from_tensor_slices(files['dump_paths'])

    if self.config['stage'] == 'loc' or self.config['stage'] == 'reg':
        images = image_paths.map(_read_image)
        data = tf.data.Dataset.zip(
            {'image': images, 'dump_path': dump_paths, 'image_path': image_paths})
    elif self.config['stage'] == 'aug':
        dump_data = dump_paths.map(lambda path: tf.numpy_function(
            _read_dump, [path], [tf.float32, tf.float32]))
        data = tf.data.Dataset.zip({'dump_data': dump_data, 'dump_path': dump_paths})
    elif self.config['stage'] == 'post_format':
        dump_data = dump_paths.map(lambda path: tf.numpy_function(
            _read_gen_train, [path], [tf.float32, tf.float32, tf.float32]))
        data = tf.data.Dataset.zip(
            {'dump_data': dump_data, 'dump_path': dump_paths, 'image_path': image_paths})
    else:
        raise NotImplementedError
    return data
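One thing worth noting when tf.numpy_function is used inside Dataset.map, as in the 'aug' and 'post_format' branches above: the resulting tensors carry no static shape information. A minimal sketch of restoring it right after the call; the ranks below are assumptions, not taken from the source:

def _read_dump_with_shapes(path):
    reg_feat, loc_info = tf.numpy_function(_read_dump, [path], [tf.float32, tf.float32])
    # Assumed ranks; adjust to the actual layout of the HDF5 dumps.
    reg_feat.set_shape([None, None])
    loc_info.set_shape([None, None])
    return reg_feat, loc_info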
Example #6
Source File: photometric_augmentation.py From MVSNet with MIT License
def motion_blur(image, max_kernel_size=10):
    def _py_motion_blur(img):
        # Either vertical, horizontal or diagonal blur
        mode = np.random.choice(['h', 'v', 'diag_down', 'diag_up'])
        ksize = np.random.randint(0, (max_kernel_size + 1) / 2) * 2 + 1  # make sure it is odd
        center = int((ksize - 1) / 2)
        kernel = np.zeros((ksize, ksize))
        if mode == 'h':
            kernel[center, :] = 1.
        elif mode == 'v':
            kernel[:, center] = 1.
        elif mode == 'diag_down':
            kernel = np.eye(ksize)
        elif mode == 'diag_up':
            kernel = np.flip(np.eye(ksize), 0)
        var = ksize * ksize / 16.
        grid = np.repeat(np.arange(ksize)[:, np.newaxis], ksize, axis=-1)
        gaussian = np.exp(-(np.square(grid - center) + np.square(grid.T - center)) / (2. * var))
        kernel *= gaussian
        kernel /= np.sum(kernel)
        img = cv.filter2D(img, -1, kernel)
        return img

    blurred = tf.numpy_function(_py_motion_blur, [image], tf.float32)
    return tf.reshape(blurred, tf.shape(image))
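Because Tout is declared as tf.float32 and cv2.filter2D (with ddepth=-1) preserves the input dtype, the image fed to motion_blur should already be a float32 tensor; the final tf.reshape then puts the blurred output back into the input's shape, since the numpy_function result alone has unknown shape. A hedged usage sketch, where image_ds is an assumed dataset of decoded images:

# Hypothetical pipeline step: cast first so the declared float32 output dtype matches.
image_ds = image_ds.map(
    lambda img: motion_blur(tf.cast(img, tf.float32), max_kernel_size=7))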
Example #7
Source File: gif_utils.py From slac with MIT License
def gif_summary_v2(name, tensor, max_outputs, fps, family=None, step=None):
    def py_gif_event(step, tag, tensor, max_outputs, fps):
        summary = py_gif_summary(tag, tensor, max_outputs, fps)
        if isinstance(summary, bytes):
            summ = summary_pb2.Summary()
            summ.ParseFromString(summary)
            summary = summ
        event = event_pb2.Event(summary=summary)
        event.wall_time = time.time()
        event.step = step
        event_pb = event.SerializeToString()
        return event_pb

    def function(tag, scope):
        # Note the identity to move the tensor to the CPU.
        event = tf.numpy_function(
            py_gif_event,
            [_choose_step(step), tag, tf.identity(tensor), max_outputs, fps],
            tf.string)
        return summary_ops_v2.import_event(event, name=scope)

    return summary_ops_v2.summary_writer_function(
        name, tensor, function, family=family)
Example #8
Source File: train_regression.py From nuke-ML-server with Apache License 2.0
def get_data(self, data_list, batch_size=16, epoch=100, shuffle_buffer_size=1000):
    def read_and_preprocess_data(path_img, param):
        """Read image in path_img, resize it to patch_size, convert to grayscale
        and apply a random gamma grade to it.

        Returns:
            input_data: stack of both original and graded image histograms
            param: groundtruth gamma value
        """
        if self.is_exr:  # ['exr', 'EXR']
            img = tf.numpy_function(read_resize_exr, [path_img, self.patch_size], [tf.float32])
            img = tf.numpy_function(linear_to_srgb, [img], [tf.float32])
            img = tf.reshape(img, [self.patch_size, self.patch_size, self.channels])
            img = tf.image.rgb_to_grayscale(img)
        else:  # ['jpg', 'jpeg', 'png', 'bmp', 'JPG', 'JPEG', 'PNG', 'BMP']
            img_raw = tf.io.read_file(path_img)
            img_tensor = tf.image.decode_png(img_raw, channels=3)
            img = tf.cast(img_tensor, tf.float32) / 255.0
            img = tf.image.rgb_to_grayscale(img)
            img = tf.image.resize(img, [self.patch_size, self.patch_size])
        # Depending on what parameter(s) you want to learn, modify the training
        # input data. Here to learn gamma correction, our input data trainX is
        # a stack of both original and gamma-graded histograms.
        input_data = gamma_correction(img, param)
        return input_data, param

    with tf.compat.v1.variable_scope('input'):
        # Ensure preprocessing is done on the CPU (to let the GPU focus on training)
        with tf.device('/cpu:0'):
            data_tensor = tf.convert_to_tensor(data_list, dtype=tf.string)
            path_dataset = tf.data.Dataset.from_tensor_slices((data_tensor))
            path_dataset = path_dataset.shuffle(shuffle_buffer_size).repeat(epoch)
            # Depending on what parameter(s) you want to learn, modify the random
            # uniform range. Here create random gamma values between 0.2 and 5
            param_tensor = tf.random.uniform(
                [len(data_list) * epoch, self.output_param_number], 0.2, 5.0)
            param_dataset = tf.data.Dataset.from_tensor_slices((param_tensor))
            dataset = tf.data.Dataset.zip((path_dataset, param_dataset))
            # Apply read_and_preprocess_data function to all input in the path_dataset
            dataset = dataset.map(read_and_preprocess_data, num_parallel_calls=4)
            dataset = dataset.batch(batch_size)
            # Always prefetch one batch and make sure there is always one ready
            dataset = dataset.prefetch(buffer_size=1)
    return dataset
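read_resize_exr, linear_to_srgb and gamma_correction are helpers defined elsewhere in the nuke-ML-server project. Since the rest of the code uses v1-style scopes, a hedged sketch of consuming the resulting dataset could look like the following; the trainer object and data_list are placeholders, not taken from the source:

# Hypothetical usage (trainer and data_list are placeholders):
dataset = trainer.get_data(data_list, batch_size=16, epoch=100)
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
input_data, gamma = iterator.get_next()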