Python tensorflow.py_function() Examples
The following are 30 code examples of tensorflow.py_function(). You can go to the original project or source file by following the links above each example.
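tf.py_function wraps an eager Python function as a TensorFlow operation, so arbitrary Python code can run inside a tf.function or a tf.data pipeline. The wrapped function receives EagerTensors (so .numpy() is available), and its outputs have unknown static shapes, which is why most of the examples below call set_shape on the results. As a minimal sketch of the pattern shared by these examples (the function and tensor names here are illustrative only, not taken from any of the projects below):

import tensorflow as tf

def slow_square(x):
    # Runs eagerly: x arrives as an EagerTensor, so .numpy() is available.
    return x.numpy() ** 2

@tf.function
def graph_fn(x):
    # inp lists the input tensors, Tout declares the output dtype(s).
    y = tf.py_function(func=slow_square, inp=[x], Tout=tf.float32)
    # py_function outputs lose their static shape; restore it when known.
    y.set_shape(x.shape)
    return y

print(graph_fn(tf.constant([1.0, 2.0, 3.0])))  # tf.Tensor([1. 4. 9.], ...)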
Example #1
Source File: models.py From tf2-yolo3 with Apache License 2.0 | 8 votes |
def yolo_nms(outputs, anchors, masks, num_classes, iou_threshold=0.6, score_threshold=0.15):
    boxes, confs, classes = [], [], []
    for o in outputs:
        boxes.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
        confs.append(tf.reshape(o[1], (tf.shape(o[0])[0], -1, tf.shape(o[1])[-1])))
        classes.append(tf.reshape(o[2], (tf.shape(o[0])[0], -1, tf.shape(o[2])[-1])))

    boxes = tf.concat(boxes, axis=1)
    confs = tf.concat(confs, axis=1)
    class_probs = tf.concat(classes, axis=1)

    box_scores = confs * class_probs

    mask = box_scores >= score_threshold
    mask = tf.reduce_any(mask, axis=-1)

    class_boxes = tf.boolean_mask(boxes, mask)
    class_boxes = tf.reshape(class_boxes, (tf.shape(boxes)[0], -1, 4))
    class_box_scores = tf.boolean_mask(box_scores, mask)
    class_box_scores = tf.reshape(class_box_scores, (tf.shape(boxes)[0], -1, num_classes))

    class_boxes, class_box_scores = tf.py_function(
        func=batched_nms,
        inp=[class_boxes, class_box_scores, num_classes, iou_threshold],
        Tout=[tf.float32, tf.float32])
    classes = tf.argmax(class_box_scores, axis=-1)

    return class_boxes, class_box_scores, classes
Example #2
Source File: multi_thread_env.py From tf2rl with MIT License | 7 votes |
def step(self, actions, name=None):
    """Take one step in all environments.

    :param tf.Tensor actions: float32[batch_size, dim_action]
    :param name: Operator name.
    :rtype: (tf.Tensor, tf.Tensor, tf.Tensor)
    :return: (obs, reward, done)
        obs = [batch_size, dim_obs]
        reward = [batch_size]
        done = [batch_size]
        reach_limit = [batch_size] : whether each environment reached its time limit or not.
    """
    assert isinstance(actions, tf.Tensor)
    # with tf.variable_scope(name, default_name="MultiStep"):
    obs, reward, done = tf.py_function(
        func=self.py_step,
        inp=[actions],
        Tout=[tf.float32, tf.float32, tf.float32],
        name="py_step")
    obs.set_shape((self.batch_size,) + self.observation_shape)
    reward.set_shape((self.batch_size,))
    done.set_shape((self.batch_size,))
    return obs, reward, done, None
Example #3
Source File: tokenizer.py From OpenNMT-tf with MIT License | 7 votes |
def _tokenize_tensor(self, text):
    """Tokenizes a tensor.

    When not overridden, this default implementation calls the string-based
    tokenization.

    Args:
      text: A 1-D string ``tf.Tensor``.

    Returns:
      A 1-D string ``tf.Tensor``.
    """
    def _python_wrapper(string_t):
        string = tf.compat.as_text(string_t.numpy())
        tokens = self._tokenize_string(string)
        return tf.constant(tokens, dtype=tf.string)

    tokens = tf.py_function(_python_wrapper, [text], tf.string)
    tokens.set_shape([None])
    return tokens
Example #4
Source File: metrics.py From dreamer with Apache License 2.0 | 6 votes |
def set_tags(self, **tags):
    keys, values = zip(*sorted(tags.items(), key=lambda x: x[0]))

    def inner(*values):
        parsed = []
        for index, value in enumerate(values):
            if value.dtype == tf.string:
                parsed.append(value.numpy().decode('utf-8'))
            elif value.dtype in (tf.float32, tf.float64):
                parsed.append(float(value.numpy()))
            elif value.dtype in (tf.int32, tf.int64):
                parsed.append(int(value.numpy()))
            else:
                raise NotImplementedError(value.dtype)
        tags = dict(zip(keys, parsed))
        return self._metrics.set_tags(**tags)

    # String tensors in tf.py_function are only supported on CPU.
    with tf.device('/cpu:0'):
        return tf.py_function(inner, values, [], 'set_tags')
Example #5
Source File: dqn.py From huskarl with MIT License | 6 votes |
def masked_q_loss(data, y_pred):
    """Computes the MSE between the Q-values of the actions that were taken and
    the cumulative discounted rewards obtained after taking those actions.
    Updates trace priorities if using PrioritizedExperienceReplay.
    """
    action_batch, target_qvals = data[:, 0], data[:, 1]
    seq = tf.cast(tf.range(0, tf.shape(action_batch)[0]), tf.int32)
    action_idxs = tf.transpose(tf.stack([seq, tf.cast(action_batch, tf.int32)]))
    qvals = tf.gather_nd(y_pred, action_idxs)
    if isinstance(self.memory, memory.PrioritizedExperienceReplay):
        def update_priorities(_qvals, _target_qvals, _traces_idxs):
            """Computes the TD error and updates memory priorities."""
            td_error = np.abs((_target_qvals - _qvals).numpy())
            _traces_idxs = (tf.cast(_traces_idxs, tf.int32)).numpy()
            self.memory.update_priorities(_traces_idxs, td_error)
            return _qvals
        qvals = tf.py_function(func=update_priorities,
                               inp=[qvals, target_qvals, data[:, 2]],
                               Tout=tf.float32)
    return tf.keras.losses.mse(qvals, target_qvals)
Example #6
Source File: ops_test.py From seed_rl with Apache License 2.0 | 6 votes |
def test_not_fully_specified_outputs2(self):
    address = self.get_unix_address()
    server = ops.Server([address])

    @tf.function(input_signature=[tf.TensorSpec([1], tf.int32)])
    def foo(x):
        result, = tf.py_function(lambda x: x, [x], [tf.int32])
        result.set_shape([None])
        return result

    server.bind(foo, batched=True)
    server.start()
    client = ops.Client(address)
    self.assertAllEqual(42, client.foo(42))
    server.shutdown()
Example #7
Source File: ops_test.py From seed_rl with Apache License 2.0 | 6 votes |
def test_shutdown_while_in_call(self, dim, batched):
    address = self.get_unix_address()
    server = ops.Server([address])
    is_waiting = threading.Event()

    @tf.function(input_signature=[tf.TensorSpec(dim, tf.int32)])
    def foo(x):
        tf.py_function(is_waiting.set, [], [])
        tf.py_function(time.sleep, [1], [])
        return x + 1

    server.bind(foo, batched=batched)
    server.start()
    client = ops.Client(address)
    with futures.ThreadPoolExecutor(max_workers=1) as executor:
        f = executor.submit(client.foo, 42)
        is_waiting.wait()
        server.shutdown()
        with self.assertRaisesRegexp(tf.errors.UnavailableError, 'server closed'):
            f.result()
Example #8
Source File: librispeech.py From rnnt-speech-recognition with MIT License | 6 votes |
def tf_parse_line(line, data_dir, split_names):
    line_split = tf.strings.split(line, ' ')

    audio_fn = line_split[0]
    transcription = tf.py_function(
        lambda x: b' '.join(x.numpy()).decode('utf8'),
        inp=[line_split[1:]],
        Tout=tf.string)

    speaker_id, chapter_id, _ = tf.unstack(tf.strings.split(audio_fn, '-'), 3)

    all_fps = tf.map_fn(
        lambda split_name: tf.strings.join(
            [data_dir, split_name, speaker_id, chapter_id, audio_fn], '/') + '.flac',
        tf.constant(split_names))

    audio_filepath_idx = tf.where(
        tf.map_fn(tf_file_exists, all_fps, dtype=tf.bool))[0][0]
    audio_filepath = all_fps[audio_filepath_idx]

    audio, sr = tf_load_audio(audio_filepath)

    return audio, sr, transcription
Example #9
Source File: tokenizer.py From OpenNMT-tf with MIT License | 6 votes |
def _detokenize_tensor(self, tokens):
    """Detokenizes tokens.

    When not overridden, this default implementation calls the string-based
    detokenization.

    Args:
      tokens: A 1-D ``tf.Tensor``.

    Returns:
      A 0-D string ``tf.Tensor``.
    """
    def _python_wrapper(tokens_t):
        tokens = [tf.compat.as_text(s) for s in tokens_t.numpy()]
        string = self._detokenize_string(tokens)
        return tf.constant(string)

    text = tf.py_function(_python_wrapper, [tokens], tf.string)
    text.set_shape([])
    return text
Example #10
Source File: metrics.py From dreamer with Apache License 2.0 | 5 votes |
def reset_tags(self):
    return tf.py_function(
        self._metrics.reset_tags, [], [], 'reset_tags')
Example #11
Source File: __init__.py From tfpyth with MIT License | 5 votes |
def tensorflow_from_torch(func, inp, Tout, name=None):
    """
    Wraps a PyTorch function as a TensorFlow op that produces an output tensor
    (i.e. it can be evaluated within TensorFlow).

    :param func: Function that takes PyTorch tensors and returns a PyTorch tensor.
    :param inp: TensorFlow input tensors
    :param Tout: TensorFlow output dtype
    :param name: Name of the output tensor
    :return: Differentiable TensorFlow output tensor.
    """
    eager_compute = eager_tensorflow_from_torch(func)

    return tf.py_function(eager_compute, inp, Tout, name=name)
Example #12
Source File: transformer_ted.py From artificial_neural_networks with Apache License 2.0 | 5 votes |
def tf_encode(lang1, lang2):
    """
    Receive an eager tensor having a numpy attribute that contains the string value
    """
    return tf.py_function(encode, [lang1, lang2], [tf.int64, tf.int64])

# %%
# Seq2Seq model
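Wrappers like tf_encode are usually applied through Dataset.map, so the Python-level encode function runs once per example while the rest of the pipeline stays in graph mode. Below is a hedged sketch of that wiring, assuming a dataset named examples of (lang1, lang2) string pairs and a MAX_LENGTH constant; neither name comes from the transformer_ted source.

import tensorflow as tf

MAX_LENGTH = 40  # assumed cutoff, not from the original project

def filter_max_length(x, y, max_length=MAX_LENGTH):
    # Drop pairs whose encoded length exceeds the cutoff.
    return tf.logical_and(tf.size(x) <= max_length, tf.size(y) <= max_length)

# `examples` is assumed to be a tf.data.Dataset of (lang1, lang2) string pairs;
# `tf_encode` is the py_function wrapper defined in the example above.
train_dataset = (examples
                 .map(tf_encode)
                 .filter(filter_max_length)
                 .cache()
                 .padded_batch(64, padded_shapes=([None], [None]))
                 .prefetch(tf.data.experimental.AUTOTUNE))

Because the py_function outputs carry no static shape, padded_shapes is given explicitly here; an alternative is to call set_shape([None]) on each output inside tf_encode and let padded_batch infer the shapes.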
Example #13
Source File: ddpg.py From huskarl with MIT License | 5 votes |
def q_loss(data, qvals):
    """Computes the MSE between the Q-values of the actions that were taken and
    the cumulative discounted rewards obtained after taking those actions.
    Updates trace priorities if using PrioritizedExperienceReplay.
    """
    target_qvals = data[:, 0, np.newaxis]
    if isinstance(self.memory, memory.PrioritizedExperienceReplay):
        def update_priorities(_qvals, _target_qvals, _traces_idxs):
            """Computes the TD error and updates memory priorities."""
            td_error = np.abs((_target_qvals - _qvals).numpy())[:, 0]
            _traces_idxs = (tf.cast(_traces_idxs, tf.int32)).numpy()
            self.memory.update_priorities(_traces_idxs, td_error)
            return _qvals
        qvals = tf.py_function(func=update_priorities,
                               inp=[qvals, target_qvals, data[:, 1]],
                               Tout=tf.float32)
    return MSE(target_qvals, qvals)
Example #14
Source File: data_process.py From TransformerModel with MIT License | 5 votes |
def tf_encode(self, feature, target):
    """
    :param feature: The input feature text
    :param target: The output target text
    :return: The encoded vector representations of the feature and target text.
    """
    return tf.py_function(self.encode, [feature, target], [tf.int64, tf.int64])
Example #15
Source File: dataset_wrapper.py From DeepDanbooru with MIT License | 5 votes |
def map_transform_image_and_label(self, image, tag_string):
    return tf.py_function(self.map_transform_image_and_label_py,
                          (image, tag_string), (tf.float32, tf.float32))
Example #16
Source File: data.py From mobilenetv2-yolov3 with MIT License | 5 votes |
def parse_text(self, line):
    values = tf.strings.split([line], ' ').values
    image = tf.image.decode_image(tf.io.read_file(values[0]), channels=3, dtype=tf.float32)
    image.set_shape([None, None, 3])
    reshaped_data = tf.reshape(values[1:], [-1, 5])
    xmins = tf.strings.to_number(reshaped_data[:, 0], tf.float32)
    xmaxs = tf.strings.to_number(reshaped_data[:, 2], tf.float32)
    ymins = tf.strings.to_number(reshaped_data[:, 1], tf.float32)
    ymaxs = tf.strings.to_number(reshaped_data[:, 3], tf.float32)
    labels = tf.strings.to_number(reshaped_data[:, 4], tf.int64)
    image, bbox = get_random_data(image, xmins, xmaxs, ymins, ymaxs, labels, self.input_shape,
                                  train=self.mode == DATASET_MODE.TRAIN)
    y1, y2, y3 = tf.py_function(
        preprocess_true_boxes,
        [bbox, self.input_shape, self.anchors, self.num_classes],
        [tf.float32, tf.float32, tf.float32])
    y1.set_shape([None, None, len(self.anchors) // 3, self.num_classes + 5])
    y2.set_shape([None, None, len(self.anchors) // 3, self.num_classes + 5])
    y3.set_shape([None, None, len(self.anchors) // 3, self.num_classes + 5])
    return image, (y1, y2, y3)
Example #17
Source File: data.py From mobilenetv2-yolov3 with MIT License | 5 votes |
def parse_tfrecord(self, example_proto):
    feature_description = {
        'image/encoded': tf.io.FixedLenFeature([], tf.string),
        'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/label': tf.io.VarLenFeature(tf.int64)
    }
    features = tf.io.parse_single_example(example_proto, feature_description)
    image = tf.image.decode_image(features['image/encoded'], channels=3, dtype=tf.float32)
    image.set_shape([None, None, 3])
    xmins = features['image/object/bbox/xmin'].values
    xmaxs = features['image/object/bbox/xmax'].values
    ymins = features['image/object/bbox/ymin'].values
    ymaxs = features['image/object/bbox/ymax'].values
    labels = features['image/object/bbox/label'].values
    image, bbox = get_random_data(image, xmins, xmaxs, ymins, ymaxs, labels, self.input_shape,
                                  train=self.mode == DATASET_MODE.TRAIN)
    y1, y2, y3 = tf.py_function(
        preprocess_true_boxes,
        [bbox, self.input_shape, self.anchors, self.num_classes],
        [tf.float32, tf.float32, tf.float32])
    y1.set_shape([None, None, len(self.anchors) // 3, self.num_classes + 5])
    y2.set_shape([None, None, len(self.anchors) // 3, self.num_classes + 5])
    y3.set_shape([None, None, len(self.anchors) // 3, self.num_classes + 5])
    return image, (y1, y2, y3)
Example #18
Source File: metrics.py From dreamer with Apache License 2.0 | 5 votes |
def flush(self):
    def inner():
        _ = self._metrics.flush()
    return tf.py_function(inner, [], [], 'flush')
Example #19
Source File: metrics.py From dreamer with Apache License 2.0 | 5 votes |
def add_scalar(self, name, value):
    assert len(value.shape) == 0, (name, value)
    def inner(value):
        self._metrics.add_scalar(name, value.numpy())
    return tf.py_function(inner, [value], [], 'add_scalar_' + name)
Example #20
Source File: metrics.py From dreamer with Apache License 2.0 | 5 votes |
def add_scalars(self, name, value):
    assert len(value.shape) == 1, (name, value)
    def inner(value):
        self._metrics.add_scalars(name, value.numpy())
    return tf.py_function(inner, [value], [], 'add_scalars_' + name)
Example #21
Source File: data_loader.py From tf2-mobile-pose-estimation with Apache License 2.0 | 5 votes |
def input_fn(self, params=None):
    """Input function which provides a single batch for train or eval.

    Args:
      params: `dict` of parameters passed from the `TPUEstimator`.
        `params['batch_size']` is always provided and should be used as the
        effective batch size.

    Returns:
      A `tf.data.Dataset` object.

    doc reference:
    https://www.tensorflow.org/api_docs/python/tf/data/TFRecordDataset
    """
    dataset = tf.data.Dataset.from_tensor_slices(self.imgIds)

    dataset = dataset.apply(tf.data.experimental.map_and_batch(
        map_func=lambda imgId: tuple(
            tf.py_function(
                func=self._parse_function,
                inp=[imgId],
                Tout=[tf.float32, tf.float32])),
        batch_size=self.config_training["batch_size"],
        num_parallel_calls=self.config_training["multiprocessing_num"],
        drop_remainder=True))

    # cache entire dataset in memory after preprocessing
    # dataset = dataset.cache()  # do not use this code for OOM problem

    dataset = dataset.map(self._set_shapes,
                          num_parallel_calls=self.config_training["multiprocessing_num"])

    # Prefetch overlaps in-feed with training
    # dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)  # tf.data.experimental.AUTOTUNE have to be upper than 1.13
    dataset = dataset.prefetch(buffer_size=self.config_training["batch_size"] * 3)

    # tf.logging.info('[Input_fn] dataset pipeline building complete')
    return dataset
Example #22
Source File: librispeech.py From rnnt-speech-recognition with MIT License | 5 votes |
def tf_load_audio(filepath):
    return tf.py_function(
        lambda x: load_audio(x.numpy()),
        inp=[filepath],
        Tout=[tf.float32, tf.int32])
Example #23
Source File: librispeech.py From rnnt-speech-recognition with MIT License | 5 votes |
def tf_file_exists(filepath):
    return tf.py_function(
        lambda x: os.path.exists(x.numpy()),
        inp=[filepath],
        Tout=tf.bool)
Example #24
Source File: encoding.py From rnnt-speech-recognition with MIT License | 5 votes |
def tf_wordpiece_encode(text, encoder):
    return tf.py_function(
        lambda x: wordpiece_encode(x, encoder),
        inp=[text],
        Tout=tf.int32)
Example #25
Source File: encoding.py From rnnt-speech-recognition with MIT License | 5 votes |
def tf_wordpiece_decode(ids, encoder):
    return tf.py_function(
        lambda x: wordpiece_decode(x, encoder),
        inp=[ids],
        Tout=[tf.string])[0]
Example #26
Source File: preprocessing.py From rnnt-speech-recognition with MIT License | 5 votes |
def print_tensor(t, template='{}'):
    return tf.py_function(
        lambda x: print(template.format(x.numpy())),
        inp=[t],
        Tout=[])
Example #27
Source File: preprocessing.py From rnnt-speech-recognition with MIT License | 5 votes |
def tf_serialize_example(mel_specs, pred_inp, spec_lengths, label_lengths, labels):
    tf_string = tf.py_function(
        serialize_example,
        (mel_specs, pred_inp, spec_lengths, label_lengths, labels),
        tf.string)
    return tf.reshape(tf_string, ())
Example #28
Source File: preprocessing.py From rnnt-speech-recognition with MIT License | 5 votes |
def tf_plot_spec(spec, sr, transcription, name):
    spec_t = tf.transpose(spec)
    return tf.py_function(
        lambda _spec, _sr, trans: plot_spec(
            _spec.numpy(), _sr.numpy(), trans.numpy().decode('utf8'), name),
        inp=[spec_t, sr, transcription],
        Tout=[])
Example #29
Source File: preprocessing.py From rnnt-speech-recognition with MIT License | 5 votes |
def tf_plot_audio(audio_arr, sr, trans, name):
    return tf.py_function(
        lambda _audio, _sr, _trans: plot_audio(
            _audio.numpy(), _sr.numpy(), _trans.numpy(), name),
        inp=[audio_arr, sr, trans],
        Tout=[])
Example #30
Source File: utils_v2.py From Transformer-in-generating-dialogue with Apache License 2.0 | 5 votes |
def jit_tokenize_sequences(source_sent, target_sent):
    return tf.py_function(
        tokenize_sequences,
        [source_sent, target_sent],
        [tf.int64, tf.int64])