Python tensorflow.bitcast() Examples
The following are 30 code examples of tensorflow.bitcast(), drawn from the open-source projects named above each example. You may also want to check out all available functions and classes of the module tensorflow.
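Before the examples, a minimal sketch of the op itself (this snippet is ours, not from any project below): tf.bitcast reinterprets a tensor's underlying bytes as another dtype, while tf.cast converts values. When the target dtype is narrower, bitcast appends a trailing dimension; when it is wider, it consumes the innermost one.

import tensorflow as tf

x = tf.constant([1.0], dtype=tf.float32)
tf.bitcast(x, tf.int32)  # [1065353216], the IEEE-754 bit pattern 0x3f800000 of 1.0
tf.cast(x, tf.int32)     # [1], a value conversion
tf.bitcast(x, tf.uint8)  # shape [1, 4]: the four raw bytes of each float32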
Example #1
Source File: diet.py From fine-lm with MIT License
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
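The bitcast pair here is a storage trick: the quantized int16 codes are stored bit-for-bit inside float16 variables, and _dequantize (Example #16) bitcasts them back before rescaling. A hedged round-trip sketch, with 0.1 standing in for params.quantization_scale:

codes = tf.constant([-3, 0, 7], dtype=tf.int16)
stored = tf.bitcast(codes, tf.float16)    # same 16 bits, float16 container
recovered = tf.bitcast(stored, tf.int16)  # exact round trip, no rounding
values = tf.cast(recovered, tf.float32) * 0.1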
Example #2
Source File: diet.py From training_results_v0.5 with Apache License 2.0
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
Example #3
Source File: diet.py From training_results_v0.5 with Apache License 2.0
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
Example #4
Source File: bingrad_common.py From terngrad with Apache License 2.0
def decode_from_ternary_gradients(grads_and_vars, scalers, shapes):
  """Decode each gradient tensor."""
  with tf.name_scope('ternary_decoder'):
    gradients, variables = zip(*grads_and_vars)
    floating_gradients = []
    for gradient, variable, scaler, shape in zip(gradients, variables,
                                                 scalers, shapes):
      if gradient is None:
        floating_gradients.append(None)
        continue  # nothing to decode
      # gradient is encoded, so we use variable to check its size
      # We also assume dtype of variable and gradient is the same
      floating_gradient = tf.cond(
          tf.size(variable) < FLAGS.size_to_binarize,
          lambda: tf.bitcast(gradient, variable.dtype),
          lambda: ternary_decoder(gradient, scaler, shape))
      floating_gradients.append(floating_gradient)

    return list(zip(floating_gradients, variables))
Example #5
Source File: diet.py From BERT with Apache License 2.0
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
Example #6
Source File: tf_utils.py From deepsignal with GNU General Public License v3.0
def parse_a_line_b(value, base_num, signal_num):
    vec = tf.decode_raw(value, tf.int8)

    bases = tf.cast(
        tf.reshape(tf.strided_slice(vec, [0], [base_num]), [base_num]),
        dtype=tf.int32)
    means = tf.bitcast(
        tf.reshape(tf.strided_slice(vec, [base_num], [base_num + base_num * 4]),
                   [base_num, 4]),
        type=tf.float32)
    stds = tf.bitcast(
        tf.reshape(tf.strided_slice(vec, [base_num * 5], [base_num * 5 + base_num * 4]),
                   [base_num, 4]),
        type=tf.float32)
    sanum = tf.cast(
        tf.bitcast(
            tf.reshape(tf.strided_slice(vec, [base_num * 9], [base_num * 9 + base_num * 2]),
                       [base_num, 2]),
            type=tf.int16),
        dtype=tf.int32)
    signals = tf.bitcast(
        tf.reshape(tf.strided_slice(vec, [base_num * 11], [base_num * 11 + 4 * signal_num]),
                   [signal_num, 4]),
        type=tf.float32)
    labels = tf.cast(
        tf.reshape(tf.strided_slice(vec, [base_num * 11 + signal_num * 4],
                                    [base_num * 11 + signal_num * 4 + 1]),
                   [1]),
        dtype=tf.int32)

    return bases, means, stds, sanum, signals, labels
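The slice offsets above imply a fixed binary record layout. A sketch of a compatible record built with NumPy (our inference from the offsets, not deepsignal code; native little-endian byte order is assumed to match tf.decode_raw):

import numpy as np

base_num, signal_num = 3, 4
record = b"".join([
    np.zeros(base_num, np.int8).tobytes(),       # bases
    np.zeros(base_num, np.float32).tobytes(),    # means
    np.zeros(base_num, np.float32).tobytes(),    # stds
    np.zeros(base_num, np.int16).tobytes(),      # signal counts (sanum)
    np.zeros(signal_num, np.float32).tobytes(),  # signals
    np.zeros(1, np.int8).tobytes(),              # label
])
assert len(record) == base_num * 11 + signal_num * 4 + 1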
Example #7
Source File: boosted_trees.py From estimator with Apache License 2.0
def lookup(self):
  """Returns cached_tree_ids, cached_node_ids, cached_logits."""
  cached_tree_ids, cached_node_ids, cached_logits = tf.split(
      lookup_ops.lookup_table_find_v2(
          self._table_ref,
          self._example_ids,
          default_value=[0.0, _DUMMY_NODE_ID, 0.0]),
      [1, 1, self._logits_dimension],
      axis=1)
  cached_tree_ids = tf.compat.v1.squeeze(
      tf.bitcast(cached_tree_ids, tf.dtypes.int32))
  cached_node_ids = tf.compat.v1.squeeze(
      tf.bitcast(cached_node_ids, tf.dtypes.int32))
  if self._example_ids.shape.ndims is not None:
    cached_logits.set_shape(
        [self._example_ids.shape[0], self._logits_dimension])
  return (cached_tree_ids, cached_node_ids, cached_logits)
Example #8
Source File: utils_tf.py From delta with Apache License 2.0
def create_topk_unique(inputs, k):
  """Creates the top k values in sorted order with indices."""
  height = inputs.shape[0]
  width = inputs.shape[1]
  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
  ones = tf.ones([height, width], dtype=tf.float32)
  neg_inf_r2 = ones * neg_inf_r0
  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)

  tmp = inputs
  topk_r2 = tf.zeros([height, k], dtype=tf.float32)
  for i in range(k):
    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
    k_mask = tf.tile(
        tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0), [height, 1])
    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
    tmp = tf.where(ge_r2, neg_inf_r2, inputs)

  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = next_power_of_two - 1
  mask_r0 = tf.constant(count_mask)
  mask_r2 = tf.fill([height, k], mask_r0)
  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
  return topk_r2, topk_indices_r2
Example #9
Source File: utils_tf.py From delta with Apache License 2.0
def create_make_unique(inputs):
  """Replaces the lower bits of each element with iota."""
  if inputs.shape.ndims != 2:
    raise ValueError("Input of top_k_with_unique must be rank-2 "
                     "but got: %s" % inputs.shape)

  height = inputs.shape[0]
  width = inputs.shape[1]
  zeros = tf.zeros([height, width], dtype=tf.int32)

  log2_ceiling = int(math.ceil(math.log(int(width), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = ~(next_power_of_two - 1)
  count_mask_r0 = tf.constant(count_mask)
  count_mask_r2 = tf.fill([height, width], count_mask_r0)

  smallest_normal = 1 << 23
  smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32)
  smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0)

  low_bit_mask = ~(1 << 31)
  low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32)
  low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0)

  iota = tf.tile(
      tf.expand_dims(tf.range(width, dtype=tf.int32), 0), [height, 1])

  input_r2 = tf.bitcast(inputs, tf.int32)
  abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2)
  if_zero_r2 = tf.equal(abs_r2, zeros)
  smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or(
      input_r2, smallest_normal_r2)
  input_no_zeros_r2 = tf.where(if_zero_r2,
                               smallest_normal_preserving_sign_r2, input_r2)

  and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2)
  or_r2 = tf.bitwise.bitwise_or(and_r2, iota)
  return tf.bitcast(or_r2, tf.float32)
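For context, these two helpers are designed to compose into a deterministic, tie-free top-k: create_make_unique stashes each column index in the low mantissa bits, and create_topk_unique reads those bits back out. A sketch of the composition (the name top_k_with_unique comes from the error message above; delta's actual wrapper may differ):

def top_k_with_unique(inputs, k):
  unique_inputs = create_make_unique(tf.cast(inputs, tf.float32))
  top_values, indices = create_topk_unique(unique_inputs, k)
  return top_values, indices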
Example #10
Source File: bingrad_common.py From terngrad with Apache License 2.0
def encode_to_ternary_gradients(grads_and_vars, get_shape=False):
  """Encode each gradient tensor."""
  with tf.name_scope('ternary_encoder'):
    gradients, variables = zip(*grads_and_vars)
    ternary_gradients = []
    gradient_shapes = []
    for gradient in gradients:
      if gradient is None:
        ternary_gradients.append(None)
        if get_shape:
          gradient_shapes.append(None)
        continue

      if get_shape:
        if isinstance(gradient, tf.IndexedSlices):
          gradient_shape = gradient.dense_shape
        else:
          gradient_shape = gradient.get_shape()
        gradient_shapes.append(gradient_shape)

      ternary_gradient = tf.cond(tf.size(gradient) < FLAGS.size_to_binarize,
                                 lambda: tf.bitcast(gradient, type=tf.uint8),
                                 lambda: ternary_encoder(gradient))
      ternary_gradients.append(ternary_gradient)

    if get_shape:
      return list(zip(ternary_gradients, variables)), gradient_shapes
    else:
      return list(zip(ternary_gradients, variables))
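Note the fast path shared with Example #4: gradients smaller than FLAGS.size_to_binarize skip ternary coding and travel as raw bytes, encoded by bitcasting float32 to uint8 here and decoded by the reverse bitcast there. A minimal sketch of that round trip:

g = tf.constant([0.25, -1.0], dtype=tf.float32)
wire = tf.bitcast(g, tf.uint8)       # shape [2, 4]: the raw bytes of each float32
back = tf.bitcast(wire, tf.float32)  # shape [2]: exact reconstruction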
Example #11
Source File: bitcast_op_test.py From deep_image_model with Apache License 2.0
def testUnknown(self):
  x = tf.placeholder(tf.float32)
  datatype = tf.int8
  tf.bitcast(x, datatype, None)
Example #12
Source File: boosted_trees.py From estimator with Apache License 2.0
def insert(self, tree_ids, node_ids, logits):
  """Inserts values and returns the op."""
  insert_op = lookup_ops.lookup_table_insert_v2(
      self._table_ref, self._example_ids,
      tf.concat([
          tf.compat.v1.expand_dims(
              tf.bitcast(tree_ids, tf.dtypes.float32), 1),
          tf.compat.v1.expand_dims(
              tf.bitcast(node_ids, tf.dtypes.float32), 1),
          logits,
      ], axis=1, name='value_concat_for_cache_insert'))
  return insert_op
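Packing int32 ids into float32 table columns (and unpacking them in the lookup of Example #7) works because a bitcast round trip is byte-exact for every bit pattern, unlike a numeric cast. A quick check:

ids = tf.constant([0, 1, 2**31 - 1], dtype=tf.int32)
as_float = tf.bitcast(ids, tf.float32)       # ids riding in float32 slots
round_trip = tf.bitcast(as_float, tf.int32)  # bit-for-bit identical to ids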
Example #13
Source File: bitcast_op_test.py From deep_image_model with Apache License 2.0
def testErrors(self):
  x = np.zeros([1, 1], np.int8)
  datatype = tf.int32
  with self.assertRaisesRegexp(ValueError, "Cannot bitcast due to shape"):
    tf.bitcast(x, datatype, None)
Example #14
Source File: bitcast_op_test.py From deep_image_model with Apache License 2.0
def _testBitcast(self, x, datatype, shape):
  with self.test_session():
    tf_ans = tf.bitcast(x, datatype)
    out = tf_ans.eval()
    buff_after = memoryview(out).tobytes()
    buff_before = memoryview(x).tobytes()
    self.assertEqual(buff_before, buff_after)
    self.assertEqual(tf_ans.get_shape(), shape)
    self.assertEqual(tf_ans.dtype, datatype)
Example #15
Source File: beam_search.py From training_results_v0.5 with Apache License 2.0
def _create_topk_unique(inputs, k):
  """Creates the top k values in sorted order with indices.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    topk_r2: A tensor, the k largest elements. [batch_size, k].
    topk_indices_r2: A tensor, indices of the top k values. [batch_size, k].
  """
  height = inputs.shape[0]
  width = inputs.shape[1]
  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
  ones = tf.ones([height, width], dtype=tf.float32)
  neg_inf_r2 = ones * neg_inf_r0
  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)

  # Select the current largest value k times and keep them in topk_r2. The
  # selected largest values are marked as the smallest value to avoid being
  # selected again.
  tmp = inputs
  topk_r2 = tf.zeros([height, k], dtype=tf.float32)
  for i in range(k):
    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
    k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
                     [height, 1])
    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
    tmp = tf.where(ge_r2, neg_inf_r2, inputs)

  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = next_power_of_two - 1
  mask_r0 = tf.constant(count_mask)
  mask_r2 = tf.fill([height, k], mask_r0)
  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
  return topk_r2, topk_indices_r2
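The closing bitcast-and-mask recovers column indices only because a _create_make_unique-style pass (Example #26 shows the pattern) previously wrote each index into the low mantissa bits. A worked illustration of the recovery, assuming width = 20 so the mask covers 5 bits:

mask = (1 << 5) - 1  # next_power_of_two - 1 for width 20
x_bits = tf.bitcast(tf.constant(3.14, tf.float32), tf.int32)
tagged_bits = tf.bitwise.bitwise_or(tf.bitwise.bitwise_and(x_bits, ~mask), 7)
tagged = tf.bitcast(tagged_bits, tf.float32)  # still approximately 3.14
index = tf.bitwise.bitwise_and(tf.bitcast(tagged, tf.int32), mask)  # 7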
Example #16
Source File: diet.py From fine-lm with MIT License
def _dequantize(q, params):
  """Dequantize q according to params."""
  if not params.quantize:
    return q
  return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale
Example #17
Source File: beam_search_decoder.py From training_results_v0.5 with Apache License 2.0
def create_topk_unique(inputs, k):
  height = inputs.shape[0]
  width = inputs.shape[1]
  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
  ones = tf.ones([height, width], dtype=tf.float32)
  neg_inf_r2 = ones * neg_inf_r0
  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)

  tmp = inputs
  topk_r2 = tf.zeros([height, k], dtype=tf.float32)
  for i in range(k):
    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
    k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
                     [height, 1])
    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
    tmp = tf.where(ge_r2, neg_inf_r2, inputs)

  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = next_power_of_two - 1
  mask_r0 = tf.constant(count_mask)
  mask_r2 = tf.fill([height, k], mask_r0)
  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
  return topk_r2, topk_indices_r2
Example #18
Source File: beam_search_decoder.py From training_results_v0.5 with Apache License 2.0
def create_topk_unique(inputs, k):
  height = inputs.shape[0]
  width = inputs.shape[1]
  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
  ones = tf.ones([height, width], dtype=tf.float32)
  neg_inf_r2 = ones * neg_inf_r0
  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)

  tmp = inputs
  topk_r2 = tf.zeros([height, k], dtype=tf.float32)
  for i in range(k):
    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
    k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
                     [height, 1])
    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
    tmp = tf.where(ge_r2, neg_inf_r2, inputs)

  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = next_power_of_two - 1
  mask_r0 = tf.constant(count_mask)
  mask_r2 = tf.fill([height, k], mask_r0)
  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
  return topk_r2, topk_indices_r2
Example #19
Source File: beam_search.py From training_results_v0.5 with Apache License 2.0
def _create_topk_unique(inputs, k):
  """Creates the top k values in sorted order with indices.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    topk_r2: A tensor, the k largest elements. [batch_size, k].
    topk_indices_r2: A tensor, indices of the top k values. [batch_size, k].
  """
  height = inputs.shape[0]
  width = inputs.shape[1]
  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
  ones = tf.ones([height, width], dtype=tf.float32)
  neg_inf_r2 = ones * neg_inf_r0
  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)

  # Select the current largest value k times and keep them in topk_r2. The
  # selected largest values are marked as the smallest value to avoid being
  # selected again.
  tmp = inputs
  topk_r2 = tf.zeros([height, k], dtype=tf.float32)
  for i in range(k):
    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
    k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
                     [height, 1])
    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
    tmp = tf.where(ge_r2, neg_inf_r2, inputs)

  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = next_power_of_two - 1
  mask_r0 = tf.constant(count_mask)
  mask_r2 = tf.fill([height, k], mask_r0)
  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
  return topk_r2, topk_indices_r2
Example #20
Source File: diet.py From training_results_v0.5 with Apache License 2.0
def _dequantize(q, params):
  """Dequantize q according to params."""
  if not params.quantize:
    return q
  return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale
Example #21
Source File: beam_search_decoder.py From training_results_v0.5 with Apache License 2.0
def create_topk_unique(inputs, k):
  height = inputs.shape[0]
  width = inputs.shape[1]
  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
  ones = tf.ones([height, width], dtype=tf.float32)
  neg_inf_r2 = ones * neg_inf_r0
  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)

  tmp = inputs
  topk_r2 = tf.zeros([height, k], dtype=tf.float32)
  for i in range(k):
    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
    k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
                     [height, 1])
    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
    tmp = tf.where(ge_r2, neg_inf_r2, inputs)

  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = next_power_of_two - 1
  mask_r0 = tf.constant(count_mask)
  mask_r2 = tf.fill([height, k], mask_r0)
  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
  return topk_r2, topk_indices_r2
Example #22
Source File: beam_search.py From BERT with Apache License 2.0
def _create_topk_unique(inputs, k):
  """Creates the top k values in sorted order with indices.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    topk_r2: A tensor, the k largest elements. [batch_size, k].
    topk_indices_r2: A tensor, indices of the top k values. [batch_size, k].
  """
  height = inputs.shape[0]
  width = inputs.shape[1]
  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
  ones = tf.ones([height, width], dtype=tf.float32)
  neg_inf_r2 = ones * neg_inf_r0
  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)

  # Select the current largest value k times and keep them in topk_r2. The
  # selected largest values are marked as the smallest value to avoid being
  # selected again.
  tmp = inputs
  topk_r2 = tf.zeros([height, k], dtype=tf.float32)
  for i in range(k):
    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
    k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
                     [height, 1])
    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
    tmp = tf.where(ge_r2, neg_inf_r2, inputs)

  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = next_power_of_two - 1
  mask_r0 = tf.constant(count_mask)
  mask_r2 = tf.fill([height, k], mask_r0)
  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
  return topk_r2, topk_indices_r2
Example #23
Source File: diet.py From BERT with Apache License 2.0
def _dequantize(q, params):
  """Dequantize q according to params."""
  if not params.quantize:
    return q
  return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale
Example #24
Source File: core.py From lm-human-preferences with MIT License
def map_flat_bits(f, values):
    """Apply the function f to bit-concatenated values, then convert back to original shapes and dtypes."""
    values = [tf.convert_to_tensor(v) for v in values]

    def maybe_bitcast(v, dtype):
        cast = tf.cast if tf.bool in (v.dtype, dtype) else tf.bitcast
        return cast(v, dtype)

    bits = [maybe_bitcast(v, tf.uint8) for v in values]
    flat = tf.concat([tf.reshape(b, [-1]) for b in bits], axis=0)
    flat = f(flat)
    parts = tf.split(flat, [tf.size(b) for b in bits])
    return [maybe_bitcast(tf.reshape(p, tf.shape(b)), v.dtype)
            for p, v, b in zip(parts, values, bits)]
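A hedged usage sketch: map_flat_bits lets one byte-level op act on several tensors of mixed dtypes at once. With tf.identity as f the inputs come back bit-identical; in practice f could be, say, a broadcast or all-reduce over the flat uint8 buffer:

a = tf.constant([1.5, -2.0], dtype=tf.float32)
b = tf.constant([True, False, True])
a2, b2 = map_flat_bits(tf.identity, [a, b])  # same values and dtypes back out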
Example #25
Source File: modalities.py From fine-lm with MIT License
def bottom(self, x):
  """Transform input from data space to model space.

  Args:
    x: A Tensor with shape [batch, ...]
  Returns:
    body_input: A Tensor with shape [batch, ?, ?, body_input_depth].
  """
  inputs = x
  with tf.variable_scope(self.name):
    # TODO(aidangomez): Will need to sort out a better audio pipeline
    def xnet_resblock(x, filters, res_relu, name):
      """Xception-like block."""
      with tf.variable_scope(name):
        # We only stride along the length dimension to preserve the spectral
        # bins (which are tiny in dimensionality relative to length)
        y = common_layers.separable_conv_block(
            x,
            filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
            first_relu=True,
            padding="SAME",
            force2d=True,
            name="sep_conv_block")
        y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 1))
        return y + common_layers.conv_block(
            x,
            filters, [((1, 1), (1, 1))],
            padding="SAME",
            strides=(2, 1),
            first_relu=res_relu,
            force2d=True,
            name="res_conv0")

    # Bitcast back from int32
    x = tf.bitcast(inputs, tf.float32)
    x.set_shape([None, None, None, 1])
    for i in range(self._model_hparams.audio_compression):
      x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i)
    return xnet_resblock(x, self._body_input_depth, False,
                         "compress_block_final")
Example #26
Source File: beam_search_decoder.py From training_results_v0.5 with Apache License 2.0
def create_make_unique(inputs):
  if inputs.shape.ndims != 2:
    raise ValueError("Input of top_k_with_unique must be rank-2 "
                     "but got: %s" % inputs.shape)

  height = inputs.shape[0]
  width = inputs.shape[1]
  zeros = tf.zeros([height, width], dtype=tf.int32)

  # count_mask is used to mask away the low order bits to ensure that every
  # element is distinct.
  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = ~(next_power_of_two - 1)
  count_mask_r0 = tf.constant(count_mask)
  count_mask_r2 = tf.fill([height, width], count_mask_r0)

  # smallest_normal is the bit representation of the smallest positive normal
  # floating point number. The sign is zero, exponent is one, and the
  # fraction is zero.
  smallest_normal = 1 << 23
  smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32)
  smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0)

  # Used to mask away the sign bit when computing the absolute value.
  low_bit_mask = ~(1 << 31)
  low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32)
  low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0)

  iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0),
                 [height, 1])

  # Compare the absolute value with positive zero to handle negative zero.
  #
  # Pseudocode: input_no_zeros = abs(input) == 0 ? FLT_MIN : input
  input_r2 = tf.bitcast(inputs, tf.int32)
  abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2)
  if_zero_r2 = tf.equal(abs_r2, zeros)
  smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or(
      input_r2, smallest_normal_r2)
  input_no_zeros_r2 = tf.where(
      if_zero_r2, smallest_normal_preserving_sign_r2, input_r2)

  # Discard the low-order bits and replace with iota.
  and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2)
  or_r2 = tf.bitwise.bitwise_or(and_r2, iota)
  return tf.bitcast(or_r2, tf.float32)
Example #27
Source File: beam_search_decoder.py From training_results_v0.5 with Apache License 2.0
def create_make_unique(inputs):
  if inputs.shape.ndims != 2:
    raise ValueError("Input of top_k_with_unique must be rank-2 "
                     "but got: %s" % inputs.shape)

  height = inputs.shape[0]
  width = inputs.shape[1]
  zeros = tf.zeros([height, width], dtype=tf.int32)

  # count_mask is used to mask away the low order bits to ensure that every
  # element is distinct.
  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = ~(next_power_of_two - 1)
  count_mask_r0 = tf.constant(count_mask)
  count_mask_r2 = tf.fill([height, width], count_mask_r0)

  # smallest_normal is the bit representation of the smallest positive normal
  # floating point number. The sign is zero, exponent is one, and the
  # fraction is zero.
  smallest_normal = 1 << 23
  smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32)
  smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0)

  # Used to mask away the sign bit when computing the absolute value.
  low_bit_mask = ~(1 << 31)
  low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32)
  low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0)

  iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0),
                 [height, 1])

  # Compare the absolute value with positive zero to handle negative zero.
  #
  # Pseudocode: input_no_zeros = abs(input) == 0 ? FLT_MIN : input
  input_r2 = tf.bitcast(inputs, tf.int32)
  abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2)
  if_zero_r2 = tf.equal(abs_r2, zeros)
  smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or(
      input_r2, smallest_normal_r2)
  input_no_zeros_r2 = tf.where(
      if_zero_r2, smallest_normal_preserving_sign_r2, input_r2)

  # Discard the low-order bits and replace with iota.
  and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2)
  or_r2 = tf.bitwise.bitwise_or(and_r2, iota)
  return tf.bitcast(or_r2, tf.float32)
Example #28
Source File: beam_search_decoder.py From training_results_v0.5 with Apache License 2.0
def create_make_unique(inputs):
  if inputs.shape.ndims != 2:
    raise ValueError("Input of top_k_with_unique must be rank-2 "
                     "but got: %s" % inputs.shape)

  height = inputs.shape[0]
  width = inputs.shape[1]
  zeros = tf.zeros([height, width], dtype=tf.int32)

  # count_mask is used to mask away the low order bits to ensure that every
  # element is distinct.
  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = ~(next_power_of_two - 1)
  count_mask_r0 = tf.constant(count_mask)
  count_mask_r2 = tf.fill([height, width], count_mask_r0)

  # smallest_normal is the bit representation of the smallest positive normal
  # floating point number. The sign is zero, exponent is one, and the
  # fraction is zero.
  smallest_normal = 1 << 23
  smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32)
  smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0)

  # Used to mask away the sign bit when computing the absolute value.
  low_bit_mask = ~(1 << 31)
  low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32)
  low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0)

  iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0),
                 [height, 1])

  # Compare the absolute value with positive zero to handle negative zero.
  #
  # Pseudocode: input_no_zeros = abs(input) == 0 ? FLT_MIN : input
  input_r2 = tf.bitcast(inputs, tf.int32)
  abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2)
  if_zero_r2 = tf.equal(abs_r2, zeros)
  smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or(
      input_r2, smallest_normal_r2)
  input_no_zeros_r2 = tf.where(
      if_zero_r2, smallest_normal_preserving_sign_r2, input_r2)

  # Discard the low-order bits and replace with iota.
  and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2)
  or_r2 = tf.bitwise.bitwise_or(and_r2, iota)
  return tf.bitcast(or_r2, tf.float32)
Example #29
Source File: modalities.py From BERT with Apache License 2.0
def audio_spectral_bottom(x, model_hparams, vocab_size):
  """Transform input from data space to model space.

  Args:
    x: A Tensor with shape [batch, ...]
    model_hparams: HParams, model hyperparameters.
    vocab_size: int, vocabulary size.

  Returns:
    body_input: A Tensor with shape [batch, ?, ?, model_hparams.hidden_size].
  """
  del vocab_size  # unused arg
  inputs = x
  with tf.variable_scope("audio_spectral_modality"):
    # TODO(aidangomez): Will need to sort out a better audio pipeline
    def xnet_resblock(x, filters, res_relu, name):
      """Xception-like block."""
      with tf.variable_scope(name):
        # We only stride along the length dimension to preserve the spectral
        # bins (which are tiny in dimensionality relative to length)
        y = common_layers.separable_conv_block(
            x,
            filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
            first_relu=True,
            padding="SAME",
            force2d=True,
            name="sep_conv_block")
        y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 1))
        return y + common_layers.conv_block(
            x,
            filters, [((1, 1), (1, 1))],
            padding="SAME",
            strides=(2, 1),
            first_relu=res_relu,
            force2d=True,
            name="res_conv0")

    # Bitcast back from int32
    x = tf.bitcast(inputs, tf.float32)
    x.set_shape([None, None, None, 1])
    for i in range(model_hparams.audio_compression):
      x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i)
    return xnet_resblock(x, model_hparams.hidden_size, False,
                         "compress_block_final")
Example #30
Source File: utils.py From seed_rl with Apache License 2.0
def tpu_encode(ts):
  """Encodes a nest of Tensors in a suitable way for TPUs.

  TPUs do not support tf.uint8, tf.uint16 and other data types. Furthermore,
  the speed of transfer and device reshapes depend on the shape of the data.
  This function tries to optimize the data encoding for a number of use
  cases.

  Should be used on CPU before sending data to TPU and in conjunction with
  `tpu_decode` after the data is transferred.

  Args:
    ts: A tf.nest of Tensors.

  Returns:
    A tf.nest of encoded Tensors.
  """

  def visit(t):
    num_elements = t.shape.num_elements()
    # We need a multiple of 128 elements: encoding reduces the number of
    # elements by a factor 4 (packing uint8s into uint32s), and first thing
    # decode does is to reshape with a 32 minor-most dimension.
    if (t.dtype == tf.uint8 and num_elements is not None and
        num_elements % 128 == 0):
      # For details of these transformations, see b/137182262.
      x = tf.xla.experimental.compile(
          lambda x: tf.transpose(x, list(range(1, t.shape.rank)) + [0]),
          [t])[0]
      x = tf.reshape(x, [-1, 4])
      x = tf.bitcast(x, tf.uint32)
      x = tf.reshape(x, [-1])
      return TPUEncodedUInt8(x, t.shape)
    elif t.dtype == tf.uint8:
      logging.warning('Inefficient uint8 transfer with shape: %s', t.shape)
      return tf.cast(t, tf.bfloat16)
    elif t.dtype == tf.uint16:
      return tf.cast(t, tf.int32)
    elif (t.dtype == tf.float32 and t.shape.rank > 1 and
          not (num_divisible(t.shape.dims, 128) >= 1 and
               num_divisible(t.shape.dims, 8) >= 2)):
      x = tf.reshape(t, [-1])
      return TPUEncodedF32(x, t.shape)
    else:
      return t

  return tf.nest.map_structure(visit, ts)
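A usage sketch (shapes are illustrative; tpu_decode is the counterpart the docstring mentions, from the same seed_rl module): encode on the host before infeed, decode inside the TPU computation.

frames = tf.zeros([128, 84, 84, 4], tf.uint8)  # element count is a multiple of 128
rewards = tf.zeros([128], tf.float32)
encoded = tpu_encode({'frame': frames, 'reward': rewards})
# ...transfer `encoded`, then inside the TPU step: decoded = tpu_decode(encoded)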