Python tensorflow.round() Examples
The following are 30 code examples of tensorflow.round(). You can go to the original project or source file by following the link above each example. You may also want to check out all the other available functions and classes of the tensorflow module.
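Before diving in, note that tf.round() rounds half-way values to the nearest even integer (banker's rounding), matching NumPy's np.round rather than "always round half up". A minimal standalone sketch (assumes TensorFlow 2.x eager execution):

import tensorflow as tf

x = tf.constant([0.5, 1.5, 2.5, -0.5, 2.4])
print(tf.round(x).numpy())  # [ 0.  2.  2. -0.  2.] -- halfway cases go to the even neighbour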
Example #1
Source File: test_tf_NN.py From nn_builder with MIT License
def test_output_head_activations_work():
    """Tests that output head activations work properly"""
    nn_instance = NN(layers_info=[4, 7, 9, [5, 10, 3]],
                     hidden_activations="relu",
                     output_activation=["softmax", None, "relu"])
    x = np.random.random((20, 2)) * -20.0
    out = nn_instance(x)
    assert out.shape == (20, 18)
    sums = tf.reduce_sum(out[:, :5], axis=1)
    sums_others = tf.reduce_sum(out[:, 5:], axis=1)
    sums_others_2 = tf.reduce_sum(out[:, 5:15], axis=1)
    sums_others_3 = tf.reduce_sum(out[:, 15:18], axis=1)
    for row in range(out.shape[0]):
        assert tf.math.equal(np.round(sums[row], 4), 1.0), sums[row]
        assert not tf.math.equal(np.round(sums_others[row], 4), 1.0), np.round(sums_others[row], 4)
        assert not tf.math.equal(np.round(sums_others_2[row], 4), 1.0), np.round(sums_others_2[row], 4)
        assert not tf.math.equal(np.round(sums_others_3[row], 4), 1.0), np.round(sums_others_3[row], 4)
        for col in range(3):
            assert out[row, 15 + col] >= 0.0, out[row, 15 + col]
Example #2
Source File: tensor_ops.py From hart with GNU General Public License v3.0
def _bbox_to_mask(yy, region_size, dtype):
    # trim bounding box exceeding region_size on top and left
    neg_part = tf.nn.relu(-yy[:2])
    core = tf.ones(tf.to_int32(tf.round(yy[2:] - neg_part)), dtype=dtype)

    y1 = tf.maximum(yy[0], 0.)
    x1 = tf.maximum(yy[1], 0.)

    y2 = tf.minimum(region_size[0], yy[0] + yy[2])
    x2 = tf.minimum(region_size[1], yy[1] + yy[3])

    padding = (y1, region_size[0] - y2, x1, region_size[1] - x2)
    padding = tf.reshape(tf.stack(padding), (-1, 2))
    padding = tf.to_int32(tf.round(padding))
    mask = tf.pad(core, padding)

    # trim bounding box exceeding region_size on bottom and right
    rs = tf.to_int32(tf.round(region_size))
    mask = mask[:rs[0], :rs[1]]
    mask.set_shape((None, None))
    return mask
Example #3
Source File: classifier.py From Document-Classifier-LSTM with MIT License
def f1_score(y_true, y_pred):
    """Compute the micro f(b) score with b=1."""
    y_true = tf.cast(y_true, "float32")
    y_pred = tf.cast(tf.round(y_pred), "float32")  # implicit 0.5 threshold via tf.round
    y_correct = y_true * y_pred

    sum_true = tf.reduce_sum(y_true, axis=1)
    sum_pred = tf.reduce_sum(y_pred, axis=1)
    sum_correct = tf.reduce_sum(y_correct, axis=1)

    precision = sum_correct / sum_pred
    recall = sum_correct / sum_true
    f_score = 2 * precision * recall / (precision + recall)
    f_score = tf.where(tf.is_nan(f_score), tf.zeros_like(f_score), f_score)

    return tf.reduce_mean(f_score)
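Since tf.round() maps each predicted probability to {0., 1.}, this metric implicitly thresholds predictions at 0.5. As a usage sketch only (the Sequential model below is a hypothetical stand-in for illustration, not the repository's actual LSTM classifier), any such callable taking (y_true, y_pred) can be passed as a Keras metric:

from keras.models import Sequential
from keras.layers import Dense

# Hypothetical stand-in network, just to show where the metric plugs in.
model = Sequential([Dense(5, activation="sigmoid", input_shape=(10,))])
model.compile(optimizer="adam",
              loss="binary_crossentropy",
              metrics=[f1_score])  # f1_score as defined above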
Example #4
Source File: hatt_classifier.py From Document-Classifier-LSTM with MIT License
def f1_score(y_true, y_pred):
    """Compute the micro f(b) score with b=1."""
    y_true = tf.cast(y_true, "float32")
    y_pred = tf.cast(tf.round(y_pred), "float32")  # implicit 0.5 threshold via tf.round
    y_correct = y_true * y_pred

    sum_true = tf.reduce_sum(y_true, axis=1)
    sum_pred = tf.reduce_sum(y_pred, axis=1)
    sum_correct = tf.reduce_sum(y_correct, axis=1)

    precision = sum_correct / sum_pred
    recall = sum_correct / sum_true
    f_score = 2 * precision * recall / (precision + recall)
    f_score = tf.where(tf.is_nan(f_score), tf.zeros_like(f_score), f_score)

    return tf.reduce_mean(f_score)
Example #5
Source File: network.py From ai-platform with MIT License
def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
    if out_mul != 1.0:
        expr = [x * out_mul for x in expr]
    if out_add != 0.0:
        expr = [x + out_add for x in expr]
    if out_shrink > 1:
        ksize = [1, 1, out_shrink, out_shrink]
        expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID",
                               data_format="NCHW") for x in expr]
    if out_dtype is not None:
        if tf.as_dtype(out_dtype).is_integer:
            expr = [tf.round(x) for x in expr]
        expr = [tf.saturate_cast(x, out_dtype) for x in expr]
    return expr
Example #6
Source File: quantize_linear.py From onnx-tensorflow with Apache License 2.0
def version_10(cls, node, **kwargs):
    tensor_dict = kwargs["tensor_dict"]
    x = tensor_dict[node.inputs[0]]
    y_scale = tensor_dict[node.inputs[1]]

    x = tf.cast(x, tf.float32)
    y = tf.divide(x, y_scale)
    y = tf.round(y)
    if len(node.inputs) == 3:
        y_zero_point = tensor_dict[node.inputs[2]]
        y_dtype = y_zero_point.dtype
        y_zero_point = tf.cast(y_zero_point, tf.float32)
        y = tf.add(y, y_zero_point)
    else:  # y_zero_point default dtype = uint8
        y_dtype = tf.uint8
    y = tf.saturate_cast(y, y_dtype)

    return [y]
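This mirrors the ONNX QuantizeLinear definition, y = saturate(round(x / y_scale) + y_zero_point). A small NumPy check of the same arithmetic (illustrative values, not part of the onnx-tensorflow codebase):

import numpy as np

x = np.array([-1.0, 0.0, 0.7, 300.0], dtype=np.float32)
y_scale, y_zero_point = 2.0, 128  # hypothetical uint8 quantization parameters

y = np.rint(x / y_scale) + y_zero_point   # round, then shift by the zero point
y = np.clip(y, 0, 255).astype(np.uint8)   # saturate to the uint8 range
print(y)  # [128 128 128 255] -- the last entry saturates at the top of the range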
Example #7
Source File: base.py From BERT with Apache License 2.0
def pixels_from_softmax(frame_logits, pure_sampling=False,
                        temperature=1.0, gumbel_noise_factor=0.2):
    """Given frame_logits from a per-pixel softmax, generate colors."""
    # If we're purely sampling, just sample each pixel.
    if pure_sampling or temperature == 0.0:
        return common_layers.sample_with_temperature(frame_logits, temperature)

    # Gumbel-sample from the pixel softmax and average by pixel values.
    pixel_range = tf.to_float(tf.range(256))
    for _ in range(len(frame_logits.get_shape().as_list()) - 1):
        pixel_range = tf.expand_dims(pixel_range, axis=0)

    frame_logits = tf.nn.log_softmax(frame_logits)
    gumbel_samples = discretization.gumbel_sample(
        common_layers.shape_list(frame_logits)) * gumbel_noise_factor

    frame = tf.nn.softmax((frame_logits + gumbel_samples) / temperature, axis=-1)
    result = tf.reduce_sum(frame * pixel_range, axis=-1)
    # Round on the forward pass, not on the backward one.
    return result + tf.stop_gradient(tf.round(result) - result)
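The last line is the straight-through estimator pattern: the forward pass yields tf.round(result), while the backward pass sees the identity, because the rounding residual is wrapped in tf.stop_gradient. The trick in isolation (a TF 2.x sketch; the example above uses TF1-style ops):

import tensorflow as tf

def straight_through_round(x):
    # Forward value equals tf.round(x); the gradient flows through x unchanged.
    return x + tf.stop_gradient(tf.round(x) - x)

x = tf.Variable([0.3, 1.7])
with tf.GradientTape() as tape:
    y = tf.reduce_sum(straight_through_round(x))
print(y.numpy())                     # 2.0  (round(0.3) + round(1.7))
print(tape.gradient(y, x).numpy())   # [1. 1.] -- identity gradient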
Example #8
Source File: network.py From generative-compression with MIT License
def quantizer(w, config, reuse=False, temperature=1, L=5, scope='image'):
    """
    Quantize feature map over L centers to obtain discrete $\hat{w}$
        + Centers: {-2,-1,0,1,2}
        + TODO: Toggle learnable centers?
    """
    # Fixed: reuse belongs to tf.variable_scope, not to str.format.
    with tf.variable_scope('quantizer_{}'.format(scope), reuse=reuse):
        centers = tf.cast(tf.range(-2, 3), tf.float32)
        # Partition W into the Voronoi tessellation over the centers
        w_stack = tf.stack([w for _ in range(L)], axis=-1)
        w_hard = tf.cast(tf.argmin(tf.abs(w_stack - centers), axis=-1),
                         tf.float32) + tf.reduce_min(centers)

        smx = tf.nn.softmax(-1.0 / temperature * tf.abs(w_stack - centers), dim=-1)

        # Contract last dimension
        w_soft = tf.einsum('ijklm,m->ijkl', smx, centers)
        # w_soft = tf.tensordot(smx, centers, axes=((-1), (0)))

        # Treat quantization as differentiable for optimization
        w_bar = tf.round(tf.stop_gradient(w_hard - w_soft) + w_soft)

        return w_bar
Example #9
Source File: network.py From generative-compression with MIT License
def quantizer(w, config, reuse=False, temperature=1, L=5, scope='image'):
    """
    Quantize feature map over L centers to obtain discrete $\hat{w}$
        + Centers: {-2,-1,0,1,2}
        + TODO: Toggle learnable centers?
    """
    # Fixed: reuse belongs to tf.variable_scope, not to str.format.
    with tf.variable_scope('quantizer_{}'.format(scope), reuse=reuse):
        centers = tf.cast(tf.range(-2, 3), tf.float32)
        # Partition W into the Voronoi tessellation over the centers
        w_stack = tf.stack([w for _ in range(L)], axis=-1)
        w_hard = tf.cast(tf.argmin(tf.abs(w_stack - centers), axis=-1),
                         tf.float32) + tf.reduce_min(centers)

        smx = tf.nn.softmax(-1.0 / temperature * tf.abs(w_stack - centers), dim=-1)

        # Contract last dimension
        w_soft = tf.einsum('ijklm,m->ijkl', smx, centers)
        # w_soft = tf.tensordot(smx, centers, axes=((-1), (0)))

        # Treat quantization as differentiable for optimization
        w_bar = tf.round(tf.stop_gradient(w_hard - w_soft) + w_soft)

        return w_bar
Example #10
Source File: base.py From training_results_v0.5 with Apache License 2.0
def pixels_from_softmax(frame_logits, pure_sampling=False,
                        temperature=1.0, gumbel_noise_factor=0.2):
    """Given frame_logits from a per-pixel softmax, generate colors."""
    # If we're purely sampling, just sample each pixel.
    if pure_sampling or temperature == 0.0:
        return common_layers.sample_with_temperature(frame_logits, temperature)

    # Gumbel-sample from the pixel softmax and average by pixel values.
    pixel_range = tf.to_float(tf.range(256))
    for _ in range(len(frame_logits.get_shape().as_list()) - 1):
        pixel_range = tf.expand_dims(pixel_range, axis=0)

    frame_logits = tf.nn.log_softmax(frame_logits)
    gumbel_samples = discretization.gumbel_sample(
        common_layers.shape_list(frame_logits)) * gumbel_noise_factor

    frame = tf.nn.softmax((frame_logits + gumbel_samples) / temperature, axis=-1)
    result = tf.reduce_sum(frame * pixel_range, axis=-1)
    # Round on the forward pass, not on the backward one.
    return result + tf.stop_gradient(tf.round(result) - result)
Example #11
Source File: model.py From training_results_v0.5 with Apache License 2.0
def _get_infer_maximum_iterations(self, hparams, source_sequence_length):
    """Maximum decoding steps at inference time."""
    if hparams.tgt_max_len_infer:
        maximum_iterations = hparams.tgt_max_len_infer
        utils.print_out(" decoding maximum_iterations %d" % maximum_iterations)
    else:
        decoding_length_factor = 2.0
        max_encoder_length = tf.reduce_max(source_sequence_length)
        maximum_iterations = tf.to_int32(
            tf.round(tf.to_float(max_encoder_length) * decoding_length_factor))
    return maximum_iterations
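In other words, decoding is capped at twice the longest source sentence in the batch. The same arithmetic in current TensorFlow, with hypothetical lengths (tf.to_int32/tf.to_float are TF1 aliases for tf.cast):

import tensorflow as tf

source_sequence_length = tf.constant([13, 25, 18])
decoding_length_factor = 2.0
max_encoder_length = tf.reduce_max(source_sequence_length)  # 25
maximum_iterations = tf.cast(
    tf.round(tf.cast(max_encoder_length, tf.float32) * decoding_length_factor),
    tf.int32)
print(maximum_iterations.numpy())  # 50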
Example #12
Source File: supervised_tf.py From QMLT with Apache License 2.0
def outputs_to_predictions(circuit_output):
    return tf.round(circuit_output)
Example #13
Source File: supervised_advanced_tf.py From QMLT with Apache License 2.0
def outputs_to_predictions(outpt):
    return tf.round(outpt)
Example #14
Source File: model.py From training_results_v0.5 with Apache License 2.0
def _get_infer_maximum_iterations(self, hparams, source_sequence_length):
    """Maximum decoding steps at inference time."""
    if hparams.tgt_max_len_infer:
        maximum_iterations = hparams.tgt_max_len_infer
        utils.print_out(" decoding maximum_iterations %d" % maximum_iterations)
    else:
        # TODO(thangluong): add decoding_length_factor flag
        decoding_length_factor = 2.0
        max_encoder_length = tf.reduce_max(source_sequence_length)
        maximum_iterations = tf.to_int32(
            tf.round(tf.to_float(max_encoder_length) * decoding_length_factor))
    return maximum_iterations
Example #15
Source File: train.py From STGAN with MIT License
def mean_accuracy_multi_binary_label_with_logits(att, logits):
    # return tf.count_nonzero(tf.equal(tf.greater(logits, 0.5), tf.greater(tf.to_float(att), 0.5)))
    # return tf.reduce_mean(tf.to_float(tf.equal(tf.to_int64(tf.round(logits)), att)))
    # return tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(att, 1))))
    return tf.reduce_mean(tf.to_float(tf.equal(tf.to_int64(tf.greater(logits, 0.0)), att)))
Example #16
Source File: modellib.py From rec-attend-public with MIT License
def f_segm_match(iou, s_gt):
    """Matching between segmentation output and groundtruth.

    Args:
        iou: [B, N, M], pairwise IOU between output and groundtruth segmentations
        s_gt: [B, T], groundtruth score sequence
    """
    global hungarian_module
    if hungarian_module is None:
        mod_name = './hungarian.so'
        hungarian_module = tf.load_op_library(mod_name)
        log.info('Loaded library "{}"'.format(mod_name))

    # Mask X, [B, M] => [B, 1, M]
    mask_x = tf.expand_dims(s_gt, dim=1)
    # Mask Y, [B, M] => [B, N, 1]
    mask_y = tf.expand_dims(s_gt, dim=2)
    iou_mask = iou * mask_x * mask_y

    # Keep certain precision so that we can get optimal matching within
    # reasonable time.
    eps = 1e-5
    precision = 1e6
    iou_mask = tf.round(iou_mask * precision) / precision
    match_eps = hungarian_module.hungarian(iou_mask + eps)[0]

    # [1, N, 1, 1]
    s_gt_shape = tf.shape(s_gt)
    num_segm_out = s_gt_shape[1]
    num_segm_out_mul = tf.pack([1, num_segm_out, 1])
    # Mask the graph algorithm output.
    match = match_eps * mask_x * mask_y
    return match
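The round-multiply-divide idiom here truncates the IOU matrix to six decimal places so the Hungarian solver sees a coarser cost matrix and can match within reasonable time. The idiom on its own (a minimal TF 2.x sketch):

import tensorflow as tf

iou = tf.constant([0.12345678, 0.87654321])
precision = 1e6
print((tf.round(iou * precision) / precision).numpy())  # approximately [0.123457 0.876543]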
Example #17
Source File: distributions.py From m3ddpg with MIT License
def mode(self):
    return tf.round(self.ps)
Example #18
Source File: distributions.py From MOREL with MIT License
def mode(self):
    return tf.round(self.ps)
Example #19
Source File: bit_utils.py From bit-rnn with Apache License 2.0
def round_bit(x, bit):
    if bit == 32:
        return x
    g = tf.get_default_graph()
    k = 2**bit - 1
    with g.gradient_override_map({'Round': 'Identity'}):
        return tf.round(x * k) / k
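round_bit() snaps values in [0, 1] onto the 2**bit - 1 evenly spaced quantization levels, while gradient_override_map makes the rounding behave as the identity in the backward pass. A usage sketch, assuming a TF1 graph-mode environment (gradient_override_map is a TF1-era API with no TF 2.x eager equivalent):

import tensorflow as tf  # assumes TensorFlow 1.x

x = tf.placeholder(tf.float32, [None])
y = round_bit(tf.sigmoid(x), bit=2)  # values snapped to {0, 1/3, 2/3, 1}
grads = tf.gradients(y, x)           # well-defined: Round's gradient is overridden to Identity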
Example #20
Source File: ops.py From tfdeploy with MIT License
def test_Round(self):
    t = tf.round(self.random(4, 3) - 0.5)
    self.check(t)
Example #21
Source File: ops.py From tfdeploy with MIT License
def check(self, t, comp=None, ndigits=None, stats=False, abs=False, debug=False):
    if td._tf_version[:3] < (0, 12, 0):
        self.sess.run(tf.initialize_all_variables())
    else:
        self.sess.run(tf.global_variables_initializer())

    if not isinstance(t, tuple):
        t = (t,)

    for _t in t:
        rtf = _t.eval(session=self.sess)
        rtd = td.Tensor(_t, self.sess).eval()

        if debug:
            import pdb; pdb.set_trace()

        if ndigits is None:
            ndigits = self.ndigits

        if hasattr(comp, "__call__"):
            return comp(rtf, rtd)

        if isinstance(rtf, np.ndarray):
            self.assertEqual(rtf.dtype, rtd.dtype)
            if abs:
                rtf = np.abs(rtf)
                rtd = np.abs(rtd)
            if not stats:
                self.assertTrue(np.allclose(rtf, rtd, atol=0.1**ndigits))
            else:
                self.assertEqual(round(rtf.sum(), ndigits), round(rtd.sum(), ndigits))
                self.assertEqual(round(rtf.mean(), ndigits), round(rtd.mean(), ndigits))
        elif isinstance(rtf, float):
            self.assertEqual(round(rtf, ndigits), round(rtd, ndigits))
        else:
            self.assertEqual(rtf, rtd)
Example #22
Source File: utils.py From COCO-GAN with MIT License
def aug_cylindrical_data_numpy(batch_images):
    width = batch_images.shape[2]  # fixed: was `batch_images_t`, which is undefined in this scope
    rotate_dist = int(round(np.random.uniform(0, 1) * width))
    batch_aug_results = np.concatenate([
        batch_images[:, :, rotate_dist:],
        batch_images[:, :, :rotate_dist]
    ], axis=2)
    return batch_aug_results
Example #23
Source File: utils.py From COCO-GAN with MIT License
def aug_cylindrical_data_tensor(batch_images_t):
    width = batch_images_t.shape[2].value
    rotate_dist = tf.round(tf.random.uniform([], 0, 1) * width)
    rotate_dist = tf.cast(rotate_dist, tf.int32)
    batch_aug_results = tf.concat([
        batch_images_t[:, :, rotate_dist:],
        batch_images_t[:, :, :rotate_dist]
    ], axis=2)
    return batch_aug_results
Example #24
Source File: distributions.py From ICML2019-TREX with MIT License
def mode(self):
    return tf.round(self.ps)
Example #25
Source File: distributions.py From ICML2019-TREX with MIT License
def mode(self):
    return tf.round(self.ps)
Example #26
Source File: distributions.py From DRL_DeliveryDuel with MIT License
def mode(self):
    return tf.round(self.ps)
Example #27
Source File: distributions.py From lirpg with MIT License
def mode(self):
    return tf.round(self.ps)
Example #28
Source File: distributions.py From HardRLWithYoutube with MIT License
def mode(self):
    return tf.round(self.ps)
Example #29
Source File: distributions.py From learning2run with MIT License
def mode(self):
    return tf.round(self.ps)
Example #30
Source File: preprocessor.py From garbage-object-detection-tensorflow with MIT License
def _compute_new_static_size(image, min_dimension, max_dimension):
    """Compute new static shape for resize_to_range method."""
    image_shape = image.get_shape().as_list()
    orig_height = image_shape[0]
    orig_width = image_shape[1]
    orig_min_dim = min(orig_height, orig_width)

    # Calculates the larger of the possible sizes
    large_scale_factor = min_dimension / float(orig_min_dim)
    # Scaling orig_(height|width) by large_scale_factor will make the smaller
    # dimension equal to min_dimension, save for floating point rounding errors.
    # For reasonably-sized images, taking the nearest integer will reliably
    # eliminate this error.
    large_height = int(round(orig_height * large_scale_factor))
    large_width = int(round(orig_width * large_scale_factor))
    large_size = [large_height, large_width]

    if max_dimension:
        # Calculates the smaller of the possible sizes, use that if the larger
        # is too big.
        orig_max_dim = max(orig_height, orig_width)
        small_scale_factor = max_dimension / float(orig_max_dim)
        # Scaling orig_(height|width) by small_scale_factor will make the larger
        # dimension equal to max_dimension, save for floating point rounding
        # errors. For reasonably-sized images, taking the nearest integer will
        # reliably eliminate this error.
        small_height = int(round(orig_height * small_scale_factor))
        small_width = int(round(orig_width * small_scale_factor))
        small_size = [small_height, small_width]

        new_size = large_size
        if max(large_size) > max_dimension:
            new_size = small_size
    else:
        new_size = large_size

    return tf.constant(new_size)
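A worked example of the size arithmetic with hypothetical dimensions: a 480x640 image with min_dimension=600 and max_dimension=1024 scales by 600/480 = 1.25 to 600x800, and since 800 <= 1024 the large size is kept:

orig_height, orig_width = 480, 640
min_dimension, max_dimension = 600, 1024

large_scale = min_dimension / float(min(orig_height, orig_width))  # 1.25
large_size = [int(round(orig_height * large_scale)),
              int(round(orig_width * large_scale))]                # [600, 800]

# max(large_size) == 800 <= max_dimension, so the image is resized to 600x800;
# had it exceeded 1024, the small (max_dimension-bounded) size would win.
print(large_size)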