Python tensorflow.newaxis Examples
The following are 30 code examples of tensorflow.newaxis. Note that tf.newaxis is not a function: it is a constant (an alias for None, like numpy.newaxis) used inside indexing expressions to insert a new axis of length 1 into a tensor.
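Before the examples, a minimal standalone sketch (any recent TensorFlow; eager output shown) of what indexing with tf.newaxis does:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])  # shape (3,)
row = x[tf.newaxis, :]            # shape (1, 3)
col = x[:, tf.newaxis]            # shape (3, 1)
# tf.newaxis is an alias for None, so x[None, :] is equivalent.
print(row.shape, col.shape)       # (1, 3) (3, 1)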
Example #1
Source File: generate_rotate_anchors.py From R3Det_Tensorflow with MIT License
def enum_ratios_and_thetas(anchors, anchor_ratios, anchor_angles):
    '''
    ratio = h / w
    :param anchors:
    :param anchor_ratios:
    :return:
    '''
    ws = anchors[:, 2]  # for base anchor: w == h
    hs = anchors[:, 3]
    anchor_angles = tf.constant(anchor_angles, tf.float32)
    sqrt_ratios = tf.sqrt(tf.constant(anchor_ratios))

    ws = tf.reshape(ws / sqrt_ratios[:, tf.newaxis], [-1])
    hs = tf.reshape(hs * sqrt_ratios[:, tf.newaxis], [-1])

    ws, _ = tf.meshgrid(ws, anchor_angles)
    hs, anchor_angles = tf.meshgrid(hs, anchor_angles)

    anchor_angles = tf.reshape(anchor_angles, [-1, 1])
    ws = tf.reshape(ws, [-1, 1])
    hs = tf.reshape(hs, [-1, 1])

    return ws, hs, anchor_angles
Example #2
Source File: generate_rotate_anchors.py From RetinaNet_Tensorflow_Rotation with MIT License
def enum_ratios_and_thetas(anchors, anchor_ratios, anchor_angles):
    '''
    ratio = h / w
    :param anchors:
    :param anchor_ratios:
    :return:
    '''
    ws = anchors[:, 2]  # for base anchor: w == h
    hs = anchors[:, 3]
    anchor_angles = tf.constant(anchor_angles, tf.float32)
    sqrt_ratios = tf.sqrt(tf.constant(anchor_ratios))

    ws = tf.reshape(ws / sqrt_ratios[:, tf.newaxis], [-1])
    hs = tf.reshape(hs * sqrt_ratios[:, tf.newaxis], [-1])

    ws, _ = tf.meshgrid(ws, anchor_angles)
    hs, anchor_angles = tf.meshgrid(hs, anchor_angles)

    anchor_angles = tf.reshape(anchor_angles, [-1, 1])
    ws = tf.reshape(ws, [-1, 1])
    hs = tf.reshape(hs, [-1, 1])

    return ws, hs, anchor_angles
Example #3
Source File: core.py From lm-human-preferences with MIT License
def take_top_p_logits(logits, p):
    """Nucleus sampling"""
    batch, sequence, _ = logits.shape.as_list()
    sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
    cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
    indices = tf.stack([
        tf.range(0, batch)[:, tf.newaxis],
        tf.range(0, sequence)[tf.newaxis, :],
        # number of indices to include
        tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
    ], axis=-1)
    min_values = tf.gather_nd(sorted_logits, indices)
    return tf.where(
        logits < min_values,
        tf.ones_like(logits) * -1e10,
        logits,
    )
Example #4
Source File: basic_model.py From TIES-2.0 with MIT License
def get_balanced_distribution_for_mote_carlo_sampling(self, ground_truth):
    N = self._placeholder_global_features[:, self.dim_num_vertices]  # [b]
    NN = tf.tile(N[..., tf.newaxis], multiples=[1, self.max_vertices])  # [b]
    M = tf.sequence_mask(tf.cast(N, dtype=tf.int64), maxlen=self.max_vertices)  # [b, v]
    M = tf.cast(M, dtype=tf.float32)
    MM = tf.cast(tf.sequence_mask(tf.cast(NN, dtype=tf.int64),
                                  maxlen=self.max_vertices), tf.float32) * M[..., tf.newaxis]  # [b, v, v]

    P = tf.cast(ground_truth, dtype=tf.float32)
    X = tf.reduce_sum(P, axis=2)
    Y = tf.reduce_sum(P, axis=2)

    G_0 = tf.cast(tf.equal(ground_truth, 0), tf.float32)
    G_1 = tf.cast(tf.equal(ground_truth, 1), tf.float32)

    X = tf.reduce_sum(G_0 * MM, axis=2)
    Y = tf.reduce_sum(G_1 * MM, axis=2)

    P_0 = G_0 * 0.5 * ((X + Y) / X)[..., tf.newaxis] * MM
    P_1 = G_1 * 0.5 * ((X + Y) / Y)[..., tf.newaxis] * MM

    P = P_0 + P_1
    return P
Example #5
Source File: token_generator.py From BERT with Apache License 2.0
def top_k_logits(logits, k):
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )

    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    )
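A short usage sketch for top_k_logits (hedged: the tensor values are invented for illustration, and the function above is assumed to be in scope). Rejected positions get logit -1e10, so softmax assigns them essentially zero probability:

logits = tf.random.normal([2, 8])         # [batch, vocab]
filtered = top_k_logits(logits, k=3)      # all but the top 3 per row -> -1e10
probs = tf.nn.softmax(filtered, axis=-1)  # sampling now only picks top-3 tokens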
Example #6
Source File: sample.py From GPT2 with MIT License
def top_k_logits(logits, k):
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )

    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    )
Example #7
Source File: pyramid_network.py From FastMaskRCNN with Apache License 2.0
def _add_jittered_boxes(rois, scores, batch_inds, gt_boxes, jitter=0.1):
    ws = gt_boxes[:, 2] - gt_boxes[:, 0]
    hs = gt_boxes[:, 3] - gt_boxes[:, 1]
    shape = tf.shape(gt_boxes)[0]
    jitter = tf.random_uniform([shape, 1], minval=-jitter, maxval=jitter)
    jitter = tf.reshape(jitter, [-1])
    ws_offset = ws * jitter
    hs_offset = hs * jitter
    x1s = gt_boxes[:, 0] + ws_offset
    x2s = gt_boxes[:, 2] + ws_offset
    y1s = gt_boxes[:, 1] + hs_offset
    y2s = gt_boxes[:, 3] + hs_offset
    boxes = tf.concat(
        values=[
            x1s[:, tf.newaxis],
            y1s[:, tf.newaxis],
            x2s[:, tf.newaxis],
            y2s[:, tf.newaxis]],
        axis=1)
    new_scores = tf.ones([shape], tf.float32)
    new_batch_inds = tf.zeros([shape], tf.int32)
    return tf.concat(values=[rois, boxes], axis=0), \
           tf.concat(values=[scores, new_scores], axis=0), \
           tf.concat(values=[batch_inds, new_batch_inds], axis=0)
Example #8
Source File: bert_seq_tpu_utils.py From BERT with Apache License 2.0
def top_k_logits(logits, k):
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )

    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    )
Example #9
Source File: bert_seq_utils.py From BERT with Apache License 2.0
def top_k_logits(logits, k):
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )

    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    )
Example #10
Source File: bert_seq_sample_utils.py From BERT with Apache License 2.0
def top_k_logits(logits, k):
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )

    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    )
Example #11
Source File: reversible_layers_test.py From BERT with Apache License 2.0
def testOneHotMinusExactSoft(self):
    inputs = tf.constant([[0., 1., 0.],
                          [0., 0., 1.]])
    shift = tf.constant([[0.1, 0.6, 0.3],
                         [0.2, 0.4, 0.4]])
    outputs = reversible.one_hot_minus(inputs, shift)
    shift_zero = inputs
    shift_one = np.array([[1., 0., 0.],
                          [0., 1., 0.]])
    shift_two = np.array([[0., 0., 1.],
                          [1., 0., 0.]])
    expected_outputs = (shift[..., 0][..., tf.newaxis] * shift_zero +
                        shift[..., 1][..., tf.newaxis] * shift_one +
                        shift[..., 2][..., tf.newaxis] * shift_two)
    actual_outputs_val, expected_outputs_val = self.evaluate([
        outputs, expected_outputs])
    self.assertAllEqual(actual_outputs_val, expected_outputs_val)
Example #12
Source File: reversible_layers_test.py From BERT with Apache License 2.0
def testOneHotMultiplyExactSoft(self):
    inputs = tf.constant([[0., 1., 0.],
                          [0., 0., 1.]])
    scale = tf.constant([[0.1, 0.6, 0.3],
                         [0.2, 0.4, 0.4]])
    outputs = reversible.one_hot_multiply(inputs, scale)
    scale_zero = np.array([[0., 0., 0.],
                           [0., 0., 0.]])
    scale_one = inputs
    scale_two = np.array([[0., 0., 1.],
                          [0., 1., 0.]])
    expected_outputs = (scale[..., 0][..., tf.newaxis] * scale_zero +
                        scale[..., 1][..., tf.newaxis] * scale_one +
                        scale[..., 2][..., tf.newaxis] * scale_two)
    actual_outputs_val, expected_outputs_val = self.evaluate([
        outputs, expected_outputs])
    self.assertAllEqual(actual_outputs_val, expected_outputs_val)
Example #13
Source File: ngram.py From training_results_v0.5 with Apache License 2.0
def call(self, inputs):
    batch_shape = tf.shape(inputs)[:-1]
    length = tf.shape(inputs)[-1]
    ngram_range_counts = []
    for n in range(self.minval, self.maxval):
        # Reshape inputs from [..., length] to [..., 1, length // n, n], dropping
        # remainder elements. Each n-vector is an ngram.
        reshaped_inputs = tf.reshape(
            inputs[..., :(n * (length // n))],
            tf.concat([batch_shape, [1], (length // n)[tf.newaxis], [n]], 0))
        # Count the number of times each ngram appears in the input. We do so by
        # checking whether each n-vector in the input is equal to each n-vector
        # in a Tensor of all possible ngrams. The comparison is batched between
        # the input Tensor of shape [..., 1, length // n, n] and the ngrams Tensor
        # of shape [..., input_dim**n, 1, n].
        ngrams = tf.reshape(
            list(np.ndindex((self.input_dim,) * n)),
            [1] * (len(inputs.shape) - 1) + [self.input_dim**n, 1, n])
        is_ngram = tf.equal(
            tf.reduce_sum(tf.cast(tf.equal(reshaped_inputs, ngrams), tf.int32),
                          axis=-1),
            n)
        ngram_counts = tf.reduce_sum(tf.cast(is_ngram, tf.float32), axis=-1)
        ngram_range_counts.append(ngram_counts)
    return tf.concat(ngram_range_counts, axis=-1)
Example #14
Source File: reversible_layers_test.py From BERT with Apache License 2.0
def testDiscreteAutoregressiveFlowSample(self, loc_only):
    batch_size = 5
    length = 2
    vocab_size = 2
    if loc_only:
        units = vocab_size
        network = reversible.MADE(units, [])
    else:
        units = 2 * vocab_size
        mask = tf.reshape([0] * vocab_size + [-1e10] + [0] * (vocab_size - 1),
                          [1, 1, 2 * vocab_size])
        network_ = reversible.MADE(units, [])
        network = lambda inputs: mask + network_(inputs)
    layer = reversible.DiscreteAutoregressiveFlow(network, 1.)
    logits = tf.tile(tf.random_normal([length, vocab_size])[tf.newaxis],
                     [batch_size, 1, 1])
    base = tfp.edward2.OneHotCategorical(logits=logits, dtype=tf.float32)
    outputs = layer(base)
    _ = outputs.value  # need to do this to instantiate tf.variables
    self.evaluate(tf.global_variables_initializer())
    res = self.evaluate(outputs)
    self.assertEqual(res.shape, (batch_size, length, vocab_size))
    self.assertAllGreaterEqual(res, 0)
    self.assertAllLessEqual(res, vocab_size - 1)
Example #15
Source File: speech_transformer.py From athena with Apache License 2.0
def _create_masks(x, input_length, y):
    r"""Generate a square mask for the sequence. The masked positions are
    filled with float(1.0). Unmasked positions are filled with float(0.0).
    """
    input_mask, output_mask = None, None
    if x is not None:
        input_mask = 1.0 - tf.sequence_mask(
            input_length, tf.shape(x)[1], dtype=tf.float32
        )
        input_mask = input_mask[:, tf.newaxis, tf.newaxis, :]
        input_mask.set_shape([None, None, None, None])
    if y is not None:
        output_mask = tf.cast(tf.math.equal(y, 0), tf.float32)
        output_mask = output_mask[:, tf.newaxis, tf.newaxis, :]
        look_ahead_mask = generate_square_subsequent_mask(tf.shape(y)[1])
        output_mask = tf.maximum(output_mask, look_ahead_mask)
        output_mask.set_shape([None, None, None, None])
    return input_mask, output_mask
Example #16
Source File: ngram.py From BERT with Apache License 2.0
def call(self, inputs):
    batch_shape = tf.shape(inputs)[:-1]
    length = tf.shape(inputs)[-1]
    ngram_range_counts = []
    for n in range(self.minval, self.maxval):
        # Reshape inputs from [..., length] to [..., 1, length // n, n], dropping
        # remainder elements. Each n-vector is an ngram.
        reshaped_inputs = tf.reshape(
            inputs[..., :(n * (length // n))],
            tf.concat([batch_shape, [1], (length // n)[tf.newaxis], [n]], 0))
        # Count the number of times each ngram appears in the input. We do so by
        # checking whether each n-vector in the input is equal to each n-vector
        # in a Tensor of all possible ngrams. The comparison is batched between
        # the input Tensor of shape [..., 1, length // n, n] and the ngrams Tensor
        # of shape [..., input_dim**n, 1, n].
        ngrams = tf.reshape(
            list(np.ndindex((self.input_dim,) * n)),
            [1] * (len(inputs.shape) - 1) + [self.input_dim**n, 1, n])
        is_ngram = tf.equal(
            tf.reduce_sum(tf.cast(tf.equal(reshaped_inputs, ngrams), tf.int32),
                          axis=-1),
            n)
        ngram_counts = tf.reduce_sum(tf.cast(is_ngram, tf.float32), axis=-1)
        ngram_range_counts.append(ngram_counts)
    return tf.concat(ngram_range_counts, axis=-1)
Example #17
Source File: generator_utils.py From BERT with Apache License 2.0
def _compute_auxiliary_structure(self, contents_and_mask):
    """Compute segment and position metadata."""
    contents = contents_and_mask[:, :self._num_sequences]
    start_mask = tf.cast(contents_and_mask[:, self._num_sequences:],
                         dtype=INDEX_DTYPE)

    segment = tf.cumsum(start_mask, axis=0)
    uniform_count = tf.ones_like(segment[:, 0])
    position = []
    for i in range(self._num_sequences):
        segment_slice = segment[:, i]
        counts = tf.math.segment_sum(uniform_count, segment[:, i])
        position.append(tf.range(self._packed_length) - tf.cumsum(
            tf.gather(counts, segment_slice - 1) * start_mask[:, i]))
    position = tf.concat([i[:, tf.newaxis] for i in position], axis=1)

    # Correct for padding tokens.
    pad_mask = tf.cast(tf.not_equal(contents, 0), dtype=INDEX_DTYPE)
    segment *= pad_mask
    position *= pad_mask

    return segment, position
Example #18
Source File: capsule_layers.py From BERT with Apache License 2.0
def vec_transformationByMat(poses, input_capsule_dim, input_capsule_num,
                            output_capsule_dim, output_capsule_num,
                            shared=True):
    inputs_poses_shape = poses.get_shape().as_list()
    poses = poses[..., tf.newaxis, :]
    poses = tf.tile(
        poses, [1, 1, output_capsule_num, 1]
    )
    if shared:
        kernel = capsule_utils._get_weights_wrapper(
            name='weights',
            shape=[1, 1, output_capsule_num, output_capsule_dim, input_capsule_dim],
            weights_decay_factor=0.0
        )
        kernel = tf.tile(
            kernel, [inputs_poses_shape[0], input_capsule_num, 1, 1, 1]
        )
    else:
        kernel = capsule_utils._get_weights_wrapper(
            name='weights',
            shape=[1, input_capsule_num, output_capsule_num, output_capsule_dim, input_capsule_dim],
            weights_decay_factor=0.0
        )
        kernel = tf.tile(
            kernel, [inputs_poses_shape[0], 1, 1, 1, 1]
        )
    tf.logging.info('poses: {}'.format(poses[..., tf.newaxis].get_shape()))
    tf.logging.info('kernel: {}'.format(kernel.get_shape()))
    u_hat_vecs = tf.squeeze(tf.matmul(kernel, poses[..., tf.newaxis]), axis=-1)
    u_hat_vecs = tf.transpose(u_hat_vecs, (0, 2, 1, 3))
    return u_hat_vecs
Example #19
Source File: sample.py From BERT with Apache License 2.0
def top_k_logits(logits, k):
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )

    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    )
Example #20
Source File: rnn_decoder_helpers.py From Counterfactual-StoryRW with MIT License
def _top_k_logits(logits, k):
    """Adapted from
    https://github.com/openai/gpt-2/blob/master/src/sample.py#L63-L77
    """
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )

    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    )
Example #21
Source File: anchor_utils.py From R3Det_Tensorflow with MIT License
def enum_ratios(anchors, anchor_ratios):
    '''
    ratio = h / w
    :param anchors:
    :param anchor_ratios:
    :return:
    '''
    ws = anchors[:, 2]  # for base anchor: w == h
    hs = anchors[:, 3]
    sqrt_ratios = tf.sqrt(tf.constant(anchor_ratios))
    ws = tf.reshape(ws / sqrt_ratios[:, tf.newaxis], [-1, 1])
    hs = tf.reshape(hs * sqrt_ratios[:, tf.newaxis], [-1, 1])
    return ws, hs
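A hedged usage sketch for enum_ratios (the anchor values are invented for illustration): broadcasting ws against sqrt_ratios[:, tf.newaxis] yields one (w, h) pair per ratio while preserving the base anchor's area.

anchors = tf.constant([[0., 0., 16., 16.]])  # one 16x16 base anchor
ws, hs = enum_ratios(anchors, anchor_ratios=[0.5, 1.0, 2.0])
# ws and hs have shape [3, 1]; each pair satisfies h / w == ratio
# while the area w * h stays 16 * 16.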
Example #22
Source File: transformer_test.py From graphics with Apache License 2.0
def test_perspective_transform_integer_centers_preset(self, dtype, interpolation):
    """Tests that we can reproduce the results of tfa_image.transform."""
    image = tf.constant(
        ((1.0, 2.0, 3.0),
         (4.0, 5.0, 6.0),
         (7.0, 8.0, 9.0),
         (10.0, 11.0, 12.0)),
        dtype=dtype)
    scale = 3
    transformation = tf.constant(
        ((1.0 / scale, 0.0, 0.0),
         (0.0, 1.0 / scale, 0.0),
         (0.0, 0.0, 1.0)),
        dtype=dtype)
    image_shape = tf.shape(image)
    image_resized_shape = image_shape * scale
    image = image[tf.newaxis, ..., tf.newaxis]
    transformation = transformation[tf.newaxis, ...]

    image_resized = tfa_image.transform(
        tf.cast(image, tf.float32),
        tf.cast(
            tfa_image.transform_ops.matrices_to_flat_transforms(transformation),
            tf.float32),
        interpolation=interpolation,
        output_shape=image_resized_shape)
    image_transformed = transformer.perspective_transform(
        image,
        transformation,
        resampling_type=transformer.ResamplingType.NEAREST
        if interpolation == "NEAREST" else transformer.ResamplingType.BILINEAR,
        pixel_type=transformer.PixelType.INTEGER,
        output_shape=image_resized_shape)

    self.assertAllClose(image_resized, image_transformed)
Example #23
Source File: tensor_ops.py From hart with GNU General Public License v3.0
def _bbox_to_mask_fixed_size(yy, region_size, output_size, dtype):
    mask = _bbox_to_mask(yy, region_size, dtype)
    nonzero_region = tf.greater(tf.reduce_prod(tf.shape(mask)), 0)
    mask = tf.cond(nonzero_region, lambda: mask,
                   lambda: tf.zeros(output_size, dtype))
    mask = tf.image.resize_images(mask[..., tf.newaxis], output_size)[..., 0]
    return mask
Example #24
Source File: tracker.py From hart with GNU General Public License v3.0
def __init__(self, inpt, bbox0, presence0, batch_size, glimpse_size,
             feature_extractor, rnn_units,
             bbox_gain=-4., att_gain=-2.5,
             zoneout_prob=0., identity_init=True,
             attention_module=RATMAttention, normalize_glimpse=False,
             debug=False, clip_bbox=False, transform_init_features=False,
             transform_init_state=False, dfn_readout=False,
             feature_shape=None, is_training=True):
    self.inpt = inpt
    self.bbox0 = bbox0
    self.presence0 = presence0
    self.glimpse_size = glimpse_size
    self.feature_extractor = feature_extractor
    self.rnn_units = rnn_units
    self.batch_size = batch_size

    self.inpt_size = convert_shape(inpt.get_shape()[2:], np.int32)
    self.bbox_gain = ensure_array(bbox_gain, 4)[np.newaxis]
    self.att_gain = ensure_array(att_gain, attention_module.n_params)[np.newaxis]
    self.zoneout_prob = zoneout_prob
    self.identity_init = identity_init
    self.attention_module = attention_module
    self.normalize_glimpse = normalize_glimpse
    self.debug = debug
    self.clip_bbox = clip_bbox
    self.transform_init_features = transform_init_features
    self.transform_init_state = transform_init_state
    self.dfn_readout = dfn_readout
    self.feature_shape = feature_shape
    self.is_training = tf.convert_to_tensor(is_training)

    super(HierarchicalAttentiveRecurrentTracker, self).__init__(self.__class__.__name__)
    try:
        self.register(is_training)
    except ValueError:
        pass
Example #25
Source File: attention_ops.py From hart with GNU General Public License v3.0
def _to_attention(self, raw_att, with_bias=True):
    bbox = FixedStdAttention.attention_to_bbox(self, raw_att)
    us = bbox[..., :2]
    if with_bias:
        us += self.offset_bias

    ds = bbox[..., 2:4] / (self.glimpse_size[np.newaxis, :2] - 1)
    ss = self._stride_to_std(ds)

    ap = tf.concat(axis=tf.rank(raw_att) - 1, values=(us, ss, ds),
                   name='attention')
    ap.set_shape(raw_att.get_shape()[:-1].concatenate((6,)))
    return ap
Example #26
Source File: attention_ops.py From hart with GNU General Public License v3.0
def extract_glimpse(inpt, attention_params, glimpse_size):
    """Extracts an attention glimpse

    :param inpt: tensor of shape == (batch_size, img_height, img_width)
    :param attention_params: tensor of shape = (batch_size, 6) as
        [uy, sy, dy, ux, sx, dx] with u - mean, s - std, d - stride
    :param glimpse_size: 2-tuple of ints as (height, width),
        size of the extracted glimpse
    :return: tensor
    """
    ap = attention_params
    shape = inpt.get_shape()
    rank = len(shape)

    assert rank in (3, 4), "Input must be 3 or 4 dimensional tensor"

    inpt_H, inpt_W = shape[1:3]
    if rank == 3:
        inpt = inpt[..., tf.newaxis]
        rank += 1

    Fy = gaussian_mask(ap[..., 0::2], glimpse_size[0], inpt_H)
    Fx = gaussian_mask(ap[..., 1::2], glimpse_size[1], inpt_W)

    gs = []
    for channel in tf.unstack(inpt, axis=rank - 1):
        g = tf.matmul(tf.matmul(Fy, channel, adjoint_a=True), Fx)
        gs.append(g)
    g = tf.stack(gs, axis=rank - 1)

    g.set_shape([shape[0]] + list(glimpse_size))
    return g
Example #27
Source File: mask_encoder.py From nlp-journey with Apache License 2.0
def create_padding_mask(seq):
    seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
    # Add extra dimensions so the padding can be added to the attention logits.
    return seq[:, tf.newaxis, tf.newaxis, :]  # (batch_size, 1, 1, seq_len)
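A quick usage sketch (the token values are invented for illustration):

seq = tf.constant([[7, 6, 0, 0],
                   [1, 2, 3, 0]])
mask = create_padding_mask(seq)  # shape (2, 1, 1, 4)
# [[[[0., 0., 1., 1.]]],
#  [[[0., 0., 0., 1.]]]]
# The two singleton axes let the mask broadcast over attention heads and
# query positions when it is added (typically scaled by -1e9) to the logits.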
Example #28
Source File: anchor_utils.py From RetinaNet_Tensorflow_Rotation with MIT License
def enum_ratios(anchors, anchor_ratios):
    '''
    ratio = h / w
    :param anchors:
    :param anchor_ratios:
    :return:
    '''
    ws = anchors[:, 2]  # for base anchor: w == h
    hs = anchors[:, 3]
    sqrt_ratios = tf.sqrt(tf.constant(anchor_ratios))
    ws = tf.reshape(ws / sqrt_ratios[:, tf.newaxis], [-1, 1])
    hs = tf.reshape(hs * sqrt_ratios[:, tf.newaxis], [-1, 1])
    return ws, hs
Example #29
Source File: loss.py From athena with Apache License 2.0
def __call__(self, logits, samples, logit_length=None):
    target = samples["output"]
    shape = tf.shape(logits)
    target = tf.reshape(target, shape)
    loss = target - logits

    # mpc mask
    mask = tf.cast(tf.math.equal(tf.reshape(samples["input"], shape), 0),
                   loss.dtype)
    loss *= mask

    # sequence length mask
    seq_mask = tf.sequence_mask(logit_length, shape[1], dtype=loss.dtype)
    seq_mask = tf.tile(seq_mask[:, :, tf.newaxis], [1, 1, shape[2]])
    loss *= seq_mask

    loss = tf.reduce_sum(tf.abs(loss, name="L1_loss"), axis=-1)
    loss = tf.reduce_mean(loss)
    return loss
Example #30
Source File: transformer_test.py From graphics with Apache License 2.0
def test_perspective_transform_half_integer_centers_preset(
        self, dtype, interpolation):
    """Tests that we can reproduce the results of tf.image.resize."""
    image = tf.constant(
        ((1.0, 2.0, 3.0),
         (4.0, 5.0, 6.0),
         (7.0, 8.0, 9.0),
         (10.0, 11.0, 12.0)),
        dtype=dtype)
    scale = 3
    transformation = tf.constant(
        ((1.0 / scale, 0.0, 0.0),
         (0.0, 1.0 / scale, 0.0),
         (0.0, 0.0, 1.0)),
        dtype=dtype)
    image_shape = tf.shape(image)
    image_resized_shape = image_shape * scale
    image = image[tf.newaxis, ..., tf.newaxis]
    transformation = transformation[tf.newaxis, ...]

    image_resized = tf.image.resize(
        image,
        size=image_resized_shape,
        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
        if interpolation == "NEAREST" else tf.image.ResizeMethod.BILINEAR)
    image_transformed = transformer.perspective_transform(
        image,
        transformation,
        resampling_type=transformer.ResamplingType.NEAREST
        if interpolation == "NEAREST" else transformer.ResamplingType.BILINEAR,
        border_type=transformer.BorderType.DUPLICATE,
        output_shape=image_resized_shape)

    self.assertAllClose(image_resized, image_transformed)