Python tensorflow.compat.v1.transpose() Examples
The following are 30 code examples of tensorflow.compat.v1.transpose(), collected from open-source projects. Each example notes its source file, originating project, and license.
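Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the function's semantics: with no perm argument, tf.compat.v1.transpose reverses all dimensions; with perm, it reorders them explicitly.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.constant(np.arange(24).reshape(2, 3, 4))
reversed_dims = tf.transpose(x)                  # shape [4, 3, 2]
swap_last_two = tf.transpose(x, perm=[0, 2, 1])  # shape [2, 4, 3]

with tf.Session() as sess:
  print(sess.run(tf.shape(reversed_dims)))  # [4 3 2]
  print(sess.run(tf.shape(swap_last_two)))  # [2 4 3]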
Example #1
Source File: slicenet.py From tensor2tensor with Apache License 2.0
def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
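The tf.matmul(a, tf.transpose(b)) call above is a common idiom for computing all pairwise dot products between two sets of row vectors in one shot. A stripped-down sketch of just that idiom, with hypothetical [batch, depth] inputs:

import tensorflow.compat.v1 as tf

sentence_emb = tf.random_normal([8, 16])  # [batch, depth]
image_emb = tf.random_normal([8, 16])     # [batch, depth]
# scores[i, j] is the dot product of image embedding i with sentence embedding j.
scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]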
Example #2
Source File: expert_utils.py From tensor2tensor with Apache License 2.0
def __init__(self, num_experts, gates):
  """Create a SparseDispatcher.

  Args:
    num_experts: an integer.
    gates: a `Tensor` of shape `[batch_size, num_experts]`.

  Returns:
    a SparseDispatcher
  """
  self._gates = gates
  self._num_experts = num_experts

  where = tf.to_int32(tf.where(tf.transpose(gates) > 0))
  self._expert_index, self._batch_index = tf.unstack(where, num=2, axis=1)
  self._part_sizes_tensor = tf.reduce_sum(tf.to_int32(gates > 0), [0])
  self._nonzero_gates = tf.gather(
      tf.reshape(self._gates, [-1]),
      self._batch_index * num_experts + self._expert_index)
Example #3
Source File: tiled_linear.py From lamb with Apache License 2.0
def _build_tiled_linear(self, inputs, input_name_and_sizes,
                        output_name_and_sizes, add_bias):
  results = []
  for output_name, output_size in output_name_and_sizes:
    r = 0.0
    for input_, (input_name, input_size) in zip(inputs, input_name_and_sizes):
      name = 'W_{}_{}'.format(input_name, output_name)
      weight = self._get_variable(
          name, shape=[output_size, input_size])
      r += tf.sparse_tensor_dense_matmul(weight, input_, adjoint_b=True)
    r = tf.transpose(r)
    if add_bias:
      # Biases are dense, hence we call _get_variable of the base class.
      r += super(SparseTiledLinear, self)._get_variable(
          'B_{}'.format(output_name), shape=[output_size],
          default_initializer=tf.zeros_initializer())
    results.append(r)
  return results

# TODO(melisgl): Since computation is the same as in TiledLinear,
# perhaps this should be implemented as a custom getter (see
# tf.get_variable) instead of being tied to tiling.
Example #4
Source File: neural_gpu.py From tensor2tensor with Apache License 2.0
def neural_gpu_body(inputs, hparams, name=None):
  """The core Neural GPU."""
  with tf.variable_scope(name, "neural_gpu"):

    def step(state, inp):  # pylint: disable=missing-docstring
      x = tf.nn.dropout(state, 1.0 - hparams.dropout)
      for layer in range(hparams.num_hidden_layers):
        x = common_layers.conv_gru(
            x, (hparams.kernel_height, hparams.kernel_width),
            hparams.hidden_size, name="cgru_%d" % layer)
      # Padding input is zeroed-out in the modality, we check this by summing.
      padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
      new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
      return new_state

    return tf.foldl(
        step,
        tf.transpose(inputs, [1, 0, 2, 3]),
        initializer=inputs,
        parallel_iterations=1,
        swap_memory=True)
Example #5
Source File: transformer_nat.py From tensor2tensor with Apache License 2.0
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, e_loss
Example #6
Source File: scheduled_sampling.py From tensor2tensor with Apache License 2.0
def _update_timestep(x, timestep, values):
  """Set x[:, timestep] = values.

  This operation is **NOT** differentiable.

  Args:
    x: Tensor of shape [batch_size, seq_len, ...]
    timestep: int or scalar Tensor. Index to update in x.
    values: Tensor of shape [batch_size, ...]. New values for x[:, timestep].

  Returns:
    Copy of 'x' after setting x[:, timestep] = values.
  """
  # list() is needed so the swap below works under Python 3, where range()
  # does not support item assignment.
  perm = list(range(x.shape.ndims))
  perm[0], perm[1] = perm[1], perm[0]
  x = tf.transpose(x, perm)
  x = inplace_ops.alias_inplace_update(x, timestep, values)
  x = tf.transpose(x, perm)
  return x
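The swap-update-swap pattern above relies on the internal inplace_ops module. A rough public-op sketch of the same idea, assuming tf.tensor_scatter_nd_update is acceptable in place of alias_inplace_update (it copies rather than aliasing):

import tensorflow.compat.v1 as tf

x = tf.zeros([4, 7, 3])   # [batch_size, seq_len, depth]
values = tf.ones([4, 3])  # new values for x[:, timestep]
timestep = 2

# Move the axis to update to the front, scatter one slice, then restore layout.
x_t = tf.transpose(x, [1, 0, 2])  # [seq_len, batch_size, depth]
x_t = tf.tensor_scatter_nd_update(
    x_t, indices=[[timestep]], updates=tf.expand_dims(values, 0))
x = tf.transpose(x_t, [1, 0, 2])  # back to [batch_size, seq_len, depth]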
Example #7
Source File: masked.py From magenta with Apache License 2.0
def batch_to_time(x, block_size):
  """Inverse of `time_to_batch(x, block_size)`.

  Args:
    x: Tensor of shape [nb*block_size, k, n] for some natural number k.
    block_size: number of time steps (i.e. size of dimension 1) in the output
      tensor.

  Returns:
    Tensor of shape [nb, k*block_size, n].
  """
  shape = x.get_shape().as_list()
  y = tf.reshape(x, [shape[0] // block_size, block_size, shape[1], shape[2]])
  y = tf.transpose(y, [0, 2, 1, 3])
  y = tf.reshape(y, [shape[0] // block_size, shape[1] * block_size, shape[2]])
  y.set_shape([mul_or_none(shape[0], 1. / block_size),
               mul_or_none(shape[1], block_size),
               shape[2]])
  return y
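The reshape-transpose-reshape sequence above is how interleaved time steps get regrouped without moving data element by element. A small stand-alone sketch of the same pattern (sizes are illustrative):

import tensorflow.compat.v1 as tf

nb, block_size, k, n = 2, 3, 4, 5
x = tf.zeros([nb * block_size, k, n])
y = tf.reshape(x, [nb, block_size, k, n])
y = tf.transpose(y, [0, 2, 1, 3])           # [nb, k, block_size, n]
y = tf.reshape(y, [nb, k * block_size, n])  # blocks folded back into time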
Example #8
Source File: modalities.py From tensor2tensor with Apache License 2.0
def video_l1_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for video."""
  del targets, vocab_size  # unused arg
  num_channels = model_hparams.problem.num_channels
  num_frames = model_hparams.video_num_target_frames
  with tf.variable_scope("rgb"):
    body_output_shape = common_layers.shape_list(body_output)
    res = tf.layers.dense(body_output, num_channels * num_frames, name="cast")
    res = tf.reshape(res, body_output_shape[:3] + [num_channels, num_frames])
    res = tf.transpose(res, [0, 4, 1, 2, 3])  # Move frames next to batch.
    if not tf.get_variable_scope().reuse:
      res_argmax = res[:, -1, :, :, :]
      tf.summary.image(
          "result",
          common_layers.tpu_safe_image_summary(res_argmax),
          max_outputs=1)
    return tf.expand_dims(res, axis=-1)  # Add an axis like in perplexity.
Example #9
Source File: vq_discrete.py From tensor2tensor with Apache License 2.0
def embedding_lookup(self, x, means):
  """Compute nearest neighbors and loss for training the embeddings.

  Args:
    x: Batch of encoder continuous latent states sliced/projected into
      shape [-1, num_blocks, block_dim].
    means: Embedding means.

  Returns:
    The nearest neighbor in one hot form, the nearest neighbor itself, the
    commitment loss, embedding training loss.
  """
  x_means_hot = self.nearest_neighbor(x, means)
  x_means_hot_flat = tf.reshape(
      x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])
  x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)
  x_means = tf.transpose(x_means, [1, 0, 2])
  q_loss = tf.reduce_mean(
      tf.squared_difference(tf.stop_gradient(x), x_means))
  e_loss = tf.reduce_mean(
      tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, x_means, q_loss, e_loss
Example #10
Source File: discretization.py From tensor2tensor with Apache License 2.0
def project_hidden(x, projection_tensors, hidden_size, num_blocks):
  """Project encoder hidden state under num_blocks using projection tensors.

  Args:
    x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
    projection_tensors: Projection tensors used to project the hidden state.
    hidden_size: Dimension of the latent space.
    num_blocks: Number of blocks in DVQ.

  Returns:
    x_projected: Projected states of shape [batch_size, latent_dim,
      num_blocks, hidden_size / num_blocks].
  """
  batch_size, latent_dim, _ = common_layers.shape_list(x)
  x = tf.reshape(x, shape=[1, -1, hidden_size])
  x_tiled = tf.reshape(
      tf.tile(x, multiples=[num_blocks, 1, 1]),
      shape=[num_blocks, -1, hidden_size])
  x_projected = tf.matmul(x_tiled, projection_tensors)
  x_projected = tf.transpose(x_projected, perm=[1, 0, 2])
  x_4d = tf.reshape(x_projected, [batch_size, latent_dim, num_blocks, -1])
  return x_4d
Example #11
Source File: modeling.py From gpt2-ml with Apache License 2.0
def _attention_projection_and_transpose(x_flat, batch_size, seq_length,
                                        num_attention_heads, size_per_head,
                                        name, initializer_range=0.02):
    """
    :param x_flat: [batch_size*seq_length, width]
    :return: A fixed up tensor of size
      [batch_size, num_attention_heads, seq_length, size_per_head]
    """
    batch_size_seq_length, dim = get_shape_list(x_flat, expected_rank=2)

    if dim != size_per_head * num_attention_heads:
        raise ValueError(
            "passed in a tensor of shape {} when size_per_head={} and "
            "num_attention_heads={}".format(
                (batch_size_seq_length, dim), size_per_head,
                num_attention_heads))

    projected = tf.layers.dense(
        x_flat,
        num_attention_heads * size_per_head,
        name=name,
        kernel_initializer=create_initializer(initializer_range))
    projected = tf.reshape(
        projected,
        [batch_size, seq_length, num_attention_heads, size_per_head])
    output_tensor = tf.transpose(projected, [0, 2, 1, 3])
    return output_tensor
Example #12
Source File: lstm_models.py From magenta with Apache License 2.0
def _reshape_to_hierarchy(self, t):
  """Reshapes `t` so that its initial dimensions match the hierarchy."""
  # Exclude the final, core decoder length.
  level_lengths = self._level_lengths[:-1]
  t_shape = t.shape.as_list()
  t_rank = len(t_shape)
  batch_size = t_shape[0]
  hier_shape = [batch_size] + level_lengths
  if t_rank == 3:
    hier_shape += [-1] + t_shape[2:]
  elif t_rank != 2:
    # We only expect rank-2 for lengths and rank-3 for sequences.
    raise ValueError('Unexpected shape for tensor: %s' % t)
  hier_t = tf.reshape(t, hier_shape)
  # Move the batch dimension to after the hierarchical dimensions.
  num_levels = len(level_lengths)
  perm = list(range(len(hier_shape)))
  perm.insert(num_levels, perm.pop(0))
  return tf.transpose(hier_t, perm)
Example #13
Source File: seq2seq.py From magenta with Apache License 2.0
def _transpose_batch_time(x):
  """Transposes the batch and time dimensions of a Tensor.

  If the input tensor has rank < 2 it returns the original tensor. Retains as
  much of the static shape information as possible.

  Args:
    x: A Tensor.

  Returns:
    x transposed along the first two dimensions.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.rank is not None and x_static_shape.rank < 2:
    return x

  x_rank = tf.rank(x)
  x_t = tf.transpose(
      x, tf.concat(([1, 0], tf.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tf.TensorShape(
          [x_static_shape[1], x_static_shape[0]]).concatenate(
              x_static_shape[2:]))
  return x_t
Example #14
Source File: seq2seq.py From magenta with Apache License 2.0
def categorical_sample(logits, dtype=tf.int32, sample_shape=(), seed=None):
  """Samples from categorical distribution."""
  logits = tf.convert_to_tensor(logits, name="logits")
  event_size = tf.shape(logits)[-1]
  batch_shape_tensor = tf.shape(logits)[:-1]

  def _sample_n(n):
    """Sample vector of categoricals."""
    if logits.shape.ndims == 2:
      logits_2d = logits
    else:
      logits_2d = tf.reshape(logits, [-1, event_size])
    sample_dtype = tf.int64 if logits.dtype.size > 4 else tf.int32
    draws = tf.multinomial(
        logits_2d, n, seed=seed, output_dtype=sample_dtype)
    draws = tf.reshape(
        tf.transpose(draws),
        tf.concat([[n], batch_shape_tensor], 0))
    return tf.cast(draws, dtype)

  return _call_sampler(_sample_n, sample_shape)
Example #15
Source File: region_similarity_calculator.py From Object_Detection_Tracking with Apache License 2.0
def intersection(boxlist1, boxlist2, scope=None):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise intersections
  """
  with tf.name_scope(scope, 'Intersection'):
    y_min1, x_min1, y_max1, x_max1 = tf.split(
        value=boxlist1.get(), num_or_size_splits=4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(
        value=boxlist2.get(), num_or_size_splits=4, axis=1)
    all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
    all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
    intersect_heights = tf.maximum(
        0.0, all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
    all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
    intersect_widths = tf.maximum(
        0.0, all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths
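The tf.minimum(y_max1, tf.transpose(y_max2)) lines above exploit broadcasting: an [N, 1] column compared against a transposed [1, M] row yields all N*M pairwise comparisons at once. A stripped-down sketch of just that trick:

import tensorflow.compat.v1 as tf

y_max1 = tf.constant([[2.0], [4.0]])         # [N, 1]
y_max2 = tf.constant([[3.0], [1.0], [5.0]])  # [M, 1]
# Broadcasts [N, 1] against [1, M] to produce every pairwise minimum.
pairwise_min = tf.minimum(y_max1, tf.transpose(y_max2))  # [N, M]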
Example #16
Source File: data_aug_lib.py From mesh with Apache License 2.0
def _rand_noise(noise_mean, noise_dev, scale, shape):
  """Generate random noise given a particular scale and shape."""
  noise_shape = [x // scale for x in shape]
  noise_shape = [1 if x == 0 else x for x in noise_shape]
  noise = tf.random.normal(
      shape=noise_shape, mean=noise_mean, stddev=noise_dev)
  noise = tf.clip_by_value(
      noise, noise_mean - 2.0 * noise_dev, noise_mean + 2.0 * noise_dev)

  if scale != 1:
    noise = tf.image.resize_images(
        noise, [shape[0], shape[1]])
    noise = tf.transpose(noise, [0, 2, 1])
    noise = tf.image.resize_images(
        noise, [shape[0], shape[2]])
    noise = tf.transpose(noise, [0, 2, 1])
  return noise
Example #17
Source File: common_layers.py From language with Apache License 2.0
def _split_heads(x, num_heads):
  """Split dimension 3 into multiple heads.

  Attempts to preserve static shape information.

  Args:
    x: a Tensor with shape [batch, length, emb_size]
    num_heads: an integer

  Returns:
    a Tensor with shape [batch, num_heads, length, emb_size / num_heads]
  """
  old_shape = x.get_shape().dims
  new_shape = old_shape[:-1] + [num_heads] + [old_shape[-1] // num_heads]
  ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [num_heads, -1]], 0))
  ret.set_shape(new_shape)
  return tf.transpose(ret, [0, 2, 1, 3])
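The final transpose above is the standard multi-head attention layout change (it also appears in Example #11). As a stand-alone sketch with concrete, hypothetical sizes:

import tensorflow.compat.v1 as tf

batch, length, num_heads, head_dim = 2, 5, 4, 8
x = tf.random_normal([batch, length, num_heads * head_dim])
x = tf.reshape(x, [batch, length, num_heads, head_dim])
x = tf.transpose(x, [0, 2, 1, 3])  # [batch, num_heads, length, head_dim]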
Example #18
Source File: discriminative_eval.py From language with Apache License 2.0
def create_cpc_model(model, num_choices, is_training):
  """Creates a classification model.

  Args:
    model: the BERT model from modeling.py
    num_choices: number of negative samples + 1
    is_training: training mode (bool)

  Returns:
    tuple of (logits, probabilities) for model
  """
  output_layer = model.get_pooled_output()

  hidden_size = output_layer.shape[-1].value

  with tf.variable_scope("cpc_loss"):
    softmax_weights = tf.get_variable(
        "softmax_weights", [hidden_size, 8],
        initializer=tf.truncated_normal_initializer(stddev=0.02))

    with tf.variable_scope("loss"):
      if is_training:
        # I.e., 0.1 dropout
        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

      matmul_out = tf.matmul(output_layer, softmax_weights)
      logits = tf.reshape(matmul_out, (-1, num_choices, 8))
      logits = tf.transpose(logits, perm=[0, 2, 1])
      probabilities = tf.nn.softmax(logits, axis=-1)

  return (logits, probabilities)
Example #19
Source File: bilin_model_builder.py From language with Apache License 2.0
def create_model(model, labels, label_types, num_choices, k_size=4):
  """Creates a classification model.

  Args:
    model: the BERT model from modeling.py
    labels: ground truth paragraph order
    label_types: which k distances are being predicted
    num_choices: number of negative samples + 1
    k_size: window size of CPC k distance

  Returns:
    tuple of (loss, per_example_loss, logits, probabilities) for model
  """
  output_layer = model.get_pooled_output()

  hidden_size = output_layer.shape[-1].value

  with tf.variable_scope("cpc_loss"):
    output = tf.reshape(output_layer, (-1, num_choices + 1, hidden_size))
    contexts = output[:, 0, :]
    targets = output[:, 1:, :]

    softmax_weights = tf.get_variable(
        "cpc_weights", [k_size * 2, hidden_size, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))

    context_encoded = tf.matmul(softmax_weights, contexts, transpose_b=True)
    context_encoded = tf.transpose(context_encoded, perm=[2, 0, 1])
    logits = tf.matmul(targets, context_encoded, transpose_b=True)
    logits = tf.transpose(logits, perm=[0, 2, 1])

    example_weights = tf.reduce_sum(
        tf.one_hot(label_types, k_size * 2), axis=1)
    per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    probabilities = tf.nn.softmax(logits, axis=-1)
    loss = tf.reduce_mean(
        tf.reduce_sum(example_weights * per_example_loss, axis=-1))

  return (loss, per_example_loss, logits, probabilities)
Example #20
Source File: visualization.py From tensor2robot with Apache License 2.0
def get_softmax_viz(image, softmax, nrows=None):
  """Arrange softmax maps in a grid and superimpose them on the image."""
  softmax_shape = tf.shape(softmax)
  batch_size = softmax_shape[0]
  target_height = softmax_shape[1] * 2
  target_width = softmax_shape[2] * 2
  num_points = softmax_shape[3]

  if nrows is None:
    # Find a number of rows such that the arrangement is as square as
    # possible.
    num_points_float = tf.cast(num_points, tf.float32)
    nfsqrt = tf.cast(tf.floor(tf.sqrt(num_points_float)), tf.int32)
    divs = tf.range(1, nfsqrt + 1)
    remainders = tf.mod(num_points_float, tf.cast(divs, tf.float32))
    divs = tf.gather(divs, tf.where(tf.equal(remainders, 0)))
    nrows = tf.reduce_max(divs)
  ncols = tf.cast(num_points / nrows, tf.int32)
  nrows = tf.cast(nrows, tf.int32)
  # Normalize per channel.
  img = softmax / tf.reduce_max(softmax, axis=[1, 2], keepdims=True)
  # Use softmax as hue and saturation and original image as value of HSV
  # image.
  greyimg = tf.image.rgb_to_grayscale(image)
  greyimg = tf.image.resize_images(greyimg, [target_height, target_width])
  greyimg = tf.tile(greyimg, [1, 1, 1, num_points])
  greyimg = tf.reshape(
      greyimg, [batch_size, target_height, target_width, num_points, 1])
  img = tf.image.resize_images(img, [target_height, target_width])
  img = tf.reshape(
      img, [batch_size, target_height, target_width, num_points, 1])
  img = tf.concat([img / 2.0 + 0.5, img, greyimg * 0.7 + 0.3], axis=4)
  # Rearrange channels into a ncols x nrows grid.
  img = tf.reshape(
      img, [batch_size, target_height, target_width, nrows, ncols, 3])
  img = tf.transpose(img, [0, 3, 1, 4, 2, 5])
  img = tf.reshape(
      img, [batch_size, target_height * nrows, target_width * ncols, 3])
  img = tf.image.hsv_to_rgb(img)
  return img
Example #21
Source File: resnet.py From tensor2robot with Apache License 2.0
def __call__(self, inputs, training):
  """Add operations to classify a batch of input images.

  Args:
    inputs: A Tensor representing a batch of input images.
    training: A boolean. Set to True to add operations required only when
      training the classifier.

  Returns:
    A logits Tensor with shape [<batch_size>, self.num_classes].
  """
  with self._model_variable_scope():
    if self.data_format == 'channels_first':
      # Convert the inputs from channels_last (NHWC) to channels_first
      # (NCHW). This provides a large performance boost on GPU. See
      # https://www.tensorflow.org/performance/performance_guide#data_formats
      inputs = tf.transpose(inputs, [0, 3, 1, 2])

    inputs = conv2d_fixed_padding(
        inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,
        strides=self.conv_stride, data_format=self.data_format)
    inputs = tf.identity(inputs, 'initial_conv')

    if self.first_pool_size:
      inputs = tf.layers.max_pooling2d(
          inputs=inputs, pool_size=self.first_pool_size,
          strides=self.first_pool_stride, padding='SAME',
          data_format=self.data_format)
      inputs = tf.identity(inputs, 'initial_max_pool')

    for i, num_blocks in enumerate(self.block_sizes):
      num_filters = self.num_filters * (2**i)
      inputs = block_layer(
          inputs=inputs, filters=num_filters, bottleneck=self.bottleneck,
          block_fn=self.block_fn, blocks=num_blocks,
          strides=self.block_strides[i], training=training,
          name='block_layer{}'.format(i + 1), data_format=self.data_format)

    return inputs
Example #22
Source File: robust_model.py From interval-bound-propagation with Apache License 2.0
def compute_verifiable_loss(self, verifiable_obj, labels):
  """Compute verifiable training objective.

  Args:
    verifiable_obj: Verifiable training objective.
    labels: Ground truth labels.

  Returns:
    verifiable_loss: Aggregated loss of the verifiable training objective.
  """
  # Three options: reduce max, reduce mean, and softmax.
  if self.config['verifiable_training_aggregation'] == 'mean':
    verifiable_loss = tf.reduce_mean(
        verifiable_obj)  # average across all target labels
  elif self.config['verifiable_training_aggregation'] == 'max':
    # Worst target label only.
    verifiable_loss = tf.reduce_mean(tf.reduce_max(verifiable_obj, axis=0))
  elif self.config['verifiable_training_aggregation'] == 'softmax':
    # This assumes that entries in verifiable_obj belonging to the true class
    # are set to a (large) negative value, so to not affect the softmax much.
    # [batch_size]. Compute x-entropy against one-hot distrib. for true label.
    verifiable_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=tf.transpose(verifiable_obj), labels=labels)
    verifiable_loss = tf.reduce_mean(
        verifiable_loss)  # aggregation across batch
  else:
    logging.info(self.config['verifiable_training_aggregation'])
    raise ValueError(
        'Bad input argument for verifiable_training_aggregation used.')

  return verifiable_loss
Example #23
Source File: specification.py From interval-bound-propagation with Apache License 2.0
def evaluate(self, logits):
  if len(logits.shape) == 2:
    correct_class_logit = tf.gather_nd(logits, self._correct_idx)
    correct_class_logit = tf.expand_dims(correct_class_logit, -1)
    wrong_class_logits = tf.gather_nd(logits, self._wrong_idx)
  elif len(logits.shape) == 3:
    # [num_restarts, batch_size, num_classes] to
    # [num_restarts, batch_size, num_specs].
    logits = tf.transpose(logits, [1, 2, 0])  # Put restart dimension last.
    correct_class_logit = tf.gather_nd(logits, self._correct_idx)
    correct_class_logit = tf.transpose(correct_class_logit)
    correct_class_logit = tf.expand_dims(correct_class_logit, -1)
    wrong_class_logits = tf.gather_nd(logits, self._wrong_idx)
    wrong_class_logits = tf.transpose(wrong_class_logits, [2, 0, 1])
  else:
    assert len(logits.shape) == 4
    # [num_restarts, num_specs, batch_size, num_classes] to
    # [num_restarts, batch_size, num_specs].
    logits = tf.transpose(logits, [2, 3, 1, 0])
    correct_class_logit = tf.gather_nd(logits, self._correct_idx)
    correct_class_logit = tf.transpose(correct_class_logit, [2, 0, 1])
    batch_size = tf.shape(logits)[0]
    wrong_idx = tf.concat([
        self._wrong_idx,
        tf.tile(tf.reshape(tf.range(self.num_specifications, dtype=tf.int32),
                           [1, self.num_specifications, 1]),
                [batch_size, 1, 1])], axis=-1)
    wrong_class_logits = tf.gather_nd(logits, wrong_idx)
    wrong_class_logits = tf.transpose(wrong_class_logits, [2, 0, 1])
  return wrong_class_logits - correct_class_logit
Example #24
Source File: specification.py From interval-bound-propagation with Apache License 2.0
def _build(self, modules):
  if not (self.collapse and
          isinstance(modules[-1], verifiable_wrapper.LinearFCWrapper)):
    logging.info('Elision of last layer disabled.')
    bounds = modules[-1].output_bounds
    bounds = bounds_lib.IntervalBounds.convert(bounds)
    correct_class_logit = tf.gather_nd(bounds.lower, self._correct_idx)
    wrong_class_logits = tf.gather_nd(bounds.upper, self._wrong_idx)
    return wrong_class_logits - tf.expand_dims(correct_class_logit, 1)

  logging.info('Elision of last layer active.')
  bounds = modules[-1].input_bounds
  bounds = bounds_lib.IntervalBounds.convert(bounds)
  batch_size = tf.shape(bounds.lower)[0]
  w = modules[-1].module.w
  b = modules[-1].module.b
  w_t = tf.tile(tf.expand_dims(tf.transpose(w), 0), [batch_size, 1, 1])
  b_t = tf.tile(tf.expand_dims(b, 0), [batch_size, 1])
  w_correct = tf.expand_dims(tf.gather_nd(w_t, self._correct_idx), -1)
  b_correct = tf.expand_dims(tf.gather_nd(b_t, self._correct_idx), 1)
  w_wrong = tf.transpose(tf.gather_nd(w_t, self._wrong_idx), [0, 2, 1])
  b_wrong = tf.gather_nd(b_t, self._wrong_idx)
  w = w_wrong - w_correct
  b = b_wrong - b_correct
  # Maximize z * w + b s.t. lower <= z <= upper.
  c = (bounds.lower + bounds.upper) / 2.
  r = (bounds.upper - bounds.lower) / 2.
  c = tf.einsum('ij,ijk->ik', c, w)
  if b is not None:
    c += b
  r = tf.einsum('ij,ijk->ik', r, tf.abs(w))
  return c + r
Example #25
Source File: preprocessor.py From Object_Detection_Tracking with Apache License 2.0
def keypoint_flip_horizontal(keypoints, flip_point, flip_permutation,
                             scope=None):
  """Flips the keypoints horizontally around the flip_point.

  This operation flips the x coordinate for each keypoint around the
  flip_point and also permutes the keypoints in a manner specified by
  flip_permutation.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    flip_point: (float) scalar tensor representing the x coordinate to flip
      the keypoints around.
    flip_permutation: rank 1 int32 tensor containing the keypoint flip
      permutation. This specifies the mapping from original keypoint indices
      to the flipped keypoint indices. This is used primarily for keypoints
      that are not reflection invariant. E.g. Suppose there are 3 keypoints
      representing ['head', 'right_eye', 'left_eye'], then a logical choice
      for flip_permutation might be [0, 2, 1] since we want to swap the
      'left_eye' and 'right_eye' after a horizontal flip.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  with tf.name_scope(scope, 'FlipHorizontal'):
    keypoints = tf.transpose(keypoints, [1, 0, 2])
    keypoints = tf.gather(keypoints, flip_permutation)
    v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
    u = flip_point * 2.0 - u
    new_keypoints = tf.concat([v, u], 2)
    new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
    return new_keypoints
Example #26
Source File: faster_rcnn_box_coder.py From Object_Detection_Tracking with Apache License 2.0
def _decode(self, rel_codes, anchors):
  """Decode relative codes to boxes.

  Args:
    rel_codes: a tensor representing N anchor-encoded boxes.
    anchors: BoxList of anchors.

  Returns:
    boxes: BoxList holding N bounding boxes.
  """
  ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()

  ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
  if self._scale_factors:
    ty /= self._scale_factors[0]
    tx /= self._scale_factors[1]
    th /= self._scale_factors[2]
    tw /= self._scale_factors[3]
  w = tf.exp(tw) * wa
  h = tf.exp(th) * ha
  ycenter = ty * ha + ycenter_a
  xcenter = tx * wa + xcenter_a
  ymin = ycenter - h / 2.
  xmin = xcenter - w / 2.
  ymax = ycenter + h / 2.
  xmax = xcenter + w / 2.
  return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
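The tf.unstack(tf.transpose(rel_codes)) call above is a compact way to split an [N, 4] matrix into four length-N column vectors. In isolation:

import tensorflow.compat.v1 as tf

rel_codes = tf.random_normal([10, 4])  # N boxes, 4 coordinates each
# Transpose to [4, N], then unstack along axis 0 into four [N] vectors.
ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))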
Example #27
Source File: rl.py From tensor2tensor with Apache License 2.0
def body(self, features):
  observations = features["inputs"]
  x = tf.transpose(observations, [0, 2, 3, 1, 4])
  x_shape = common_layers.shape_list(x)
  x = tf.reshape(x, x_shape[:-2] + [-1])
  dropout = getattr(self.hparams, "dropout_ppo", 0.0)
  with tf.variable_scope("feed_forward_cnn_small"):
    x = tf.cast(x, tf.float32) / 255.0
    x = tf.nn.dropout(x, rate=dropout)
    x = tf.layers.conv2d(
        x, 32, (4, 4), strides=(2, 2), name="conv1",
        activation=common_layers.belu, padding="SAME")
    x = tf.nn.dropout(x, rate=dropout)
    x = tf.layers.conv2d(
        x, 64, (4, 4), strides=(2, 2), name="conv2",
        activation=common_layers.belu, padding="SAME")
    x = tf.nn.dropout(x, rate=dropout)
    x = tf.layers.conv2d(
        x, 128, (4, 4), strides=(2, 2), name="conv3",
        activation=common_layers.belu, padding="SAME")

    flat_x = tf.layers.flatten(x)
    flat_x = tf.nn.dropout(flat_x, rate=dropout)
    x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu, name="dense1")

    logits = tf.layers.dense(
        x, self.hparams.problem.num_actions, name="dense2")
    logits = tf.expand_dims(logits, axis=1)
    logits = clip_logits(logits, self.hparams)

    value = tf.layers.dense(x, 1, name="value")
  return {"target_policy": logits, "target_value": value}
Example #28
Source File: utils.py From lamb with Apache License 2.0
def random_mask2(shape, k):
  x = tf.random_normal(shape=shape)
  x = tf.transpose(x)
  kth_largest = tf.nn.top_k(x, k)[0][:, k-1]
  mask = tf.to_float(tf.greater_equal(x, tf.expand_dims(kth_largest, 1)))
  return tf.transpose(mask)
Example #29
Source File: lib_tfsampling.py From magenta with Apache License 2.0
def sample_with_temperature(logits, temperature):
  """Either argmax after softmax or random sample along the pitch axis.

  Args:
    logits: a Tensor of shape (batch, time, pitch, instrument).
    temperature: a float; 0.0 means argmax, 1.0 means random sampling.

  Returns:
    a Tensor of the same shape, with one_hots on the pitch dimension.
  """
  logits = tf.transpose(logits, [0, 1, 3, 2])
  pitch_range = tf.shape(logits)[-1]

  def sample_from_logits(logits):
    with tf.control_dependencies([tf.assert_greater(temperature, 0.0)]):
      logits = tf.identity(logits)
    reshaped_logits = (
        tf.reshape(logits, [-1, tf.shape(logits)[-1]]) / temperature)
    choices = tf.multinomial(reshaped_logits, 1)
    choices = tf.reshape(choices,
                         tf.shape(logits)[:logits.get_shape().ndims - 1])
    return choices

  choices = tf.cond(tf.equal(temperature, 0.0),
                    lambda: tf.argmax(tf.nn.softmax(logits), -1),
                    lambda: sample_from_logits(logits))
  samples_onehot = tf.one_hot(choices, pitch_range)
  return tf.transpose(samples_onehot, [0, 1, 3, 2])
Example #30
Source File: inception_score.py From Inception-Score with Apache License 2.0
def inception_logits(images=inception_images, num_splits=1):
  images = tf.transpose(images, [0, 2, 3, 1])
  size = 299
  images = tf.compat.v1.image.resize_bilinear(images, [size, size])
  generated_images_list = array_ops.split(
      images, num_or_size_splits=num_splits)
  logits = tf.map_fn(
      fn=tfgan.eval.classifier_fn_from_tfhub(
          INCEPTION_TFHUB, INCEPTION_OUTPUT, True),
      elems=array_ops.stack(generated_images_list),
      parallel_iterations=8,
      back_prop=False,
      swap_memory=True,
      name='RunClassifier')
  logits = array_ops.concat(array_ops.unstack(logits), 0)
  return logits
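The opening transpose above converts channels-first (NCHW) images to channels-last (NHWC), which the bilinear resize op expects. The layout conversion on its own, with hypothetical sizes:

import tensorflow.compat.v1 as tf

images_nchw = tf.random_normal([8, 3, 64, 64])         # [N, C, H, W]
images_nhwc = tf.transpose(images_nchw, [0, 2, 3, 1])  # [N, H, W, C]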