Python tensorflow.compat.v1.shape() Examples
The following are 30 code examples of tensorflow.compat.v1.shape(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the tensorflow.compat.v1 module.
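Before working through the examples, note the distinction the whole page turns on: x.shape is the static shape known when the graph is built (and may contain None), while tf.compat.v1.shape(x) returns an int32 tensor whose value is only known when the graph runs. A minimal sketch of the difference:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[None, 128])
print(x.shape.as_list())     # [None, 128] -- batch dimension unknown statically
dynamic_shape = tf.shape(x)  # int32 Tensor, resolved when the graph runs

with tf.Session() as sess:
  print(sess.run(dynamic_shape, feed_dict={x: [[0.0] * 128] * 4}))  # [  4 128]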
Example #1
Source File: data_reader.py From tensor2tensor with Apache License 2.0
def pad_for_tpu(shapes_dict, hparams, max_length):
  """Pads unknown features' dimensions for TPU."""
  padded_shapes = {}

  def get_filler(specified_max_length):
    if not specified_max_length:
      return max_length
    return min(specified_max_length, max_length)

  inputs_none_filler = get_filler(hparams.max_input_seq_length)
  targets_none_filler = get_filler(hparams.max_target_seq_length)

  def pad_one_shape(shape, none_filler):
    return [
        (dim if dim is not None else none_filler) for dim in shape.as_list()
    ]

  for key, shape in six.iteritems(shapes_dict):
    if key == "inputs":
      padded_shapes[key] = pad_one_shape(shape, inputs_none_filler)
    elif key == "targets":
      padded_shapes[key] = pad_one_shape(shape, targets_none_filler)
    else:
      padded_shapes[key] = pad_one_shape(shape, max_length)
  return padded_shapes
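A hypothetical call to illustrate the filler logic; the hparams values below are made up, and 0 counts as unset:

from types import SimpleNamespace
import six
import tensorflow.compat.v1 as tf

# Hypothetical hparams: input length unset (0), target length capped at 50.
hparams = SimpleNamespace(max_input_seq_length=0, max_target_seq_length=50)
shapes = {"inputs": tf.TensorShape([None]), "targets": tf.TensorShape([None])}
print(pad_for_tpu(shapes, hparams, max_length=100))
# {'inputs': [100], 'targets': [50]}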
Example #2
Source File: nasnet_utils.py From benchmarks with Apache License 2.0
def _cell_base(self, net, prev_layer):
  """Runs the beginning of the conv cell before the predicted ops are run."""
  num_filters = self._filter_size

  # Check to be sure prev layer stuff is setup correctly
  prev_layer = self._reduce_prev_layer(prev_layer, net)

  net = tf.nn.relu(net)
  net = slim.conv2d(net, num_filters, 1, scope='1x1')
  net = slim.batch_norm(net, scope='beginning_bn')
  split_axis = get_channel_index()
  net = tf.split(axis=split_axis, num_or_size_splits=1, value=net)
  for split in net:
    # Each split should carry the full scaled filter count.
    assert int(split.shape[split_axis]) == int(
        self._num_conv_filters * self._filter_scaling)
  net.append(prev_layer)
  return net
Example #3
Source File: nasnet_utils.py From benchmarks with Apache License 2.0
def _reduce_prev_layer(self, prev_layer, curr_layer):
  """Matches dimension of prev_layer to the curr_layer."""
  # Set the prev layer to the current layer if it is none
  if prev_layer is None:
    return curr_layer
  curr_num_filters = self._filter_size
  prev_num_filters = get_channel_dim(prev_layer.shape)
  curr_filter_shape = int(curr_layer.shape[2])
  prev_filter_shape = int(prev_layer.shape[2])
  if curr_filter_shape != prev_filter_shape:
    prev_layer = tf.nn.relu(prev_layer)
    prev_layer = factorized_reduction(prev_layer, curr_num_filters, stride=2)
  elif curr_num_filters != prev_num_filters:
    prev_layer = tf.nn.relu(prev_layer)
    prev_layer = slim.conv2d(
        prev_layer, curr_num_filters, 1, scope='prev_1x1')
    prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
  return prev_layer
Example #4
Source File: preprocessing.py From benchmarks with Apache License 2.0
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
  """Decode a JPEG string into one 3-D float image Tensor.

  Args:
    image_buffer: scalar string Tensor.
    scope: Optional scope for op_scope.
  Returns:
    3-D float Tensor with values ranging from [0, 1).
  """
  # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
  # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
  with tf.name_scope(scope or 'decode_jpeg'):
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_jpeg(image_buffer, channels=3,
                                 fancy_upscaling=False,
                                 dct_method='INTEGER_FAST')

    # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')

    return image
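A hypothetical usage; 'photo.jpg' below is a placeholder path, not a file from the original project:

image_buffer = tf.io.read_file('photo.jpg')  # placeholder path
image = decode_jpeg(image_buffer)
print(image.shape)  # (?, ?, 3): height and width stay unknown until run time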
Example #5
Source File: dropout.py From lamb with Apache License 2.0
def _ensure_keep_mask(self, x):
  if self._keep_mask is None or not self._share_mask:
    shape = tf.shape(x)
    k = shape[1]
    # To make this class a drop-in replacement for bernoulli dropout we
    # parameterize it with keep_prob. Set alpha of the dirichlet so that the
    # variance is equal to the variance of the bernoulli with p=keep_prob
    # divided by keep_prob.
    # Now the variance of the dirichlet with k equal alphas is
    # (k-1)/(k^2*(k*alpha+1)). Solve that for alpha.
    kf = tf.cast(k, tf.float32)
    alpha = self._keep_prob * (kf - 1.0) / ((1 - self._keep_prob) * kf) - 1.0 / kf
    dist = tfp.distributions.Dirichlet(tf.ones(shape=k) * alpha)
    assert (dist.reparameterization_type ==
            tfp.distributions.FULLY_REPARAMETERIZED)
    # E[dir(alpha)] is 1/k for every element, but we want the expectation to
    # be keep_prob, hence the multiplication by kf.
    self._keep_mask = kf * dist.sample(shape[0])
    self._keep_mask.set_shape(x.get_shape())
  return self._keep_mask
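The alpha formula in the comment can be sanity-checked numerically. A quick NumPy sketch with arbitrarily chosen values:

import numpy as np

p, k = 0.8, 512  # arbitrary keep_prob and feature count
alpha = p * (k - 1) / ((1 - p) * k) - 1.0 / k
mask = k * np.random.dirichlet([alpha] * k, size=10000)
print(mask.mean())  # ~1.0, matching E[mask] = 1
print(mask.var())   # ~0.25, matching the target bernoulli variance (1-p)/p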
Example #6
Source File: dropout.py From lamb with Apache License 2.0
def _build(self, x, state):
  prev_keep_mask = state
  shape = tf.shape(x)
  noise = tf.random_uniform(shape, dtype=x.dtype)
  other_mask = tf.floor(self._keep_prob + noise)
  choice_noise = tf.random_uniform(shape, dtype=x.dtype)
  choice = tf.less(choice_noise, self._flip_prob)
  # KLUDGE(melisgl): The client has to pass the last keep_mask from
  # a batch to the next so the mask may end up next to some
  # recurrent cell state. This state is often zero at the beginning
  # and may be periodically zeroed (per example) during training.
  # While zeroing LSTM state is okay, zeroing the dropout mask is
  # not. So instead of forcing every client to deal with this common
  # (?) case, if an all zero mask is detected, then regenerate a
  # fresh mask. This is of course a major hack and won't help with
  # learnt initial states, for example.
  sum_ = tf.reduce_sum(prev_keep_mask, 1, keepdims=True)
  is_initializing = tf.equal(sum_, 0.0)
  self._keep_mask = tf.where(tf.logical_or(choice, is_initializing),
                             other_mask,
                             prev_keep_mask)
  self._time_step += 1
  return x * self._keep_mask / self._keep_prob * self._scaler
Example #7
Source File: utils.py From lamb with Apache License 2.0
def layer_norm(x, reduction_indices, epsilon=1e-9, gain=None, bias=None,
               per_element=True, scope=None):
  """Layer normalization with optional learnt per-element gain and bias."""
  reduction_indices = ensure_list(reduction_indices)
  mean = tf.reduce_mean(x, reduction_indices, keep_dims=True)
  variance = tf.reduce_mean(tf.squared_difference(x, mean),
                            reduction_indices, keep_dims=True)
  normalized = (x - mean) / tf.sqrt(variance + epsilon)
  dtype = x.dtype
  shape = x.get_shape().as_list()
  for i in six.moves.range(len(shape)):
    if i not in reduction_indices or not per_element:
      shape[i] = 1
  with tf.variable_scope(scope or 'layer_norm'):
    if gain is None:
      gain = tf.get_variable('gain', shape=shape, dtype=dtype,
                             initializer=tf.ones_initializer())
    if bias is None:
      bias = tf.get_variable('bias', shape=shape, dtype=dtype,
                             initializer=tf.zeros_initializer())
  return gain * normalized + bias
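A minimal usage sketch in graph mode, assuming the function above and the module's ensure_list helper are in scope:

x = tf.random_normal([8, 16])
y = layer_norm(x, reduction_indices=[1], scope='ln_demo')
# y is normalized to zero mean and (near) unit variance along axis 1,
# then a learnt per-element gain and bias are applied.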
Example #8
Source File: utils.py From lamb with Apache License 2.0
def sparse_random_indices(ratio, shape):
  """Returns `ratio` of all indices of `shape`, sampled without replacement."""
  assert 0 < ratio and ratio <= 1.0
  n = round_to_int(tf.TensorShape(shape).num_elements() * ratio)
  # There are two implementations. The first generates random indices
  # and wastes computation due to collisions, and the second wastes
  # memory.
  if ratio < 0.25:
    indices = {}
    if isinstance(shape, tf.TensorShape):
      shape = shape.as_list()
    while len(indices) < n:
      index = _random_index(shape)
      indices[index] = True
    return indices.keys()
  else:
    indices = _all_indices(shape)
    random.shuffle(indices)
    return indices[:n]
Example #9
Source File: utils.py From lamb with Apache License 2.0
def log_trainables(scopes=('',)):
  """Log number of trainable parameters for each scope in `scopes`.

  Args:
    scopes: A sequence of scope names.

  Returns:
    The total number of trainable parameters over all scopes in
    `scopes`. Possibly counting some parameters multiple times if the
    scopes are nested.
  """
  total = 0
  for scope in scopes:
    logging.info('Trainables in scope "%s":', scope)
    n = 0
    for var in trainable_vars_in_scope(scope):
      shape = var.get_shape()
      logging.info('trainable: %s shape %r (%r)', var.name, shape.as_list(),
                   shape.num_elements())
      n += shape.num_elements()
    logging.info('Number of parameters in scope "%s": %r', scope, n)
    total += n
  return total
Example #10
Source File: metrics.py From tensor2tensor with Apache License 2.0
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions matches labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights
Example #11
Source File: metrics.py From tensor2tensor with Apache License 2.0
def two_class_log_likelihood(predictions, labels, weights_fn=None):
  """Log-likelihood for two class classification with 0/1 labels.

  Args:
    predictions: A float valued tensor of shape [`batch_size`]. Each
      component should be between 0 and 1.
    labels: An int valued tensor of shape [`batch_size`]. Each component
      should either be 0 or 1.
    weights_fn: unused.

  Returns:
    A pair, with the average log likelihood in the first component.
  """
  del weights_fn
  float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
  batch_probs = tf.stack([1. - float_predictions, float_predictions], axis=-1)
  int_labels = tf.cast(tf.squeeze(labels), dtype=tf.int32)
  onehot_targets = tf.cast(tf.one_hot(int_labels, 2), dtype=tf.float64)
  chosen_probs = tf.einsum(
      "ij,ij->i", batch_probs, onehot_targets, name="chosen_probs")
  avg_log_likelihood = tf.reduce_mean(tf.log(chosen_probs))
  return avg_log_likelihood, tf.constant(1.0)
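The einsum simply picks out the probability assigned to each true label; the same computation in plain NumPy, for illustration:

import numpy as np

predictions = np.array([0.9, 0.1])  # P(label == 1) per example
labels = np.array([1, 0])
chosen = np.where(labels == 1, predictions, 1 - predictions)
print(np.mean(np.log(chosen)))  # log(0.9) ~= -0.105; both examples chose 0.9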
Example #12
Source File: metrics.py From tensor2tensor with Apache License 2.0
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example #13
Source File: metrics.py From tensor2tensor with Apache License 2.0
def image_summary(predictions, targets, hparams):
  """Reshapes predictions and passes it to tensorboard.

  Args:
    predictions: The predicted image (logits).
    targets: The ground truth.
    hparams: model hparams.

  Returns:
    summary_proto: containing the summary images.
    weights: A Tensor of zeros of the same shape as predictions.
  """
  del hparams
  results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
  gold = tf.cast(targets, tf.uint8)
  summary1 = tf.summary.image("prediction", results, max_outputs=2)
  summary2 = tf.summary.image("data", gold, max_outputs=2)
  summary = tf.summary.merge([summary1, summary2])
  return summary, tf.zeros_like(predictions)
Example #14
Source File: mobilenet.py From benchmarks with Apache License 2.0
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
  """Applies avg pool to produce 1x1 output.

  NOTE: This function is functionally equivalent to reduce_mean, but it has
  baked-in average pool which has better support across hardware.

  Args:
    input_tensor: input tensor
    pool_op: pooling op (avg pool is default)

  Returns:
    a tensor batch_size x 1 x 1 x depth.
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size = tf.convert_to_tensor(
        [1, tf.shape(input_tensor)[1], tf.shape(input_tensor)[2], 1])
  else:
    kernel_size = [1, shape[1], shape[2], 1]
  output = pool_op(
      input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
  # Recover output shape, for unknown shape.
  output.set_shape([None, 1, 1, None])
  return output
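Because the kernel spans the full spatial extent, the result matches reduce_mean. A quick check in graph mode, assuming the function above is in scope:

import numpy as np

x = tf.constant(np.random.rand(2, 4, 4, 8), dtype=tf.float32)
pooled = global_pool(x)  # shape (2, 1, 1, 8)
mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
with tf.Session() as sess:
  a, b = sess.run([pooled, mean])
  print(np.allclose(a, b, atol=1e-6))  # True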
Example #15
Source File: t2t_model.py From tensor2tensor with Apache License 2.0
def _greedy_infer(self, features, decode_length, use_tpu=False):
  """A greedy inference method.

  Models should ideally implement a more efficient version of this function.

  Args:
    features: a map of string to `Tensor`
    decode_length: an integer. How many additional timesteps to decode.
    use_tpu: A bool, whether to build the inference graph for TPU.

  Returns:
    A dict of decoding results {
        "outputs": integer `Tensor` of decoded ids of shape
            [batch_size, <= decode_length] if beam_size == 1 or
            [batch_size, top_beams, <= decode_length]
        "scores": None
        "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
        "losses": a dictionary: {loss-name (string): floating point `Scalar`}
    }
  """
  if use_tpu:
    return self._slow_greedy_infer_tpu(features, decode_length)
  return self._slow_greedy_infer(features, decode_length)
Example #16
Source File: t2t_model.py From tensor2tensor with Apache License 2.0
def body(self, features):
  """Computes the targets' pre-logit activations given transformed inputs.

  Most `T2TModel` subclasses will override this method.

  Args:
    features: dict of str to Tensor, where each Tensor has shape [batch_size,
      ..., hidden_size]. It typically contains keys `inputs` and `targets`.

  Returns:
    output: Tensor of pre-logit activations with shape [batch_size, ...,
            hidden_size].
    losses: Either single loss as a scalar, a list, a Tensor (to be averaged),
            or a dictionary of losses. If losses is a dictionary with the key
            "training", losses["training"] is considered the final training
            loss and output is considered logits; self.top and self.loss will
            be skipped.
  """
  raise NotImplementedError("Abstract Method")
Example #17
Source File: expert_utils.py From tensor2tensor with Apache License 2.0
def combine(self, x):
  """Return the output from the experts.

  When one example goes to multiple experts, the outputs are summed.

  Args:
    x: a Tensor with shape [batch, num_experts, expert_capacity, depth]

  Returns:
    a `Tensor` with shape `[batch, length, depth]`
  """
  depth = tf.shape(x)[-1]
  x *= tf.expand_dims(self._nonpadding, -1)
  ret = tf.unsorted_segment_sum(
      x, self._flat_indices, num_segments=self._batch * self._length)
  ret = tf.reshape(ret, [self._batch, self._length, depth])
  return ret
Example #18
Source File: expert_utils.py From tensor2tensor with Apache License 2.0
def combine(self, expert_out, multiply_by_gates=True):
  """Sum together the expert output, weighted by the gates.

  The slice corresponding to a particular batch element `b` is computed
  as the sum over all experts `i` of the expert output, weighted by the
  corresponding gate values. If `multiply_by_gates` is set to False, the
  gate values are ignored.

  Args:
    expert_out: a list of `num_experts` `Tensor`s, each with shape
      `[expert_batch_size_i, <extra_output_dims>]`.
    multiply_by_gates: a boolean

  Returns:
    a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
  """
  # see comments on convert_gradient_to_tensor
  stitched = common_layers.convert_gradient_to_tensor(
      tf.concat(expert_out, 0))
  if multiply_by_gates:
    stitched *= tf.expand_dims(self._nonzero_gates, 1)
  combined = tf.unsorted_segment_sum(stitched, self._batch_index,
                                     tf.shape(self._gates)[0])
  return combined
Example #19
Source File: expert_utils.py From tensor2tensor with Apache License 2.0
def __init__(self, num_experts, gates):
  """Create a SparseDispatcher.

  Args:
    num_experts: an integer.
    gates: a `Tensor` of shape `[batch_size, num_experts]`.

  Returns:
    a SparseDispatcher
  """
  self._gates = gates
  self._num_experts = num_experts

  where = tf.to_int32(tf.where(tf.transpose(gates) > 0))
  self._expert_index, self._batch_index = tf.unstack(where, num=2, axis=1)
  self._part_sizes_tensor = tf.reduce_sum(tf.to_int32(gates > 0), [0])
  self._nonzero_gates = tf.gather(
      tf.reshape(self._gates, [-1]),
      self._batch_index * num_experts + self._expert_index)
Example #20
Source File: expert_utils.py From tensor2tensor with Apache License 2.0
def restore(self, x):
  """Add padding back to the given tensor.

  Args:
    x (tf.Tensor): of shape [dim_compressed,...]

  Returns:
    a tensor of shape [dim_origin,...] with dim_compressed <= dim_origin. The
    dim is restored from the original reference tensor.
  """
  with tf.name_scope("pad_reduce/restore"):
    x = tf.scatter_nd(
        indices=self.nonpad_ids,
        updates=x,
        shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
    )
  return x
Example #21
Source File: expert_utils.py From tensor2tensor with Apache License 2.0
def remove(self, x):
  """Remove padding from the given tensor.

  Args:
    x (tf.Tensor): of shape [dim_origin,...]

  Returns:
    a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
  """
  with tf.name_scope("pad_reduce/remove"):
    x_shape = x.get_shape().as_list()
    x = tf.gather_nd(
        x,
        indices=self.nonpad_ids,
    )
    if not tf.executing_eagerly():
      # This is a hack but for some reason, gather_nd returns a tensor of
      # undefined shape, so the shape is set up manually.
      x.set_shape([None] + x_shape[1:])
  return x
Example #22
Source File: expert_utils.py From tensor2tensor with Apache License 2.0
def _normal_distribution_cdf(x, stddev):
  """Evaluates the CDF of the normal distribution.

  Normal distribution with mean 0 and standard deviation stddev,
  evaluated at x. Input and output `Tensor`s have matching shapes.

  Args:
    x: a `Tensor`
    stddev: a `Tensor` with the same shape as `x`.

  Returns:
    a `Tensor` with the same shape as `x`.
  """
  return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20)))
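In math form, with the tiny constant guarding against a zero stddev, the function computes

\Phi_{\sigma}(x) = \frac{1}{2}\left(1 + \operatorname{erf}\left(\frac{x}{\sqrt{2}\,\sigma + 10^{-20}}\right)\right)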
Example #23
Source File: metrics.py From tensor2tensor with Apache License 2.0
def sigmoid_precision_one_hot(logits, labels, weights_fn=None):
  """Calculate precision for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)

  Returns:
    precision (scalar), weights
  """
  with tf.variable_scope("sigmoid_precision_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, precision = tf.metrics.precision(labels=labels, predictions=predictions)
    return precision, tf.constant(1.0)
Example #24
Source File: metrics.py From tensor2tensor with Apache License 2.0
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example #25
Source File: nasnet_utils.py From benchmarks with Apache License 2.0
def _operation_to_pooling_info(operation):
  """Parses the pooling operation string to return its type and shape."""
  pooling_type = _operation_to_pooling_type(operation)
  pooling_shape = _operation_to_pooling_shape(operation)
  return pooling_type, pooling_shape
Example #26
Source File: expert_utils.py From tensor2tensor with Apache License 2.0
def flatten_all_but_last(a):
  """Flatten all dimensions of a except the last."""
  ret = tf.reshape(a, [-1, tf.shape(a)[-1]])
  if not tf.executing_eagerly():
    ret.set_shape([None] + a.get_shape().as_list()[-1:])
  return ret
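For example, in graph mode:

a = tf.zeros([2, 3, 5])
b = flatten_all_but_last(a)
print(b.shape)  # (?, 5) statically; evaluates to a (6, 5) tensor at run time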
Example #27
Source File: nasnet_utils.py From benchmarks with Apache License 2.0
def _operation_to_info(operation):
  """Takes in operation name and returns meta information.

  An example would be 'separable_3x3_4' -> (4, 3).

  Args:
    operation: String that corresponds to convolution operation.

  Returns:
    Tuple of (num layers, filter shape).
  """
  num_layers = _operation_to_num_layers(operation)
  filter_shape = _operation_to_filter_shape(operation)
  return num_layers, filter_shape
Example #28
Source File: expert_utils.py From tensor2tensor with Apache License 2.0
def dispatch(self, inp):
  """Create one input Tensor for each expert.

  The `Tensor` for an expert `i` contains the slices of `inp` corresponding
  to the batch elements `b` where `gates[b, i] > 0`.

  Args:
    inp: a `Tensor` of shape `[batch_size, <extra_input_dims>]`

  Returns:
    a list of `num_experts` `Tensor`s with shapes
      `[expert_batch_size_i, <extra_input_dims>]`.
  """
  inp = tf.gather(inp, self._batch_index)
  return tf.split(inp, self._part_sizes_tensor, 0, num=self._num_experts)
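A hypothetical dispatch using the constructor from Example #19; the gates and inputs below are made up for illustration:

gates = tf.constant([[0.7, 0.0],
                     [0.0, 1.0],
                     [0.5, 0.5]])  # example 2 goes to both experts
inp = tf.constant([[1.0], [2.0], [3.0]])
dispatcher = SparseDispatcher(2, gates)
expert_inputs = dispatcher.dispatch(inp)
# expert_inputs[0] holds rows 0 and 2; expert_inputs[1] holds rows 1 and 2.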
Example #29
Source File: nasnet_utils.py From benchmarks with Apache License 2.0
def _operation_to_pooling_shape(operation):
  """Takes in the operation string and returns the pooling kernel shape."""
  splitted_operation = operation.split('_')
  shape = splitted_operation[-1]
  assert 'x' in shape
  filter_height, filter_width = shape.split('x')
  assert filter_height == filter_width
  return int(filter_height)
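For instance:

print(_operation_to_pooling_shape('avg_pool_3x3'))  # 3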
Example #30
Source File: dropout.py From lamb with Apache License 2.0
def _ensure_keep_mask(self, x):
  if self._keep_mask is None or not self._share_mask:
    shape = tf.shape(x)
    # Calculate the stddev for the normal distribution that
    # matches the stddev of the bernoulli with p=keep_prob.
    stddev = tf.sqrt((1 - self._keep_prob) / self._keep_prob)
    self._keep_mask = tf.random_normal(shape, mean=1.0, stddev=stddev,
                                       dtype=x.dtype)
    self._keep_mask.set_shape(x.get_shape())
  return self._keep_mask