Python tensorflow.compat.v1.reduce_prod() Examples
The following are 13 code examples of tensorflow.compat.v1.reduce_prod(), collected from open-source projects. The project and source file for each example are noted above its code.
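Before the examples, a quick reminder of what the op itself does: tf.reduce_prod multiplies elements along the given axes, or over all elements when no axis is passed. A minimal sketch, assuming TensorFlow 2.x with the v1 compat API running eagerly:

import tensorflow.compat.v1 as tf

x = tf.constant([[1., 2.],
                 [3., 4.]])
tf.reduce_prod(x)          # 24.0 -- product over every element
tf.reduce_prod(x, axis=0)  # [3., 8.]  -- down each column
tf.reduce_prod(x, axis=1)  # [2., 12.] -- across each row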
Example #1
Source File: seq2seq.py From magenta with Apache License 2.0
def _call_sampler(sample_n_fn, sample_shape, name=None):
  """Reshapes vector of samples."""
  with tf.name_scope(name, "call_sampler", values=[sample_shape]):
    sample_shape = tf.convert_to_tensor(
        sample_shape, dtype=tf.int32, name="sample_shape")
    # Ensure sample_shape is a vector (vs just a scalar).
    pad = tf.cast(tf.equal(tf.rank(sample_shape), 0), tf.int32)
    sample_shape = tf.reshape(
        sample_shape,
        tf.pad(tf.shape(sample_shape),
               paddings=[[pad, 0]], constant_values=1))
    samples = sample_n_fn(tf.reduce_prod(sample_shape))
    batch_event_shape = tf.shape(samples)[1:]
    final_shape = tf.concat([sample_shape, batch_event_shape], 0)
    return tf.reshape(samples, final_shape)
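Here reduce_prod collapses a possibly multi-dimensional sample_shape into a single draw count, so sampling happens in one flat batch before the final reshape. A standalone sketch of the same pattern, with tf.random_normal standing in for sample_n_fn and illustrative shapes:

import tensorflow.compat.v1 as tf

sample_shape = tf.constant([4, 2], dtype=tf.int32)
n = tf.reduce_prod(sample_shape)               # 8 draws in total
samples = tf.random_normal(tf.stack([n, 3]))   # one flat batch: [8, 3]
# Unflatten the batch back into the requested sample shape: [4, 2, 3].
final = tf.reshape(samples, tf.concat([sample_shape, [3]], 0))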
Example #2
Source File: modalities.py From tensor2tensor with Apache License 2.0
def video_pixel_noise_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for video."""
  input_noise = getattr(model_hparams, "video_modality_input_noise", 0.25)
  inputs = x
  if model_hparams.mode == tf.estimator.ModeKeys.TRAIN:
    background = tfp.stats.percentile(inputs, 50., axis=[0, 1, 2, 3])
    input_shape = common_layers.shape_list(inputs)
    input_size = tf.reduce_prod(input_shape[:-1])
    input_mask = tf.multinomial(
        tf.log([[input_noise, 1.-input_noise]]), input_size)
    input_mask = tf.reshape(tf.cast(input_mask, tf.int32),
                            input_shape[:-1]+[1])
    inputs = inputs * input_mask + background * (1 - input_mask)
  return video_bottom(inputs, model_hparams, vocab_size)
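The reduce_prod call turns the per-position shape (everything except the channel axis) into the number of keep/drop decisions to sample. A self-contained sketch of that masking idea without the tensor2tensor helpers; the noise rate and shapes are made up, and dropped pixels are simply zeroed here rather than blended toward the median background:

import tensorflow.compat.v1 as tf

inputs = tf.random_normal([2, 4, 4, 3])      # [batch, H, W, C]
shape = inputs.shape.as_list()
input_size = tf.reduce_prod(shape[:-1])      # 2*4*4 = 32 positions
logits = tf.log([[0.25, 0.75]])              # log P(drop), log P(keep)
mask = tf.multinomial(logits, input_size)    # [1, 32] of 0s and 1s
mask = tf.reshape(tf.cast(mask, tf.float32), shape[:-1] + [1])
noisy = inputs * mask                        # broadcast over channels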
Example #3
Source File: common_layers.py From tensor2tensor with Apache License 2.0
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.

  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
  reshaped such that the number of channels (last-dimension) is the same.

  Args:
    x: Tensor with the last dimension equal to the number of filters.

  Returns:
    x: Tensor with the same shape as x normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]

  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))

  # v = Wu / ||W u||
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)

  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))

  # u_new = vW / ||v W||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))

  # s = v*W*u
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))

  # set u equal to u_new in the next iteration.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op
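The reduce_prod call is what lets apply_spectral_norm accept kernels of any rank: every dimension except the last is folded into one, so for example a [3, 3, 64, 128] conv kernel becomes a [576, 128] matrix before the power iteration. A quick standalone check of that flattening step:

import tensorflow.compat.v1 as tf

w = tf.random_normal([3, 3, 64, 128])
shape = w.shape.as_list()
other = tf.reduce_prod(shape[:-1])        # 3*3*64 = 576
w_2d = tf.reshape(w, [other, shape[-1]])  # [576, 128]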
Example #4
Source File: common_layers.py From tensor2tensor with Apache License 2.0
def weight_targeting(w, k):
  """Weight-level magnitude pruning."""
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])

  transpose_w = tf.transpose(w)
  thres = contrib.framework().sort(tf.abs(transpose_w), axis=1)[:, k]
  mask = to_float(thres[None, :] >= tf.abs(w))

  return tf.reshape(mask, w_shape)
Example #5
Source File: common_layers.py From tensor2tensor with Apache License 2.0
def unit_targeting(w, k):
  """Unit-level magnitude pruning."""
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])

  norm = tf.norm(w, axis=0)
  thres = contrib.framework().sort(norm, axis=0)[k]
  mask = to_float(thres >= norm)[None, :]
  mask = tf.tile(mask, [size, 1])

  return tf.reshape(mask, w_shape)
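Both pruning helpers above share the same reduce_prod reshape and differ only in what they rank: weight_targeting thresholds individual weight magnitudes per output unit, while unit_targeting thresholds whole-column norms. A toy illustration of the two thresholding steps on an already-2-D matrix, with tf.sort standing in for the contrib sort and made-up values:

import tensorflow.compat.v1 as tf

w = tf.constant([[0.1, -2.0],
                 [1.5,  0.2],
                 [-0.3, 0.9]])     # [size, units]
k = 1
# Weight-level: k-th smallest magnitude per column as the threshold.
thres_w = tf.sort(tf.abs(tf.transpose(w)), axis=1)[:, k]
weight_mask = tf.cast(thres_w[None, :] >= tf.abs(w), tf.float32)
# Unit-level: k-th smallest column norm as a single threshold.
norm = tf.norm(w, axis=0)
thres_u = tf.sort(norm, axis=0)[k]
unit_mask = tf.cast(thres_u >= norm, tf.float32)[None, :]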
Example #6
Source File: fastlin.py From interval-bound-propagation with Apache License 2.0
def _conv1d_expression(expr, w, padding, stride):
  """Scale a linear expression by w (through a convolutional layer)."""
  b = tf.nn.conv1d(expr.b, w, padding=padding, stride=stride)
  shape = tf.concat([[tf.reduce_prod(tf.shape(expr.w)[:2])],
                     tf.shape(expr.w)[2:]], axis=0)
  w = tf.nn.conv1d(tf.reshape(expr.w, shape), w, padding=padding,
                   stride=stride)
  shape = tf.concat([tf.shape(expr.w)[:2], tf.shape(w)[1:]], axis=0)
  w = tf.reshape(w, shape)
  return LinearExpression(w=w, b=b, lower=expr.lower, upper=expr.upper)
Example #7
Source File: fastlin.py From interval-bound-propagation with Apache License 2.0
def _conv2d_expression(expr, w, padding, strides):
  """Scale a linear expression by w (through a convolutional layer)."""
  b = tf.nn.convolution(expr.b, w, padding=padding, strides=strides)
  shape = tf.concat([[tf.reduce_prod(tf.shape(expr.w)[:2])],
                     tf.shape(expr.w)[2:]], axis=0)
  w = tf.nn.convolution(tf.reshape(expr.w, shape), w, padding=padding,
                        strides=strides)
  shape = tf.concat([tf.shape(expr.w)[:2], tf.shape(w)[1:]], axis=0)
  w = tf.reshape(w, shape)
  return LinearExpression(w=w, b=b, lower=expr.lower, upper=expr.upper)
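In both expressions the weight tensor carries two leading batch-like axes, while the conv ops expect a single batch axis, so reduce_prod merges the first two dimensions before convolving and the result is unmerged afterwards. A minimal sketch of that merge/unmerge pattern on a plain tensor (illustrative shapes, no convolution applied):

import tensorflow.compat.v1 as tf

x = tf.random_normal([2, 5, 8, 3])    # two leading "batch" axes
merged = tf.concat([[tf.reduce_prod(tf.shape(x)[:2])],
                    tf.shape(x)[2:]], axis=0)
y = tf.reshape(x, merged)             # [10, 8, 3], conv-friendly
# ... a 1-D convolution over `y` would go here ...
restored = tf.reshape(
    y, tf.concat([tf.shape(x)[:2], tf.shape(y)[1:]], axis=0))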
Example #8
Source File: archs.py From compression with Apache License 2.0
def _get_moments(self, inputs):
  # Like tf.nn.moments but unbiased sample std. deviation.
  # Reduce over channels only.
  mean = tf.reduce_mean(inputs, [self.axis], keepdims=True, name="mean")
  variance = tf.reduce_sum(
      tf.squared_difference(inputs, tf.stop_gradient(mean)),
      [self.axis], keepdims=True, name="variance_sum")
  # Divide by N-1
  inputs_shape = tf.shape(inputs)
  counts = tf.reduce_prod([inputs_shape[ax] for ax in [self.axis]])
  variance /= (tf.cast(counts, tf.float32) - 1)
  return mean, variance
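Because self.axis is a single integer here, reduce_prod multiplies a one-element list, i.e. it just counts the elements along the channel axis; dividing by that count minus one gives the unbiased variance. A tiny numeric check of the idea, with an assumed axis and shape:

import tensorflow.compat.v1 as tf

inputs = tf.random_normal([2, 8, 8, 16])
axis = -1
mean = tf.reduce_mean(inputs, [axis], keepdims=True)
sq_sum = tf.reduce_sum(tf.squared_difference(inputs, mean),
                       [axis], keepdims=True)
counts = tf.reduce_prod([tf.shape(inputs)[ax] for ax in [axis]])  # 16
variance = sq_sum / (tf.cast(counts, tf.float32) - 1)             # N - 1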
Example #9
Source File: archs.py From compression with Apache License 2.0
def estimate_entropy(entropy_model, inputs, spatial_shape=None) -> EntropyInfo:
  """Compresses `inputs` with the given entropy model and estimates entropy.

  Arguments:
    entropy_model: An `EntropyModel` instance.
    inputs: The input tensor to be fed to the entropy model.
    spatial_shape: Shape of the input image (HxW). Must be provided for
      `valid == False`.

  Returns:
    The 'noisy' and quantized inputs, as well as differential and discrete
    entropy estimates, as an `EntropyInfo` named tuple.
  """
  # We are summing over the log likelihood tensor, so we need to explicitly
  # divide by the batch size.
  batch = tf.cast(tf.shape(inputs)[0], tf.float32)

  # Divide by this to flip sign and convert from nats to bits.
  quotient = tf.constant(-np.log(2), dtype=tf.float32)

  num_pixels = tf.cast(tf.reduce_prod(spatial_shape), tf.float32)

  # Compute noisy outputs and estimate differential entropy.
  noisy, likelihood = entropy_model(inputs, training=True)
  log_likelihood = tf.log(likelihood)
  nbits = tf.reduce_sum(log_likelihood) / (quotient * batch)
  nbpp = nbits / num_pixels

  # Compute quantized outputs and estimate discrete entropy.
  quantized, likelihood = entropy_model(inputs, training=False)
  log_likelihood = tf.log(likelihood)
  qbits = tf.reduce_sum(log_likelihood) / (quotient * batch)
  qbpp = qbits / num_pixels

  return EntropyInfo(noisy, quantized, nbits, nbpp, qbits, qbpp)
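reduce_prod converts the [H, W] spatial shape into a pixel count so the summed log-likelihood, once flipped to bits, can be reported as bits per pixel. The normalization in isolation, with made-up numbers:

import numpy as np
import tensorflow.compat.v1 as tf

spatial_shape = tf.constant([256, 384])
num_pixels = tf.cast(tf.reduce_prod(spatial_shape), tf.float32)  # 98304.0
total_log_likelihood = tf.constant(-68000.0)   # pretend sum over the tensor
bits = total_log_likelihood / tf.constant(-np.log(2), tf.float32)
bpp = bits / num_pixels                        # bits per pixel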
Example #10
Source File: shape_utils.py From models with Apache License 2.0
def flatten_dimensions(inputs, first, last):
  """Flattens `K-d` tensor along [first, last) dimensions.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [D0, D1, ..., D(first) * D(first+1) * ... * D(last-1), D(last), ..., D(K-1)].

  Example:
  `inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
  new_tensor = flatten_dimensions(inputs, first=1, last=3)
  new_tensor.shape -> [10, 100, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    first: first value for the range of dimensions to flatten.
    last: last value for the range of dimensions to flatten. Note that the
      last dimension itself is excluded.

  Returns:
    a tensor with shape
    [D0, D1, ..., D(first) * D(first + 1) * ... * D(last - 1), D(last), ...,
     D(K-1)].

  Raises:
    ValueError: if first and last arguments are incorrect.
  """
  if first >= inputs.shape.ndims or last > inputs.shape.ndims:
    raise ValueError('`first` and `last` must be less than inputs.shape.ndims. '
                     'found {} and {} respectively while ndims is {}'.format(
                         first, last, inputs.shape.ndims))
  shape = combined_static_and_dynamic_shape(inputs)
  flattened_dim_prod = tf.reduce_prod(shape[first:last], keepdims=True)
  new_shape = tf.concat([shape[:first], flattened_dim_prod, shape[last:]],
                        axis=0)
  return tf.reshape(inputs, new_shape)
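keepdims=True is the key detail: it keeps the product as a length-1 vector so it can be concatenated directly with the surrounding shape slices. A usage sketch matching the docstring's example, using tf.shape in place of the combined static/dynamic helper:

import tensorflow.compat.v1 as tf

inputs = tf.zeros([10, 5, 20, 20, 3])
shape = tf.shape(inputs)
first, last = 1, 3
flat = tf.reduce_prod(shape[first:last], keepdims=True)  # [100]
new_shape = tf.concat([shape[:first], flat, shape[last:]], axis=0)
result = tf.reshape(inputs, new_shape)                   # [10, 100, 20, 3]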
Example #11
Source File: shape_utils.py From models with Apache License 2.0
def expand_first_dimension(inputs, dims):
  """Expands `K-d` tensor along first dimension to be a `(K+n-1)-d` tensor.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].

  Example:
  `inputs` is a tensor with shape [50, 20, 20, 3].
  new_tensor = expand_first_dimension(inputs, [10, 5]).
  new_tensor.shape -> [10, 5, 20, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    dims: List with new dimensions to expand first axis into. The length of
      `dims` is typically 2 or larger.

  Returns:
    a tensor with shape [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
  """
  inputs_shape = combined_static_and_dynamic_shape(inputs)
  expanded_shape = tf.stack(dims + inputs_shape[1:])

  # Verify that it is possible to expand the first axis of inputs.
  assert_op = tf.assert_equal(
      inputs_shape[0], tf.reduce_prod(tf.stack(dims)),
      message=('First dimension of `inputs` cannot be expanded into provided '
               '`dims`'))

  with tf.control_dependencies([assert_op]):
    inputs_reshaped = tf.reshape(inputs, expanded_shape)

  return inputs_reshaped
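Here reduce_prod powers the safety check: reshaping [50, ...] into [10, 5, ...] is only valid when the product of the new leading dims equals the old first dim. An eager-mode sketch of the check using the docstring's shapes (graph mode would route the assert through control_dependencies, as above):

import tensorflow.compat.v1 as tf

inputs = tf.zeros([50, 20, 20, 3])
dims = [10, 5]
# Raises if prod(dims) != first dimension of `inputs`.
tf.assert_equal(tf.shape(inputs)[0], tf.reduce_prod(tf.stack(dims)))
expanded = tf.reshape(inputs, dims + [20, 20, 3])  # [10, 5, 20, 20, 3]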
Example #12
Source File: resource_function.py From morph-net with Apache License 2.0
def flop_coeff(op):
  """Computes the coefficient of number of flops associated with a convolution.

  The FLOPs cost of a convolution is given by C * output_depth * input_depth,
  where C = 2 * output_width * output_height * filter_size. The 2 is because
  we have one multiplication and one addition for each convolution weight and
  pixel. This function returns C.

  Supported operations names are listed in cost_calculator.FLOP_OPS.

  Args:
    op: A tf.Operation of supported types.

  Returns:
    A float, the coefficient that when multiplied by the input depth and by
    the output depth gives the number of flops needed to compute the
    convolution.

  Raises:
    ValueError: conv_op is not a supported tf.Operation.
  """
  if not is_flop_op(op):
    return 0.0
  if op.type == 'MatMul':
    # A MatMul is like a 1x1 conv with an output size of 1x1, so from the
    # factor below only the 2.0 remains.
    return 2.0

  # Looking at the output shape makes it easy to automatically take into
  # account strides and the type of padding.
  def kernel_num_elements(tensor):
    """Returns the number of elements of a kernel.

    Args:
      tensor: The weight tensor.

    Returns:
      Number of elements of the kernel (either float or tf.float).
    """
    num_elements = np.prod(tensor.shape.dims[1:-1]).value
    if num_elements:
      return num_elements
    return tf.to_float(tf.reduce_prod(tf.shape(tensor)[1:-1]))

  if op.type in ('Conv2D', 'DepthwiseConv2dNative', 'Conv3D'):
    num_elements = kernel_num_elements(op.outputs[0])
  elif op.type == 'Conv2DBackpropInput':
    # For a transposed convolution, the input and the output are swapped (as
    # far as shapes are concerned). In other words, for a given filter shape
    # and stride, if Conv2D maps from shapeX to shapeY, Conv2DBackpropInput
    # maps from shapeY to shapeX. Therefore wherever we use the output shape
    # for Conv2D, we use the input shape for Conv2DBackpropInput.
    num_elements = kernel_num_elements(cost_calculator.get_input_activation(op))
  else:
    # Can only happen if elements are added to FLOP_OPS and not taken care of.
    assert False, '%s in cost_calculator.FLOP_OPS but not handled' % op.type
  # Handle dynamic shaping while keeping old code path to not break
  # other clients.
  return 2.0 * num_elements * _get_conv_filter_size(op)
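kernel_num_elements shows a common static/dynamic split: take np.prod over the statically known dims, and fall back to tf.reduce_prod over tf.shape when any of them is unknown. A stripped-down version of that fallback, outside morph-net and with tf.cast in place of tf.to_float:

import numpy as np
import tensorflow.compat.v1 as tf

def num_spatial_elements(tensor):
  # Static path: product of the spatial dims when fully known.
  spatial = tensor.shape[1:-1]
  if spatial.is_fully_defined():
    return float(np.prod(spatial.as_list()))
  # Dynamic path: compute the product at run time instead.
  return tf.cast(tf.reduce_prod(tf.shape(tensor)[1:-1]), tf.float32)

num_spatial_elements(tf.zeros([1, 28, 28, 8]))  # 784.0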
Example #13
Source File: pixel_control_ops.py From trfl with Apache License 2.0
def pixel_control_rewards(observations, cell_size):
  """Calculates pixel control task rewards from observation sequence.

  The observations are first split in a grid of KxK cells. For each cell a
  distinct pseudo reward is computed as the average absolute change in pixel
  intensity for all pixels in the cell. The change in intensity is averaged
  across both pixels and channels (e.g. RGB).

  The `observations` provided to this function should be cropped suitably, to
  ensure that the observations' height and width are a multiple of
  `cell_size`. The values of the `observations` tensor should be rescaled to
  [0, 1]. In the UNREAL agent observations are cropped to 80x80, and each
  cell is 4x4 in size.

  See "Reinforcement Learning with Unsupervised Auxiliary Tasks" by Jaderberg,
  Mnih, Czarnecki et al. (https://arxiv.org/abs/1611.05397).

  Args:
    observations: A tensor of shape `[T+1,B,H,W,C...]`, where
      * `T` is the sequence length, `B` is the batch size.
      * `H` is height, `W` is width.
      * `C...` is at least one channel dimension (e.g., colour, stack).
      * `T` and `B` can be statically unknown.
    cell_size: The size of each cell.

  Returns:
    A tensor of pixel control rewards calculated from the observation. The
    shape is `[T,B,H',W']`, where `H'` and `W'` are determined by the
    `cell_size`. If evenly-divisible, `H' = H/cell_size`, and similar for
    `W`.
  """
  # Calculate the absolute differences across the sequence.
  abs_diff = tf.abs(observations[1:] - observations[:-1])
  # Average over cells. `abs_diff` has shape [T,B,H,W,C...], e.g.,
  # [T,B,H,W,C] if we have a colour channel. We want to use the TF avg_pool3d
  # op, but it expects 5D inputs so we collapse all channel dimensions.
  # Merge remaining dimensions after W: [T,B,H,W,C'].
  full_shape = tf.shape(abs_diff)
  preserved_shape = full_shape[:4]
  trailing_shape = (tf.reduce_prod(full_shape[4:]),)
  shape = tf.concat([preserved_shape, trailing_shape], 0)
  abs_diff = tf.reshape(abs_diff, shape)
  # Apply the averaging using average pooling and reducing over channel.
  avg_abs_diff = tf.nn.avg_pool3d(
      abs_diff,
      ksize=[1, 1, cell_size, cell_size, 1],
      strides=[1, 1, cell_size, cell_size, 1],
      padding="VALID")  # [T,B,H',W',C'].
  pseudo_rewards = tf.reduce_mean(
      avg_abs_diff, axis=[4], name="pseudo_rewards")  # [T,B,H',W'].
  sequence_batch = abs_diff.get_shape()[:2]
  new_height_width = avg_abs_diff.get_shape()[2:4]
  pseudo_rewards.set_shape(sequence_batch.concatenate(new_height_width))
  return pseudo_rewards
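The (tf.reduce_prod(full_shape[4:]),) trick collapses however many trailing channel dimensions exist into a single one, which is what lets avg_pool3d, a strictly 5-D op, handle [T,B,H,W,C...] inputs. The collapse on its own, with illustrative shapes:

import tensorflow.compat.v1 as tf

x = tf.zeros([7, 2, 80, 80, 3, 4])   # [T, B, H, W, C1, C2]
full_shape = tf.shape(x)
merged = tf.concat([full_shape[:4],
                    [tf.reduce_prod(full_shape[4:])]], 0)
y = tf.reshape(x, merged)            # [7, 2, 80, 80, 12]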