Python tensorflow.python.ops.math_ops.add_n() Examples
The following are 30 code examples of tensorflow.python.ops.math_ops.add_n(), drawn from open-source projects. The originating project and source file are listed above each example. You may also want to check out all available functions/classes of the module tensorflow.python.ops.math_ops.
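As a quick orientation before the examples: math_ops.add_n sums a list of tensors of identical shape and dtype elementwise, in a single fused op. A minimal sketch (in user code the public tf.add_n is the same operation):

import tensorflow as tf
from tensorflow.python.ops import math_ops

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
c = tf.constant([5.0, 6.0])

# Elementwise sum of all inputs at once: [9.0, 12.0]
total = math_ops.add_n([a, b, c])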
Example #1
Source File: gmm_ops.py From auto-alt-text-lambda-api with MIT License
def _init_clusters_random(data, num_clusters, random_seed):
  """Does random initialization of clusters.

  Args:
    data: a list of Tensors with a matrix of data, each row is an example.
    num_clusters: an integer with the number of clusters.
    random_seed: Seed for PRNG used to initialize seeds.

  Returns:
    A Tensor with num_clusters random rows of data.
  """
  assert isinstance(data, list)
  num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
  with ops.control_dependencies(
      [check_ops.assert_less_equal(num_clusters, num_data)]):
    indices = random_ops.random_uniform(
        [num_clusters],
        minval=0,
        maxval=math_ops.cast(num_data, dtypes.int64),
        seed=random_seed,
        dtype=dtypes.int64)
    indices = math_ops.cast(indices, dtypes.int32) % num_data
    clusters_init = embedding_lookup(data, indices, partition_strategy='div')
  return clusters_init
Example #2
Source File: tensor_util.py From deep_image_model with Apache License 2.0
def reduce_sum_n(tensors, name=None):
  """Reduce tensors to a scalar sum.

  This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
  adds them via `tf.add_n`.

  Args:
    tensors: List of tensors, all of the same numeric type.
    name: Tensor name, and scope for all other ops.

  Returns:
    Total loss tensor, or None if no losses have been configured.

  Raises:
    ValueError: if `losses` is missing or empty.
  """
  if not tensors:
    raise ValueError('No tensors provided.')
  tensors = [math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
  if len(tensors) == 1:
    return tensors[0]
  with ops.name_scope(name, 'reduce_sum_n', tensors) as scope:
    return math_ops.add_n(tensors, name=scope)
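The pattern above is easy to try with the public API. A minimal sketch that mirrors reduce_sum_n for tensors of different shapes (names are illustrative):

import tensorflow as tf

tensors = [tf.ones([2, 3]), tf.ones([5])]  # different shapes are fine
# Each tensor collapses to a scalar first, so add_n sees matching shapes.
total = tf.add_n([tf.reduce_sum(t) for t in tensors])  # 6.0 + 5.0 = 11.0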
Example #3
Source File: tensor_util.py From lambda-packs with MIT License
def reduce_sum_n(tensors, name=None):
  """Reduce tensors to a scalar sum.

  This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
  adds them via `tf.add_n`.

  Args:
    tensors: List of tensors, all of the same numeric type.
    name: Tensor name, and scope for all other ops.

  Returns:
    Total loss tensor, or None if no losses have been configured.

  Raises:
    ValueError: if `losses` is missing or empty.
  """
  if not tensors:
    raise ValueError('No tensors provided.')
  with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
    tensors = [
        math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
    if len(tensors) == 1:
      return tensors[0]
    return math_ops.add_n(tensors, name=name_scope)
Example #4
Source File: mixture.py From lambda-packs with MIT License
def _mean(self):
  with ops.control_dependencies(self._assertions):
    distribution_means = [d.mean() for d in self.components]
    cat_probs = self._cat_probs(log_probs=False)
    # This was checked to not be None at construction time.
    static_event_rank = self.event_shape.ndims

    # Expand the rank of x up to static_event_rank times so that
    # broadcasting works correctly.
    def expand(x):
      expanded_x = x
      for _ in range(static_event_rank):
        expanded_x = array_ops.expand_dims(expanded_x, -1)
      return expanded_x

    cat_probs = [expand(c_p) for c_p in cat_probs]
    partial_means = [
        c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
    ]
    # These should all be the same shape by virtue of matching
    # batch_shape and event_shape.
    return math_ops.add_n(partial_means)
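The returned value is the standard mixture mean, the sum over components of p_k times mean_k. A minimal sketch of that arithmetic with hypothetical numbers:

import tensorflow as tf
from tensorflow.python.ops import math_ops

cat_probs = [tf.constant(0.3), tf.constant(0.7)]            # mixture weights
means = [tf.constant([0.0, 0.0]), tf.constant([2.0, 2.0])]  # component means
# 0.3 * [0, 0] + 0.7 * [2, 2] = [1.4, 1.4]
mixture_mean = math_ops.add_n([p * m for p, m in zip(cat_probs, means)])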
Example #5
Source File: loss_ops.py From lambda-packs with MIT License
def get_total_loss(add_regularization_losses=True, name="total_loss"):
  """Returns a tensor whose value represents the total loss.

  Notice that the function adds the given losses to the regularization losses.

  Args:
    add_regularization_losses: A boolean indicating whether or not to use the
      regularization losses in the sum.
    name: The name of the returned tensor.

  Returns:
    A `Tensor` whose value represents the total loss.

  Raises:
    ValueError: if `losses` is not iterable.
  """
  losses = get_losses()
  if add_regularization_losses:
    losses += get_regularization_losses()
  return math_ops.add_n(losses, name=name)
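Under the hood this is just add_n over the loss collections. A minimal TF 1.x-style sketch of the same idea, using the standard GraphKeys collection names:

import tensorflow as tf

# Assumes at least one loss has been registered in the collections.
losses = tf.get_collection(tf.GraphKeys.LOSSES)
losses = losses + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n(losses, name="total_loss")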
Example #6
Source File: gradients_impl.py From auto-alt-text-lambda-api with MIT License
def _MultiDeviceAddN(tensor_list):
  """Adds tensors from potentially multiple devices."""
  # Basic function structure comes from control_flow_ops.group().
  # Sort tensors according to their devices.
  tensors_on_device = collections.defaultdict(lambda: [])
  for tensor in tensor_list:
    tensors_on_device[tensor.device].append(tensor)

  # For each device, add the tensors on that device first.
  # Then gather the partial sums from multiple devices.
  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
  # E.g., aggregate per GPU, then per task, and so on.
  summands = []

  def DeviceKey(dev):
    return "" if dev is None else dev

  for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
    tensors = tensors_on_device[dev]
    with ops.colocate_with(tensors[0].op, ignore_existing=True):
      summands.append(math_ops.add_n(tensors))

  return math_ops.add_n(summands)
Example #7
Source File: loss_ops.py From tf-slim with Apache License 2.0
def get_total_loss(add_regularization_losses=True, name="total_loss"):
  """Returns a tensor whose value represents the total loss.

  Notice that the function adds the given losses to the regularization losses.

  Args:
    add_regularization_losses: A boolean indicating whether or not to use the
      regularization losses in the sum.
    name: The name of the returned tensor.

  Returns:
    A `Tensor` whose value represents the total loss.

  Raises:
    ValueError: if `losses` is not iterable.
  """
  losses = get_losses()
  if add_regularization_losses:
    losses += get_regularization_losses()
  return math_ops.add_n(losses, name=name)
Example #8
Source File: util.py From auto-alt-text-lambda-api with MIT License
def get_total_loss(add_regularization_losses=True, name="total_loss"):
  """Returns a tensor whose value represents the total loss.

  Notice that the function adds the given losses to the regularization losses.

  Args:
    add_regularization_losses: A boolean indicating whether or not to use the
      regularization losses in the sum.
    name: The name of the returned tensor.

  Returns:
    A `Tensor` whose value represents the total loss.

  Raises:
    ValueError: if `losses` is not iterable.
  """
  losses = get_losses()
  if add_regularization_losses:
    losses += get_regularization_losses()
  return math_ops.add_n(losses, name=name)
Example #9
Source File: gradients_impl.py From deep_image_model with Apache License 2.0
def _MultiDeviceAddN(tensor_list):
  """Adds tensors from potentially multiple devices."""
  # Basic function structure comes from control_flow_ops.group().
  # Sort tensors according to their devices.
  tensors_on_device = collections.defaultdict(lambda: [])
  for tensor in tensor_list:
    tensors_on_device[tensor.device].append(tensor)

  # For each device, add the tensors on that device first.
  # Then gather the partial sums from multiple devices.
  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
  # E.g., aggregate per GPU, then per task, and so on.
  summands = []

  def DeviceKey(dev):
    return "" if dev is None else dev

  for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
    tensors = tensors_on_device[dev]
    with ops.colocate_with(tensors[0].op, ignore_existing=True):
      summands.append(math_ops.add_n(tensors))

  return math_ops.add_n(summands)
Example #10
Source File: loss_ops.py From auto-alt-text-lambda-api with MIT License
def get_total_loss(add_regularization_losses=True, name="total_loss"):
  """Returns a tensor whose value represents the total loss.

  Notice that the function adds the given losses to the regularization losses.

  Args:
    add_regularization_losses: A boolean indicating whether or not to use the
      regularization losses in the sum.
    name: The name of the returned tensor.

  Returns:
    A `Tensor` whose value represents the total loss.

  Raises:
    ValueError: if `losses` is not iterable.
  """
  losses = get_losses()
  if add_regularization_losses:
    losses += get_regularization_losses()
  return math_ops.add_n(losses, name=name)
Example #11
Source File: factorization_ops.py From lambda-packs with MIT License
def _prepare_gramian(self, factors, gramian):
  """Helper function to create ops to prepare/calculate gramian.

  Args:
    factors: Variable or list of Variable representing (sharded) factors.
      Used to compute the updated corresponding gramian value.
    gramian: Variable storing the gramian calculated from the factors.

  Returns:
    An op that updates the gramian with the calculated value from the
    factors.
  """
  partial_gramians = []
  for f in factors:
    with ops.colocate_with(f):
      partial_gramians.append(math_ops.matmul(f, f, transpose_a=True))

  with ops.colocate_with(gramian):
    prep_gramian = state_ops.assign(gramian,
                                    math_ops.add_n(partial_gramians)).op

  return prep_gramian
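For row-sharded factors F_1, ..., F_k, the Gram matrix of the stacked factor is the sum of the per-shard Gram matrices F_i^T F_i, which is exactly what the add_n above computes. A minimal sketch with dummy shards:

import tensorflow as tf
from tensorflow.python.ops import math_ops

f1 = tf.ones([100, 8])  # first shard of rows
f2 = tf.ones([50, 8])   # second shard of rows
# Equivalent to the Gram matrix of the concatenated [150, 8] factor: shape [8, 8]
gramian = math_ops.add_n(
    [math_ops.matmul(f, f, transpose_a=True) for f in (f1, f2)])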
Example #12
Source File: clustering_ops.py From auto-alt-text-lambda-api with MIT License
def _init_clusters_random(self):
  """Does random initialization of clusters.

  Returns:
    Tensor of randomly initialized clusters.
  """
  num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in self._inputs])
  # Note that for mini-batch k-means, we should ensure that the batch size of
  # data used during initialization is sufficiently large to avoid duplicated
  # clusters.
  with ops.control_dependencies(
      [check_ops.assert_less_equal(self._num_clusters, num_data)]):
    indices = random_ops.random_uniform(
        array_ops.reshape(self._num_clusters, [-1]),
        minval=0,
        maxval=math_ops.cast(num_data, dtypes.int64),
        seed=self._random_seed,
        dtype=dtypes.int64)
    clusters_init = embedding_lookup(
        self._inputs, indices, partition_strategy='div')
    return clusters_init
Example #13
Source File: gmm_ops.py From lambda-packs with MIT License
def _init_clusters_random(data, num_clusters, random_seed):
  """Does random initialization of clusters.

  Args:
    data: a list of Tensors with a matrix of data, each row is an example.
    num_clusters: an integer with the number of clusters.
    random_seed: Seed for PRNG used to initialize seeds.

  Returns:
    A Tensor with num_clusters random rows of data.
  """
  assert isinstance(data, list)
  num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
  with ops.control_dependencies(
      [check_ops.assert_less_equal(num_clusters, num_data)]):
    indices = random_ops.random_uniform(
        [num_clusters],
        minval=0,
        maxval=math_ops.cast(num_data, dtypes.int64),
        seed=random_seed,
        dtype=dtypes.int64)
    indices %= math_ops.cast(num_data, dtypes.int64)
    clusters_init = embedding_lookup(data, indices, partition_strategy='div')
  return clusters_init
Example #14
Source File: factorization_ops.py From auto-alt-text-lambda-api with MIT License
def _prepare_gramian(self, factors, gramian):
  """Helper function to create ops to prepare/calculate gramian.

  Args:
    factors: Variable or list of Variable representing (sharded) factors.
      Used to compute the updated corresponding gramian value.
    gramian: Variable storing the gramian calculated from the factors.

  Returns:
    An op that updates the gramian with the calculated value from the
    factors.
  """
  partial_gramians = []
  for f in factors:
    with ops.colocate_with(f):
      partial_gramians.append(math_ops.matmul(f, f, transpose_a=True))

  with ops.colocate_with(gramian):
    prep_gramian = state_ops.assign(gramian,
                                    math_ops.add_n(partial_gramians)).op

  return prep_gramian
Example #15
Source File: util.py From lambda-packs with MIT License
def get_total_loss(add_regularization_losses=True, name="total_loss"):
  """Returns a tensor whose value represents the total loss.

  In particular, this adds any losses you have added with `tf.add_loss()`
  to any regularization losses that have been added by regularization
  parameters on layers constructors e.g. `tf.layers`. Be very sure to use
  this if you are constructing a loss_op manually. Otherwise regularization
  arguments on `tf.layers` methods will not function.

  Args:
    add_regularization_losses: A boolean indicating whether or not to use the
      regularization losses in the sum.
    name: The name of the returned tensor.

  Returns:
    A `Tensor` whose value represents the total loss.

  Raises:
    ValueError: if `losses` is not iterable.
  """
  losses = get_losses()
  if add_regularization_losses:
    losses += get_regularization_losses()
  return math_ops.add_n(losses, name=name)
Example #16
Source File: regularizers.py From deep_image_model with Apache License 2.0
def sum_regularizer(regularizer_list, scope=None):
  """Returns a function that applies the sum of multiple regularizers.

  Args:
    regularizer_list: A list of regularizers to apply.
    scope: An optional scope name

  Returns:
    A function with signature `sum_reg(weights)` that applies the sum of all
    the input regularizers.
  """
  regularizer_list = [reg for reg in regularizer_list if reg is not None]
  if not regularizer_list:
    return None

  def sum_reg(weights):
    """Applies the sum of all the input regularizers."""
    with ops.name_scope(scope, 'sum_regularizer', [weights]) as name:
      regularizer_tensors = [reg(weights) for reg in regularizer_list]
      return math_ops.add_n(regularizer_tensors, name=name)

  return sum_reg
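A typical use is combining an L1 and an L2 penalty into a single regularizer callable. A minimal sketch, assuming the usual tf.contrib.layers factory functions from the same module family:

import tensorflow as tf
from tensorflow.contrib import layers

# One callable that returns l1_penalty + l2_penalty via add_n.
reg = layers.sum_regularizer(
    [layers.l1_regularizer(0.01), layers.l2_regularizer(0.001)])
weights = tf.ones([4, 4])
penalty = reg(weights)  # scalar penalty tensor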
Example #17
Source File: gradients_impl.py From lambda-packs with MIT License
def _MultiDeviceAddN(tensor_list):
  """Adds tensors from potentially multiple devices."""
  # Basic function structure comes from control_flow_ops.group().
  # Sort tensors according to their devices.
  tensors_on_device = collections.defaultdict(lambda: [])
  for tensor in tensor_list:
    tensors_on_device[tensor.device].append(tensor)

  # For each device, add the tensors on that device first.
  # Then gather the partial sums from multiple devices.
  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
  # E.g., aggregate per GPU, then per task, and so on.
  summands = []

  def DeviceKey(dev):
    return "" if dev is None else dev

  for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
    tensors = tensors_on_device[dev]
    with ops.colocate_with(tensors[0].op, ignore_existing=True):
      summands.append(math_ops.add_n(tensors))

  return math_ops.add_n(summands)
Example #18
Source File: mixture.py From auto-alt-text-lambda-api with MIT License
def _mean(self):
  with ops.control_dependencies(self._assertions):
    distribution_means = [d.mean() for d in self.components]
    cat_probs = self._cat_probs(log_probs=False)
    # This was checked to not be None at construction time.
    static_event_rank = self.get_event_shape().ndims

    # Expand the rank of x up to static_event_rank times so that
    # broadcasting works correctly.
    def expand(x):
      expanded_x = x
      for _ in range(static_event_rank):
        expanded_x = array_ops.expand_dims(expanded_x, -1)
      return expanded_x

    cat_probs = [expand(c_p) for c_p in cat_probs]
    partial_means = [
        c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
    ]
    # These should all be the same shape by virtue of matching
    # batch_shape and event_shape.
    return math_ops.add_n(partial_means)
Example #19
Source File: regularizers.py From auto-alt-text-lambda-api with MIT License
def sum_regularizer(regularizer_list, scope=None):
  """Returns a function that applies the sum of multiple regularizers.

  Args:
    regularizer_list: A list of regularizers to apply.
    scope: An optional scope name

  Returns:
    A function with signature `sum_reg(weights)` that applies the sum of all
    the input regularizers.
  """
  regularizer_list = [reg for reg in regularizer_list if reg is not None]
  if not regularizer_list:
    return None

  def sum_reg(weights):
    """Applies the sum of all the input regularizers."""
    with ops.name_scope(scope, 'sum_regularizer', [weights]) as name:
      regularizer_tensors = [reg(weights) for reg in regularizer_list]
      return math_ops.add_n(regularizer_tensors, name=name)

  return sum_reg
Example #20
Source File: sdca_ops.py From deep_image_model with Apache License 2.0
def approximate_duality_gap(self):
  """Add operations to compute the approximate duality gap.

  Returns:
    An Operation that computes the approximate duality gap over all
    examples.
  """
  with name_scope('sdca/approximate_duality_gap'):
    _, values_list = self._hashtable.export_sharded()
    shard_sums = []
    for values in values_list:
      with ops.device(values.device):
        # For large tables to_double() below allocates a large temporary
        # tensor that is freed once the sum operation completes. To reduce
        # peak memory usage in cases where we have multiple large tables on a
        # single device, we serialize these operations.
        # Note that we need double precision to get accurate results.
        with ops.control_dependencies(shard_sums):
          shard_sums.append(
              math_ops.reduce_sum(math_ops.to_double(values), 0))
    summed_values = math_ops.add_n(shard_sums)

    primal_loss = summed_values[1]
    dual_loss = summed_values[2]
    example_weights = summed_values[3]
    # Note: we return NaN if there are no weights or all weights are 0, e.g.
    # if no examples have been processed.
    return (primal_loss + dual_loss + self._l1_loss() +
            (2.0 * self._l2_loss(self._symmetric_l2_regularization()))
           ) / example_weights
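The control_dependencies(shard_sums) inside the loop is what serializes the per-shard reductions: each new reduce_sum waits on every previous one, trading parallelism for lower peak memory. A stripped-down sketch of the same trick with dummy shards:

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops

shards = [tf.ones([1000]), tf.ones([1000]), tf.ones([1000])]
shard_sums = []
for values in shards:
  # Each reduction depends on all earlier ones, so only one large
  # temporary is live at a time.
  with ops.control_dependencies(shard_sums):
    shard_sums.append(math_ops.reduce_sum(values))
total = math_ops.add_n(shard_sums)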
Example #21
Source File: sdca_ops.py From deep_image_model with Apache License 2.0
def _l2_loss(self, l2):
  """Computes the (un-normalized) l2 loss of the model."""
  with name_scope('sdca/l2_loss'):
    sums = []
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for weights in self._convert_n_to_tensor(self._variables[name]):
        with ops.device(weights.device):
          sums.append(
              math_ops.reduce_sum(
                  math_ops.square(math_ops.cast(weights, dtypes.float64))))
    sum = math_ops.add_n(sums)
    # SDCA L2 regularization cost is: l2 * sum(weights^2) / 2
    return l2 * sum / 2.0
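Numerically, with l2 = 0.1 and weights {1, 2, 3}, the summed squares are 1 + 4 + 9 = 14, so the penalty is 0.1 * 14 / 2 = 0.7. A minimal sketch of that arithmetic:

import tensorflow as tf
from tensorflow.python.ops import math_ops

weight_shards = [tf.constant([1.0, 2.0]), tf.constant([3.0])]
squared = math_ops.add_n(
    [math_ops.reduce_sum(math_ops.square(w)) for w in weight_shards])  # 14.0
l2_penalty = 0.1 * squared / 2.0  # 0.7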
Example #22
Source File: seq2seq_ops.py From deep_image_model with Apache License 2.0
def sequence_classifier(decoding, labels, sampling_decoding=None, name=None):
  """Returns predictions and loss for sequence of predictions.

  Args:
    decoding: List of Tensors with predictions.
    labels: List of Tensors with labels.
    sampling_decoding: Optional, list of Tensors with predictions to be used
      in sampling. E.g. they shouldn't have dependency on outputs.
      If not provided, decoding is used.
    name: Operation name.

  Returns:
    Predictions and losses tensors.
  """
  with ops.name_scope(name, "sequence_classifier", [decoding, labels]):
    predictions, xent_list = [], []
    for i, pred in enumerate(decoding):
      xent_list.append(nn.softmax_cross_entropy_with_logits(
          pred, labels[i],
          name="sequence_loss/xent_raw{0}".format(i)))
      if sampling_decoding:
        predictions.append(nn.softmax(sampling_decoding[i]))
      else:
        predictions.append(nn.softmax(pred))
    xent = math_ops.add_n(xent_list, name="sequence_loss/xent")
    loss = math_ops.reduce_sum(xent, name="sequence_loss")
    return array_ops_.pack(predictions, axis=1), loss
Example #23
Source File: head.py From auto-alt-text-lambda-api with MIT License
def _multi_head(heads, loss_weights=None):
  """Creates a MultiHead stemming from same logits/hidden layer.

  Args:
    heads: list of _Head objects.
    loss_weights: optional list of weights to be used to combine losses from
        each head. All losses are weighted equally if not provided.

  Returns:
    A _Head instance that combines multiple heads.

  Raises:
    ValueError: if heads and loss_weights have different size.
  """
  if loss_weights:
    if len(loss_weights) != len(heads):
      raise ValueError("heads and loss_weights must have same size")

  def _weighted_loss_combiner(losses):
    if loss_weights:
      if len(losses) != len(loss_weights):
        raise ValueError("losses and loss_weights must have same size")
      weighted_losses = []
      for loss, weight in zip(losses, loss_weights):
        weighted_losses.append(math_ops.multiply(loss, weight))
      return math_ops.add_n(weighted_losses)
    else:
      return math_ops.add_n(losses)

  return _MultiHead(heads, loss_combiner=_weighted_loss_combiner)


# TODO(zakaria): Make the classes public once we are ready for users to
# subclass them.
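The combiner reduces to a weighted sum of per-head losses. A minimal sketch with hypothetical weights:

import tensorflow as tf
from tensorflow.python.ops import math_ops

losses = [tf.constant(1.0), tf.constant(4.0)]  # one scalar loss per head
loss_weights = [0.75, 0.25]
# 0.75 * 1.0 + 0.25 * 4.0 = 1.75
combined = math_ops.add_n(
    [math_ops.multiply(l, w) for l, w in zip(losses, loss_weights)])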
Example #24
Source File: odes.py From deep_image_model with Apache License 2.0
def _dot_product(xs, ys, name=None):
  """Calculate the vector inner product between two lists of Tensors."""
  with ops.name_scope(name, 'dot_product', [xs, ys]) as scope:
    return math_ops.add_n([x * y for x, y in zip(xs, ys)], name=scope)
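Here the "vectors" are Python lists whose entries are Tensors; the elementwise products are summed with add_n. A minimal sketch with scalar entries:

import tensorflow as tf
from tensorflow.python.ops import math_ops

xs = [tf.constant(1.0), tf.constant(2.0)]
ys = [tf.constant(3.0), tf.constant(4.0)]
# 1*3 + 2*4 = 11.0
dot = math_ops.add_n([x * y for x, y in zip(xs, ys)])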
Example #25
Source File: regularizers.py From deep_image_model with Apache License 2.0
def apply_regularization(regularizer, weights_list=None):
  """Returns the summed penalty by applying `regularizer` to the `weights_list`.

  Adding a regularization penalty over the layer weights and embedding weights
  can help prevent overfitting the training data. Regularization over layer
  biases is less common/useful, but assuming proper data preprocessing/mean
  subtraction, it usually shouldn't hurt much either.

  Args:
    regularizer: A function that takes a single `Tensor` argument and returns
      a scalar `Tensor` output.
    weights_list: List of weights `Tensors` or `Variables` to apply
      `regularizer` over. Defaults to the `GraphKeys.WEIGHTS` collection if
      `None`.

  Returns:
    A scalar representing the overall regularization penalty.

  Raises:
    ValueError: If `regularizer` does not return a scalar output, or if we find
      no weights.
  """
  if not weights_list:
    weights_list = ops.get_collection(ops.GraphKeys.WEIGHTS)
  if not weights_list:
    raise ValueError('No weights to regularize.')
  with ops.name_scope('get_regularization_penalty',
                      values=weights_list) as scope:
    penalties = [regularizer(w) for w in weights_list]
    penalties = [
        p if p is not None else constant_op.constant(0.0) for p in penalties
    ]
    for p in penalties:
      if p.get_shape().ndims != 0:
        raise ValueError('regularizer must return a scalar Tensor instead of '
                         'a Tensor with rank %d.' % p.get_shape().ndims)

    summed_penalty = math_ops.add_n(penalties, name=scope)
    ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, summed_penalty)
    return summed_penalty
Example #26
Source File: parallel_reader.py From tf-slim with Apache License 2.0
def num_records_produced(self, name=None):
  """Returns the number of records this reader has produced.

  Args:
    name: A name for the operation (optional).

  Returns:
    An int64 Tensor.
  """
  num_records = [r.num_records_produced() for r in self._readers]
  return math_ops.add_n(num_records, name=name)
Example #27
Source File: stochastic_graph.py From auto-alt-text-lambda-api with MIT License
def _add_n_or_sum(terms):
  # add_n works for Tensors of the same dtype and shape
  shape = terms[0].get_shape()
  dtype = terms[0].dtype

  if all(term.get_shape().is_fully_defined() and
         term.get_shape().is_compatible_with(shape) and
         term.dtype == dtype for term in terms):
    return math_ops.add_n(terms)
  else:
    return sum(terms)
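The fallback matters because add_n requires all inputs to have exactly the same shape and dtype, while Python's built-in sum chains binary adds, which broadcast. A minimal sketch of the distinction:

import tensorflow as tf

x = tf.ones([2, 3])
y = tf.ones([1, 3])
# tf.add_n([x, y]) would raise an error: AddN needs identical shapes.
z = sum([x, y])  # chained `+` broadcasts to shape [2, 3]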
Example #28
Source File: odes.py From auto-alt-text-lambda-api with MIT License
def _dot_product(xs, ys, name=None):
  """Calculate the vector inner product between two lists of Tensors."""
  with ops.name_scope(name, 'dot_product', [xs, ys]) as scope:
    return math_ops.add_n([x * y for x, y in zip(xs, ys)], name=scope)
Example #29
Source File: odes.py From auto-alt-text-lambda-api with MIT License
def _scaled_dot_product(scale, xs, ys, name=None):
  """Calculate a scaled, vector inner product between lists of Tensors."""
  with ops.name_scope(name, 'scaled_dot_product', [scale, xs, ys]) as scope:
    # Some of the parameters in our Butcher tableau include zeros. Using
    # _possibly_nonzero lets us avoid wasted computation.
    return math_ops.add_n([(scale * x) * y for x, y in zip(xs, ys)
                           if _possibly_nonzero(x) or _possibly_nonzero(y)],
                          name=scope)
Example #30
Source File: rev_block_lib.py From tensornets with MIT License
def _force_data_dependency(first_compute, then_compute):
  """Force all of `then_compute` to depend on all of `first_compute`.

  Uses a dummy data dependency, which is useful when running on TPUs because
  XLA ignores control dependencies. Only supports float arguments.

  Args:
    first_compute: `list<Tensor>`. These will be made to run before the
      `Tensor`s `then_compute`.
    then_compute: `list<Tensor>`. These will run after all the `Tensor`s in
      `first_compute`.

  Returns:
    `list<Tensor>`, same length as `then_compute`.

  Raises:
    ValueError: if ranks are unknown or types are not floating.
  """

  def _first_element(x):
    if x.get_shape().ndims is None:
      raise ValueError("Rank of Tensor %s must be known" % x)
    ndims = x.get_shape().ndims
    begin = framework_ops.convert_to_tensor([0] * ndims, dtype=dtypes.int32)
    size = framework_ops.convert_to_tensor([1] * ndims, dtype=dtypes.int32)
    return array_ops.reshape(array_ops.slice(x, begin, size), [])

  first_compute_sum = math_ops.add_n(
      [_first_element(x) for x in first_compute if x is not None])
  dtype = first_compute_sum.dtype
  if not dtype.is_floating:
    raise ValueError("_force_data_dependency only supports floating dtypes.")
  epsilon = np.finfo(dtype.as_numpy_dtype).tiny
  zero = array_ops.stop_gradient(epsilon * first_compute_sum)

  return [
      array_ops.identity(x) + zero if x is not None else None
      for x in then_compute
  ]
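A hypothetical usage sketch, assuming the private helper above is in scope: the returned tensors are numerically unchanged (the added term is a stop-gradient'ed value on the order of the smallest positive float), but they now carry a data edge back to first_compute that XLA must respect.

import tensorflow as tf

first = tf.ones([4, 4])  # must run first
then = tf.ones([3])      # must run after `first`
# then_dep equals `then` numerically, but XLA can no longer schedule it
# ahead of `first`, since the dependency is carried by data, not control.
(then_dep,) = _force_data_dependency([first], [then])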