Python tensorflow.python.ops.math_ops.reduce_mean() Examples

The following are 30 code examples of tensorflow.python.ops.math_ops.reduce_mean(), drawn from open-source projects. The source file, originating project, and license are noted above each example. You may also want to check out the other functions and classes available in the tensorflow.python.ops.math_ops module.
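Before the examples, a minimal sketch of what the op computes, using the public tf.reduce_mean alias for the same op (TF 2.x eager execution is assumed here):

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
print(tf.reduce_mean(x).numpy())          # 2.5, mean over all elements
print(tf.reduce_mean(x, axis=0).numpy())  # [2. 3.], column means
print(tf.reduce_mean(x, axis=1).numpy())  # [1.5 3.5], row means

With keepdims=True the reduced axes are retained with length 1, which several of the examples below rely on for broadcasting.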
Example #1
Source File: vgg_test.py    From tf-slim with Apache License 2.0
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_a(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_a(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
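Note the reduce_mean(logits, [1, 2]) step: averaging the [batch, height, width, classes] map over its two spatial axes is global average pooling, which collapses the fully convolutional evaluation output back to [batch, classes]. A standalone sketch of just that step, with shapes mirroring the test above (the random logits are stand-ins):

import tensorflow as tf

logits = tf.random.uniform((1, 2, 2, 1000))   # [batch, h, w, num_classes]
pooled = tf.reduce_mean(logits, axis=[1, 2])  # global average pool -> [1, 1000]
predictions = tf.argmax(pooled, axis=1)       # -> [1]

The same pattern appears in the AlexNet, OverFeat, and remaining VGG tests below.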
Example #2
Source File: hybrid_model.py    From deep_image_model with Apache License 2.0
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              self.training_inference_graph(data),
              array_ops.squeeze(math_ops.to_int32(labels))),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Example #3
Source File: alexnet_test.py    From auto-alt-text-lambda-api with MIT License
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.test_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = alexnet.alexnet_v2(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = alexnet.alexnet_v2(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 4, 7, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Example #4
Source File: overfeat_test.py    From auto-alt-text-lambda-api with MIT License
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 231, 231
    eval_height, eval_width = 281, 281
    num_classes = 1000
    with self.test_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = overfeat.overfeat(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = overfeat.overfeat(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Example #5
Source File: hybrid_model.py    From auto-alt-text-lambda-api with MIT License
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              labels=array_ops.squeeze(math_ops.to_int32(labels)),
              logits=self.training_inference_graph(data)),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Example #6
Source File: dnn.py    From deep_image_model with Apache License 2.0
def _rescale_eval_loss(loss, weights):
  """Rescales evaluation loss according to the given weights.

  The rescaling is needed because the training loss does not include the
  weights in its denominator, whereas the evaluation loss should be divided
  by the sum of the weights.

  The rescaling factor is:
    R = sum_{i} 1 / sum_{i} w_{i}

  Args:
    loss: the scalar weighted loss.
    weights: weight coefficients. Either a scalar, or a `Tensor` of shape
      [batch_size].

  Returns:
    The given loss multiplied by the rescaling factor.
  """
  rescaling_factor = math_ops.reduce_mean(weights)
  return math_ops.div(loss, rescaling_factor) 
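A small numeric check of this rescaling, with made-up losses and weights: since mean(w) = sum(w) / B, dividing the batch-size-normalized training loss by the mean weight recovers the weight-sum-normalized evaluation loss.

import tensorflow as tf

l = tf.constant([1.0, 2.0, 3.0])  # per-example losses
w = tf.constant([1.0, 0.5, 0.5])  # per-example weights
train_loss = tf.reduce_mean(w * l)                   # sum(w*l)/B ~ 1.1667
eval_loss = tf.reduce_sum(w * l) / tf.reduce_sum(w)  # sum(w*l)/sum(w) = 1.75
rescaled = train_loss / tf.reduce_mean(w)            # also 1.75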
Example #7
Source File: vgg_test.py    From auto-alt-text-lambda-api with MIT License
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_16(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_16(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Example #8
Source File: vgg_test.py    From tf-slim with Apache License 2.0
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_19(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_19(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
Example #9
Source File: vgg_test.py    From tf-slim with Apache License 2.0
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_16(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_16(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
Example #10
Source File: alexnet_test.py    From tf-slim with Apache License 2.0
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = alexnet.alexnet_v2(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = alexnet.alexnet_v2(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 4, 7, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
Example #11
Source File: hybrid_model.py    From lambda-packs with MIT License
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              labels=array_ops.squeeze(math_ops.to_int32(labels)),
              logits=self.training_inference_graph(data)),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Example #12
Source File: overfeat_test.py    From tf-slim with Apache License 2.0
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 231, 231
    eval_height, eval_width = 281, 281
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = overfeat.overfeat(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = overfeat.overfeat(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
Example #13
Source File: backend.py    From lambda-packs with MIT License
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the variance.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with the variance of elements of `x`.
  """
  axis = _normalize_axis(axis, ndim(x))
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  m = math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=True)
  devs_squared = math_ops.square(x - m)
  return math_ops.reduce_mean(
      devs_squared, reduction_indices=axis, keep_dims=keepdims) 
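This backend helper is the standard two-pass variance: compute the mean with the axis kept (so the subtraction broadcasts), square the deviations, then average again. The reduction_indices and keep_dims arguments are the TF 1.x spellings of axis and keepdims. A rough equivalent with public ops, assuming TF 2.x, where tf.math.reduce_variance computes the same population variance directly:

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
m = tf.reduce_mean(x, axis=0, keepdims=True)           # per-column means
var_manual = tf.reduce_mean(tf.square(x - m), axis=0)  # [1. 1.]
var_builtin = tf.math.reduce_variance(x, axis=0)       # [1. 1.], same result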
Example #14
Source File: gmm_ops.py    From lambda-packs with MIT License
def _covariance(x, diag):
  """Defines the covariance operation of a matrix.

  Args:
    x: a matrix Tensor. Dimension 0 should contain the number of examples.
    diag: if True, it computes the diagonal covariance.

  Returns:
    A Tensor representing the covariance of x. In the diagonal case, only
    the diagonal of the covariance matrix is returned.
  """
  num_points = math_ops.to_float(array_ops.shape(x)[0])
  x -= math_ops.reduce_mean(x, 0, keep_dims=True)
  if diag:
    cov = math_ops.reduce_sum(
        math_ops.square(x), 0, keep_dims=True) / (num_points - 1)
  else:
    cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
  return cov 
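Note the Bessel correction: dividing by (num_points - 1) matches NumPy's default normalization. A quick sanity check against np.cov with a made-up float32 matrix:

import numpy as np
import tensorflow as tf

x = np.random.rand(100, 3).astype(np.float32)
xc = x - x.mean(axis=0, keepdims=True)  # center each column
cov = tf.matmul(xc, xc, transpose_a=True) / (len(x) - 1)
np.testing.assert_allclose(cov.numpy(), np.cov(x, rowvar=False), rtol=1e-4)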
Example #15
Source File: gmm_ops.py    From auto-alt-text-lambda-api with MIT License
def _covariance(x, diag):
  """Defines the covariance operation of a matrix.

  Args:
    x: a matrix Tensor. Dimension 0 should contain the number of examples.
    diag: if True, it computes the diagonal covariance.

  Returns:
    A Tensor representing the covariance of x. In the diagonal case, only
    the diagonal of the covariance matrix is returned.
  """
  num_points = math_ops.to_float(array_ops.shape(x)[0])
  x -= math_ops.reduce_mean(x, 0, keep_dims=True)
  if diag:
    cov = math_ops.reduce_sum(
        math_ops.square(x), 0, keep_dims=True) / (num_points - 1)
  else:
    cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
  return cov 
Example #16
Source File: vgg_test.py    From auto-alt-text-lambda-api with MIT License
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_19(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_19(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Example #17
Source File: target_column.py    From deep_image_model with Apache License 2.0
def training_loss(self, logits, target, features, name="training_loss"):
    """Returns training loss tensor for this head.

    Training loss differs from the loss reported on TensorBoard because the
    example weights must be respected when computing the gradient.

      L = sum_{i} w_{i} * l_{i} / B

    where B is the number of examples in the batch, and l_{i} and w_{i} are
    the individual loss and example weight, respectively.

    Args:
      logits: logits, a float tensor.
      target: either a tensor for labels or in multihead case, a dict of string
        to target tensor.
      features: features dict.
      name: Op name.

    Returns:
      Loss tensor.
    """
    target = target[self.name] if isinstance(target, dict) else target
    loss_unweighted = self._loss_fn(logits, target)

    weight_tensor = self.get_weight_tensor(features)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name=name)
    loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
    return math_ops.reduce_mean(loss_weighted, name=name) 
Example #18
Source File: head.py    From deep_image_model with Apache License 2.0
def _centered_bias_step(logits_dimension, weight_collection, labels,
                        train_loss_fn):
  """Creates and returns training op for centered bias."""
  centered_bias = ops.get_collection(weight_collection)
  batch_size = array_ops.shape(labels)[0]
  logits = array_ops.reshape(
      array_ops.tile(centered_bias[0], [batch_size]),
      [batch_size, logits_dimension])
  with ops.name_scope(None, "centered_bias", (labels, logits)):
    centered_bias_loss = math_ops.reduce_mean(
        train_loss_fn(logits, labels), name="training_loss")
  # Learn the centered bias with an optimizer. 0.1 is a conservative lr for a
  # single variable.
  return training.AdagradOptimizer(0.1).minimize(
      centered_bias_loss, var_list=centered_bias) 
Example #19
Source File: target_column.py    From deep_image_model with Apache License 2.0
def loss(self, logits, target, features):
    """Returns loss tensor for this head.

    The loss returned is the weighted average.

      L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i}

    Args:
      logits: logits, a float tensor.
      target: either a tensor for labels or in multihead case, a dict of string
        to target tensor.
      features: features dict.

    Returns:
      Loss tensor.
    """
    target = target[self.name] if isinstance(target, dict) else target
    loss_unweighted = self._loss_fn(logits, target)

    weight_tensor = self.get_weight_tensor(features)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name="loss")
    loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
    return math_ops.div(
        math_ops.reduce_sum(loss_weighted),
        math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
        name="loss") 
Example #20
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testCreateFCFollowByDropout(self):
    height, width = 3, 3
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      output = _layers.fully_connected(images, 50)
      num_elem_initial = math_ops.reduce_mean(
          math_ops.cast(output > 0, dtypes.float32))
      output = _layers.dropout(output)
      num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
      sess.run(variables_lib.global_variables_initializer())
      num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
      self.assertLess(num_elem, num_elem_initial / 2 + 0.1)
      self.assertGreater(num_elem, num_elem_initial / 2 - 0.1) 
Example #21
Source File: monte_carlo.py    From deep_image_model with Apache License 2.0
def _sample_mean(values):
  """Mean over sample indices.  In this module this is always [0]."""
  return math_ops.reduce_mean(values, reduction_indices=[0]) 
Example #22
Source File: head.py    From deep_image_model with Apache License 2.0
def _loss(loss_unweighted, weight, name):
  """Returns loss."""
  if weight is None:
    loss = math_ops.reduce_mean(loss_unweighted, name=name)
    return loss, loss
  else:
    loss_weighted = _weighted_loss(loss_unweighted, weight)
    weighted_average_loss = math_ops.div(
        math_ops.reduce_sum(loss_weighted),
        math_ops.to_float(math_ops.reduce_sum(weight)),
        name="weighted_average_loss")
    loss = math_ops.reduce_mean(loss_weighted, name=name)
    return loss, weighted_average_loss 
Example #23
Source File: metric_learning_test.py    From tf-slim with Apache License 2.0
def testNpairsMultiLabel(self):
    with self.cached_session():
      num_data = 15
      feat_dim = 6
      num_classes = 10
      reg_lambda = 0.02

      embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
      embeddings_positive = np.random.rand(num_data, feat_dim).astype(
          np.float32)

      labels = np.random.randint(0, 2, (num_data, num_classes))
      # Set the last column to one so that each row has at least one bit set.
      labels[:, -1] = 1

      # Compute the loss in NP
      reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
      reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
      reg_term *= 0.25 * reg_lambda

      similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)

      labels_remapped = np.dot(labels, labels.T).astype(np.float64)
      labels_remapped /= np.sum(labels_remapped, 1, keepdims=True)

      xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
          logits=ops.convert_to_tensor(similarity_matrix),
          labels=ops.convert_to_tensor(labels_remapped))).eval()
      loss_np = xent_loss + reg_term

      # Compute the loss in TF
      loss_tf = metric_learning.npairs_loss_multilabel(
          sparse_labels=convert_to_list_of_sparse_tensor(labels),
          embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
          embeddings_positive=ops.convert_to_tensor(embeddings_positive),
          reg_lambda=reg_lambda)
      loss_tf = loss_tf.eval()

      self.assertAllClose(loss_np, loss_tf) 
Example #24
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testCreateFCWithDropout(self):
    height, width = 3, 3
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      output = _layers.fully_connected(
          images, 50, normalizer_fn=_layers.dropout)
      num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
      sess.run(variables_lib.global_variables_initializer())
      num_elem = sess.run(num_elem)
      self.assertLess(num_elem, 0.5)
      self.assertGreater(num_elem, 0.1) 
Example #25
Source File: loss_ops_test.py    From tf-slim with Apache License 2.0
def testNoneLossCollection(self):
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
    losses = loss_ops.hinge_loss(logits, labels)
    self.assertFalse(loss_ops.get_losses())
    loss_ops.add_loss(math_ops.reduce_mean(losses), loss_collection=None)
    self.assertFalse(loss_ops.get_losses())
    with self.cached_session():
      self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3) 
Example #26
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testCreateDropoutNoTraining(self):
    height, width = 3, 3
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      num_elem_initial = math_ops.reduce_mean(
          math_ops.cast(images > 0, dtypes.float32))
      output = _layers.dropout(images, is_training=False)
      num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
      num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
      self.assertEqual(num_elem, num_elem_initial)
      outputs, inputs = sess.run([output, images])
      self.assertAllClose(outputs, inputs) 
Example #27
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testDropout(self):
    height, width = 10, 10
    with self.cached_session() as sess:
      images = random_ops.random_uniform(
          (5, height, width, 3), seed=1, name='images')
      num_elem_initial = math_ops.reduce_mean(
          math_ops.cast(images > 0, dtypes.float32))
      output = _layers.dropout(images)
      num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
      num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
      self.assertLess(num_elem, num_elem_initial / 2 + 0.1)
      self.assertGreater(num_elem, num_elem_initial / 2 - 0.1) 
Example #28
Source File: rev_block_lib_test.py    From tf-slim with Apache License 2.0
def testReuse(self):

    def f(x):
      return core_layers.dense(x, self.CHANNELS // 2)

    def g(x):
      return core_layers.dense(x, self.CHANNELS // 2)

    x = random_ops.random_uniform(
        [self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
    x1, x2 = array_ops.split(x, 2, axis=-1)

    with variable_scope.variable_scope("test"):
      y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

    num_vars_before = len(variables.global_variables())

    with variable_scope.variable_scope("test", reuse=True):
      y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

    num_vars_after = len(variables.global_variables())
    self.assertEqual(num_vars_before, num_vars_after)

    loss = math_ops.reduce_mean(y1 + y2)
    _ = gradients_impl.gradients(loss,
                                 [x] + variables.trainable_variables())

    with variable_scope.variable_scope("test", reuse=True):
      y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

    num_vars_after = len(variables.global_variables())
    self.assertEqual(num_vars_before, num_vars_after) 
Example #29
Source File: custom_ssim.py    From MimickNet with Apache License 2.0
def ssim(img1, img2, max_val):
    _, _, checks = _verify_compatible_image_shapes(img1, img2)
    with ops.control_dependencies(checks):
        img1 = array_ops.identity(img1)

    # Need to convert the images to float32.  Scale max_val accordingly so that
    # SSIM is computed correctly.
    max_val = math_ops.cast(max_val, img1.dtype)
    max_val = tf.image.convert_image_dtype(max_val, dtypes.float32)
    img1 = tf.image.convert_image_dtype(img1, dtypes.float32)
    img2 = tf.image.convert_image_dtype(img2, dtypes.float32)
    ssim_per_channel, cs, luminance = _ssim_per_channel(img1, img2, max_val)
    # Compute average over color channels.
    return (math_ops.reduce_mean(ssim_per_channel, [-1]),
            math_ops.reduce_mean(cs, [-1]),
            math_ops.reduce_mean(luminance, [-1]))
Example #30
Source File: custom_ssim.py    From MimickNet with Apache License 2.0
def _ssim_per_channel(img1, img2, max_val=1.0):
    filter_size = constant_op.constant(11, dtype=dtypes.int32)
    filter_sigma = constant_op.constant(1.5, dtype=img1.dtype)

    shape1, shape2 = array_ops.shape_n([img1, img2])
    checks = [
      control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal(
          shape1[-3:-1], filter_size)), [shape1, filter_size], summarize=8),
      control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal(
          shape2[-3:-1], filter_size)), [shape2, filter_size], summarize=8)]

    # Enforce the check to run before computation.
    with ops.control_dependencies(checks):
        img1 = array_ops.identity(img1)

    kernel = _fspecial_gauss(filter_size, filter_sigma)
    kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1])

    compensation = 1.0

    def reducer(x):
        shape = array_ops.shape(x)
        x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0))
        y = nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
        return array_ops.reshape(y, array_ops.concat([shape[:-3],
                                                      array_ops.shape(y)[1:]], 0))

    luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation)

    # Average over the second and the third from the last: height, width.
    axes = constant_op.constant([-3, -2], dtype=dtypes.int32)
    ssim_val = math_ops.reduce_mean(luminance * cs, axes)
    cs = math_ops.reduce_mean(cs, axes)
    luminance = math_ops.reduce_mean(luminance, axes)
    return ssim_val, cs, luminance
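The closing reductions in these two functions are easiest to follow as a shape exercise: axis [-3, -2] averages the per-pixel SSIM map over height and width regardless of any leading batch dimensions, and the final reduce_mean over [-1] in ssim() folds the channel axis. A sketch with made-up dimensions, substituting random values for the real SSIM map:

import tensorflow as tf

ssim_map = tf.random.uniform((4, 54, 54, 3))           # [batch, h', w', channels]
per_channel = tf.reduce_mean(ssim_map, axis=[-3, -2])  # -> [4, 3]
per_image = tf.reduce_mean(per_channel, axis=[-1])     # -> [4]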