Python tensorflow.get_collection() Examples

The following are 30 code examples of tensorflow.get_collection(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the tensorflow module, or try the search function.
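
Before diving into the project examples, here is a minimal, self-contained sketch of the API (TF 1.x graph mode; in TF 2.x the same calls live under tf.compat.v1). tf.get_collection(key, scope=None) returns the values stored in the default graph under a collection key, optionally filtered by a scope prefix:

import tensorflow as tf

with tf.Graph().as_default():
    with tf.variable_scope('model'):
        w = tf.get_variable('w', shape=[3, 3])                # trainable by default
        b = tf.get_variable('b', shape=[3], trainable=False)  # global only

    # Built-in collection keys live on tf.GraphKeys; the optional second
    # argument filters results by scope prefix.
    assert len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'model')) == 2
    assert len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'model')) == 1

    # Custom collections are plain string keys: anything can be registered
    # with tf.add_to_collection and read back with tf.get_collection.
    tf.add_to_collection('my_losses', tf.reduce_sum(tf.square(w)))
    assert len(tf.get_collection('my_losses')) == 1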
Example #1
Source File: variables.py    From DOTA_models with Apache License 2.0
def get_unique_variable(name):
  """Gets the variable uniquely identified by that name.

  Args:
    name: a name that uniquely identifies the variable.

  Returns:
    a tensorflow variable.

  Raises:
    ValueError: if no variable uniquely identified by the name exists.
  """
  candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
  if not candidates:
    raise ValueError("Couldn't find variable %s" % name)

  for candidate in candidates:
    if candidate.op.name == name:
      return candidate
  raise ValueError('Variable %s does not uniquely identify a variable' % name) 
Example #2
Source File: build_graph.py    From HardRLWithYoutube with MIT License
def scope_vars(scope, trainable_only=False):
    """
    Get variables inside a scope
    The scope can be specified as a string
    Parameters
    ----------
    scope: str or VariableScope
        scope in which the variables reside.
    trainable_only: bool
        whether or not to return only the variables that were marked as trainable.
    Returns
    -------
    vars: [tf.Variable]
        list of variables in `scope`.
    """
    return tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
        scope=scope if isinstance(scope, str) else scope.name
    ) 
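
A quick usage sketch for the helper above (the scope name 'model' and the variables are illustrative, and tf is assumed to be imported as in the surrounding file):

with tf.variable_scope('model') as vs:
    w = tf.get_variable('w', shape=[4])                             # trainable
    frozen = tf.get_variable('frozen', shape=[4], trainable=False)  # not trainable
assert len(scope_vars('model')) == 2                  # scope passed as a string
assert len(scope_vars(vs, trainable_only=True)) == 1  # scope passed as a VariableScope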
Example #3
Source File: model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def get_params(self):
        """
        Provides access to the model's parameters.
        :return: A list of all Variables defining the model parameters.
        """
        # Under eager execution there are no graph collections, so
        # subclasses must override get_params.
        try:
            if tf.executing_eagerly():
                raise NotImplementedError("For Eager execution - get_params "
                                          "must be overridden.")
        except AttributeError:
            pass

        # For graph-based execution
        scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       self.scope)
        return scope_vars 
Example #4
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, 'GPU:0')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2) 
Example #5
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 2)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'LogisticClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, 'GPU:0')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, []) 
Example #6
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 2)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, 'GPU:0')
        self.assertDeviceEqual(v.device, 'CPU:0') 
Example #7
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, 'GPU:0')
        self.assertDeviceEqual(v.device, 'CPU:0') 
Example #8
Source File: value_functions.py    From lirpg with MIT License
def __init__(self, ob_dim, ac_dim): #pylint: disable=W0613
        X = tf.placeholder(tf.float32, shape=[None, ob_dim*2+ac_dim*2+2]) # batch of observations
        vtarg_n = tf.placeholder(tf.float32, shape=[None], name='vtarg')
        wd_dict = {}
        h1 = tf.nn.elu(dense(X, 64, "h1", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
        h2 = tf.nn.elu(dense(h1, 64, "h2", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
        vpred_n = dense(h2, 1, "hfinal", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict)[:,0]
        sample_vpred_n = vpred_n + tf.random_normal(tf.shape(vpred_n))
        wd_loss = tf.get_collection("vf_losses", None)
        loss = tf.reduce_mean(tf.square(vpred_n - vtarg_n)) + tf.add_n(wd_loss)
        loss_sampled = tf.reduce_mean(tf.square(vpred_n - tf.stop_gradient(sample_vpred_n)))
        self._predict = U.function([X], vpred_n)
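        # NOTE: 'async' became a reserved word in Python 3.7, so the call below
        # only parses on older interpreters; newer kfac releases rename this argument.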
        optim = kfac.KfacOptimizer(learning_rate=0.001, cold_lr=0.001*(1-0.9), momentum=0.9,
                                   clip_kl=0.3, epsilon=0.1, stats_decay=0.95,
                                   async=1, kfac_update=2, cold_iter=50,
                                   weight_decay_dict=wd_dict, max_grad_norm=None)
        vf_var_list = []
        for var in tf.trainable_variables():
            if "vf" in var.name:
                vf_var_list.append(var)

        update_op, self.q_runner = optim.minimize(loss, loss_sampled, var_list=vf_var_list)
        self.do_update = U.function([X, vtarg_n], update_op) #pylint: disable=E1101
        U.initialize() # Initialize uninitialized TF variables 
Example #9
Source File: build_graph.py    From lirpg with MIT License
def scope_vars(scope, trainable_only=False):
    """
    Get variables inside a scope
    The scope can be specified as a string
    Parameters
    ----------
    scope: str or VariableScope
        scope in which the variables reside.
    trainable_only: bool
        whether or not to return only the variables that were marked as trainable.
    Returns
    -------
    vars: [tf.Variable]
        list of variables in `scope`.
    """
    return tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
        scope=scope if isinstance(scope, str) else scope.name
    ) 
Example #10
Source File: cifar10.py    From DOTA_models with Apache License 2.0
def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.summary.scalar(l.op.name + ' (raw)', l)
    tf.summary.scalar(l.op.name, loss_averages.average(l))

  return loss_averages_op 
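
For context, the 'losses' collection read above is populated elsewhere in cifar10.py whenever a loss term is created. The convention looks roughly like this (a runnable sketch, not the verbatim source; the logits and labels are placeholders):

logits = tf.constant([[2.0, 1.0]])
labels = tf.constant([0])
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
# Register the term so that _add_loss_summaries can find it later.
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is the sum of everything registered under 'losses'.
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')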
Example #11
Source File: discriminator.py    From SSGAN-Tensorflow with MIT License
def __call__(self, input):
        with tf.variable_scope(self.name, reuse=self._reuse):
            if not self._reuse:
                print('\033[93m'+self.name+'\033[0m')
            _ = input
            num_channel = [32, 64, 128, 256, 256, 512]
            num_layer = np.ceil(np.log2(min(_.shape.as_list()[1:3]))).astype(int)  # int, not np.int (removed in NumPy 1.24)
            for i in range(num_layer):
                ch = num_channel[i] if i < len(num_channel) else 512
                _ = conv2d(_, ch, self._is_train, info=not self._reuse,
                           norm=self._norm_type, name='conv{}'.format(i+1))
            _ = conv2d(_, int(num_channel[i]/4), self._is_train, k=1, s=1,
                       info=not self._reuse, norm='None', name='conv{}'.format(i+2))
            _ = conv2d(_, self._num_class+1, self._is_train, k=1, s=1, info=not self._reuse,
                       activation_fn=None, norm='None',
                       name='conv{}'.format(i+3))
            _ = tf.squeeze(_)
            if not self._reuse: 
                log.info('discriminator output {}'.format(_.shape.as_list()))
            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return tf.nn.sigmoid(_), _ 
Example #12
Source File: networks.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def build_graph(self,state,global_step):
        '''
        Builds the computation graph for the actor (policy) network
        Inputs:
            state: tf placeholder input to the network
        '''  

        self.global_step = global_step
        self.outputs = [state]
        with tf.variable_scope(self.scope, reuse=self.reuse):
            for i in range(1,len(self.units)-1):
                layer = tf.layers.dense(self.outputs[i-1], self.units[i], tf.nn.relu, trainable=self.trainable)
                self.outputs.append(layer)
            mu = settings.ACTION_SCALE * tf.layers.dense(self.outputs[-1], self.units[-1], tf.nn.tanh, trainable=self.trainable) 
            sigma = tf.layers.dense(self.outputs[-1], self.units[-1], tf.nn.softplus, trainable=self.trainable) 
            self.outputs.append([mu,sigma])

            self.norm_dist = tf.distributions.Normal(loc=mu,scale=sigma) 
        self.params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope) 
Example #13
Source File: train_image_classifier.py    From DOTA_models with Apache License 2.0
def _get_variables_to_train():
  """Returns a list of variables to train.

  Returns:
    A list of variables to train by the optimizer.
  """
  if FLAGS.trainable_scopes is None:
    return tf.trainable_variables()
  else:
    scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]

  variables_to_train = []
  for scope in scopes:
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
    variables_to_train.extend(variables)
  return variables_to_train 
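
For example, running the script with --trainable_scopes=InceptionV3/Logits,InceptionV3/AuxLogits (illustrative scope names) restricts training to the variables under those two scopes while the rest of the network stays frozen.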
Example #14
Source File: networks.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def build_graph(self,state,global_step):
        '''
        Builds the computation graph for the critic
        Inputs:
            state: tf placeholder input to the network
        '''
        
        self.global_step = global_step
        self.outputs = [state]
        with tf.variable_scope(self.scope, reuse=self.reuse):
            for i in range(1,len(self.units)-1):
                layer = tf.layers.dense(self.outputs[i-1], self.units[i], tf.nn.relu, trainable=self.trainable)
                self.outputs.append(layer)
            layer = tf.layers.dense(self.outputs[-1], self.units[-1], trainable=self.trainable)
            self.outputs.append(layer)
        self.params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope) 
Example #15
Source File: collections_test.py    From DOTA_models with Apache License 2.0
def testTotalLossWithoutRegularization(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1001
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      dense_labels = tf.random_uniform((batch_size, num_classes))
      with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0):
        logits, end_points = slim.inception.inception_v3(
            inputs,
            num_classes=num_classes)
        # Cross entropy loss for the main softmax prediction.
        slim.losses.cross_entropy_loss(logits,
                                       dense_labels,
                                       label_smoothing=0.1,
                                       weight=1.0)
        # Cross entropy loss for the auxiliary softmax head.
        slim.losses.cross_entropy_loss(end_points['aux_logits'],
                                       dense_labels,
                                       label_smoothing=0.1,
                                       weight=0.4,
                                       scope='aux_loss')
      losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
      self.assertEqual(len(losses), 2) 
Example #16
Source File: value_functions.py    From HardRLWithYoutube with MIT License
def __init__(self, ob_dim, ac_dim): #pylint: disable=W0613
        X = tf.placeholder(tf.float32, shape=[None, ob_dim*2+ac_dim*2+2]) # batch of observations
        vtarg_n = tf.placeholder(tf.float32, shape=[None], name='vtarg')
        wd_dict = {}
        h1 = tf.nn.elu(dense(X, 64, "h1", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
        h2 = tf.nn.elu(dense(h1, 64, "h2", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
        vpred_n = dense(h2, 1, "hfinal", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict)[:,0]
        sample_vpred_n = vpred_n + tf.random_normal(tf.shape(vpred_n))
        wd_loss = tf.get_collection("vf_losses", None)
        loss = tf.reduce_mean(tf.square(vpred_n - vtarg_n)) + tf.add_n(wd_loss)
        loss_sampled = tf.reduce_mean(tf.square(vpred_n - tf.stop_gradient(sample_vpred_n)))
        self._predict = U.function([X], vpred_n)
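        # NOTE: as in Example #8, 'async' is a reserved word in Python 3.7+,
        # so this call only parses on older interpreters.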
        optim = kfac.KfacOptimizer(learning_rate=0.001, cold_lr=0.001*(1-0.9), momentum=0.9,
                                   clip_kl=0.3, epsilon=0.1, stats_decay=0.95,
                                   async=1, kfac_update=2, cold_iter=50,
                                   weight_decay_dict=wd_dict, max_grad_norm=None)
        vf_var_list = []
        for var in tf.trainable_variables():
            if "vf" in var.name:
                vf_var_list.append(var)

        update_op, self.q_runner = optim.minimize(loss, loss_sampled, var_list=vf_var_list)
        self.do_update = U.function([X, vtarg_n], update_op) #pylint: disable=E1101
        U.initialize() # Initialize uninitialized TF variables 
Example #17
Source File: variables.py    From DOTA_models with Apache License 2.0
def add_variable(var, restore=True):
  """Adds a variable to the MODEL_VARIABLES collection.

    Optionally it will add the variable to  the VARIABLES_TO_RESTORE collection.
  Args:
    var: a variable.
    restore: whether the variable should be added to the
      VARIABLES_TO_RESTORE collection.

  """
  collections = [MODEL_VARIABLES]
  if restore:
    collections.append(VARIABLES_TO_RESTORE)
  for collection in collections:
    if var not in tf.get_collection(collection):
      tf.add_to_collection(collection, var) 
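
MODEL_VARIABLES and VARIABLES_TO_RESTORE here are module-level collection keys defined near the top of variables.py. A short usage sketch under that assumption:

# Assumes the module-level collection keys from variables.py are in scope.
v = tf.Variable(tf.zeros([2]), name='v')
add_variable(v)                                # registered in both collections
add_variable(tf.Variable(0.0), restore=False)  # MODEL_VARIABLES only
assert v in tf.get_collection(MODEL_VARIABLES)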
Example #18
Source File: cnn_policy.py    From lirpg with MIT License
def get_trainable_variables(self):
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope) 
Example #19
Source File: mlp_policy.py    From HardRLWithYoutube with MIT License
def get_trainable_variables(self):
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope) 
Example #20
Source File: image_embedding_test.py    From DOTA_models with Apache License 2.0
def _assertCollectionSize(self, expected_size, collection):
    actual_size = len(tf.get_collection(collection))
    if expected_size != actual_size:
      self.fail("Found %d items in collection %s (expected %d)." %
                (actual_size, collection, expected_size)) 
Example #21
Source File: show_and_tell_model.py    From DOTA_models with Apache License 2.0
def build_image_embeddings(self):
    """Builds the image model subgraph and generates image embeddings.

    Inputs:
      self.images

    Outputs:
      self.image_embeddings
    """
    inception_output = image_embedding.inception_v3(
        self.images,
        trainable=self.train_inception,
        is_training=self.is_training())
    self.inception_variables = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")

    # Map inception output into embedding space.
    with tf.variable_scope("image_embedding") as scope:
      image_embeddings = tf.contrib.layers.fully_connected(
          inputs=inception_output,
          num_outputs=self.config.embedding_size,
          activation_fn=None,
          weights_initializer=self.initializer,
          biases_initializer=None,
          scope=scope)

    # Save the embedding size in the graph.
    tf.constant(self.config.embedding_size, name="embedding_size")

    self.image_embeddings = image_embeddings 
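
The inception_variables collected above are typically handed to a tf.train.Saver so that only the pretrained InceptionV3 weights are restored from a checkpoint. A hedged sketch (the checkpoint path variable is an assumption, not part of the source):

# Restore only the InceptionV3 variables gathered via tf.get_collection.
saver = tf.train.Saver(self.inception_variables)
restore_fn = lambda sess: saver.restore(sess, inception_checkpoint_file)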
Example #22
Source File: collections_test.py    From DOTA_models with Apache License 2.0
def testRegularizationLosses(self):
    batch_size = 5
    height, width = 299, 299
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        slim.inception.inception_v3(inputs)
      losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(losses), len(get_variables_by_name('weights'))) 
Example #23
Source File: collections_test.py    From DOTA_models with Apache License 2.0
def testVariablesToRestoreWithoutLogits(self):
    batch_size = 5
    height, width = 299, 299
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      with slim.arg_scope([slim.ops.conv2d],
                          batch_norm_params={'decay': 0.9997}):
        slim.inception.inception_v3(inputs, restore_logits=False)
      variables_to_restore = tf.get_collection(
          slim.variables.VARIABLES_TO_RESTORE)
      self.assertEqual(len(variables_to_restore), 384) 
Example #24
Source File: collections_test.py    From DOTA_models with Apache License 2.0
def testVariablesToRestore(self):
    batch_size = 5
    height, width = 299, 299
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      with slim.arg_scope([slim.ops.conv2d],
                          batch_norm_params={'decay': 0.9997}):
        slim.inception.inception_v3(inputs)
      variables_to_restore = tf.get_collection(
          slim.variables.VARIABLES_TO_RESTORE)
      self.assertEqual(len(variables_to_restore), 388)
      self.assertListEqual(variables_to_restore, get_variables()) 
Example #25
Source File: ops_test.py    From DOTA_models with Apache License 2.0
def testReuseConvWithWD(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1')
      self.assertEquals(len(variables.get_variables()), 2)
      self.assertEquals(
          len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
      ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1',
                 reuse=True)
      self.assertEquals(len(variables.get_variables()), 2)
      self.assertEquals(
          len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) 
Example #26
Source File: ops_test.py    From DOTA_models with Apache License 2.0
def testReuseVars(self):
    height, width = 3, 3
    with self.test_session() as sess:
      image_shape = (10, height, width, 3)
      image_values = np.random.rand(*image_shape)
      expected_mean = np.mean(image_values, axis=(0, 1, 2))
      expected_var = np.var(image_values, axis=(0, 1, 2))
      images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
      output = ops.batch_norm(images, decay=0.1, is_training=False)
      update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
      with tf.control_dependencies(update_ops):
        output = tf.identity(output)
      # Initialize all variables
      sess.run(tf.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * 3)
      self.assertAllClose(variance, [1] * 3)
      # Simulate assignment from saver restore.
      init_assigns = [tf.assign(moving_mean, expected_mean),
                      tf.assign(moving_variance, expected_var)]
      sess.run(init_assigns)
      for _ in range(10):
        sess.run([output], {images: np.random.rand(*image_shape)})
      mean = moving_mean.eval()
      variance = moving_variance.eval()
      # Although we feed different images, the moving_mean and moving_variance
      # shouldn't change.
      self.assertAllClose(mean, expected_mean)
      self.assertAllClose(variance, expected_var) 
Example #27
Source File: ops_test.py    From DOTA_models with Apache License 2.0
def testComputeMovingVars(self):
    height, width = 3, 3
    with self.test_session() as sess:
      image_shape = (10, height, width, 3)
      image_values = np.random.rand(*image_shape)
      expected_mean = np.mean(image_values, axis=(0, 1, 2))
      expected_var = np.var(image_values, axis=(0, 1, 2))
      images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
      output = ops.batch_norm(images, decay=0.1)
      update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
      with tf.control_dependencies(update_ops):
        output = tf.identity(output)
      # Initialize all variables
      sess.run(tf.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * 3)
      self.assertAllClose(variance, [1] * 3)
      for _ in range(10):
        sess.run([output])
      mean = moving_mean.eval()
      variance = moving_variance.eval()
      # After 10 updates with decay 0.1 moving_mean == expected_mean and
      # moving_variance == expected_var.
      self.assertAllClose(mean, expected_mean)
      self.assertAllClose(variance, expected_var) 
Example #28
Source File: ops_test.py    From DOTA_models with Apache License 2.0
def testCreateMovingVars(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      _ = ops.batch_norm(images, moving_vars='moving_vars')
      moving_mean = tf.get_collection('moving_vars',
                                      'BatchNorm/moving_mean')
      self.assertEquals(len(moving_mean), 1)
      self.assertEquals(moving_mean[0].op.name, 'BatchNorm/moving_mean')
      moving_variance = tf.get_collection('moving_vars',
                                          'BatchNorm/moving_variance')
      self.assertEquals(len(moving_variance), 1)
      self.assertEquals(moving_variance[0].op.name, 'BatchNorm/moving_variance') 
Example #29
Source File: ops_test.py    From DOTA_models with Apache License 2.0
def testReuseUpdateOps(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.batch_norm(images, scope='bn')
      self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 2)
      ops.batch_norm(images, scope='bn', reuse=True)
      self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 4) 
Example #30
Source File: ops_test.py    From DOTA_models with Apache License 2.0
def testReuseVariables(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.batch_norm(images, scale=True, scope='bn')
      ops.batch_norm(images, scale=True, scope='bn', reuse=True)
      beta = variables.get_variables_by_name('beta')
      gamma = variables.get_variables_by_name('gamma')
      self.assertEquals(len(beta), 1)
      self.assertEquals(len(gamma), 1)
      moving_vars = tf.get_collection('moving_vars')
      self.assertEquals(len(moving_vars), 2)