Python tensorflow.compat.v1.Variable() Examples

The following are 30 code examples of tensorflow.compat.v1.Variable(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.compat.v1.
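Before the examples, here is a minimal, hedged sketch of the graph-mode pattern they all share: create a variable, run an initializer, then read or assign it inside a session. All names below are illustrative, not taken from the projects that follow.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

counter = tf.Variable(0, name='counter', trainable=False)
increment = tf.assign(counter, counter + 1)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())  # variables start uninitialized
  sess.run(increment)
  print(sess.run(counter))  # 1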
Example #1
Source File: lib_graph.py    From magenta with Apache License 2.0
def setup_optimizer(self):
    """Instantiates learning rate, decay op and train_op among others."""
    # If not training, don't need to add optimizer to the graph.
    if not self.is_training:
      self.train_op = tf.no_op()
      self.learning_rate = tf.no_op()
      return

    self.learning_rate = tf.Variable(
        self.hparams.learning_rate,
        name='learning_rate',
        trainable=False,
        dtype=tf.float32)

    # FIXME 0.5 -> hparams.decay_rate
    self.decay_op = tf.assign(self.learning_rate, 0.5 * self.learning_rate)
    self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    self.train_op = self.optimizer.minimize(self.loss) 
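A hedged usage sketch (`model` and `sess` are illustrative stand-ins, not magenta API): running the decay op halves the learning rate in place, for example when validation loss plateaus.

sess.run(tf.global_variables_initializer())
lr = sess.run(model.learning_rate)   # hparams.learning_rate
sess.run(model.decay_op)             # learning_rate *= 0.5
lr = sess.run(model.learning_rate)   # now half the previous value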
Example #2
Source File: averaged.py    From lamb with Apache License 2.0
def __init__(self, tensors):
    tensors = list(tensors)
    with tf.variable_scope('averaged'):
      self._num_samples = tf.Variable(0, name='num_samples', trainable=False)
      with tf.variable_scope('avg'):
        self._averages = [
            tf.get_variable(
                tensor.name.replace('/', '-').replace(':', '-'),
                tensor.get_shape(), initializer=tf.zeros_initializer(),
                trainable=False)
            for tensor in tensors]
      with tf.variable_scope('save'):
        self._saves = [
            tf.get_variable(
                tensor.name.replace('/', '-').replace(':', '-'),
                tensor.get_shape(), initializer=tf.zeros_initializer(),
                trainable=False)
            for tensor in tensors]
    self._tensors = tensors
    self._take_sample = self._make_take_sample()
    self._switch = self._make_swith_to_average()
    self._restore = self._make_restore()
    self._reset = self._make_reset() 
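The `_make_*` helpers are not shown in this excerpt, so the following is only a hedged sketch of how such a Polyak-style averager is typically driven, assuming the class is named `Averaged`; it is not lamb's documented API, and `num_steps`, `train_op`, and `run_eval` are placeholders.

avg = Averaged(tf.trainable_variables())
for _ in range(num_steps):
  sess.run(train_op)
  sess.run(avg._take_sample)   # fold the current weights into the average
sess.run(avg._switch)          # save live weights, swap the averages in
run_eval(sess)                 # hypothetical evaluation routine
sess.run(avg._restore)         # swap the training weights back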
Example #3
Source File: gaussian_query_test.py    From privacy with Apache License 2.0
def test_gaussian_sum_with_changing_clip_no_noise(self):
    with self.cached_session() as sess:
      record1 = tf.constant([-6.0, 8.0])  # Clipped to [-3.0, 4.0].
      record2 = tf.constant([4.0, -3.0])  # Not clipped.

      l2_norm_clip = tf.Variable(5.0)
      l2_norm_clip_placeholder = tf.placeholder(tf.float32)
      assign_l2_norm_clip = tf.assign(l2_norm_clip, l2_norm_clip_placeholder)
      query = gaussian_query.GaussianSumQuery(
          l2_norm_clip=l2_norm_clip, stddev=0.0)
      query_result, _ = test_utils.run_query(query, [record1, record2])

      self.evaluate(tf.global_variables_initializer())
      result = sess.run(query_result)
      expected = [1.0, 1.0]
      self.assertAllClose(result, expected)

      sess.run(assign_l2_norm_clip, {l2_norm_clip_placeholder: 0.0})
      result = sess.run(query_result)
      expected = [0.0, 0.0]
      self.assertAllClose(result, expected) 
Example #4
Source File: official_ncf_model.py    From benchmarks with Apache License 2.0
def _fp16_variable_creator(next_creator, **kwargs):
  """Variable creator to create variables in fp32 and cast them to fp16."""
  dtype = kwargs.get('dtype', None)
  initial_value = kwargs.get('initial_value', None)
  if dtype is None:
    if initial_value is not None and not callable(initial_value):
      dtype = initial_value.dtype
  if dtype == tf.float16:
    if callable(initial_value):
      new_initial_value = lambda: tf.cast(initial_value(), tf.float32)
    else:
      new_initial_value = tf.cast(initial_value, tf.float32)
    kwargs['dtype'] = tf.float32
    kwargs['initial_value'] = new_initial_value
    var = next_creator(**kwargs)
    return tf.cast(var, dtype=tf.float16)
  else:
    return next_creator(**kwargs) 
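A hedged usage sketch: creators like this are installed with `tf.variable_creator_scope`, so that any fp16 variable requested inside the scope is transparently backed by an fp32 master copy.

with tf.variable_creator_scope(_fp16_variable_creator):
  # Requested as fp16; stored as fp32, returned as a cast back to fp16.
  w = tf.Variable(tf.zeros([4], dtype=tf.float16), name='fp16_weight')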
Example #5
Source File: official_ncf_model.py    From benchmarks with Apache License 2.0
def get_synthetic_inputs(self, input_name, nclass):
    """Returns the ops to generate synthetic inputs and labels."""
    def users_init_val():
      return tf.random_uniform((self.batch_size, 1), minval=0,
                               maxval=_NUM_USERS_20M, dtype=tf.int32)
    users = tf.Variable(users_init_val, dtype=tf.int32, trainable=False,
                        collections=[tf.GraphKeys.LOCAL_VARIABLES],
                        name='synthetic_users')
    def items_init_val():
      return tf.random_uniform((self.batch_size, 1), minval=0,
                               maxval=_NUM_ITEMS_20M, dtype=tf.int32)
    items = tf.Variable(items_init_val, dtype=tf.int32, trainable=False,
                        collections=[tf.GraphKeys.LOCAL_VARIABLES],
                        name='synthetic_items')

    def labels_init_val():
      return tf.random_uniform((self.batch_size,), minval=0, maxval=2,
                               dtype=tf.int32)
    labels = tf.Variable(labels_init_val, dtype=tf.int32, trainable=False,
                         collections=[tf.GraphKeys.LOCAL_VARIABLES],
                         name='synthetic_labels')

    return [users, items, labels] 
Example #6
Source File: variable_mgr_util_test.py    From benchmarks with Apache License 2.0
def testAppendGradientsWithLossScaleWithNan(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=True,
        loss_scale=tf.Variable(4, dtype=tf.float32),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=True)
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=tf.constant(True))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 0)  # Skip updating for v.
      # Halve loss_scale and reset loss_scale_normal_steps.
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 2)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 0) 
Example #7
Source File: variable_mgr_util_test.py    From benchmarks with Apache License 2.0
def testAppendGradientsWithLossScaleWithoutNan(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=True,
        loss_scale=tf.Variable(4, dtype=tf.float32),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=True)
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=tf.constant(False))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 1)
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 8)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 0) 
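Taken together, these two tests pin down the automatic loss-scaling policy. A hedged sketch of that policy in plain Python (the real variable_mgr_util implementation may differ in detail):

def update_loss_scale(loss_scale, normal_steps, inc_every_n, has_inf_nan):
  """Halve on overflow; double after inc_every_n clean steps."""
  if has_inf_nan:
    return loss_scale / 2, 0       # 4 -> 2 and counter reset (first test)
  if normal_steps + 1 >= inc_every_n:
    return loss_scale * 2, 0       # 4 -> 8 once the counter reaches 10 (second test)
  return loss_scale, normal_steps + 1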
Example #8
Source File: variable_mgr_util_test.py    From benchmarks with Apache License 2.0
def testAppendGradientsWithLossScaleForNonChiefWorker(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=True,
        loss_scale=tf.Variable(4),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=False)  # Non-chief
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=False)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 1)
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 4)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 10) 
Example #9
Source File: dp_optimizer_eager_test.py    From privacy with Apache License 2.0
def testNoiseMultiplier(self, cls):
    with tf.GradientTape(persistent=True) as gradient_tape:
      var0 = tf.Variable([0.0])
      data0 = tf.Variable([[0.0]])

      dp_sum_query = gaussian_query.GaussianSumQuery(4.0, 8.0)
      dp_sum_query = privacy_ledger.QueryWithLedger(dp_sum_query, 1e6, 1 / 1e6)

      opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

      self.evaluate(tf.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([0.0], self.evaluate(var0))

      grads = []
      for _ in range(1000):
        grads_and_vars = opt.compute_gradients(
            lambda: self._loss_fn(var0, data0), [var0],
            gradient_tape=gradient_tape)
        grads.append(grads_and_vars[0][0])

      # Test standard deviation is close to l2_norm_clip * noise_multiplier.
      self.assertNear(np.std(grads), 2.0 * 4.0, 0.5) 
Example #10
Source File: dp_optimizer_eager_test.py    From privacy with Apache License 2.0
def testClippingNorm(self, cls):
    with tf.GradientTape(persistent=True) as gradient_tape:
      var0 = tf.Variable([0.0, 0.0])
      data0 = tf.Variable([[3.0, 4.0], [6.0, 8.0]])

      dp_sum_query = gaussian_query.GaussianSumQuery(1.0, 0.0)
      dp_sum_query = privacy_ledger.QueryWithLedger(dp_sum_query, 1e6, 1 / 1e6)

      opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

      self.evaluate(tf.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([0.0, 0.0], self.evaluate(var0))

      # Expected gradient is sum of differences.
      grads_and_vars = opt.compute_gradients(
          lambda: self._loss_fn(var0, data0), [var0],
          gradient_tape=gradient_tape)
      self.assertAllCloseAccordingToType([-0.6, -0.8], grads_and_vars[0][0]) 
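The expected value follows from a short computation, assuming `_loss_fn` (not shown) averages 0.5 * ||data - var||^2 over the single microbatch:

import numpy as np

g = np.mean([[-3.0, -4.0], [-6.0, -8.0]], axis=0)  # mean per-example gradient
g *= 1.0 / np.linalg.norm(g)                       # rescale to l2 norm 1.0
print(g)                                           # [-0.6 -0.8]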
Example #11
Source File: dp_optimizer_eager_test.py    From privacy with Apache License 2.0
def testBaseline(self, cls, num_microbatches, expected_answer):
    with tf.GradientTape(persistent=True) as gradient_tape:
      var0 = tf.Variable([1.0, 2.0])
      data0 = tf.Variable([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [-1.0, 0.0]])

      dp_sum_query = gaussian_query.GaussianSumQuery(1.0e9, 0.0)
      dp_sum_query = privacy_ledger.QueryWithLedger(
          dp_sum_query, 1e6, num_microbatches / 1e6)

      opt = cls(
          dp_sum_query,
          num_microbatches=num_microbatches,
          learning_rate=2.0)

      self.evaluate(tf.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))

      # Expected gradient is sum of differences divided by number of
      # microbatches.
      grads_and_vars = opt.compute_gradients(
          lambda: self._loss_fn(var0, data0), [var0],
          gradient_tape=gradient_tape)
      self.assertAllCloseAccordingToType(expected_answer, grads_and_vars[0][0]) 
Example #12
Source File: dp_optimizer_vectorized_test.py    From privacy with Apache License 2.0
def testDPGaussianOptimizerClass(self, cls):
    with self.cached_session() as sess:
      var0 = tf.Variable([0.0])
      data0 = tf.Variable([[0.0]])

      opt = cls(
          l2_norm_clip=4.0,
          noise_multiplier=2.0,
          num_microbatches=1,
          learning_rate=2.0)

      self.evaluate(tf.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([0.0], self.evaluate(var0))

      gradient_op = opt.compute_gradients(self._loss(data0, var0), [var0])
      grads = []
      for _ in range(1000):
        grads_and_vars = sess.run(gradient_op)
        grads.append(grads_and_vars[0][0])

      # Test standard deviation is close to l2_norm_clip * noise_multiplier.
      self.assertNear(np.std(grads), 2.0 * 4.0, 0.5) 
Example #13
Source File: dp_optimizer_vectorized_test.py    From privacy with Apache License 2.0
def testClippingNorm(self, cls):
    with self.cached_session() as sess:
      var0 = tf.Variable([0.0, 0.0])
      data0 = tf.Variable([[3.0, 4.0], [6.0, 8.0]])

      opt = cls(l2_norm_clip=1.0,
                noise_multiplier=0.,
                num_microbatches=1,
                learning_rate=2.0)

      self.evaluate(tf.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([0.0, 0.0], self.evaluate(var0))

      # Expected gradient is sum of differences.
      gradient_op = opt.compute_gradients(self._loss(data0, var0), [var0])
      grads_and_vars = sess.run(gradient_op)
      self.assertAllCloseAccordingToType([-0.6, -0.8], grads_and_vars[0][0]) 
Example #14
Source File: dp_optimizer_test.py    From privacy with Apache License 2.0
def testDPGaussianOptimizerClass(self, cls):
    with self.cached_session() as sess:
      var0 = tf.Variable([0.0])
      data0 = tf.Variable([[0.0]])

      opt = cls(
          l2_norm_clip=4.0,
          noise_multiplier=2.0,
          num_microbatches=1,
          learning_rate=2.0)

      self.evaluate(tf.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([0.0], self.evaluate(var0))

      gradient_op = opt.compute_gradients(self._loss(data0, var0), [var0])
      grads = []
      for _ in range(1000):
        grads_and_vars = sess.run(gradient_op)
        grads.append(grads_and_vars[0][0])

      # Test standard deviation is close to l2_norm_clip * noise_multiplier.
      self.assertNear(np.std(grads), 2.0 * 4.0, 0.5) 
Example #15
Source File: simd_mesh_impl.py    From mesh with Apache License 2.0
def assign_to_slices(self, assign_fn, values, assign_to_tensor_list=None):
      """Assign to the slice variables.

      Args:
        assign_fn: a function from
          (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation
        values: a list of tf.Tensor
        assign_to_tensor_list: an optional list of tf.Variable

      Returns:
        a tf.Operation
      """
      if assign_to_tensor_list is None:
        assign_to_tensor_list = self._laid_out_tensor.all_slices
      # Handle both N -> 1 and N -> N cases.
      num_slices = min(len(assign_to_tensor_list), len(values))
      devices = [""] * num_slices
      return tf.group(
          mtf.parallel(devices, assign_fn,
                       [self._variable] * len(devices),
                       assign_to_tensor_list[:num_slices],
                       values[:num_slices])) 
Example #16
Source File: variables.py    From tf-slim with Apache License 2.0
def get_variable_full_name(var):
  """Returns the full name of a variable.

  For normal Variables, this is the same as var.op.name.  For
  sliced or PartitionedVariables, this name is the same for all the
  slices/partitions. In both cases, this is normally the name used in
  a checkpoint file.

  Args:
    var: A `Variable` object.

  Returns:
    A string that is the full name.
  """
  if var._save_slice_info:
    return var._save_slice_info.full_name
  else:
    return var.op.name 
Example #17
Source File: variables.py    From tf-slim with Apache License 2.0
def get_unique_variable(var_op_name):
  """Gets the variable uniquely identified by that var_op_name.

  Args:
    var_op_name: the full name of the variable op, including the scope.

  Returns:
    a tensorflow variable.

  Raises:
    ValueError: if no variable uniquely identified by the name exists.
  """
  candidates = get_variables(scope=var_op_name)
  if not candidates:
    raise ValueError('Couldn\'t find variable %s' % var_op_name)

  for candidate in candidates:
    if candidate.op.name == var_op_name:
      return candidate
  raise ValueError('Variable %s does not uniquely identify a variable' %
                   var_op_name) 
Example #18
Source File: variables.py    From tf-slim with Apache License 2.0
def global_variable(initial_value,
                    validate_shape=True,
                    name=None,
                    use_resource=None):
  """Create a variable with a value and add it to `GraphKeys.GLOBAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.
    use_resource: If `True` use a ResourceVariable instead of a Variable.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.GLOBAL_VARIABLES],
      validate_shape=validate_shape,
      use_resource=use_resource,
      name=name) 
Example #19
Source File: variables.py    From tf-slim with Apache License 2.0
def local_variable(initial_value,
                   validate_shape=True,
                   name=None,
                   use_resource=None):
  """Create a variable with a value and add it to `GraphKeys.LOCAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.
    use_resource: If `True` use a ResourceVariable instead of a Variable.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=validate_shape,
      use_resource=use_resource,
      name=name) 
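A hedged usage note: the two helpers above differ only in the collection they join, which determines which initializer covers them (and, by default, whether a Saver checkpoints them).

g = global_variable(3.0, name='g')  # covered by tf.global_variables_initializer()
l = local_variable(7.0, name='l')   # covered by tf.local_variables_initializer()

with tf.Session() as sess:
  sess.run([tf.global_variables_initializer(),
            tf.local_variables_initializer()])
  print(sess.run([g, l]))  # [3.0, 7.0]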
Example #20
Source File: flop_regularizer_test.py    From morph-net with Apache License 2.0
def testFlopRegularizerDontConvertToVariable(self):
    tf.reset_default_graph()
    tf.set_random_seed(1234)

    x = tf.constant(1.0, shape=[2, 6], name='x', dtype=tf.float32)
    w = tf.Variable(tf.truncated_normal([6, 4], stddev=1.0), use_resource=True)
    net = tf.matmul(x, w)

    # Create FLOPs network regularizer.
    threshold = 0.9
    flop_reg = flop_regularizer.GroupLassoFlopsRegularizer([net.op], threshold,
                                                           0)

    with self.cached_session():
      tf.global_variables_initializer().run()
      flop_reg.get_regularization_term().eval() 
Example #21
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def underlying_variable(t):
  """Find the underlying tf.Variable object.

  Args:
    t: a Tensor

  Returns:
    tf.Variable.
  """
  t = underlying_variable_ref(t)
  assert t is not None
  # make sure that the graph has a variable index and that it is up-to-date
  if not hasattr(tf.get_default_graph(), "var_index"):
    tf.get_default_graph().var_index = {}
  var_index = tf.get_default_graph().var_index
  for v in tf.global_variables()[len(var_index):]:
    var_index[v.name] = v
  return var_index[t.name] 
Example #22
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def underlying_variable_ref(t):
  """Find the underlying variable ref.

  Traverses through Identity, ReadVariableOp, and Enter ops.
  Stops when op type has Variable or VarHandle in name.

  Args:
    t: a Tensor

  Returns:
    a Tensor that is a variable ref, or None on error.
  """
  while t.op.type in ["Identity", "ReadVariableOp", "Enter"]:
    t = t.op.inputs[0]

  op_type = t.op.type
  if "Variable" in op_type or "VarHandle" in op_type:
    return t
  else:
    return None 
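A hedged usage sketch for the pair of helpers above: wrap a resource variable's read in Identity ops and walk back to it.

v = tf.get_variable('w', shape=[2], use_resource=True)
t = tf.identity(tf.identity(v))   # Identity -> Identity -> ReadVariableOp -> VarHandleOp
ref = underlying_variable_ref(t)  # the VarHandleOp output tensor
var = underlying_variable(t)      # the tf.Variable registered under ref.name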
Example #23
Source File: deep_cnn.py    From privacy with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
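A hedged usage sketch: the 'losses' collection filled above is typically summed into the training objective alongside the data term (`data_loss` is a placeholder).

weights = _variable_with_weight_decay('weights', shape=[256, 10],
                                      stddev=0.04, wd=0.004)
total_loss = data_loss + tf.add_n(tf.get_collection('losses'))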
Example #24
Source File: dp_optimizer_test.py    From privacy with Apache License 2.0
def testBaseline(self, cls, num_microbatches, expected_answer):
    with self.cached_session() as sess:
      var0 = tf.Variable([1.0, 2.0])
      data0 = tf.Variable([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [-1.0, 0.0]])

      dp_sum_query = gaussian_query.GaussianSumQuery(1.0e9, 0.0)
      dp_sum_query = privacy_ledger.QueryWithLedger(
          dp_sum_query, 1e6, num_microbatches / 1e6)

      opt = cls(
          dp_sum_query,
          num_microbatches=num_microbatches,
          learning_rate=2.0)

      self.evaluate(tf.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))

      # Expected gradient is sum of differences divided by number of
      # microbatches.
      gradient_op = opt.compute_gradients(self._loss(data0, var0), [var0])
      grads_and_vars = sess.run(gradient_op)
      self.assertAllCloseAccordingToType(expected_answer, grads_and_vars[0][0]) 
Example #25
Source File: dp_optimizer_test.py    From privacy with Apache License 2.0
def testClippingNorm(self, cls):
    with self.cached_session() as sess:
      var0 = tf.Variable([0.0, 0.0])
      data0 = tf.Variable([[3.0, 4.0], [6.0, 8.0]])

      dp_sum_query = gaussian_query.GaussianSumQuery(1.0, 0.0)
      dp_sum_query = privacy_ledger.QueryWithLedger(dp_sum_query, 1e6, 1 / 1e6)

      opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

      self.evaluate(tf.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([0.0, 0.0], self.evaluate(var0))

      # Expected gradient is sum of differences.
      gradient_op = opt.compute_gradients(self._loss(data0, var0), [var0])
      grads_and_vars = sess.run(gradient_op)
      self.assertAllCloseAccordingToType([-0.6, -0.8], grads_and_vars[0][0]) 
Example #26
Source File: dp_optimizer_vectorized_test.py    From privacy with Apache License 2.0
def testBaseline(self, cls, num_microbatches, expected_answer):
    with self.cached_session() as sess:
      var0 = tf.Variable([1.0, 2.0])
      data0 = tf.Variable([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [-1.0, 0.0]])

      opt = cls(
          l2_norm_clip=1.0e9,
          noise_multiplier=0.0,
          num_microbatches=num_microbatches,
          learning_rate=2.0)

      self.evaluate(tf.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))

      # Expected gradient is sum of differences divided by number of
      # microbatches.
      gradient_op = opt.compute_gradients(self._loss(data0, var0), [var0])
      grads_and_vars = sess.run(gradient_op)
      self.assertAllCloseAccordingToType(expected_answer, grads_and_vars[0][0]) 
Example #27
Source File: images_demo.py    From tensorboard with Apache License 2.0
def get_image(verbose=False):
    """Get the image as a TensorFlow variable.

    Returns:
      A `tf.Variable`, which must be initialized prior to use:
      invoke `sess.run(result.initializer)`.
    """
    base_data = tf.constant(image_data(verbose=verbose))
    base_image = tf.image.decode_image(base_data, channels=3)
    base_image.set_shape((IMAGE_HEIGHT, IMAGE_WIDTH, 3))
    parsed_image = tf.Variable(base_image, name="image", dtype=tf.uint8)
    return parsed_image 
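A hedged usage sketch, following the docstring's instruction to run the initializer before the first read:

image = get_image()
with tf.Session() as sess:
    sess.run(image.initializer)  # required before use
    pixels = sess.run(image)     # uint8, shape (IMAGE_HEIGHT, IMAGE_WIDTH, 3)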
Example #28
Source File: accumulate.py    From gpt2-estimator with MIT License
def __init__(self, opt, var_list):
        self.opt = opt
        self.var_list = var_list
        self.aggregation = tf.VariableAggregation.MEAN
        self.accum_vars = {tv: tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False, aggregation=self.aggregation, name='accum_var{0}'.format(tv_ind))
                           for tv_ind, tv in enumerate(var_list)}
        self.total_loss = tf.Variable(
            tf.zeros(shape=[], dtype=tf.float32), aggregation=self.aggregation, name='total_loss')
        self.count_loss = tf.Variable(
            tf.zeros(shape=[], dtype=tf.float32), aggregation=self.aggregation, name='count_loss') 
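Only __init__ is shown, so the following is a hedged sketch of the accumulation loop such a wrapper usually supports, not gpt2-estimator's actual methods (`accum_steps`, `loss` are placeholders):

grads_and_vars = self.opt.compute_gradients(loss, self.var_list)
accum_ops = [tf.assign_add(self.accum_vars[v], g)
             for g, v in grads_and_vars if g is not None]
apply_op = self.opt.apply_gradients(
    [(self.accum_vars[v] / accum_steps, v) for v in self.var_list])
zero_ops = [tf.assign(a, tf.zeros_like(a))
            for a in self.accum_vars.values()]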
Example #29
Source File: tf_mittens.py    From mittens with Apache License 2.0
def _weight_init(self, m, n, name):
        """
        Uses the Xavier Glorot method for initializing weights. This is
        built into TensorFlow as `tf.contrib.layers.xavier_initializer`,
        but it's nice to see all the details.
        """
        x = np.sqrt(6.0/(m+n))
        with tf.name_scope(name) as scope:
            return tf.Variable(
                tf.random_uniform(
                    [m, n], minval=-x, maxval=x), name=name) 
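A hedged equivalence sketch: the same Glorot-uniform bound sqrt(6 / (m + n)) is available as a built-in initializer.

w = tf.get_variable('w', shape=[m, n],
                    initializer=tf.glorot_uniform_initializer())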
Example #30
Source File: placement_mesh_impl.py    From mesh with Apache License 2.0
def assign_to_slices(self, assign_fn, values):
      """Assign to the slice variables.

      Args:
        assign_fn: a function from
          (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation
        values: a list of tf.Tensor

      Returns:
        a tf.Operation
      """
      return tf.group(mtf.parallel(
          self._mesh_impl.devices, assign_fn, [self._variable] * len(values),
          self.laid_out_tensor.all_slices, values))