Python tensorflow.compat.v2.Variable() Examples

The following are 23 code examples of tensorflow.compat.v2.Variable(), collected from open-source projects. Each example notes its source file, originating project, and license. The tensorflow.compat.v2 module exposes the TensorFlow 2 API, so tf.Variable in these snippets behaves as the standard TF2 resource variable.
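
Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the basic lifecycle: creating a variable, updating it in place, and tracking it with a gradient tape.

import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()  # Needed only when running on top of TF 1.x.

v = tf.Variable(1.0, dtype=tf.float32, trainable=True)
v.assign_add(2.0)  # In-place update; v now holds 3.0.

with tf.GradientTape() as tape:
  loss = v * v
grad = tape.gradient(loss, v)  # d(v**2)/dv = 2 * v = 6.0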
Example #1
Source File: discriminator_agent.py    From valan with Apache License 2.0
def __init__(self, config):
    """Initialize R2R Agent."""
    super(DiscriminatorAgent, self).__init__(name='discriminator_r2r')

    self._instruction_encoder = instruction_encoder.InstructionEncoder(
        num_hidden_layers=2,
        output_dim=256,
        pretrained_embed_path=config.pretrained_embed_path,
        oov_bucket_size=config.oov_bucket_size,
        vocab_size=config.vocab_size,
        word_embed_dim=config.word_embed_dim,
    )
    self._image_encoder = image_encoder.ImageEncoder(
        256, 512, num_hidden_layers=2)
    self.affine_a = tf.Variable(1.0, dtype=tf.float32, trainable=True)
    self.affine_b = tf.Variable(0.0, dtype=tf.float32, trainable=True) 
Example #2
Source File: continuous_batched.py    From compression with Apache License 2.0
def from_config(cls, config):
    """Instantiates an entropy model from a configuration dictionary.

    Arguments:
      config: A `dict`, typically the output of `get_config`.

    Returns:
      An entropy model.
    """
    self = super().from_config(config)
    with self.name_scope:
      # pylint:disable=protected-access
      if config["quantization_offset"]:
        zeros = tf.zeros(self.prior_shape, dtype=self.dtype)
        self._quantization_offset = tf.Variable(
            zeros, name="quantization_offset")
      else:
        self._quantization_offset = None
      # pylint:enable=protected-access
    return self 
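
The with self.name_scope block matters here: variables recreated in from_config pick up the module's name prefix, just as they would at construction time. A minimal sketch of that behavior, using a hypothetical tf.Module stand-in rather than the real entropy model:

import tensorflow.compat.v2 as tf

class ToyModel(tf.Module):  # hypothetical stand-in
  def restore_offset(self):
    with self.name_scope:  # variables created here get the module prefix
      return tf.Variable(tf.zeros([4]), name="quantization_offset")

m = ToyModel(name="toy_model")
print(m.restore_offset().name)  # toy_model/quantization_offset:0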
Example #3
Source File: trax2keras.py    From trax with Apache License 2.0
def build(self, input_shape):
    with math_lib.use_backend("tf"):
      # Using `is` instead of `==` following Trax's practice
      if self._trax_layer.weights is base.EMPTY_WEIGHTS:
        sanitized_input_shape = math_lib.nested_map(
            functools.partial(_replace_none_batch, batch_size=self._batch_size),
            input_shape)
        weights, state = self._trax_layer.init(
            tensor_shapes_to_shape_dtypes(sanitized_input_shape, self.dtype),
            rng=self._initializer_rng)
      else:
        weights = self._trax_layer.weights
        state = self._trax_layer.state
      # Note: `weights` may contain `EMPTY_WEIGHTS`
      self._weights = math_lib.nested_map(
          functools.partial(tf.Variable, trainable=True), weights)
      self._state = math_lib.nested_map(
          functools.partial(tf.Variable, trainable=False), state)
      self._rng = tf.Variable(self._forward_rng_init, trainable=False)
    super(TraxKerasLayer, self).build(input_shape) 
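
The same wrap-a-whole-tree pattern can be written with plain tf.nest instead of Trax's math_lib.nested_map; a minimal sketch with made-up weights:

import tensorflow.compat.v2 as tf

weights = {"w": tf.ones([2, 2]), "b": tf.zeros([2])}  # hypothetical tree
variables = tf.nest.map_structure(
    lambda t: tf.Variable(t, trainable=True), weights)
print(variables["w"].trainable, variables["b"].shape)  # True (2,)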
Example #4
Source File: nql_test.py    From language with Apache License 2.0
def test_group_rel_from_variable(self):
    x = self.context.one(cell(2, 2), 'place_t')
    initializer = tf.keras.initializers.GlorotUniform()(
        [1, self.context.get_max_id('dir_g')])
    dir_tf_var = tf.Variable(initializer)
    dir_nql_exp = self.context.as_nql(dir_tf_var, 'dir_g')
    y = x.follow(dir_nql_exp)
    y.eval() 
Example #5
Source File: feature_column_v2_test.py    From hub with Apache License 2.0
def __init__(self, returns_dict=False):
    embeddings = [
        ("", [0, 0, 0, 0]),  # OOV items are mapped to this embedding.
        ("hello world", [1, 2, 3, 4]),
        ("pair-programming", [5, 5, 5, 5]),
    ]
    keys = tf.constant([item[0] for item in embeddings], dtype=tf.string)
    indices = tf.constant(list(range(len(embeddings))), dtype=tf.int64)
    tbl_init = KeyValueTensorInitializer(keys, indices)
    self.table = HashTable(tbl_init, 0)
    self.weights = tf.Variable(
        list([item[1] for item in embeddings]), dtype=tf.float32)
    self.variables = [self.weights]
    self.trainable_variables = self.variables
    self._returns_dict = returns_dict 
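
A self-contained sketch of the lookup-then-gather pattern this test module implements, written against the public tf.lookup API (all names here are illustrative): out-of-vocabulary strings fall back to row 0, the OOV embedding.

import tensorflow.compat.v2 as tf

keys = tf.constant(["", "hello world", "pair-programming"])
row_ids = tf.constant([0, 1, 2], dtype=tf.int64)
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(keys, row_ids), default_value=0)
weights = tf.Variable([[0., 0., 0., 0.],
                       [1., 2., 3., 4.],
                       [5., 5., 5., 5.]])

ids = table.lookup(tf.constant(["hello world", "unseen token"]))
print(tf.gather(weights, ids).numpy())  # rows 1 and 0 of `weights`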
Example #6
Source File: deep_factorized.py    From compression with Apache License 2.0
def _make_variables(self):
    """Creates the variables representing the parameters of the distribution."""
    channels = self.batch_shape.num_elements()
    filters = (1,) + self.num_filters + (1,)
    scale = self.init_scale ** (1 / (len(self.num_filters) + 1))
    self._matrices = []
    self._biases = []
    self._factors = []

    for i in range(len(self.num_filters) + 1):
      init = tf.math.log(tf.math.expm1(1 / scale / filters[i + 1]))
      init = tf.cast(init, dtype=self.dtype)
      init = tf.broadcast_to(init, (channels, filters[i + 1], filters[i]))
      matrix = tf.Variable(init, name="matrix_{}".format(i))
      self._matrices.append(matrix)

      bias = tf.Variable(
          tf.random.uniform(
              (channels, filters[i + 1], 1), -.5, .5, dtype=self.dtype),
          name="bias_{}".format(i))
      self._biases.append(bias)

      if i < len(self.num_filters):
        factor = tf.Variable(
            tf.zeros((channels, filters[i + 1], 1), dtype=self.dtype),
            name="factor_{}".format(i))
        self._factors.append(factor) 
Example #7
Source File: uniform_noise_test.py    From compression with Apache License 2.0
def test_variables_receive_gradients(self):
    loc = tf.Variable(tf.ones([2], dtype=tf.float32))
    log_scale = tf.Variable(tf.zeros([2], dtype=tf.float32))
    logit_weight = tf.Variable(tf.constant([.3, .7], dtype=tf.float32))
    with tf.GradientTape() as tape:
      dist = self.dist_cls(
          loc=loc, scale=tf.exp(log_scale), weight=tf.nn.softmax(logit_weight))
      x = tf.random.normal([20])
      loss = -tf.reduce_mean(dist.log_prob(x))
    grads = tape.gradient(loss, [loc, log_scale, logit_weight])
    self.assertLen(grads, 3)
    self.assertNotIn(None, grads) 
Example #8
Source File: uniform_noise_test.py    From compression with Apache License 2.0
def test_variables_receive_gradients(self):
    loc = tf.Variable(1., dtype=tf.float32)
    log_scale = tf.Variable(0., dtype=tf.float32)
    with tf.GradientTape() as tape:
      dist = self.dist_cls(loc=loc, scale=tf.exp(log_scale))
      x = tf.random.normal([20])
      loss = -tf.reduce_mean(dist.log_prob(x))
    grads = tape.gradient(loss, [loc, log_scale])
    self.assertLen(grads, 2)
    self.assertNotIn(None, grads) 
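
These two tests verify the property that makes variables trainable at all: gradients reach them, so an optimizer can mutate them in place. A minimal standalone sketch of one manual SGD step:

import tensorflow.compat.v2 as tf

v = tf.Variable(1.0)
with tf.GradientTape() as tape:
  loss = (v - 3.0) ** 2
grad = tape.gradient(loss, v)  # 2 * (v - 3.0) = -4.0
v.assign_sub(0.1 * grad)       # one SGD step; v moves toward 3.0
print(v.numpy())               # 1.4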
Example #9
Source File: random_agent.py    From agents with Apache License 2.0
def __init__(self,
               time_step_spec: ts.TimeStep,
               action_spec: types.NestedTensorSpec,
               debug_summaries: bool = False,
               summarize_grads_and_vars: bool = False,
               train_step_counter: Optional[tf.Variable] = None,
               num_outer_dims: int = 1,
               name: Optional[Text] = None):
    """Creates a random agent.

    Args:
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      action_spec: A nest of BoundedTensorSpec representing the actions.
      debug_summaries: A bool to gather debug summaries.
      summarize_grads_and_vars: If true, gradient summaries will be written.
      train_step_counter: An optional counter to increment every time the train
        op is run.  Defaults to the global_step.
      num_outer_dims: same as base class.
      name: The name of this agent. All variables in this module will fall under
        that name. Defaults to the class name.
    """
    tf.Module.__init__(self, name=name)

    policy_class = random_tf_policy.RandomTFPolicy

    super(RandomAgent, self).__init__(
        time_step_spec,
        action_spec,
        policy_class=policy_class,
        debug_summaries=debug_summaries,
        summarize_grads_and_vars=summarize_grads_and_vars,
        train_step_counter=train_step_counter,
        num_outer_dims=num_outer_dims) 
Example #10
Source File: policy_loader_test.py    From agents with Apache License 2.0
def __init__(self):
    super(AddNet, self).__init__(
        tensor_spec.TensorSpec((), tf.float32), (), 'add_net')
    self.var = tf.Variable(0.0, dtype=tf.float32) 
Example #11
Source File: utils.py    From models with Apache License 2.0
def __init__(self,
               summary_writer,
               summary_fn,
               global_step=None,
               summary_interval=None):
    """Construct a summary manager object.

    Args:
      summary_writer: A `tf.summary.SummaryWriter` instance for writing
        summaries.
      summary_fn: A callable defined as `def summary_fn(name, tensor,
        step=None)`, which describes the summary operation.
      global_step: A `tf.Variable` instance for checking the current global step
        value, in case users want to save summaries every N steps.
      summary_interval: An integer, indicates the minimum step interval between
        two summaries.
    """
    if summary_writer is not None:
      self._summary_writer = summary_writer
      self._enabled = True
    else:
      self._summary_writer = tf.summary.create_noop_writer()
      self._enabled = False
    self._summary_fn = summary_fn

    if global_step is None:
      self._global_step = tf.summary.experimental.get_step()
    else:
      self._global_step = global_step

    if summary_interval is not None:
      if self._global_step is None:
        raise ValueError("`summary_interval` is not None, but no `global_step` "
                         "can be obtained ")
      self._last_summary_step = self._global_step.numpy()
    self._summary_interval = summary_interval 
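
A sketch of the kind of counter typically passed as global_step: a non-trainable int64 variable, optionally registered as the default summary step so the tf.summary.experimental.get_step() fallback above can find it. This is an illustration, not code from the project.

import tensorflow.compat.v2 as tf

global_step = tf.Variable(0, dtype=tf.int64, trainable=False,
                          name="global_step")
tf.summary.experimental.set_step(global_step)  # found by get_step()
global_step.assign_add(1)  # advance once per training step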
Example #12
Source File: __init__.py    From language with Apache License 2.0
def tf(self):
    """A Tensorflow expression which evaluates this NeuralQueryExpression.

    Returns:
      A Tensorflow expression that computes this NeuralQueryExpression's value.
    """
    if isinstance(self._tf, tf.Tensor) or isinstance(self._tf, tf.Variable):
      return self._tf  # pytype: disable=bad-return-type
    else:
      return tf.constant(self._tf) 
Example #13
Source File: loop_basic_test.py    From autograph with Apache License 2.0
def test_for_one_var_ds_iterator(self, l):
    inputs_ = lambda: (iter(_int_dataset(l)), tf.Variable(0))
    self.assertFunctionMatchesEagerStatefulInput(for_one_var, inputs_) 
Example #14
Source File: loop_basic_test.py    From autograph with Apache License 2.0
def test_for_no_vars_ds_iterator(self, l):
    inputs_ = lambda: (iter(_int_dataset(l)), tf.Variable(0))
    self.assertFunctionMatchesEagerStatefulInput(for_no_vars, inputs_) 
Example #15
Source File: loop_basic_test.py    From autograph with Apache License 2.0
def test_for_no_vars(self, l, type_):
    l = type_(l)
    self.assertFunctionMatchesEager(for_no_vars, l, tf.Variable(0)) 
Example #16
Source File: loop_basic_test.py    From autograph with Apache License 2.0
def test_while_no_vars(self, n, type_):
    n = type_(n)
    self.assertFunctionMatchesEager(while_no_vars, n, tf.Variable(0)) 
Example #17
Source File: cond_basic_test.py    From autograph with Apache License 2.0
def test_no_vars(self, target, c, type_):
    c = type_(c)
    self.assertFunctionMatchesEager(target, c, tf.Variable(0)) 
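
Examples #13 through #17 all hand the function under test a tf.Variable(0) as mutable state, since a compiled loop or conditional can update a variable in place where it could not mutate a Python int. A standalone sketch of that pattern, with a hypothetical counting function:

import tensorflow.compat.v2 as tf

counter = tf.Variable(0)  # created once, outside the compiled function

@tf.function
def count_to(n):
  for _ in tf.range(n):
    counter.assign_add(1)
  return counter.read_value()

print(count_to(tf.constant(5)).numpy())  # 5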
Example #18
Source File: utils.py    From models with Apache License 2.0
def __init__(self, epoch_steps, global_step):
    """Constructs the EpochHelper.

    Args:
      epoch_steps: An integer indicates how many steps in an epoch.
      global_step: A `tf.Variable` instance indicates the current global step.
    """
    self._epoch_steps = epoch_steps
    self._global_step = global_step
    self._current_epoch = None
    self._epoch_start_step = None
    self._in_epoch = False 
Example #19
Source File: fixed_policy_agent.py    From agents with Apache License 2.0
def __init__(self,
               time_step_spec: ts.TimeStep,
               action_spec: types.NestedTensorSpec,
               policy_class: PolicyClassType,
               debug_summaries: bool = False,
               summarize_grads_and_vars: bool = False,
               train_step_counter: Optional[tf.Variable] = None,
               num_outer_dims: int = 1,
               name: Optional[Text] = None):
    """Creates a fixed-policy agent with no-op for training.

    Args:
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      action_spec: A nest of BoundedTensorSpec representing the actions.
      policy_class: a tf_policy.TFPolicy or py_policy.PyPolicy class to use as a
        policy.
      debug_summaries: A bool to gather debug summaries. Used to initialize the
        base class.
      summarize_grads_and_vars: If true, gradient summaries will be written.
      train_step_counter: An optional counter to increment every time the train
        op is run.  Defaults to the global_step. Used to initialize the
        base class.
      num_outer_dims: Used to initialize the base class.
      name: The name of this agent. All variables in this module will fall under
        that name. Defaults to the class name. Used to initialize the
        base class.
    """
    tf.Module.__init__(self, name=name)

    policy = policy_class(time_step_spec=time_step_spec,
                          action_spec=action_spec)

    collect_policy = policy

    super(FixedPolicyAgent, self).__init__(
        time_step_spec,
        action_spec,
        policy,
        collect_policy,
        train_sequence_length=None,
        debug_summaries=debug_summaries,
        summarize_grads_and_vars=summarize_grads_and_vars,
        train_step_counter=train_step_counter,
        num_outer_dims=num_outer_dims) 
Example #20
Source File: learner.py    From valan with Apache License 2.0
def _create_server(
    listen_address: Text,
    specs: common.ActorOutput,
    agent: base_agent.BaseAgent,
    queue: tf.queue.QueueBase,
    extra_variables: List[tf.Variable],
):
  """Starts server for communicating with actor(s).

  The learner server exposes the following two methods for the actor:
    enqueue: actors are expected to call this server method to submit their
      trajectories for learner. This method accepts a nested structure of
      tensors of type `ActorOutput`.
    variable_values: actors can call this server method to get the latest value
      of trainable variables as well as variables in `extra_variables`.

  Args:
    listen_address: The network address on which to listen.
    specs: A nested structure where each element is either a tensor or a
      TensorSpec.
    agent: An instance of `BaseAgent`.
    queue: An instance of `tf.queue.QueueBase`.
    extra_variables: A list of variables other than `agent.trainable_variables`
      to be sent via `variable_values` method.

  Returns:
    A server object.
  """
  logging.info('Creating gRPC server on address %s', listen_address)
  server = grpc.Server([listen_address])
  flat_specs = [
      tf.TensorSpec.from_spec(s, str(i))
      for i, s in enumerate(tf.nest.flatten(specs))
  ]

  @tf.function(input_signature=flat_specs)
  def enqueue(*tensors: common.ActorOutput):
    queue.enqueue(tensors)
    return []

  server.bind(enqueue, batched=False)

  @tf.function(input_signature=[])
  def variable_values():
    all_vars = copy.copy(agent.trainable_variables)
    all_vars += extra_variables
    return all_vars

  server.bind(variable_values, batched=False)

  return server 
Example #21
Source File: continuous_batched.py    From compression with Apache License 2.0
def __init__(self, prior, coding_rank, compression=False,
               likelihood_bound=1e-9, tail_mass=2**-8,
               range_coder_precision=12):
    """Initializer.

    Arguments:
      prior: A `tfp.distributions.Distribution` object. A density model fitting
        the marginal distribution of the bottleneck data with additive uniform
        noise, which is shared a priori between the sender and the receiver. For
        best results, the distribution should be flexible enough to have a
        unit-width uniform distribution as a special case, since this is the
        marginal distribution for bottleneck dimensions that are constant. The
        distribution parameters may not depend on data (they must be either
        variables or constants).
      coding_rank: Integer. Number of innermost dimensions considered a coding
        unit. Each coding unit is compressed to its own bit string, and the
        `bits()` method sums over each coding unit.
      compression: Boolean. If set to `True`, the range coding tables used by
        `compress()` and `decompress()` will be built on instantiation. If set
        to `False`, these two methods will not be accessible.
      likelihood_bound: Float. Lower bound for likelihood values, to prevent
        training instabilities.
      tail_mass: Float. Approximate probability mass which is range encoded with
        less precision, by using a Golomb-like code.
      range_coder_precision: Integer. Precision passed to the range coding op.

    Raises:
      RuntimeError: when attempting to instantiate an entropy model with
        `compression=True` and not in eager execution mode.
    """
    if coding_rank < prior.batch_shape.rank:
      raise ValueError(
          "`coding_rank` can't be smaller than batch rank of prior.")
    super().__init__(
        prior, coding_rank, compression=compression,
        likelihood_bound=likelihood_bound, tail_mass=tail_mass,
        range_coder_precision=range_coder_precision)

    quantization_offset = helpers.quantization_offset(prior)
    if self.compression:
      # Optimization: if the quantization offset is zero, we don't need to
      # subtract/add it when quantizing, and we don't need to serialize its
      # value. Note that this code will only work in eager mode.
      # TODO(jonycgn): Reconsider if this optimization is worth keeping once
      # the implementation is stable.
      if tf.executing_eagerly() and tf.reduce_all(
          tf.equal(quantization_offset, 0.)):
        quantization_offset = None
      else:
        quantization_offset = tf.broadcast_to(
            quantization_offset, self.prior_shape)
        quantization_offset = tf.Variable(
            quantization_offset, trainable=False, name="quantization_offset")
    self._quantization_offset = quantization_offset 
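
The trainable=False flag used for the offset keeps it out of gradient updates while leaving it tracked as checkpointable module state. A minimal sketch of the distinction, using a hypothetical module:

import tensorflow.compat.v2 as tf

class ToyModule(tf.Module):  # hypothetical module
  def __init__(self):
    super().__init__()
    self.offset = tf.Variable(tf.zeros([4]), trainable=False)

m = ToyModule()
print(len(m.trainable_variables))  # 0: excluded from training
print(len(m.variables))            # 1: still tracked and checkpointable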
Example #22
Source File: __init__.py    From language with Apache License 2.0
def get_tf_tensor(self, rel_name):
    """Get the Tensor that represents a relation.

    Args:
      rel_name: string naming a declared relation

    Returns:
      tf.SparseTensor

    Raises:
      RuntimeError: If the expression has no initial value.
    """
    if rel_name not in self._cached_tensor:
      if rel_name not in self._np_initval:
        raise RuntimeError('KG relation named %r has no initial value.' %
                           rel_name)
      m = self._np_initval[rel_name]
      n_rows, n_cols = m.shape
      if self.is_dense(rel_name):
        self._cached_tensor[rel_name] = tf.Variable(
            m, trainable=self.is_trainable(rel_name), name='nql/' + rel_name)
        self._initializers.append(self._cached_tensor[rel_name].initializer)

      else:
        data_m = np.transpose(np.vstack([m.row, m.col]))
        if not self.is_trainable(rel_name):
          sparse_tensor = tf.SparseTensor(data_m, m.data, [n_rows, n_cols])
        else:
          data_var_name = 'nql/%s_values' % rel_name
          data_var = tf.Variable(m.data, trainable=True, name=data_var_name)
          self._initializers.append(data_var.initializer)
          sparse_tensor = tf.SparseTensor(data_m, data_var, [n_rows, n_cols])
          self._declaration[rel_name].underlying_parameter = data_var
        self._cached_tensor[rel_name] = (sparse_tensor.indices,
                                         sparse_tensor.values,
                                         sparse_tensor.dense_shape)
    if self.is_dense(rel_name):
      return self._cached_tensor[rel_name]  # pytype: disable=bad-return-type
    else:
      return tf.SparseTensor(
          indices=self._cached_tensor[rel_name][0],
          values=self._cached_tensor[rel_name][1],
          dense_shape=self._cached_tensor[rel_name][2],
      ) 
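
The trainable sparse branch works because a SparseTensor's values can themselves be a variable. A standalone sketch, under the assumption that the loss is built inside the tape so the variable read is recorded:

import tensorflow.compat.v2 as tf

indices = tf.constant([[0, 0], [1, 2]], dtype=tf.int64)
values = tf.Variable([1.0, 2.0], trainable=True, name="sparse_values")
with tf.GradientTape() as tape:
  st = tf.SparseTensor(indices, values, dense_shape=[2, 3])
  loss = tf.reduce_sum(tf.sparse.sparse_dense_matmul(st, tf.ones([3, 1])))
print(tape.gradient(loss, values).numpy())  # [1. 1.]: gradients reach values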
Example #23
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def _check_algorithm(self,
                       func=None,
                       start_point=None,
                       gtol=1e-4,
                       expected_argmin=None):
    """Runs algorithm on given test case and verifies result."""
    val_grad_func = lambda x: tff.math.value_and_gradient(func, x)
    start_point = tf.constant(start_point, dtype=tf.float64)
    expected_argmin = np.array(expected_argmin, dtype=np.float64)

    f_call_ctr = tf.Variable(0, dtype=tf.int32)

    def val_grad_func_with_counter(x):
      with tf.compat.v1.control_dependencies(
          [tf.compat.v1.assign_add(f_call_ctr, 1)]):
        return val_grad_func(x)

    result = tff.math.optimizer.conjugate_gradient_minimize(
        val_grad_func_with_counter,
        start_point,
        tolerance=gtol,
        max_iterations=200)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    result = self.evaluate(result)
    f_call_ctr = self.evaluate(f_call_ctr)

    # Check that minimum is found.
    with self.subTest(name="Position"):
      self.assertAllClose(result.position, expected_argmin, rtol=1e-3,
                          atol=1e-3)
    # Check that gradient norm is below tolerance.
    grad_norm = np.max(result.objective_gradient)
    with self.subTest(name="GradientNorm"):
      self.assertLessEqual(grad_norm, 100 * gtol)
    # Check that number of function calls, declared by algorithm, is correct.
    with self.subTest(name="NumberOfEvals"):
      self.assertEqual(result.num_objective_evaluations, f_call_ctr)
    # Check returned function and gradient values.
    pos = tf.constant(result.position, dtype=tf.float64)
    f_at_pos, grad_at_pos = self.evaluate(val_grad_func(pos))
    with self.subTest(name="ObjectiveValue"):
      self.assertAllClose(result.objective_value, f_at_pos)
    with self.subTest(name="ObjectiveGradient"):
      self.assertAllClose(result.objective_gradient, grad_at_pos)
    # Check that all converged and none failed.
    with self.subTest(name="AllConverged"):
      self.assertTrue(np.all(result.converged))
    with self.subTest("NoneFailed"):
      self.assertFalse(np.any(result.failed))
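
The control_dependencies/assign_add plumbing above keeps the counter correct under TF1-style graph execution. In eager TF2 code, the same call counter can be a direct assign_add; a sketch, not part of the test:

import tensorflow.compat.v2 as tf

f_calls = tf.Variable(0, dtype=tf.int32)

def with_counter(fn):
  def wrapper(x):
    f_calls.assign_add(1)  # eager side effect; no control dependency needed
    return fn(x)
  return wrapper

square = with_counter(lambda x: x * x)
square(tf.constant(2.0))
print(f_calls.numpy())  # 1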