Python tensorflow.compat.v2.executing_eagerly() Examples

The following are 26 code examples of tensorflow.compat.v2.executing_eagerly(), drawn from open-source projects. The source file and project are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v2, or try the search function.
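As a quick orientation before the examples, here is a minimal sketch (illustrative only, assuming a standard TF2 installation) of what the function reports: True under TensorFlow 2's default eager execution, and False inside code traced by tf.function.

import tensorflow.compat.v2 as tf

print(tf.executing_eagerly())  # True: TF2 executes eagerly by default.

@tf.function
def traced():
  # During tf.function tracing the code runs in graph mode, so this is False.
  return tf.constant(tf.executing_eagerly())

print(traced().numpy())  # False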
Example #1
Source File: utils_test.py    From hub with Apache License 2.0
def test_compute_distance_matrix_loo_cosine(self):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")
    np.random.seed(seed=self.random_seed)
    x_train = np.random.rand(self.train_samples, self.dim)

    d = utils.compute_distance_matrix_loo(x_train, measure="cosine")
    self.assertEqual(d.shape, (self.train_samples, self.train_samples))

    for i in range(self.train_samples):
      for j in range(self.train_samples):
        if i == j:
          self.assertEqual(float("inf"), d[i, j])
        else:
          d_ij = spdist.cosine(x_train[i, :], x_train[j, :])
          self.assertAlmostEqual(d_ij, d[i, j], places=5) 
Example #2
Source File: utils_test.py    From hub with Apache License 2.0
def test_knn_errorrate_loo_multik(self):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")
    np.random.seed(seed=self.random_seed)
    x_train = np.random.rand(self.train_samples, self.dim)

    d = utils.compute_distance_matrix_loo(x_train)

    y_train = np.random.randint(self.classes, size=self.train_samples)

    ks_input = [5, 1, 5, 3]
    ks = [5, 3, 1]
    vals = []
    for val in ks:
      err = utils.knn_errorrate_loo(d, y_train, k=val)
      vals.append(err)

    comp = utils.knn_errorrate_loo(d, y_train, k=ks_input)

    self.assertEqual(len(vals), len(comp))
    for k, v in enumerate(comp):
      self.assertAlmostEqual(v, vals[k], places=5)
Example #3
Source File: utils_test.py    From hub with Apache License 2.0
def knn_errorrate_loo(self, k):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")
    x_train = np.random.rand(self.train_samples, self.dim)

    d = utils.compute_distance_matrix_loo(x_train)

    y_train = np.random.randint(self.classes, size=self.train_samples)

    err = utils.knn_errorrate_loo(d, y_train, k=k)

    cnt = 0.0
    for i in range(self.train_samples):
      knn = KNeighborsClassifier(n_neighbors=k)
      mask = [True]*self.train_samples
      mask[i] = False
      knn.fit(x_train[mask], y_train[mask])
      y_pred = knn.predict(x_train[i].reshape(-1, self.dim))
      if y_pred != y_train[i]:
        cnt += 1

    self.assertAlmostEqual(err, cnt / self.train_samples, places=5) 
Example #4
Source File: utils_test.py    From hub with Apache License 2.0
def test_knn_errorrate_multik(self):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")
    np.random.seed(seed=self.random_seed)
    x_train = np.random.rand(self.train_samples, self.dim)
    x_test = np.random.rand(self.test_samples, self.dim)

    d = utils.compute_distance_matrix(x_train, x_test)

    y_test = np.random.randint(self.classes, size=self.test_samples)
    y_train = np.random.randint(self.classes, size=self.train_samples)

    ks_input = [5, 1, 5, 3]
    ks = [5, 3, 1]
    vals = []
    for val in ks:
      err = utils.knn_errorrate(d, y_train, y_test, k=val)
      vals.append(err)

    comp = utils.knn_errorrate(d, y_train, y_test, k=ks_input)

    self.assertEqual(len(vals), len(comp))
    for k, v in enumerate(comp):
      self.assertAlmostEqual(v, vals[k], places=5)
Example #5
Source File: utils_test.py    From hub with Apache License 2.0
def knn_errorrate(self, k):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")
    x_train = np.random.rand(self.train_samples, self.dim)
    x_test = np.random.rand(self.test_samples, self.dim)

    d = utils.compute_distance_matrix(x_train, x_test)

    y_test = np.random.randint(self.classes, size=self.test_samples)
    y_train = np.random.randint(self.classes, size=self.train_samples)

    err = utils.knn_errorrate(d, y_train, y_test, k=k)

    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(x_train, y_train)
    y_pred = knn.predict(x_test)
    acc = metrics.accuracy_score(y_test, y_pred)

    self.assertAlmostEqual(1.0 - err, acc, places=5) 
Example #6
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_dynamic_shapes(self):
    """Can build op with dynamic shapes in graph mode."""
    if tf.executing_eagerly():
      return
    minimum = np.array([1.0, 1.0])
    scales = np.array([2.0, 3.0])

    @tff.math.make_val_and_grad_fn
    def quadratic(x):
      return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)

    # Test with a vector of unknown dimension.
    start = tf.compat.v1.placeholder(tf.float32, shape=[None])
    op = tff.math.optimizer.conjugate_gradient_minimize(
        quadratic, initial_position=start, tolerance=1e-8)
    self.assertFalse(op.position.shape.is_fully_defined())

    with self.cached_session() as session:
      results = session.run(op, feed_dict={start: [0.6, 0.8]})
    self.assertTrue(results.converged)
    self.assertLessEqual(_norm(results.objective_gradient), 1e-8)
    self.assertArrayNear(results.position, minimum, 1e-5) 
Example #7
Source File: utils_test.py    From hub with Apache License 2.0
def test_compute_distance_matrix_loo(self):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")
    np.random.seed(seed=self.random_seed)
    x_train = np.random.rand(self.train_samples, self.dim)

    d = utils.compute_distance_matrix_loo(x_train)
    self.assertEqual(d.shape, (self.train_samples, self.train_samples))

    for i in range(self.train_samples):
      for j in range(self.train_samples):
        if i == j:
          self.assertEqual(float("inf"), d[i, j])
        else:
          d_ij = np.linalg.norm(x_train[j, :] - x_train[i, :])**2
          self.assertAlmostEqual(d_ij, d[i, j], places=5) 
Example #8
Source File: test_utils_test.py    From datasets with Apache License 2.0
def test_run_in_graph_and_eager_modes(self):
    l = []
    def inc(self, with_brackets):
      del self  # self argument is required by run_in_graph_and_eager_modes.
      mode = 'eager' if tf.executing_eagerly() else 'graph'
      with_brackets = 'with_brackets' if with_brackets else 'without_brackets'
      l.append((with_brackets, mode))

    f = test_utils.run_in_graph_and_eager_modes(inc)
    f(self, with_brackets=False)
    f = test_utils.run_in_graph_and_eager_modes()(inc)
    f(self, with_brackets=True)

    self.assertEqual(len(l), 4)
    self.assertEqual(set(l), {
        ('with_brackets', 'graph'),
        ('with_brackets', 'eager'),
        ('without_brackets', 'graph'),
        ('without_brackets', 'eager'),
    }) 
Example #9
Source File: test_utils_test.py    From datasets with Apache License 2.0
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
    modes = []
    mode_name = lambda: 'eager' if tf.executing_eagerly() else 'graph'

    class ExampleTest(test_case.TestCase):

      def runTest(self):
        pass

      def setUp(self):
        super(ExampleTest, self).setUp()
        modes.append('setup_' + mode_name())

      @test_utils.run_in_graph_and_eager_modes
      def testBody(self):
        modes.append('run_' + mode_name())

    e = ExampleTest()
    e.setUp()
    e.testBody()

    self.assertEqual(modes[0:2], ['setup_eager', 'run_eager'])
    self.assertEqual(modes[2:], ['setup_graph', 'run_graph']) 
Example #10
Source File: __init__.py    From language with Apache License 2.0
def serialize_trained(self,
                        output_file,
                        session = None):
    """Save the current value of all trainable relations in a file.

    Args:
      output_file: Filename string or FileLike object.
      session: (DEPRECATED) a tf.Session used to find values of trained
        SparseTensors
    """
    trained_rels = [
        rel_name for rel_name in self.get_relation_names()
        if self.is_trainable(rel_name)
    ]
    sparse_tensors = [self.get_tf_tensor(rel_name) for rel_name in trained_rels]
    if tf.executing_eagerly():
      trained_dict = {
          name: tensor for name, tensor in zip(trained_rels, sparse_tensors)
      }
    else:
      if session is None:
        session = tf.get_default_session()
      trained_dict = {
          name: tensor
          for name, tensor in zip(trained_rels, session.run(sparse_tensors))
      }
    io.write_sparse_tensor_dict(output_file, trained_dict) 
Example #11
Source File: search_test.py    From hub with Apache License 2.0
def test_run_e2e(self, mock_tfds_load):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")
    modules = self._create_image_models()
    #tfds.load = fake_image_dataset
    with flagsaver.flagsaver(
        dataset="cifar100",
        module=modules,
    ):
      search.main([]) 
Example #12
Source File: utils_test.py    From hub with Apache License 2.0
def test_knn_errorrate_loo(self):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")
    np.random.seed(seed=self.random_seed)
    ks = [1, 3, 5]
    for idx, val in enumerate(ks):
      with self.subTest(i=idx):
        self.knn_errorrate_loo(val) 
Example #13
Source File: utils_test.py    From hub with Apache License 2.0
def test_compute_distance_matrix_cosine(self):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")
    np.random.seed(seed=self.random_seed)
    x_train = np.random.rand(self.train_samples, self.dim)
    x_test = np.random.rand(self.test_samples, self.dim)

    d = utils.compute_distance_matrix(x_train, x_test, measure="cosine")
    self.assertEqual(d.shape, (self.test_samples, self.train_samples))

    for i in range(self.test_samples):
      for j in range(self.train_samples):
        d_ij = spdist.cosine(x_test[i, :], x_train[j, :])
        self.assertAlmostEqual(d_ij, d[i, j], places=5) 
Example #14
Source File: utils_test.py    From hub with Apache License 2.0
def test_compute_distance_matrix(self):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")
    np.random.seed(seed=self.random_seed)
    x_train = np.random.rand(self.train_samples, self.dim)
    x_test = np.random.rand(self.test_samples, self.dim)

    d = utils.compute_distance_matrix(x_train, x_test)
    self.assertEqual(d.shape, (self.test_samples, self.train_samples))

    for i in range(self.test_samples):
      for j in range(self.train_samples):
        d_ij = np.linalg.norm(x_train[j, :] - x_test[i, :])**2
        self.assertAlmostEqual(d_ij, d[i, j], places=5) 
Example #15
Source File: test_helpers.py    From graphics with Apache License 2.0
def generate_preset_test_rotation_matrices_3d():
  """Generates pre-set test 3d rotation matrices."""
  angles = generate_preset_test_euler_angles()
  preset_rotation_matrix = rotation_matrix_3d.from_euler(angles)
  if tf.executing_eagerly():
    return np.array(preset_rotation_matrix)
  with tf.compat.v1.Session() as sess:
    return np.array(sess.run([preset_rotation_matrix])) 
Example #16
Source File: periods.py    From tf-quant-finance with Apache License 2.0
def __repr__(self):
    output = "PeriodTensor: shape={}".format(self.shape)
    if tf.executing_eagerly():
      return output + ", quantities={}".format(repr(self._quantity.numpy()))
    return output 
Example #17
Source File: linear_interpolation_test.py    From tf-quant-finance with Apache License 2.0
def test_valid_gradients(self, optimize_for_tpu):
    """Tests none of the gradients is nan."""

    # In this example, `x[0]` and `x[1]` are both less than or equal to
    # `x_data[0]`. `x[-2]` and `x[-1]` are both greater than or equal to
    # `x_data[-1]`. They are set up this way to test none of the tf.where
    # branches of the implementation have any nan. An unselected nan could still
    # propagate through gradient calculation with the end result being nan.
    x = [[-10.0, -1.0, 1.0, 3.0, 6.0, 7.0], [8.0, 15.0, 18.0, 25.0, 30.0, 35.0]]
    x_data = [[-1.0, 2.0, 6.0], [8.0, 18.0, 30.0]]

    def _value_helper_fn(y_data):
      """A helper function that returns sum of squared interplated values."""

      interpolated_values = tff.math.interpolation.linear.interpolate(
          x, x_data, y_data,
          optimize_for_tpu=optimize_for_tpu,
          dtype=tf.float64)
      return tf.reduce_sum(tf.math.square(interpolated_values))

    y_data = tf.convert_to_tensor([[10.0, -1.0, -5.0], [7.0, 9.0, 20.0]],
                                  dtype=tf.float64)
    if tf.executing_eagerly():
      with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(y_data)
        value = _value_helper_fn(y_data=y_data)
        gradients = tape.gradient(value, y_data)
    else:
      value = _value_helper_fn(y_data=y_data)
      gradients = tf.gradients(value, y_data)[0]

    gradients = tf.convert_to_tensor(gradients)

    self.assertFalse(self.evaluate(tf.reduce_any(tf.math.is_nan(gradients)))) 
Example #18
Source File: test_helpers.py    From graphics with Apache License 2.0
def generate_preset_test_quaternions():
  """Generates pre-set test quaternions."""
  angles = generate_preset_test_euler_angles()
  preset_quaternion = quaternion.from_euler(angles)
  if tf.executing_eagerly():
    return np.array(preset_quaternion)
  with tf.compat.v1.Session() as sess:
    return np.array(sess.run([preset_quaternion])) 
Example #19
Source File: test_helpers.py    From graphics with Apache License 2.0
def generate_preset_test_axis_angle():
  """Generates pre-set test rotation matrices."""
  angles = generate_preset_test_euler_angles()
  axis, angle = axis_angle.from_euler(angles)
  if tf.executing_eagerly():
    return np.array(axis), np.array(angle)
  with tf.compat.v1.Session() as sess:
    return np.array(sess.run([axis])), np.array(sess.run([angle])) 
Example #20
Source File: test_helpers.py    From graphics with Apache License 2.0
def generate_preset_test_rotation_matrices_2d():
  """Generates pre-set test 2d rotation matrices."""
  angles = generate_preset_test_euler_angles(dimensions=1)
  preset_rotation_matrix = rotation_matrix_2d.from_euler(angles)
  if tf.executing_eagerly():
    return np.array(preset_rotation_matrix)
  with tf.compat.v1.Session() as sess:
    return np.array(sess.run([preset_rotation_matrix])) 
Example #21
Source File: sequential.py    From agents with Apache License 2.0
def __init__(self,
               layers: typing.Sequence[tf.keras.layers.Layer],
               input_spec: types.NestedTensorSpec = None,
               name: typing.Text = None):
    """Create a Sequential Network.

    Args:
      layers: A list or tuple of layers to compose.  Any layers that
        are subclasses of `tf.keras.layers.{RNN,LSTM,GRU,...}` are
        wrapped in `tf_agents.keras_layers.RNNWrapper`.
      input_spec: (Optional.) A nest of `tf.TypeSpec` representing the
        input observations to the first layer.
      name: (Optional.) Network name.

    Raises:
      ValueError: If `layers` is empty.
      ValueError: If `layers[0]` is a generic Keras layer (not a TF-Agents
        network) and `input_spec is None`.
      TypeError: If any of the layers are not instances of keras `Layer`.
      RuntimeError: If not `tf.executing_eagerly()`; as this is required to
        be able to create deep copies of layers in `layers`.
    """
    if not tf.executing_eagerly():
      raise RuntimeError(
          'Not executing eagerly - cannot make deep copies of `layers`.')
    if not layers:
      raise ValueError(
          '`layers` must not be empty; saw: {}'.format(layers))
    for layer in layers:
      if not isinstance(layer, tf.keras.layers.Layer):
        raise TypeError(
            'Expected all layers to be instances of keras Layer, but saw'
            ': \'{}\''.format(layer))

    layers = [
        rnn_wrapper.RNNWrapper(layer) if isinstance(layer, tf.keras.layers.RNN)
        else layer
        for layer in layers
    ]

    state_spec = _infer_state_specs(layers)

    # Now we remove all of the empty state specs so if there are no RNN layers,
    # our state spec is empty.  layer_has_state is a list of bools telling us
    # which layers have a state and which don't.
    # TODO(b/158804957): tf.function changes "s in ((),)" to a tensor bool expr.
    # pylint: disable=literal-comparison
    layer_has_state = [s is not () for s in state_spec]
    state_spec = tuple(s for s in state_spec if s is not ())
    # pylint: enable=literal-comparison
    super(Sequential, self).__init__(input_tensor_spec=input_spec,
                                     state_spec=state_spec,
                                     name=name)
    self._sequential_layers = layers
    self._layer_has_state = layer_has_state 
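A short usage sketch may help here; the import path, layer sizes, and input spec below are illustrative assumptions based on the TF-Agents package layout, not part of the example above.

# Hypothetical usage sketch for the Sequential network above (tf_agents).
import tensorflow as tf
from tf_agents.networks import sequential

# Must run eagerly; __init__ raises RuntimeError otherwise.
net = sequential.Sequential(
    [tf.keras.layers.Dense(32, activation='relu'),
     tf.keras.layers.Dense(2)],
    input_spec=tf.TensorSpec(shape=[4], dtype=tf.float32))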
Example #22
Source File: continuous_batched.py    From compression with Apache License 2.0
def __init__(self, prior, coding_rank, compression=False,
               likelihood_bound=1e-9, tail_mass=2**-8,
               range_coder_precision=12):
    """Initializer.

    Arguments:
      prior: A `tfp.distributions.Distribution` object. A density model fitting
        the marginal distribution of the bottleneck data with additive uniform
        noise, which is shared a priori between the sender and the receiver. For
        best results, the distribution should be flexible enough to have a
        unit-width uniform distribution as a special case, since this is the
        marginal distribution for bottleneck dimensions that are constant. The
        distribution parameters may not depend on data (they must be either
        variables or constants).
      coding_rank: Integer. Number of innermost dimensions considered a coding
        unit. Each coding unit is compressed to its own bit string, and the
        `bits()` method sums over each coding unit.
      compression: Boolean. If set to `True`, the range coding tables used by
        `compress()` and `decompress()` will be built on instantiation. If set
        to `False`, these two methods will not be accessible.
      likelihood_bound: Float. Lower bound for likelihood values, to prevent
        training instabilities.
      tail_mass: Float. Approximate probability mass which is range encoded with
        less precision, by using a Golomb-like code.
      range_coder_precision: Integer. Precision passed to the range coding op.

    Raises:
      RuntimeError: when attempting to instantiate an entropy model with
        `compression=True` and not in eager execution mode.
    """
    if coding_rank < prior.batch_shape.rank:
      raise ValueError(
          "`coding_rank` can't be smaller than batch rank of prior.")
    super().__init__(
        prior, coding_rank, compression=compression,
        likelihood_bound=likelihood_bound, tail_mass=tail_mass,
        range_coder_precision=range_coder_precision)

    quantization_offset = helpers.quantization_offset(prior)
    if self.compression:
      # Optimization: if the quantization offset is zero, we don't need to
      # subtract/add it when quantizing, and we don't need to serialize its
      # value. Note that this code will only work in eager mode.
      # TODO(jonycgn): Reconsider if this optimization is worth keeping once
      # the implementation is stable.
      if tf.executing_eagerly() and tf.reduce_all(
          tf.equal(quantization_offset, 0.)):
        quantization_offset = None
      else:
        quantization_offset = tf.broadcast_to(
            quantization_offset, self.prior_shape)
        quantization_offset = tf.Variable(
            quantization_offset, trainable=False, name="quantization_offset")
    self._quantization_offset = quantization_offset 
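As a rough instantiation sketch (the NoisyNormal prior is an assumption; any tfp-style distribution with additive uniform noise satisfies the documented contract):

# Hedged sketch: building the entropy model above. `tfc.NoisyNormal` is
# assumed to be available in tensorflow_compression; substitute any prior
# matching the docstring's requirements.
import tensorflow_compression as tfc

prior = tfc.NoisyNormal(loc=0.0, scale=1.0)
# compression=True builds the range coding tables, which per the docstring
# requires eager execution.
entropy_model = ContinuousBatchedEntropyModel(
    prior, coding_rank=1, compression=True)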
Example #23
Source File: __init__.py    From language with Apache License 2.0
def eval(self,
           session = None,
           as_dicts = True,
           as_top = 0,
           simplify_unitsize_minibatch=True,
           feed_dict = None):
    """Evaluate the Tensorflow expression associated with this NeuralQueryExpression.

    Args:
      session: (DEPRECATED) tf.Session used to evaluate if not in eager mode
      as_dicts: if true, convert each row of the minibatch to a dictionary
        where the keys are entity names, and the values are weights for those
        entities.  Each 'row dictionary' is returned in an array. If as_dicts is
        false, then just return the result, which is typically a numpy array.
      as_top: if positive, return a list of the top k-scoring items from the
        as_dicts output.
      simplify_unitsize_minibatch: if true and as_dicts is also true, and the
        minibatch size is 1, then just return the single row dictionary.
      feed_dict: (DEPRECATED) dictionary mapping placeholder names to initial
        values

    Returns:
      Result of evaluating the underlying NeuralQueryExpression, in a format
      determined by as_dicts, simplify_unitsize_minibatch, and as_top.
    """
    if tf.executing_eagerly():
      if session is not None:
        raise ValueError('Passed in session while in eager mode.')
      result = self.tf.numpy()
    else:
      result = self.tf.eval(feed_dict=feed_dict, session=session)
    if as_top > 0:
      return self.context.as_top_k(
          as_top,
          result,
          self.type_name,
          simplify_unitsize_minibatch=simplify_unitsize_minibatch)
    elif as_dicts:
      return self.context.as_dicts(
          result,
          self.type_name,
          simplify_unitsize_minibatch=simplify_unitsize_minibatch)
    else:
      return result 
Example #24
Source File: gradient.py    From tf-quant-finance with Apache License 2.0
def value_and_gradient(f,
                       xs,
                       output_gradients=None,
                       use_gradient_tape=False,
                       unconnected_gradients=None,
                       name=None):
  """Computes `f(*xs)` and its gradients wrt to `*xs`.

  Args:
    f: Python `callable` to be differentiated. If `f` returns a scalar, this
      scalar will be differentiated. If `f` returns a tensor or list of tensors,
      by default a scalar will be computed by adding all their values to produce
      a single scalar. If desired, the tensors can be elementwise multiplied by
      the tensors passed as the `dy` keyword argument to the returned gradient
      function.
    xs: Python list of parameters of `f` for which to differentiate. (Can also
      be a single `Tensor`.)
    output_gradients: A `Tensor` or list of `Tensor`s the same size as the
      result `ys = f(*xs)` and holding the gradients computed for each `y` in
      `ys`. This argument is forwarded to the underlying gradient implementation
      (i.e., either the `grad_ys` argument of `tf.gradients` or the
      `output_gradients` argument of `tf.GradientTape.gradient`).
    use_gradient_tape: Python `bool` indicating that `tf.GradientTape` should be
      used regardless of `tf.executing_eagerly()` status.
      Default value: `False`.
    unconnected_gradients: An enum `tf.UnconnectedGradients` which specifies the
      gradient value returned when the given input tensors are unconnected.
      Default value: `None`, which maps to `tf.UnconnectedGradients.NONE`.
    name: Python `str` name prefixed to ops created by this function.
      Default value: `None` (i.e., `'value_and_gradient'`).

  Returns:
    A tuple of two elements. The first one is a `Tensor` representing the value
    of the function at `xs` and the second one is either a `Tensor` or a list of
    `Tensor`s representing the gradient of `f(*xs)` wrt `xs`.
    y: `y = f(*xs)`.
    dydx: Gradient of `y` wrt each of `xs`.
  """
  unconnected_gradients = unconnected_gradients or tf.UnconnectedGradients.NONE
  xs, is_xs_list_like = _prepare_args(xs)
  with tf.name_scope(name or "value_and_gradient"):
    if tf.executing_eagerly() or use_gradient_tape:
      with tf.GradientTape() as tape:
        for x in xs:
          tape.watch(x)
        y = f(*xs)
      grad = tape.gradient(y, xs, output_gradients=output_gradients,
                           unconnected_gradients=unconnected_gradients)
    else:
      y = f(*xs)
      grad = tf.gradients(ys=y, xs=xs, grad_ys=output_gradients,
                          unconnected_gradients=unconnected_gradients)
    if is_xs_list_like:
      return y, grad
    else:
      return y, grad[0] 
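A minimal usage sketch of value_and_gradient (illustrative; the quadratic f below is not from the source):

import tensorflow.compat.v2 as tf

def f(x):
  return tf.reduce_sum(x ** 2)

x = tf.constant([1.0, 2.0])
y, dydx = value_and_gradient(f, x)
# Eagerly this takes the GradientTape branch: y == 5.0, dydx == [2.0, 4.0].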
Example #25
Source File: gradient.py    From tf-quant-finance with Apache License 2.0
def gradients(func_or_y, xs, output_gradients=None, use_gradient_tape=False,
              unconnected_gradients=None,
              name=None):
  """Computes the gradients of `func_or_y` wrt to `*xs`.

  Args:
    func_or_y: Either a `Tensor` connected to the input `x` or a Python callable
      accepting one `Tensor` of shape of `x` and returning a `Tensor` of any
      shape. The function whose gradient is to be computed. If eagerly
      executing, can only be a callable, i.e., one should not supply a Tensor
      in eager mode.
    xs: Python list of parameters of `f` for which to differentiate. (Can also
      be a single `Tensor`.)
    output_gradients: A `Tensor` or list of `Tensor`s the same size as the
      result `ys = f(*xs)` and holding the gradients computed for each `y` in
      `ys`. This argument is forwarded to the underlying gradient implementation
      (i.e., either the `grad_ys` argument of `tf.gradients` or the
      `output_gradients` argument of `tf.GradientTape.gradient`).
      Default value: `None` which maps to a ones-like `Tensor` of `ys`.
    use_gradient_tape: Python `bool` indicating that `tf.GradientTape` should be
      used regardless of `tf.executing_eagerly()` status.
      Default value: `False`.
    unconnected_gradients: An enum `tf.UnconnectedGradients` which specifies the
      gradient value returned when the given input tensors are unconnected.
      Default value: `None`, which maps to `tf.UnconnectedGradients.NONE`.
    name: Python `str` name prefixed to ops created by this function.
      Default value: `None` (i.e., 'gradients').

  Returns:
    A `Tensor` with the gradient of `y` wrt each of `xs` or a list of `Tensor`s
    if `xs` is a list.
  """
  unconnected_gradients = unconnected_gradients or tf.UnconnectedGradients.NONE
  f = _prepare_func(func_or_y)
  with tf.name_scope(name or "gradients"):
    xs, is_xs_list_like = _prepare_args(xs)
    if not tf.executing_eagerly() and not use_gradient_tape:
      y = f(*xs)
      grad = tf.gradients(y, xs, grad_ys=output_gradients,
                          unconnected_gradients=unconnected_gradients)
    else:
      if not callable(func_or_y):
        raise ValueError("`func_or_y` should be a callable in eager mode or "
                         "when `tf.GradientTape` is used.")
      with tf.GradientTape() as tape:
        for x in xs:
          tape.watch(x)
        y = f(*xs)
      grad = tape.gradient(y, xs, output_gradients=output_gradients,
                           unconnected_gradients=unconnected_gradients)
    if is_xs_list_like:
      return grad
    else:
      return grad[0] 
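To illustrate the callable-vs-Tensor distinction documented above (a hedged sketch, not from the source):

import tensorflow.compat.v2 as tf

x = tf.constant([1.0, 2.0])
# Eagerly (or with use_gradient_tape=True), func_or_y must be a callable:
g = gradients(lambda t: tf.reduce_sum(t ** 2), x)  # [2.0, 4.0]
# Passing an already-built Tensor for func_or_y only works in graph mode,
# where tf.gradients can walk the existing graph from y back to xs.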
Example #26
Source File: extensions.py    From trax with Apache License 2.0
def dataset_as_numpy(dataset):
  """Converts a `tf.data.Dataset` to an iterable of ndarrays.

  `dataset_as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
  and `tf.Tensor`s to iterables of ndarrays and ndarrays, respectively. This
  function must be run in eager mode outside tf.function.

  Args:
    dataset: a possibly nested structure of `tf.data.Dataset`s and/or
      `tf.Tensor`s.

  Returns:
    A structure matching `dataset` where `tf.data.Dataset`s are converted to
    generators of ndarrays and `tf.Tensor`s are converted to ndarrays.
  """
  if not tf.executing_eagerly():
    raise ValueError(
        "dataset_as_numpy must be run in eager mode outside tf.function")
  nested_ds = dataset
  del dataset

  # Flatten
  flat_ds = tf.nest.flatten(nested_ds)
  flat_np = []

  # Type check for Tensors and Datasets
  for ds_el in flat_ds:
    if not isinstance(ds_el, (tf.Tensor, tf.data.Dataset)):
      types = tf.nest.map_structure(type, nested_ds)
      raise ValueError("Arguments to dataset_as_numpy must be (possibly nested "
                       "structure of) tf.Tensors or tf.data.Datasets. Got: %s" %
                       types)

  for ds_el in flat_ds:
    if isinstance(ds_el, tf.Tensor):
      np_el = tf_np.asarray(ds_el)
    elif isinstance(ds_el, tf.data.Dataset):
      np_el = _eager_dataset_iterator(ds_el)
    else:
      assert False
    flat_np.append(np_el)

  return tf.nest.pack_sequence_as(nested_ds, flat_np)
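A minimal usage sketch (illustrative only; requires eager mode, as enforced above):

import tensorflow.compat.v2 as tf

ds = tf.data.Dataset.from_tensor_slices([1, 2, 3])
for arr in dataset_as_numpy(ds):
  print(arr)  # Prints the ndarray for each dataset element: 1, 2, 3.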

