Python tensorflow.python.eager.context.eager_mode() Examples

The following are 10 code examples of tensorflow.python.eager.context.eager_mode(), drawn from open-source projects. Each example is attributed to its original project and source file. You may also want to check out the other available functions and classes of the tensorflow.python.eager.context module.
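As a quick orientation before the examples: eager_mode() is a context manager that switches TensorFlow into eager execution for the enclosed block. A minimal sketch, assuming a TensorFlow 1.x install where graph mode is the default:

import tensorflow as tf
from tensorflow.python.eager import context

# Minimal sketch: inside the block, ops execute immediately and
# tensors hold concrete values.
with context.eager_mode():
    x = tf.constant([1.0, 2.0])
    print(x.numpy())  # [1. 2.]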
Example #1
Source File: conftest.py    From larq with Apache License 2.0
def keras_should_run_eagerly(request):
    """Fixture to run in graph and two eager modes.

    The modes are:
    - Graph mode
    - TensorFlow eager and Keras eager
    - TensorFlow eager and Keras not eager

    TensorFlow's eager `context` selects graph or eager mode; the yielded
    value is True if Keras should run eagerly.
    """

    if request.param == "graph":
        if version.parse(tf.__version__) >= version.parse("2"):
            pytest.skip("Skipping graph mode for TensorFlow 2+.")

        with context.graph_mode():
            yield
    else:
        with context.eager_mode():
            yield request.param == "tf_keras_eager" 
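Note that the snippet above omits the `@pytest.fixture` decorator; presumably the fixture is registered with a `params` list covering the three modes. A hypothetical registration ("graph" and "tf_keras_eager" appear in the code above; the third param name is an assumption):

# Hypothetical registration; "tf_eager" is an assumed name for the
# "TensorFlow eager and Keras not eager" mode.
@pytest.fixture(params=["graph", "tf_eager", "tf_keras_eager"])
def keras_should_run_eagerly(request):
    ...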
Example #2
Source File: dataset_test.py    From spotify-tensorflow with Apache License 2.0
def run_in_eager(f):
    @wraps(f)
    def wrapper(*args, **kwds):
        with eager_mode():
            return f(*args, **kwds)
    return wrapper
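A hypothetical use of the decorator above on an ordinary test function (assumes `import tensorflow as tf`):

@run_in_eager
def test_addition():
    # tf.add returns a concrete EagerTensor inside the eager block
    assert tf.add(1, 2).numpy() == 3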
Example #3
Source File: conftest.py    From larq with Apache License 2.0
def eager_mode():
    """pytest fixture for running test in eager mode"""
    with context.eager_mode():
        yield 
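A hypothetical test that requests the fixture by name; pytest injects it, so the test body runs inside the eager context (assumes `import tensorflow as tf`):

def test_executes_eagerly(eager_mode):
    assert tf.executing_eagerly()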
Example #4
Source File: conftest.py    From larq with Apache License 2.0
def eager_and_graph_mode(request):
    """pytest fixture for running test in eager and graph mode"""
    if request.param == "graph":
        with context.graph_mode():
            with tf.compat.v1.Session().as_default():
                yield request.param
                tf.keras.backend.clear_session()
    else:
        with context.eager_mode():
            yield request.param 
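A hypothetical test using the fixture above; pytest runs it once per parametrized mode, and the yielded param tells the test which mode is active (assumes `import tensorflow as tf`):

def test_mode_matches_param(eager_and_graph_mode):
    assert tf.executing_eagerly() == (eager_and_graph_mode != "graph")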
Example #5
Source File: test_case.py    From transform with Apache License 2.0
def _eager_function_handler(input_signature):
  """Run the given function in eager mode.

  Args:
    input_signature: A possibly nested sequence of `tf.TensorSpec` objects
      specifying the shapes and dtypes of the Tensors that will be supplied to
      this function.

  Returns:
    A decorator; the decorated function is run eagerly, with its inputs
      wrapped as constants according to `input_signature`.
  """
  def wrapper(fn):
    """Decorator that runs decorated function in eager mode."""
    def _run_eagerly(*inputs):  # pylint: disable=missing-docstring
      with context.eager_mode():
        constants = [_wrap_as_constant(value, tensor_spec)
                     for value, tensor_spec in zip(inputs, input_signature)]
        output = fn(*constants)
        if hasattr(output, '_make'):
          return output._make([tensor.numpy() for tensor in output])
        if isinstance(output, (tuple, list)):
          return [tensor.numpy() for tensor in output]
        else:
          return output.numpy()
    return _run_eagerly
  return wrapper 
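A hypothetical use of the handler above, assuming `_wrap_as_constant` turns each NumPy input into a constant tensor matching its `tf.TensorSpec` (assumes `import numpy as np` and `import tensorflow as tf`):

@_eager_function_handler([tf.TensorSpec(shape=[2], dtype=tf.float32)])
def double(x):
  return x * 2.0

print(double(np.array([1., 2.], dtype=np.float32)))  # -> [2. 4.]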
Example #6
Source File: regression_head_test.py    From estimator with Apache License 2.0
def test_weighted_multi_batch_eval_eager(self):
    """1d label, 1 example, 3 batches."""
    with context.eager_mode():
      head = head_lib.RegressionHead(weight_column='label_weights')
      self.assertEqual(1, head.logits_dimension)

      logits = np.array(((45.,), (41.,), (44.,)))
      features = {
          'x': np.array(((42.,), (43.,), (44.,))),
          'label_weights': np.array(((1.,), (.1,), (1.5,))),
          # 'logits' is not a feature, but we use `tf.data.Dataset` to make it
          # as a `tensor` (required by `update_metrics`), and access it
          # via `features['logits']` in `update_metrics`
          'logits': logits
      }
      labels = np.array(((35.,), (42.,), (45.,)))

      # losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]
      # loss = sum(losses) = 100+.1+1.5 = 101.6
      # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
      expected_metrics = {
          metric_keys.MetricKeys.LOSS_MEAN: 39.076923,
          metric_keys.MetricKeys.PREDICTION_MEAN:
              (45 + 41 * 0.1 + 44 * 1.5) / 2.6,
          metric_keys.MetricKeys.LABEL_MEAN: (35 + 42 * 0.1 + 45 * 1.5) / 2.6,
      }
      dataset = tf.compat.v1.data.Dataset.from_tensor_slices((features, labels))
      dataset = dataset.batch(1)
      eval_metrics = head.metrics()
      for (features, labels) in dataset:
        logits = features['logits']
        updated_metrics = head.update_metrics(eval_metrics, features, logits,
                                              labels)
      # Assert metrics after iterating over all batches.
      self.assertAllClose(
          expected_metrics,
          {k: updated_metrics[k].result() for k in updated_metrics}) 
Example #7
Source File: graph_callable.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __init__(self, name, initializer, shape, dtype, trainable):
    self.name = name
    if initializer is None:
      initializer = _default_initializer(name, shape, dtype)
    initial_value = lambda: initializer(shape, dtype=dtype)

    with context.eager_mode():
      self.variable = resource_variable_ops.ResourceVariable(
          initial_value=initial_value, name=name, dtype=dtype,
          trainable=trainable)
    self.shape = shape
    self.dtype = dtype
    self.placeholder = None
    self.trainable = trainable 
Example #8
Source File: adjoint.py    From tfdiffeq with MIT License
def odeint_adjoint(func, y0, t, rtol=1e-6, atol=1e-12, method=None, options=None):
    # We need this in order to access the variables inside this module,
    # since we have no other way of getting variables along the execution path.
    if not isinstance(func, tf.keras.Model):
        raise ValueError('func is required to be an instance of tf.keras.Model')

    with eager_mode():
        tensor_input = False
        if tf.debugging.is_numeric_tensor(y0):
            class TupleFunc(tf.keras.Model):

                def __init__(self, base_func, **kwargs):
                    super(TupleFunc, self).__init__(**kwargs)
                    self.base_func = base_func

                def call(self, t, y):
                    return (self.base_func(t, y[0]),)

            tensor_input = True
            y0 = (y0,)
            func = TupleFunc(func)

        # build the function to get its variables
        if not func.built:
            _ = func(t, y0)

        global _arguments
        _arguments = _Arguments(func, method, options, rtol, atol)

        ys = OdeintAdjointMethod(*y0, t)

        if tensor_input or type(ys) == tuple or type(ys) == list:
            ys = ys[0]

        return ys 
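A hypothetical call, assuming a `tf.keras.Model` subclass whose `call(t, y)` returns the time derivative of the state, as required by the isinstance check above (assumes `import tensorflow as tf`):

# Hypothetical usage sketch of odeint_adjoint.
class Decay(tf.keras.Model):
    def call(self, t, y):
        return -y  # dy/dt = -y

t = tf.linspace(0.0, 1.0, 11)
y0 = tf.constant([1.0])
ys = odeint_adjoint(Decay(), y0, t)  # gradients flow through the adjoint method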
Example #9
Source File: test_util.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def run_in_graph_and_eager_modes(__unused__=None, graph=None, config=None,
                                 use_gpu=False, force_gpu=False,
                                 reset_test=True):
  """Runs the test in both graph and eager modes.

  Args:
    __unused__: Prevents silently skipping tests.
    graph: Optional graph to use during the returned session.
    config: An optional config_pb2.ConfigProto to use to configure the
      session.
    use_gpu: If True, attempt to run as many ops as possible on GPU.
    force_gpu: If True, pin all ops to `/device:GPU:0`.
    reset_test: If True, tearDown and setUp the test case between the two runs.

  Returns:
    A decorator that runs the decorated test function once in graph mode
        and once with eager execution enabled.
  """

  assert not __unused__, "Add () after run_in_graph_and_eager_modes."

  def decorator(f):
    """Test method decorator."""
    def decorated(self, **kwargs):
      """Decorated the test method."""
      with context.graph_mode():
        with self.test_session(graph, config, use_gpu, force_gpu):
          f(self, **kwargs)

      if reset_test:
        # This decorator runs the wrapped test twice.
        # Reset the test environment between runs.
        self.tearDown()
        self.setUp()

      def run_eager_mode():
        if force_gpu:
          gpu_name = gpu_device_name()
          if not gpu_name:
            gpu_name = "/device:GPU:0"
          with context.device(gpu_name):
            f(self, **kwargs)
        elif use_gpu:
          # TODO(xpan): Support softplacement and gpu by default when available.
          f(self, **kwargs)
        else:
          with context.device("/device:CPU:0"):
            f(self, **kwargs)

      eager_graph = graph or ops.Graph()
      with context.eager_mode():
        with eager_graph.as_default():
          run_eager_mode()

    return decorated
  return decorator 
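A hypothetical use of the decorator inside a `tf.test.TestCase` subclass, assuming `self.evaluate` is available in this TensorFlow version; note the parentheses, which the assert above enforces:

class AddTest(tf.test.TestCase):

  @run_in_graph_and_eager_modes()
  def test_add(self):
    result = tf.add(1, 2)
    # self.evaluate() works under both the graph session and eager execution.
    self.assertEqual(3, self.evaluate(result))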
Example #10
Source File: odeint.py    From tfdiffeq with MIT License
def odeint(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None):
    """Integrate a system of ordinary differential equations.

    Solves the initial value problem for a non-stiff system of first order ODEs:
        ```
        dy/dt = func(t, y), y(t[0]) = y0
        ```
    where y is a Tensor of any shape.

    Output dtypes and numerical precision are based on the dtype of the input `y0`.

    Args:
        func: Function that maps a Tensor holding the state `y` and a scalar Tensor
            `t` into a Tensor of state derivatives with respect to time.
        y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
            have any floating point or complex dtype.
        t: 1-D Tensor holding a sequence of time points for which to solve for
            `y`. The initial time point should be the first element of this sequence,
            and each time must be larger than the previous time. May have any floating
            point dtype. Converted to a Tensor with float64 dtype.
        rtol: optional float64 Tensor specifying an upper bound on relative error,
            per element of `y`.
        atol: optional float64 Tensor specifying an upper bound on absolute error,
            per element of `y`.
        method: optional string indicating the integration method to use.
        options: optional dict of configuring options for the indicated integration
            method. Can only be provided if a `method` is explicitly set.

    Returns:
        y: Tensor, where the first dimension corresponds to different
            time points. Contains the solved value of y for each desired time point in
            `t`, with the initial value `y0` being the first element along the first
            dimension.

    Raises:
        ValueError: if an invalid `method` is provided.
        TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
            an invalid dtype.
    """
    with eager_mode():
        tensor_input, func, y0, t = _check_inputs(func, y0, t)

        if options is None:
            options = {}
        elif method is None:
            raise ValueError('cannot supply `options` without specifying `method`')

        if method is None:
            method = 'dopri5'

        solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
        solution = solver.integrate(t)

        if tensor_input:
            solution = solution[0]
        return solution
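A hypothetical call solving the scalar decay equation dy/dt = -y with y(0) = 1; per the code above, the solver defaults to 'dopri5' when no `method` is given (assumes `import tensorflow as tf`):

# Hypothetical usage sketch of odeint.
t = tf.linspace(0.0, 1.0, 11)
y0 = tf.constant([1.0])
ys = odeint(lambda t, y: -y, y0, t)  # ys[0] == y0, ys[-1] ~= exp(-1) ~= 0.3679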