Python absl.logging.log_first_n() Examples

The following are 3 code examples of absl.logging.log_first_n(), drawn from open-source projects. Each example notes its original source file, project, and license. You may also want to check out all available functions and classes of the module absl.logging.
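Before the project examples, here is a minimal, self-contained sketch of the call shape they all share. The loop and message below are illustrative only and not taken from any of the projects: log_first_n(level, msg, n, *args) formats msg with *args and emits it at the given level, but only for the first n times that particular call site executes.

from absl import logging

logging.set_verbosity(logging.INFO)

for step in range(10):
  # Only the first 3 iterations emit a log line; later ones are suppressed.
  logging.log_first_n(logging.INFO, "Processing step %d", 3, step)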
Example #1
Source File: gym_utils.py    From tensor2tensor with Apache License 2.0
def gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env,
                    rendered_env_resize_to, sticky_actions, output_dtype,
                    num_actions):
  """Wraps a gym environment. see make_gym_env for details."""
  # rl_env_max_episode_steps is None or int.
  assert ((not rl_env_max_episode_steps) or
          isinstance(rl_env_max_episode_steps, int))

  wrap_with_time_limit = ((not rl_env_max_episode_steps) or
                          rl_env_max_episode_steps >= 0)

  if wrap_with_time_limit:
    env = remove_time_limit_wrapper(env)

  if num_actions is not None:
    logging.log_first_n(
        logging.INFO, "Number of discretized actions: %d", 1, num_actions)
    env = ActionDiscretizeWrapper(env, num_actions=num_actions)

  if sticky_actions:
    env = StickyActionEnv(env)

  if maxskip_env:
    env = MaxAndSkipEnv(env)  # pylint: disable=redefined-variable-type

  if rendered_env:
    env = RenderedEnv(
        env, resize_to=rendered_env_resize_to, output_dtype=output_dtype)

  if wrap_with_time_limit and rl_env_max_episode_steps is not None:
    env = gym.wrappers.TimeLimit(
        env, max_episode_steps=rl_env_max_episode_steps)
  return env 
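Note the third argument of 1 in the log_first_n call above: even if gym_env_wrapper is invoked many times in the same process (for example, when building many environment copies), the "Number of discretized actions" message is emitted only the first time that call site runs.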
Example #2
Source File: adversarial_neighbor.py    From neural-structured-learning with Apache License 2.0
def _compute_gradient(self, loss, dense_features, gradient_tape=None):
    """Computes the gradient given a loss and dense features."""
    feature_values = list(dense_features.values())
    if gradient_tape is None:
      grads = tf.gradients(loss, feature_values)
    else:
      grads = gradient_tape.gradient(loss, feature_values)

    # The order of elements returned by .values() and .keys() is guaranteed
    # to correspond to each other.
    keyed_grads = dict(zip(dense_features.keys(), grads))

    invalid_grads, valid_grads = self._split_dict(keyed_grads,
                                                  lambda grad: grad is None)
    # Two cases in which a gradient can be invalid (None):
    # (1) The feature is not differentiable, like strings or integers.
    # (2) The feature is not involved in loss computation.
    if invalid_grads:
      if self._raise_invalid_gradient:
        raise ValueError('Cannot perturb features ' + str(invalid_grads.keys()))
      logging.log_first_n(logging.WARNING, 'Cannot perturb features %s', 1,
                          invalid_grads.keys())

    # Guards against numerical errors. If the gradient is malformed (inf, -inf,
    # or NaN) on a dimension, replace it with 0, which has the effect of not
    # perturbing the original sample along that particular dimension.
    return tf.nest.map_structure(
        lambda g: tf.where(tf.math.is_finite(g), g, tf.zeros_like(g)),
        valid_grads)

  # The _compose_as_dict and _decompose_as functions are similar to
  # tf.nest.{flatten, pack_sequence_as} except that the composed representation
  # is a dictionary of (name, value) pairs instead of a list of values. The
  # names are needed for joining values from different inputs (e.g. input
  # features and feature masks) with possibly missing values (e.g. no mask for
  # some features). 
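Here log_first_n serves the same purpose at WARNING level. _compute_gradient is likely to run on every training step, so reporting the non-perturbable feature names only the first time keeps the warning visible without flooding the logs; when stricter behavior is wanted, the _raise_invalid_gradient flag turns the same condition into an error instead.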
Example #3
Source File: resolver.py    From hub with Apache License 2.0
def tfhub_cache_dir(default_cache_dir=None, use_temp=False):
  """Returns cache directory.

  Returns the cache directory from the TFHUB_CACHE_DIR environment variable,
  the --tfhub_cache_dir flag, or the given default, in that order of
  precedence.

  Args:
    default_cache_dir: Default cache location to use if neither the
      TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir is specified.
    use_temp: bool, whether to use the system's temp directory as the module
      cache directory if none of default_cache_dir, --tfhub_cache_dir, or the
      TFHUB_CACHE_DIR environment variable is specified.
  """

  # Note: We are using FLAGS["tfhub_cache_dir"] (and not FLAGS.tfhub_cache_dir)
  # to access the flag value in order to avoid parsing argv list. The flags
  # should have been parsed by now in main() by tf.app.run(). If that was not
  # the case (say in Colab env) we skip flag parsing because argv may contain
  # unknown flags.
  cache_dir = (
      os.getenv(_TFHUB_CACHE_DIR, "") or FLAGS["tfhub_cache_dir"].value or
      default_cache_dir)
  if not cache_dir and use_temp:
    # Place all TF-Hub modules under <system's temp>/tfhub_modules.
    cache_dir = os.path.join(tempfile.gettempdir(), "tfhub_modules")
  if cache_dir:
    logging.log_first_n(logging.INFO, "Using %s to cache modules.", 1,
                        cache_dir)
  return cache_dir
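As in the earlier examples, the n of 1 means the "Using %s to cache modules." line appears only once per process, even though tfhub_cache_dir() may be consulted for every module resolution. A small sketch of the precedence described in the docstring, assuming _TFHUB_CACHE_DIR names the documented TFHUB_CACHE_DIR variable and using a hypothetical path:

import os

os.environ["TFHUB_CACHE_DIR"] = "/tmp/my_tfhub_cache"  # hypothetical path
# With the environment variable set, tfhub_cache_dir() returns it and ignores
# both the --tfhub_cache_dir flag and the default_cache_dir argument.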