Python absl.logging.INFO Examples

The following are 30 code examples of absl.logging.INFO(), drawn from open-source projects; the originating project and source file are noted above each example. You may also want to check out all available functions and classes of the absl.logging module.
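Before the examples, here is a minimal, self-contained sketch of how absl.logging.INFO is typically used to set verbosity and emit a message (this script is illustrative and not taken from any of the projects below):

from absl import app
from absl import logging


def main(argv):
  del argv  # Unused.
  logging.set_verbosity(logging.INFO)  # Emit INFO-level messages and above.
  logging.info('Verbosity is now %s.', logging.get_verbosity())


if __name__ == '__main__':
  app.run(main)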
Example #1
Source File: retrain.py    From hub with Apache License 2.0
def logging_level_verbosity(logging_verbosity):
  """Converts logging_level into TensorFlow logging verbosity value.

  Args:
    logging_verbosity: String value representing logging level: 'DEBUG', 'INFO',
    'WARN', 'ERROR', 'FATAL'
  """
  name_to_level = {
      'FATAL': logging.FATAL,
      'ERROR': logging.ERROR,
      'WARN': logging.WARN,
      'INFO': logging.INFO,
      'DEBUG': logging.DEBUG
  }

  try:
    return name_to_level[logging_verbosity]
  except Exception as e:
    raise RuntimeError('Unsupported logging verbosity (%s). Use one of %s.' %
                       (str(e), list(name_to_level)))
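A plausible use of this helper, sketched below, is converting a string flag into a level before configuring TensorFlow logging (the hard-coded value and the set_verbosity call are assumptions, not copied from retrain.py):

import tensorflow as tf

# 'INFO' is hard-coded here for illustration; in retrain.py the value would
# come from a command-line flag.
tf.compat.v1.logging.set_verbosity(logging_level_verbosity('INFO'))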
Example #2
Source File: logging_functional_test.py    From abseil-py with Apache License 2.0
def test_bad_exc_info_py_logging(self):

    def assert_stderr(stderr):
      # The exact message differs among Python versions, so this just
      # asserts that certain pieces of information are present.
      self.assertIn('Traceback (most recent call last):', stderr)
      self.assertIn('IndexError', stderr)

    expected_logs = [
        ['stderr', None, assert_stderr],
        ['absl_log_file', 'INFO', '']]

    self._exec_test(
        _verify_ok,
        expected_logs,
        test_name='bad_exc_info',
        use_absl_log_file=True) 
Example #3
Source File: logging_test.py    From abseil-py with Apache License 2.0
def test_start_logging_to_file(
      self, mock_getpid, mock_unlink, mock_islink, mock_time,
      mock_localtime, mock_find_log_dir_and_names):
    mock_find_log_dir_and_names.return_value = ('here', 'prog1', 'prog1')
    mock_time.return_value = '12345'
    mock_localtime.return_value = self.now_tuple
    mock_getpid.return_value = 4321
    symlink = os.path.join('here', 'prog1.INFO')
    mock_islink.return_value = True
    with mock.patch.object(
        logging, 'open', return_value=sys.stdout, create=True):
      if getattr(os, 'symlink', None):
        with mock.patch.object(os, 'symlink'):
          self.python_handler.start_logging_to_file()
          mock_unlink.assert_called_once_with(symlink)
          os.symlink.assert_called_once_with(
              'prog1.INFO.19791021-181716.4321', symlink)
      else:
        self.python_handler.start_logging_to_file() 
Example #4
Source File: __init__.py    From abseil-py with Apache License 2.0
def get_log_file_name(level=INFO):
  """Returns the name of the log file.

  For Python logging, only one file is used and level is ignored. An empty
  string is returned if logging goes to stderr/stdout or the log stream has
  no `name` attribute.

  Args:
    level: int, the absl.logging level.

  Raises:
    ValueError: Raised when `level` has an invalid value.
  """
  if level not in converter.ABSL_LEVELS:
    raise ValueError('Invalid absl.logging level {}'.format(level))
  stream = get_absl_handler().python_handler.stream
  if (stream == sys.stderr or stream == sys.stdout or
      not hasattr(stream, 'name')):
    return ''
  else:
    return stream.name 
Example #5
Source File: __init__.py    From abseil-py with Apache License 2.0
def set_stderrthreshold(s):
  """Sets the stderr threshold to the value passed in.

  Args:
    s: str|int, valid string values are case-insensitive 'debug', 'info',
        'warning', 'error', and 'fatal'; valid integer values are
        logging.DEBUG|INFO|WARNING|ERROR|FATAL.

  Raises:
      ValueError: Raised when s is an invalid value.
  """
  if s in converter.ABSL_LEVELS:
    FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
  elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
    FLAGS.stderrthreshold = s
  else:
    raise ValueError(
        'set_stderrthreshold only accepts integer absl logging level '
        'from -3 to 1, or case-insensitive string values '
        "'debug', 'info', 'warning', 'error', and 'fatal'. "
        'But found "{}" ({}).'.format(s, type(s))) 
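In application code this is normally reached through the public absl.logging module rather than called directly; a minimal sketch:

from absl import app
from absl import logging


def main(argv):
  del argv  # Unused.
  # When logging to a file, only records at or above this threshold are also
  # echoed to stderr. Accepts the same case-insensitive strings and integer
  # levels handled above.
  logging.set_stderrthreshold('warning')
  logging.warning('This record meets the stderr threshold.')


if __name__ == '__main__':
  app.run(main)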
Example #6
Source File: medaka.py    From medaka with Mozilla Public License 2.0
def _log_level():
    """Parser to set logging level and acquire software version/commit"""

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)

    #parser.add_argument('--version', action='version', version=get_version())

    modify_log_level = parser.add_mutually_exclusive_group()
    modify_log_level.add_argument('--debug', action='store_const',
        dest='log_level', const=logging.DEBUG, default=logging.INFO,
        help='Verbose logging of debug information.')
    modify_log_level.add_argument('--quiet', action='store_const',
        dest='log_level', const=logging.WARNING, default=logging.INFO,
        help='Minimal logging; warnings only.')

    return parser 
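Because the parser is built with add_help=False, it is meant to be attached as a parent parser so subcommands share the --debug/--quiet options; a sketch of that pattern (the program name is illustrative):

import argparse
import logging

# 'medaka_subtool' is a placeholder program name.
parser = argparse.ArgumentParser('medaka_subtool', parents=[_log_level()])
args = parser.parse_args(['--quiet'])
logging.basicConfig(level=args.log_level)  # WARNING when --quiet is given.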
Example #7
Source File: converter_test.py    From abseil-py with Apache License 2.0
def test_standard_to_absl(self):
    self.assertEqual(
        absl_logging.DEBUG, converter.standard_to_absl(logging.DEBUG))
    self.assertEqual(
        absl_logging.INFO, converter.standard_to_absl(logging.INFO))
    self.assertEqual(
        absl_logging.WARN, converter.standard_to_absl(logging.WARN))
    self.assertEqual(
        absl_logging.WARN, converter.standard_to_absl(logging.WARNING))
    self.assertEqual(
        absl_logging.ERROR, converter.standard_to_absl(logging.ERROR))
    self.assertEqual(
        absl_logging.FATAL, converter.standard_to_absl(logging.FATAL))
    self.assertEqual(
        absl_logging.FATAL, converter.standard_to_absl(logging.CRITICAL))
    # vlog levels.
    self.assertEqual(2, converter.standard_to_absl(logging.DEBUG - 1))
    self.assertEqual(3, converter.standard_to_absl(logging.DEBUG - 2))

    with self.assertRaises(TypeError):
      converter.standard_to_absl('') 
Example #8
Source File: converter_test.py    From abseil-py with Apache License 2.0
def test_absl_to_standard(self):
    self.assertEqual(
        logging.DEBUG, converter.absl_to_standard(absl_logging.DEBUG))
    self.assertEqual(
        logging.INFO, converter.absl_to_standard(absl_logging.INFO))
    self.assertEqual(
        logging.WARNING, converter.absl_to_standard(absl_logging.WARN))
    self.assertEqual(
        logging.WARN, converter.absl_to_standard(absl_logging.WARN))
    self.assertEqual(
        logging.ERROR, converter.absl_to_standard(absl_logging.ERROR))
    self.assertEqual(
        logging.FATAL, converter.absl_to_standard(absl_logging.FATAL))
    self.assertEqual(
        logging.CRITICAL, converter.absl_to_standard(absl_logging.FATAL))
    # vlog levels.
    self.assertEqual(9, converter.absl_to_standard(2))
    self.assertEqual(8, converter.absl_to_standard(3))

    with self.assertRaises(TypeError):
      converter.absl_to_standard('') 
Example #9
Source File: converter_test.py    From abseil-py with Apache License 2.0
def test_string_to_standard(self):
    self.assertEqual(logging.DEBUG, converter.string_to_standard('debug'))
    self.assertEqual(logging.INFO, converter.string_to_standard('info'))
    self.assertEqual(logging.WARNING, converter.string_to_standard('warn'))
    self.assertEqual(logging.WARNING, converter.string_to_standard('warning'))
    self.assertEqual(logging.ERROR, converter.string_to_standard('error'))
    self.assertEqual(logging.CRITICAL, converter.string_to_standard('fatal'))

    self.assertEqual(logging.DEBUG, converter.string_to_standard('DEBUG'))
    self.assertEqual(logging.INFO, converter.string_to_standard('INFO'))
    self.assertEqual(logging.WARNING, converter.string_to_standard('WARN'))
    self.assertEqual(logging.WARNING, converter.string_to_standard('WARNING'))
    self.assertEqual(logging.ERROR, converter.string_to_standard('ERROR'))
    self.assertEqual(logging.CRITICAL, converter.string_to_standard('FATAL')) 
Example #10
Source File: logging_functional_test.py    From abseil-py with Apache License 2.0
def test_stderrthreshold_py_logging(self):
    """Tests --stderrthreshold."""

    stderr_logs = '''\
I0000 00:00:00.000000 12345 logging_functional_test_helper.py:123] FLAGS.stderrthreshold=debug, debug log
I0000 00:00:00.000000 12345 logging_functional_test_helper.py:123] FLAGS.stderrthreshold=debug, info log
W0000 00:00:00.000000 12345 logging_functional_test_helper.py:123] FLAGS.stderrthreshold=debug, warning log
E0000 00:00:00.000000 12345 logging_functional_test_helper.py:123] FLAGS.stderrthreshold=debug, error log
I0000 00:00:00.000000 12345 logging_functional_test_helper.py:123] FLAGS.stderrthreshold=info, info log
W0000 00:00:00.000000 12345 logging_functional_test_helper.py:123] FLAGS.stderrthreshold=info, warning log
E0000 00:00:00.000000 12345 logging_functional_test_helper.py:123] FLAGS.stderrthreshold=info, error log
W0000 00:00:00.000000 12345 logging_functional_test_helper.py:123] FLAGS.stderrthreshold=warning, warning log
E0000 00:00:00.000000 12345 logging_functional_test_helper.py:123] FLAGS.stderrthreshold=warning, error log
E0000 00:00:00.000000 12345 logging_functional_test_helper.py:123] FLAGS.stderrthreshold=error, error log
'''

    expected_logs = [
        ['stderr', None, stderr_logs],
        ['absl_log_file', 'INFO', None],
    ]
    # Set verbosity to debug to test stderrthreshold == debug.
    extra_args = ['-v=1']

    self._exec_test(
        _verify_ok,
        expected_logs,
        test_name='stderrthreshold',
        extra_args=extra_args,
        use_absl_log_file=True) 
Example #11
Source File: logging_functional_test.py    From abseil-py with Apache License 2.0
def test_py_logging_verbosity_file(self, verbosity):
    """Tests -v/--verbosity flag with Python logging to stderr."""
    v_flag = '-v=%d' % verbosity
    self._exec_test(
        _verify_ok,
        [['stderr', None, ''],
         # When using Python logging, only a file named INFO is created,
         # unlike C++ logging, which also creates WARNING and ERROR files.
         ['absl_log_file', 'INFO', self._get_logs(verbosity)]],
        use_absl_log_file=True,
        extra_args=[v_flag]) 
Example #12
Source File: resolver.py    From hub with Apache License 2.0
def tfhub_cache_dir(default_cache_dir=None, use_temp=False):
  """Returns cache directory.

  Returns the cache directory from the TFHUB_CACHE_DIR environment variable,
  the --tfhub_cache_dir flag, or the given default, whichever is set.

  Args:
    default_cache_dir: Default cache location to use if neither the
      TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir is
      specified.
    use_temp: bool, optionally use the system's temp directory as a module
      cache directory if neither default_cache_dir nor --tfhub_cache_dir nor
      the TFHUB_CACHE_DIR environment variable is specified.
  """

  # Note: We are using FLAGS["tfhub_cache_dir"] (and not FLAGS.tfhub_cache_dir)
  # to access the flag value in order to avoid parsing argv list. The flags
  # should have been parsed by now in main() by tf.app.run(). If that was not
  # the case (say in Colab env) we skip flag parsing because argv may contain
  # unknown flags.
  cache_dir = (
      os.getenv(_TFHUB_CACHE_DIR, "") or FLAGS["tfhub_cache_dir"].value or
      default_cache_dir)
  if not cache_dir and use_temp:
    # Place all TF-Hub modules under <system's temp>/tfhub_modules.
    cache_dir = os.path.join(tempfile.gettempdir(), "tfhub_modules")
  if cache_dir:
    logging.log_first_n(logging.INFO, "Using %s to cache modules.", 1,
                        cache_dir)
  return cache_dir 
Example #13
Source File: logging_test.py    From abseil-py with Apache License 2.0
def test_log_to_std_err(self):
    record = std_logging.LogRecord(
        'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
    with mock.patch.object(std_logging.StreamHandler, 'emit'):
      self.python_handler._log_to_stderr(record)
      std_logging.StreamHandler.emit.assert_called_once_with(record) 
Example #14
Source File: logging_test.py    From abseil-py with Apache License 2.0
def test_emit_log_to_stderr(self):
    record = std_logging.LogRecord(
        'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
    with mock.patch.object(self.python_handler, '_log_to_stderr'):
      self.python_handler.emit(record)
      self.python_handler._log_to_stderr.assert_called_once_with(record) 
Example #15
Source File: logging_test.py    From abseil-py with Apache License 2.0
def test_emit(self):
    stream = _StreamIO()
    handler = logging.PythonHandler(stream)
    handler.stderr_threshold = std_logging.FATAL
    record = std_logging.LogRecord(
        'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
    handler.emit(record)
    self.assertEqual(1, stream.getvalue().count('logging_msg')) 
Example #16
Source File: logging_test.py    From abseil-py with Apache License 2.0
def test_emit_and_stderr_threshold(self):
    mock_stderr = _StreamIO()
    stream = _StreamIO()
    handler = logging.PythonHandler(stream)
    record = std_logging.LogRecord(
        'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
    with mock.patch.object(sys, 'stderr', new=mock_stderr) as mock_stderr:
      handler.emit(record)
      self.assertEqual(1, stream.getvalue().count('logging_msg'))
      self.assertEqual(1, mock_stderr.getvalue().count('logging_msg')) 
Example #17
Source File: logging_test.py    From abseil-py with Apache License 2.0
def test_emit_on_stderr(self):
    mock_stderr = _StreamIO()
    with mock.patch.object(sys, 'stderr', new=mock_stderr) as mock_stderr:
      handler = logging.PythonHandler()
      handler.stderr_threshold = std_logging.INFO
      record = std_logging.LogRecord(
          'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
      handler.emit(record)
      self.assertEqual(1, mock_stderr.getvalue().count('logging_msg')) 
Example #18
Source File: logging_test.py    From abseil-py with Apache License 2.0
def setUp(self):
    self.now_tuple = time.localtime(time.mktime(
        (1979, 10, 21, 18, 17, 16, 3, 15, 1)))
    self.new_prefix = lambda level: '(blah_prefix)'
    mock.patch.object(time, 'time').start()
    mock.patch.object(time, 'localtime').start()
    self.record = std_logging.LogRecord(
        'name', std_logging.INFO, 'path', 12, 'A Message', [], False)
    self.formatter = logging.PythonFormatter() 
Example #19
Source File: logging_test.py    From abseil-py with Apache License 2.0
def test_info(self):
    with mock.patch.object(self.logger, 'log'):
      self.logger.info(self.message)
      self.logger.log.assert_called_once_with(std_logging.INFO, self.message) 
Example #20
Source File: converter_test.py    From abseil-py with Apache License 2.0
def test_standard_to_cpp(self):
    self.assertEqual(0, converter.standard_to_cpp(logging.DEBUG))
    self.assertEqual(0, converter.standard_to_cpp(logging.INFO))
    self.assertEqual(1, converter.standard_to_cpp(logging.WARN))
    self.assertEqual(1, converter.standard_to_cpp(logging.WARNING))
    self.assertEqual(2, converter.standard_to_cpp(logging.ERROR))
    self.assertEqual(3, converter.standard_to_cpp(logging.FATAL))
    self.assertEqual(3, converter.standard_to_cpp(logging.CRITICAL))

    with self.assertRaises(TypeError):
      converter.standard_to_cpp('') 
Example #21
Source File: logging_test.py    From abseil-py with Apache License 2.0
def test_logger_cannot_be_disabled(self):
    self.logger.disabled = True
    record = self.logger.makeRecord(
        'name', std_logging.INFO, 'fn', 20, 'msg', [], False)
    with mock.patch.object(self.logger, 'callHandlers') as mock_call_handlers:
      self.logger.handle(record)
    mock_call_handlers.assert_called_once() 
Example #22
Source File: logging_test.py    From abseil-py with Apache License 2.0
def setUp(self):
    self.record = std_logging.LogRecord(
        'name', std_logging.INFO, 'path/to/source.py', 13, 'log message',
        None, None) 
Example #23
Source File: converter_test.py    From abseil-py with Apache License 2.0
def test_absl_to_cpp(self):
    self.assertEqual(0, converter.absl_to_cpp(absl_logging.DEBUG))
    self.assertEqual(0, converter.absl_to_cpp(absl_logging.INFO))
    self.assertEqual(1, converter.absl_to_cpp(absl_logging.WARN))
    self.assertEqual(2, converter.absl_to_cpp(absl_logging.ERROR))
    self.assertEqual(3, converter.absl_to_cpp(absl_logging.FATAL))

    with self.assertRaises(TypeError):
      converter.absl_to_cpp('') 
Example #24
Source File: base_client.py    From tfx with Apache License 2.0
def WaitUntilModelLoaded(self, deadline: float,
                           polling_interval_sec: int) -> None:
    """Wait until model is loaded and available.

    Args:
      deadline: A deadline time in UTC timestamp (in seconds).
      polling_interval_sec: GetServingStatus() polling interval.

    Raises:
      DeadlineExceeded: When the deadline is exceeded before the model is ready.
      ValidationFailed: If validation failed explicitly.
    """
    while time.time() < deadline:
      status = self._GetServingStatus()
      if status == types.ModelServingStatus.NOT_READY:
        logging.log_every_n_seconds(
            level=logging.INFO,
            n_seconds=10,
            msg='Waiting for model to be loaded...')
        time.sleep(polling_interval_sec)
        continue
      elif status == types.ModelServingStatus.UNAVAILABLE:
        raise error_types.ValidationFailed(
            'Model server failed to load the model.')
      else:
        logging.info('Model is successfully loaded.')
        return

    raise error_types.DeadlineExceeded(
        'Deadline exceeded while waiting for the model to be loaded.')
Example #25
Source File: gym_utils.py    From tensor2tensor with Apache License 2.0
def gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env,
                    rendered_env_resize_to, sticky_actions, output_dtype,
                    num_actions):
  """Wraps a gym environment. see make_gym_env for details."""
  # rl_env_max_episode_steps is None or int.
  assert ((not rl_env_max_episode_steps) or
          isinstance(rl_env_max_episode_steps, int))

  wrap_with_time_limit = ((not rl_env_max_episode_steps) or
                          rl_env_max_episode_steps >= 0)

  if wrap_with_time_limit:
    env = remove_time_limit_wrapper(env)

  if num_actions is not None:
    logging.log_first_n(
        logging.INFO, "Number of discretized actions: %d", 1, num_actions)
    env = ActionDiscretizeWrapper(env, num_actions=num_actions)

  if sticky_actions:
    env = StickyActionEnv(env)

  if maxskip_env:
    env = MaxAndSkipEnv(env)  # pylint: disable=redefined-variable-type

  if rendered_env:
    env = RenderedEnv(
        env, resize_to=rendered_env_resize_to, output_dtype=output_dtype)

  if wrap_with_time_limit and rl_env_max_episode_steps is not None:
    env = gym.wrappers.TimeLimit(
        env, max_episode_steps=rl_env_max_episode_steps)
  return env 
Example #26
Source File: log.py    From reinvent-randomized with MIT License
def get_logger(name, level=logging.INFO, with_tqdm=True):
    if with_tqdm:
        handler = TQDMHandler()
    else:
        handler = logging.StreamHandler(stream=sys.stderr)
    formatter = logging.Formatter(
        fmt="%(asctime)s: %(module)s.%(funcName)s +%(lineno)s: %(levelname)-8s %(message)s",
        datefmt="%H:%M:%S"
    )
    handler.setFormatter(formatter)

    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.addHandler(handler)
    return logger 
Example #27
Source File: predict_main.py    From lasertagger with Apache License 2.0
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  flags.mark_flag_as_required('input_file')
  flags.mark_flag_as_required('input_format')
  flags.mark_flag_as_required('output_file')
  flags.mark_flag_as_required('label_map_file')
  flags.mark_flag_as_required('vocab_file')
  flags.mark_flag_as_required('saved_model')

  label_map = utils.read_label_map(FLAGS.label_map_file)
  converter = tagging_converter.TaggingConverter(
      tagging_converter.get_phrase_vocabulary_from_label_map(label_map),
      FLAGS.enable_swap_tag)
  builder = bert_example.BertExampleBuilder(label_map, FLAGS.vocab_file,
                                            FLAGS.max_seq_length,
                                            FLAGS.do_lower_case, converter)
  predictor = predict_utils.LaserTaggerPredictor(
      tf.contrib.predictor.from_saved_model(FLAGS.saved_model), builder,
      label_map)

  num_predicted = 0
  with tf.gfile.Open(FLAGS.output_file, 'w') as writer:
    for i, (sources, target) in enumerate(utils.yield_sources_and_targets(
        FLAGS.input_file, FLAGS.input_format)):
      logging.log_every_n(
          logging.INFO,
          f'{i} examples processed, {num_predicted} converted to tf.Example.',
          100)
      prediction = predictor.predict(sources)
      writer.write(f'{" ".join(sources)}\t{prediction}\t{target}\n')
      num_predicted += 1
  logging.info(f'{num_predicted} predictions saved to:\n{FLAGS.output_file}') 
Example #28
Source File: preprocess_main.py    From lasertagger with Apache License 2.0
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  flags.mark_flag_as_required('input_file')
  flags.mark_flag_as_required('input_format')
  flags.mark_flag_as_required('output_tfrecord')
  flags.mark_flag_as_required('label_map_file')
  flags.mark_flag_as_required('vocab_file')

  label_map = utils.read_label_map(FLAGS.label_map_file)
  converter = tagging_converter.TaggingConverter(
      tagging_converter.get_phrase_vocabulary_from_label_map(label_map),
      FLAGS.enable_swap_tag)
  builder = bert_example.BertExampleBuilder(label_map, FLAGS.vocab_file,
                                            FLAGS.max_seq_length,
                                            FLAGS.do_lower_case, converter)

  num_converted = 0
  with tf.io.TFRecordWriter(FLAGS.output_tfrecord) as writer:
    for i, (sources, target) in enumerate(utils.yield_sources_and_targets(
        FLAGS.input_file, FLAGS.input_format)):
      logging.log_every_n(
          logging.INFO,
          f'{i} examples processed, {num_converted} converted to tf.Example.',
          10000)
      example = builder.build_bert_example(
          sources, target,
          FLAGS.output_arbitrary_targets_for_infeasible_examples)
      if example is None:
        continue
      writer.write(example.to_tf_example().SerializeToString())
      num_converted += 1
  logging.info(f'Done. {num_converted} examples converted to tf.Example.')
  count_fname = _write_example_count(num_converted)
  logging.info(f'Wrote:\n{FLAGS.output_tfrecord}\n{count_fname}') 
Example #29
Source File: layers.py    From mead-baseline with Apache License 2.0
def set_tf_log_level(ll):
    # 0     | DEBUG            | [Default] Print all messages
    # 1     | INFO             | Filter out INFO messages
    # 2     | WARNING          | Filter out INFO & WARNING messages
    # 3     | ERROR            | Filter out all messages
    import os

    TF_VERSION = get_version(tf)
    if TF_VERSION < 2:
        import tensorflow.compat.v1.logging as tf_logging
    else:
        from absl import logging as tf_logging
    tf_ll = tf_logging.WARN
    tf_cpp_ll = 1
    ll = ll.lower()
    if ll == "debug":
        tf_ll = tf_logging.DEBUG
        tf_cpp_ll = 0
    if ll == "info":
        tf_cpp_ll = 0
        tf_ll = tf_logging.INFO
    if ll == "error":
        tf_ll = tf_logging.ERROR
        tf_cpp_ll = 2
    tf_logging.set_verbosity(tf_ll)
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = f"{tf_cpp_ll}" 
Example #30
Source File: graph_keras_mlp_cora.py    From neural-structured-learning with Apache License 2.0
def log_metrics(model_desc, eval_metrics):
  """Logs evaluation metrics at `logging.INFO` level.

  Args:
    model_desc: A description of the model.
    eval_metrics: A dictionary mapping metric names to corresponding values. It
      must contain the loss and accuracy metrics.
  """
  logging.info('\n')
  logging.info('Eval accuracy for %s: %s', model_desc, eval_metrics['accuracy'])
  logging.info('Eval loss for %s: %s', model_desc, eval_metrics['loss'])
  if 'graph_loss' in eval_metrics:
    logging.info('Eval graph loss for %s: %s', model_desc,
                 eval_metrics['graph_loss'])
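A hypothetical call to this helper, with illustrative values of the kind a Keras evaluate() run might produce:

# Model description and metric values are made up for illustration only.
log_metrics('graph-regularized MLP',
            {'accuracy': 0.81, 'loss': 0.55, 'graph_loss': 0.02})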