Python tensorflow.python.platform.tf_logging.error() Examples

The following are 30 code examples of tensorflow.python.platform.tf_logging.error(), drawn from open-source projects; the originating project and source file are noted above each example. You may also want to check out the other available functions and classes of the tensorflow.python.platform.tf_logging module.
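Since tf_logging is a thin wrapper around Python's standard logging module, error() accepts a printf-style format string followed by its arguments, exactly as in the examples below. A minimal sketch of calling it directly (in modern TensorFlow the same functions are also exposed publicly as tf.compat.v1.logging):

from tensorflow.python.platform import tf_logging as logging

try:
  open('/nonexistent/events.out')  # deliberately fails
except IOError as e:
  # Lazy %-interpolation: the message is only formatted if the record is
  # actually emitted.
  logging.error('Unable to read events file: %s', e)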
Example #1
Source File: event_multiplexer.py    From auto-alt-text-lambda-api with MIT License
def Reload(self):
    """Call `Reload` on every `EventAccumulator`."""
    logging.info('Beginning EventMultiplexer.Reload()')
    self._reload_called = True
    # Build a list so we're safe even if the list of accumulators is modified
    # while we're reloading.
    with self._accumulators_mutex:
      items = list(self._accumulators.items())

    names_to_delete = set()
    for name, accumulator in items:
      try:
        accumulator.Reload()
      except (OSError, IOError) as e:
        logging.error("Unable to reload accumulator '%s': %s", name, e)
      except directory_watcher.DirectoryDeletedError:
        names_to_delete.add(name)

    with self._accumulators_mutex:
      for name in names_to_delete:
        logging.warning("Deleting accumulator '%s'", name)
        del self._accumulators[name]
    logging.info('Finished with EventMultiplexer.Reload()')
    return self 
Example #2
Source File: googletest.py    From lambda-packs with MIT License
def GetTempDir():
  """Return a temporary directory for tests to use."""
  global _googletest_temp_dir
  if not _googletest_temp_dir:
    first_frame = tf_inspect.stack()[-1][0]
    temp_dir = os.path.join(tempfile.gettempdir(),
                            os.path.basename(tf_inspect.getfile(first_frame)))
    temp_dir = tempfile.mkdtemp(prefix=temp_dir.rstrip('.py'))

    def delete_temp_dir(dirname=temp_dir):
      try:
        file_io.delete_recursively(dirname)
      except errors.OpError as e:
        logging.error('Error removing %s: %s', dirname, e)

    atexit.register(delete_temp_dir)
    _googletest_temp_dir = temp_dir

  return _googletest_temp_dir 
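One subtlety in the snippet above: str.rstrip('.py') strips any trailing run of the characters '.', 'p' and 'y', not the literal '.py' suffix, so a test file named 'happy.py' yields the prefix 'ha'. A suffix-safe variant (a sketch, not the upstream code):

def _strip_py_suffix(name):
  # str.rstrip treats its argument as a character set, so slice off the
  # literal suffix instead.
  return name[:-3] if name.endswith('.py') else name

assert 'happy.py'.rstrip('.py') == 'ha'         # character-set behavior
assert _strip_py_suffix('happy.py') == 'happy'  # suffix behavior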
Example #3
Source File: tpu_estimator.py    From embedding-as-service with MIT License
def _validate_input_pipeline(self):
    """Validates the input pipeline.

    Performs some sanity checks and logs user-friendly information. Ideally we
    would error out to give users a better error message, but if
    _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior) we cannot break
    user code, so we only log a warning.

    Raises:
      RuntimeError: If the validation failed.
    """
    if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
      err_msg = ('Input pipeline contains one or more QueueRunners. '
                 'It could be slow and not scalable. Please consider '
                 'converting your input pipeline to use `tf.data` instead (see '
                 'https://www.tensorflow.org/guide/datasets for '
                 'instructions).')
      if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
        raise RuntimeError(err_msg)
      else:
        logging.warn(err_msg) 
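The warn-or-raise pattern gated on a module-level flag is reusable on its own. A minimal sketch, with STRICT as a hypothetical stand-in for _WRAP_INPUT_FN_INTO_WHILE_LOOP:

from tensorflow.python.platform import tf_logging as logging

STRICT = True  # hypothetical flag; False models the legacy behavior

def check(condition, msg):
  """Raises on new code paths, but only warns on legacy ones."""
  if condition:
    return
  if STRICT:
    raise RuntimeError(msg)
  logging.warn(msg)  # legacy behavior: never break existing user code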
Example #4
Source File: tpu_estimator.py    From embedding-as-service with MIT License
def after_run(self, run_context, run_values):
    _ = run_context
    scalar_stopping_signal = run_values.results
    if _StopSignals.should_stop(scalar_stopping_signal):
      # NOTE(xiejw): In prediction, stopping signals are inserted for each
      # batch. And we append one more batch to signal the system it should stop.
      # The data flow might look like
      #
      #  batch   0: images, labels, stop = 0  (user provided)
      #  batch   1: images, labels, stop = 0  (user provided)
      #  ...
      #  batch  99: images, labels, stop = 0  (user provided)
      #  batch 100: images, labels, stop = 1  (TPUEstimator appended)
      #
      # where the final batch (id = 100) is appended by TPUEstimator, so we
      # should drop it before returning the predictions to the user.
      # To achieve that, we raise OutOfRangeError in after_run. Once
      # MonitoredSession sees this error in SessionRunHook.after_run, the
      # "current" prediction, i.e., the batch with id=100, is discarded
      # immediately.
      raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.') 
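The sentinel mechanism described in the comment can be reproduced with a generic hook. A sketch (StopOnSignalHook is hypothetical, not part of TPUEstimator): fetch a scalar 'stop' tensor on every step and raise OutOfRangeError when the appended sentinel batch arrives, so that MonitoredSession discards that batch and ends prediction cleanly.

from tensorflow.python.framework import errors
from tensorflow.python.training import session_run_hook

class StopOnSignalHook(session_run_hook.SessionRunHook):  # hypothetical
  def __init__(self, stop_tensor):
    self._stop_tensor = stop_tensor

  def before_run(self, run_context):
    # Fetch the scalar stopping signal alongside the user's fetches.
    return session_run_hook.SessionRunArgs(self._stop_tensor)

  def after_run(self, run_context, run_values):
    if run_values.results:  # stop == 1 only on the appended sentinel batch
      raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')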
Example #5
Source File: monitors.py    From deep_image_model with Apache License 2.0
def every_n_step_end(self, step, outputs):
    super(ExportMonitor, self).every_n_step_end(step, outputs)
    try:
      self._last_export_dir = self._estimator.export(
          self.export_dir,
          exports_to_keep=self.exports_to_keep,
          signature_fn=self.signature_fn,
          input_fn=self._input_fn,
          default_batch_size=self._default_batch_size,
          input_feature_key=self._input_feature_key,
          use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
      # Currently we are not synchronized with checkpoint saving, which leads
      # to runtime errors when export is called at the same global step.
      # Exports depend on saved checkpoints for constructing the graph and for
      # reading the global step from the graph instance saved in the checkpoint.
      # If the checkpoint is stale with respect to the current step, the global
      # step is taken to be the last saved checkpoint's global step, and the
      # exporter declines to export the same checkpoint again, hence the
      # message below.
      logging.info("Skipping exporting because the existing checkpoint has "
                   "already been exported. "
                   "Consider exporting less frequently.") 
Example #6
Source File: directory_watcher.py    From deep_image_model with Apache License 2.0
def _SetPath(self, path):
    """Sets the current path to watch for new events.

    This also records the size of the old path, if any. If the size can't be
    found, an error is logged.

    Args:
      path: The full path of the file to watch.
    """
    old_path = self._path
    if old_path and not io_wrapper.IsGCSPath(old_path):
      try:
        # We're done with the path, so store its size.
        size = gfile.Stat(old_path).length
        logging.debug('Setting latest size of %s to %d', old_path, size)
        self._finalized_sizes[old_path] = size
      except errors.OpError as e:
        logging.error('Unable to get size of %s: %s', old_path, e)

    self._path = path
    self._loader = self._loader_factory(path) 
Example #7
Source File: event_multiplexer.py    From deep_image_model with Apache License 2.0
def Reload(self):
    """Call `Reload` on every `EventAccumulator`."""
    logging.info('Beginning EventMultiplexer.Reload()')
    self._reload_called = True
    # Build a list so we're safe even if the list of accumulators is modified
    # while we're reloading.
    with self._accumulators_mutex:
      items = list(self._accumulators.items())

    names_to_delete = set()
    for name, accumulator in items:
      try:
        accumulator.Reload()
      except (OSError, IOError) as e:
        logging.error("Unable to reload accumulator '%s': %s", name, e)
      except directory_watcher.DirectoryDeletedError:
        names_to_delete.add(name)

    with self._accumulators_mutex:
      for name in names_to_delete:
        logging.warning("Deleting accumulator '%s'", name)
        del self._accumulators[name]
    logging.info('Finished with EventMultiplexer.Reload()')
    return self 
Example #8
Source File: learn_runner.py    From lambda-packs with MIT License
def _execute_schedule(experiment, schedule):
  """Execute the method named `schedule` of `experiment`."""
  if not hasattr(experiment, schedule):
    logging.error('Schedule references non-existent task %s', schedule)
    valid_tasks = [x for x in dir(experiment)
                   if not x.startswith('_')
                   and callable(getattr(experiment, x))]
    logging.error('Allowed values for this experiment are: %s', valid_tasks)
    raise ValueError('Schedule references non-existent task %s' % schedule)

  task = getattr(experiment, schedule)
  if not callable(task):
    logging.error('Schedule references non-callable member %s', schedule)
    valid_tasks = [x for x in dir(experiment)
                   if not x.startswith('_')
                   and callable(getattr(experiment, x))]
    logging.error('Allowed values for this experiment are: %s', valid_tasks)
    raise TypeError('Schedule references non-callable member %s' % schedule)
  return task() 
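A usage sketch, reusing _execute_schedule from above with a plain object standing in for an Experiment (FakeExperiment is hypothetical): a valid task name dispatches to the method, while an invalid one logs the allowed values and raises.

class FakeExperiment(object):  # hypothetical stand-in for an Experiment
  def train(self):
    return 'trained'

print(_execute_schedule(FakeExperiment(), 'train'))  # -> trained
# _execute_schedule(FakeExperiment(), 'tarin') would log the valid task
# names (['train']) and raise ValueError.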
Example #9
Source File: directory_watcher.py    From lambda-packs with MIT License
def _SetPath(self, path):
    """Sets the current path to watch for new events.

    This also records the size of the old path, if any. If the size can't be
    found, an error is logged.

    Args:
      path: The full path of the file to watch.
    """
    old_path = self._path
    if old_path and not io_wrapper.IsGCSPath(old_path):
      try:
        # We're done with the path, so store its size.
        size = gfile.Stat(old_path).length
        logging.debug('Setting latest size of %s to %d', old_path, size)
        self._finalized_sizes[old_path] = size
      except errors.OpError as e:
        logging.error('Unable to get size of %s: %s', old_path, e)

    self._path = path
    self._loader = self._loader_factory(path) 
Example #10
Source File: directory_watcher.py    From auto-alt-text-lambda-api with MIT License
def _SetPath(self, path):
    """Sets the current path to watch for new events.

    This also records the size of the old path, if any. If the size can't be
    found, an error is logged.

    Args:
      path: The full path of the file to watch.
    """
    old_path = self._path
    if old_path and not io_wrapper.IsGCSPath(old_path):
      try:
        # We're done with the path, so store its size.
        size = gfile.Stat(old_path).length
        logging.debug('Setting latest size of %s to %d', old_path, size)
        self._finalized_sizes[old_path] = size
      except errors.OpError as e:
        logging.error('Unable to get size of %s: %s', old_path, e)

    self._path = path
    self._loader = self._loader_factory(path) 
Example #11
Source File: monitors.py    From auto-alt-text-lambda-api with MIT License
def every_n_step_end(self, step, outputs):
    super(ExportMonitor, self).every_n_step_end(step, outputs)
    try:
      self._last_export_dir = self._estimator.export(
          self.export_dir,
          exports_to_keep=self.exports_to_keep,
          signature_fn=self.signature_fn,
          input_fn=self._input_fn,
          default_batch_size=self._default_batch_size,
          input_feature_key=self._input_feature_key,
          use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
      # Currently we are not synchronized with checkpoint saving, which leads
      # to runtime errors when export is called at the same global step.
      # Exports depend on saved checkpoints for constructing the graph and for
      # reading the global step from the graph instance saved in the checkpoint.
      # If the checkpoint is stale with respect to the current step, the global
      # step is taken to be the last saved checkpoint's global step, and the
      # exporter declines to export the same checkpoint again, hence the
      # message below.
      logging.info("Skipping exporting because the existing checkpoint has "
                   "already been exported. "
                   "Consider exporting less frequently.") 
Example #12
Source File: tpu_estimator.py    From Chinese-XLNet with Apache License 2.0
def after_run(self, run_context, run_values):
    _ = run_context
    scalar_stopping_signal = run_values.results
    if _StopSignals.should_stop(scalar_stopping_signal):
      # NOTE(xiejw): In prediction, stopping signals are inserted for each
      # batch. And we append one more batch to signal the system it should stop.
      # The data flow might look like
      #
      #  batch   0: images, labels, stop = 0  (user provided)
      #  batch   1: images, labels, stop = 0  (user provided)
      #  ...
      #  batch  99: images, labels, stop = 0  (user provided)
      #  batch 100: images, labels, stop = 1  (TPUEstimator appended)
      #
      # where the final batch (id = 100) is appended by TPUEstimator, so we
      # should drop it before returning the predictions to the user.
      # To achieve that, we raise OutOfRangeError in after_run. Once
      # MonitoredSession sees this error in SessionRunHook.after_run, the
      # "current" prediction, i.e., the batch with id=100, is discarded
      # immediately.
      raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.') 
Example #13
Source File: tpu_estimator.py    From Chinese-XLNet with Apache License 2.0
def _validate_input_pipeline(self):
    """Validates the input pipeline.

    Performs some sanity checks and logs user-friendly information. Ideally we
    would error out to give users a better error message, but if
    _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior) we cannot break
    user code, so we only log a warning.

    Raises:
      RuntimeError: If the validation failed.
    """
    if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
      err_msg = ('Input pipeline contains one or more QueueRunners. '
                 'It could be slow and not scalable. Please consider '
                 'converting your input pipeline to use `tf.data` instead (see '
                 'https://www.tensorflow.org/guide/datasets for '
                 'instructions).')
      if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
        raise RuntimeError(err_msg)
      else:
        logging.warn(err_msg) 
Example #14
Source File: event_multiplexer.py    From lambda-packs with MIT License
def Reload(self):
    """Call `Reload` on every `EventAccumulator`."""
    logging.info('Beginning EventMultiplexer.Reload()')
    self._reload_called = True
    # Build a list so we're safe even if the list of accumulators is modified
    # while we're reloading.
    with self._accumulators_mutex:
      items = list(self._accumulators.items())

    names_to_delete = set()
    for name, accumulator in items:
      try:
        accumulator.Reload()
      except (OSError, IOError) as e:
        logging.error("Unable to reload accumulator '%s': %s", name, e)
      except directory_watcher.DirectoryDeletedError:
        names_to_delete.add(name)

    with self._accumulators_mutex:
      for name in names_to_delete:
        logging.warning("Deleting accumulator '%s'", name)
        del self._accumulators[name]
    logging.info('Finished with EventMultiplexer.Reload()')
    return self 
Example #15
Source File: tpu_estimator.py    From Chinese-XLNet with Apache License 2.0
def _assertCompilationSucceeded(self, result, coord):
    proto = tpu_compilation_result.CompilationResultProto()
    proto.ParseFromString(result)
    if proto.status_error_message:
      logging.error('Compilation failed: {}'.format(proto.status_error_message))
      coord.request_stop()
    else:
      logging.info('Compilation succeeded') 
Example #16
Source File: directory_watcher.py    From auto-alt-text-lambda-api with MIT License
def _HasOOOWrite(self, path):
    """Returns whether the path has had an out-of-order write."""
    # Check the sizes of each path before the current one.
    size = gfile.Stat(path).length
    old_size = self._finalized_sizes.get(path, None)
    if size != old_size:
      if old_size is None:
        logging.error('File %s created after file %s even though it\'s '
                      'lexicographically earlier', path, self._path)
      else:
        logging.error('File %s updated even though the current file is %s',
                      path, self._path)
      return True
    else:
      return False 
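Together with _SetPath (Examples #6, #9 and #10), this implements a simple bookkeeping scheme: record each file's size when the watcher moves past it, then treat any later size change, or the appearance of a file that was never finalized, as an out-of-order write. The same idea with a plain dict and os.path.getsize (a sketch, not the upstream code):

import os

finalized_sizes = {}

def finalize(path):
  # Record the size at the moment the watcher moves on to a newer file.
  finalized_sizes[path] = os.path.getsize(path)

def has_ooo_write(path):
  # A finalized file whose size changed, or a path we never finalized at
  # all (get() returns None), counts as an out-of-order write.
  return os.path.getsize(path) != finalized_sizes.get(path)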
Example #17
Source File: training_util.py    From deep_image_model with Apache License 2.0
def get_global_step(graph=None):
  """Get the global step tensor.

  The global step tensor must be an integer variable. We first try to find it
  in the collection `GLOBAL_STEP`, or by name `global_step:0`.

  Args:
    graph: The graph to find the global step in. If missing, use default graph.

  Returns:
    The global step variable, or `None` if none was found.

  Raises:
    TypeError: If the global step tensor has a non-integer type, or if it is not
      a `Variable`.
  """
  graph = ops.get_default_graph() if graph is None else graph
  global_step_tensor = None
  global_step_tensors = graph.get_collection(ops.GraphKeys.GLOBAL_STEP)
  if len(global_step_tensors) == 1:
    global_step_tensor = global_step_tensors[0]
  elif not global_step_tensors:
    try:
      global_step_tensor = graph.get_tensor_by_name('global_step:0')
    except KeyError:
      return None
  else:
    logging.error('Multiple tensors in global_step collection.')
    return None

  assert_global_step(global_step_tensor)
  return global_step_tensor 
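A usage sketch in TF1-style graph mode (create_global_step registers the variable in the GLOBAL_STEP collection, the first place get_global_step looks):

import tensorflow.compat.v1 as tf
from tensorflow.python.training import training_util

g = tf.Graph()
with g.as_default():
  tf.train.create_global_step()
step = training_util.get_global_step(g)  # found via the GLOBAL_STEP collection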
Example #18
Source File: saver.py    From deep_image_model with Apache License 2.0
def latest_checkpoint(checkpoint_dir, latest_filename=None):
  """Finds the filename of latest saved checkpoint file.

  Args:
    checkpoint_dir: Directory where the variables were saved.
    latest_filename: Optional name for the protocol buffer file that
      contains the list of most recent checkpoint filenames.
      See the corresponding argument to `Saver.save()`.

  Returns:
    The full path to the latest checkpoint or `None` if no checkpoint was found.
  """
  # Pick the latest checkpoint based on checkpoint state.
  ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)
  if ckpt and ckpt.model_checkpoint_path:
    # Look for either a V2 path or a V1 path, with priority for V2.
    v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
                                         saver_pb2.SaverDef.V2)
    v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
                                         saver_pb2.SaverDef.V1)
    if file_io.get_matching_files(v2_path) or file_io.get_matching_files(
        v1_path):
      return ckpt.model_checkpoint_path
    else:
      logging.error("Couldn't match files for checkpoint %s",
                    ckpt.model_checkpoint_path)
  return None 
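A usage sketch (the public alias is tf.train.latest_checkpoint; '/tmp/my_model' is a hypothetical directory):

from tensorflow.python.training import saver as saver_lib

prefix = saver_lib.latest_checkpoint('/tmp/my_model')  # hypothetical dir
if prefix is None:
  print('No checkpoint found; starting from scratch.')
else:
  print('Restoring from', prefix)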
Example #19
Source File: basic_session_run_hooks.py    From deep_image_model with Apache License 2.0
def __init__(self,
               save_steps=None,
               save_secs=None,
               output_dir=None,
               summary_writer=None,
               scaffold=None,
               summary_op=None):
    """Initializes a `SummarySaver` monitor.

    Args:
      save_steps: `int`, save summaries every N steps. Exactly one of
          `save_secs` and `save_steps` should be set.
      save_secs: `int`, save summaries every N seconds.
      output_dir: `string`, the directory to save the summaries to. Only used
          if no `summary_writer` is supplied.
      summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
          one will be created accordingly.
      scaffold: `Scaffold` to get summary_op if it's not provided.
      summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
          buffer, as output by TF summary methods like `tf.summary.scalar` or
          `tf.summary.merge_all`.

    Raises:
      ValueError: Exactly one of scaffold or summary_op should be set.
    """
    if ((scaffold is None and summary_op is None) or
        (scaffold is not None and summary_op is not None)):
      raise ValueError(
          "Exactly one of scaffold or summary_op must be provided.")
    self._summary_op = summary_op
    self._summary_writer = summary_writer
    if summary_writer is None and output_dir:
      self._summary_writer = SummaryWriterCache.get(output_dir)
    self._scaffold = scaffold
    self._timer = _SecondOrStepTimer(every_secs=save_secs,
                                     every_steps=save_steps)
    # TODO(mdan): Throw an error if output_dir and summary_writer are None. 
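A construction sketch (this __init__ is SummarySaverHook's, public as tf.train.SummarySaverHook; '/tmp/train' is a hypothetical directory, and the graph is assumed to define at least one summary so merge_all() is not None):

import tensorflow.compat.v1 as tf

hook = tf.train.SummarySaverHook(
    save_steps=100,                     # or save_secs=..., but not both
    output_dir='/tmp/train',            # hypothetical output directory
    summary_op=tf.summary.merge_all())  # exactly one of summary_op/scaffold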
Example #20
Source File: vgsl_model.py    From Action_Recognition_Zoo with MIT License
def _AddRateToSummary(tag, rate, step, sw):
  """Adds the given rate to the summary with the given tag.

  Args:
    tag:   Name for this value.
    rate:  Value to add to the summary. Perhaps an error rate.
    step:  Global step of the graph for the x-coordinate of the summary.
    sw:    Summary writer to which to write the rate value.
  """
  sw.add_summary(
      summary_pb2.Summary(value=[summary_pb2.Summary.Value(
          tag=tag, simple_value=rate)]), step) 
Example #21
Source File: vgsl_model.py    From yolo_v2 with Apache License 2.0
def _AddRateToSummary(tag, rate, step, sw):
  """Adds the given rate to the summary with the given tag.

  Args:
    tag:   Name for this value.
    rate:  Value to add to the summary. Perhaps an error rate.
    step:  Global step of the graph for the x-coordinate of the summary.
    sw:    Summary writer to which to write the rate value.
  """
  sw.add_summary(
      summary_pb2.Summary(value=[summary_pb2.Summary.Value(
          tag=tag, simple_value=rate)]), step) 
Example #22
Source File: tpu_estimator.py    From Chinese-XLNet with Apache License 2.0
def features_and_labels(self):
    """Gets `features` and `labels`."""
    if self.is_dataset:
      if self._iterator is None:
        raise RuntimeError('Internal error: Must run dataset_initializer '
                           'before calling features_and_labels(). Please file '
                           'a bug!')
      return _Inputs._parse_inputs(self._iterator.get_next())

    return (self._features, self._labels) 
Example #23
Source File: vgsl_model.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def _AddRateToSummary(tag, rate, step, sw):
  """Adds the given rate to the summary with the given tag.

  Args:
    tag:   Name for this value.
    rate:  Value to add to the summary. Perhaps an error rate.
    step:  Global step of the graph for the x-coordinate of the summary.
    sw:    Summary writer to which to write the rate value.
  """
  sw.add_summary(
      summary_pb2.Summary(value=[summary_pb2.Summary.Value(
          tag=tag, simple_value=rate)]), step) 
Example #24
Source File: vgsl_model.py    From hands-detection with MIT License
def _AddRateToSummary(tag, rate, step, sw):
  """Adds the given rate to the summary with the given tag.

  Args:
    tag:   Name for this value.
    rate:  Value to add to the summary. Perhaps an error rate.
    step:  Global step of the graph for the x-coordinate of the summary.
    sw:    Summary writer to which to write the rate value.
  """
  sw.add_summary(
      summary_pb2.Summary(value=[summary_pb2.Summary.Value(
          tag=tag, simple_value=rate)]), step) 
Example #25
Source File: data_load.py    From tacotron_asr with Apache License 2.0
def _run(self, sess, enqueue_op, coord=None):

        if coord:
            coord.register_thread(threading.current_thread())
        decremented = False
        try:
            while True:
                if coord and coord.should_stop():
                    break
                try:
                    self.func(sess, enqueue_op)  # call enqueue function
                except self._queue_closed_exception_types:  # pylint: disable=catching-non-exception
                    # This exception indicates that a queue was closed.
                    with self._lock:
                        self._runs_per_session[sess] -= 1
                        decremented = True
                        if self._runs_per_session[sess] == 0:
                            try:
                                sess.run(self._close_op)
                            except Exception as e:
                                # Intentionally ignore errors from close_op.
                                logging.vlog(1, "Ignored exception: %s", str(e))
                        return
        except Exception as e:
            # This catches all other exceptions.
            if coord:
                coord.request_stop(e)
            else:
                logging.error("Exception in QueueRunner: %s", str(e))
                with self._lock:
                    self._exceptions_raised.append(e)
                raise
        finally:
            # Make sure we account for all terminations: normal or errors.
            if not decremented:
                with self._lock:
                    self._runs_per_session[sess] -= 1 
Example #26
Source File: monitors.py    From auto-alt-text-lambda-api with MIT License
def every_n_step_end(self, step, outputs):
    super(NanLoss, self).every_n_step_end(step, outputs)
    if np.isnan(_extract_output(outputs, self._loss_tensor)):
      failure_message = "Model diverged with loss = NaN."
      if self._fail_on_nan_loss:
        logging.error(failure_message)
        raise NanLossDuringTrainingError
      else:
        logging.warning(failure_message)
        # We don't raise an error; instead we return "should stop" so that
        # training halts without an exception.
        return True 
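A construction sketch (NanLoss is a tf.contrib.learn monitor; loss_tensor is assumed to be the model's scalar training loss, and the keyword argument mirrors the self._fail_on_nan_loss attribute used above):

nan_monitor = NanLoss(loss_tensor, fail_on_nan_loss=False)
# With fail_on_nan_loss=True (the default), a NaN loss is logged at ERROR
# level and NanLossDuringTrainingError is raised; with False it is logged
# as a warning and training is merely asked to stop.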
Example #27
Source File: monitors.py    From auto-alt-text-lambda-api with MIT License
def __init__(self,
               summary_op,
               save_steps=100,
               output_dir=None,
               summary_writer=None,
               scaffold=None):
    """Initializes a `SummarySaver` monitor.

    Args:
      summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
          buffer, as output by TF summary methods like `summary.scalar` or
          `summary.merge_all`.
      save_steps: `int`, save summaries every N steps. See `EveryN`.
      output_dir: `string`, the directory to save the summaries to. Only used
          if no `summary_writer` is supplied.
      summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
          one will be created accordingly.
      scaffold: `Scaffold` to get summary_op if it's not provided.
    """
    # TODO(ipolosukhin): Implement every N seconds.
    super(SummarySaver, self).__init__(every_n_steps=save_steps)
    self._summary_op = summary_op
    self._summary_writer = summary_writer
    if summary_writer is None and output_dir:
      self._summary_writer = summary_io.SummaryWriter(output_dir)
    self._scaffold = scaffold
    # TODO(mdan): Throw an error if output_dir and summary_writer are None. 
Example #28
Source File: vgsl_model.py    From DOTA_models with Apache License 2.0
def _AddRateToSummary(tag, rate, step, sw):
  """Adds the given rate to the summary with the given tag.

  Args:
    tag:   Name for this value.
    rate:  Value to add to the summary. Perhaps an error rate.
    step:  Global step of the graph for the x-coordinate of the summary.
    sw:    Summary writer to which to write the rate value.
  """
  sw.add_summary(
      summary_pb2.Summary(value=[summary_pb2.Summary.Value(
          tag=tag, simple_value=rate)]), step) 
Example #29
Source File: tensorboard_server.py    From tensorlang with Apache License 2.0
def run_simple_server(tb_app, host, port):
  """Run a TensorBoard HTTP server, and print some messages to the console."""
  try:
    server, url = make_simple_server(tb_app, host, port)
  except socket.error:
    # An error message was already logged
    exit(-1)
  msg = 'Starting TensorBoard %s at %s' % (tb_app.tag, url)
  print(msg)
  logging.info(msg)
  print('(Press CTRL+C to quit)')
  sys.stdout.flush()

  server.serve_forever() 
Example #30
Source File: vgsl_model.py    From object_detection_kitti with Apache License 2.0
def _AddRateToSummary(tag, rate, step, sw):
  """Adds the given rate to the summary with the given tag.

  Args:
    tag:   Name for this value.
    rate:  Value to add to the summary. Perhaps an error rate.
    step:  Global step of the graph for the x-coordinate of the summary.
    sw:    Summary writer to which to write the rate value.
  """
  sw.add_summary(
      summary_pb2.Summary(value=[summary_pb2.Summary.Value(
          tag=tag, simple_value=rate)]), step)