Python tensorflow.python.platform.tf_logging.log_first_n() Examples

The following are 10 code examples of tensorflow.python.platform.tf_logging.log_first_n(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.python.platform.tf_logging, or try the search function.
Example #1
Source File: supervisor.py    From ctw-baseline with MIT License 6 votes vote down vote up
def run_loop(self):
    """Report training throughput (steps/sec) as a summary and a log line.

    Computes how many global steps completed since the previous call, writes
    the resulting steps/sec as a scalar summary via the supervisor's summary
    writer (when one exists), and logs the value for the first 10 calls.
    """
    # Steps completed since the previous invocation.
    step_now = training_util.global_step(self._sess, self._step_counter)
    new_steps = step_now - self._last_step
    self._last_step = step_now
    # Wall-clock time elapsed since the previous invocation.
    now = time.time()
    elapsed = now - self._last_time
    self._last_time = now
    # Zero elapsed time would divide by zero; report infinity instead.
    steps_per_sec = new_steps / elapsed if elapsed > 0. else float("inf")
    summary = Summary(
        value=[Summary.Value(tag=self._summary_tag,
                             simple_value=steps_per_sec)])
    writer = self._sv.summary_writer
    if writer:
        writer.add_summary(summary, step_now)
    logging.log_first_n(logging.INFO, "%s: %g", 10,
                        self._summary_tag, steps_per_sec)
Example #2
Source File: supervisor.py    From lambda-packs with MIT License 6 votes vote down vote up
def run_loop(self):
    """Emit a steps-per-second summary for the training loop.

    Derives the step delta and time delta since the last call, converts them
    to steps/sec, records a scalar summary through the supervisor's summary
    writer (if present), and logs the first 10 measurements.
    """
    # How many global steps ran since we last checked.
    step_now = training_util.global_step(self._sess, self._step_counter)
    delta_steps = step_now - self._last_step
    self._last_step = step_now
    # How much wall-clock time passed since we last checked.
    now = time.time()
    delta_t = now - self._last_time
    self._last_time = now
    # Avoid dividing by a zero interval; treat it as infinite throughput.
    if delta_t > 0.:
        rate = delta_steps / delta_t
    else:
        rate = float("inf")
    summary = Summary(
        value=[Summary.Value(tag=self._summary_tag, simple_value=rate)])
    if self._sv.summary_writer:
        self._sv.summary_writer.add_summary(summary, step_now)
    logging.log_first_n(logging.INFO, "%s: %g", 10, self._summary_tag, rate)
Example #3
Source File: supervisor.py    From auto-alt-text-lambda-api with MIT License 6 votes vote down vote up
def run_loop(self):
    """Compute and report the number of training steps per second.

    Reads the current global step, derives steps/sec since the previous
    call, writes the value as a scalar summary to the supervisor's summary
    writer (if any), and logs it for the first 10 invocations.
    """
    # Count the steps.
    current_step = training_util.global_step(self._sess, self._sv.global_step)
    added_steps = current_step - self._last_step
    self._last_step = current_step
    # Measure the elapsed time.
    current_time = time.time()
    elapsed_time = current_time - self._last_time
    self._last_time = current_time
    # Reports the number of steps done per second. Guard against a zero
    # elapsed interval, which would raise ZeroDivisionError; report
    # infinity instead (matches the other versions of this method).
    if elapsed_time > 0.:
      steps_per_sec = added_steps / elapsed_time
    else:
      steps_per_sec = float("inf")
    summary = Summary(value=[Summary.Value(tag=self._summary_tag,
                                           simple_value=steps_per_sec)])
    if self._sv.summary_writer:
      self._sv.summary_writer.add_summary(summary, current_step)
    logging.log_first_n(logging.INFO, "%s: %g", 10,
                        self._summary_tag, steps_per_sec)
Example #4
Source File: supervisor.py    From deep_image_model with Apache License 2.0 6 votes vote down vote up
def run_loop(self):
    """Compute and report the number of training steps per second.

    Reads the current global step, derives steps/sec since the previous
    call, writes the value as a scalar summary to the supervisor's summary
    writer (if any), and logs it for the first 10 invocations.
    """
    # Count the steps.
    current_step = training_util.global_step(self._sess, self._sv.global_step)
    added_steps = current_step - self._last_step
    self._last_step = current_step
    # Measure the elapsed time.
    current_time = time.time()
    elapsed_time = current_time - self._last_time
    self._last_time = current_time
    # Reports the number of steps done per second. Guard against a zero
    # elapsed interval, which would raise ZeroDivisionError; report
    # infinity instead (matches the other versions of this method).
    if elapsed_time > 0.:
      steps_per_sec = added_steps / elapsed_time
    else:
      steps_per_sec = float("inf")
    summary = Summary(value=[Summary.Value(tag=self._summary_tag,
                                           simple_value=steps_per_sec)])
    if self._sv.summary_writer:
      self._sv.summary_writer.add_summary(summary, current_step)
    logging.log_first_n(logging.INFO, "%s: %g", 10,
                        self._summary_tag, steps_per_sec)
Example #5
Source File: supervisor.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License 6 votes vote down vote up
def run_loop(self):
    """Track and publish the training loop's throughput in steps/sec.

    Measures the change in the global step and in wall-clock time since the
    previous call, publishes steps/sec as a scalar summary through the
    supervisor's writer (when available), and logs the first 10 readings.
    """
    # Step delta since the previous call.
    latest_step = training_util.global_step(self._sess, self._step_counter)
    step_delta = latest_step - self._last_step
    self._last_step = latest_step
    # Time delta since the previous call.
    latest_time = time.time()
    time_delta = latest_time - self._last_time
    self._last_time = latest_time
    # A zero-length interval cannot be divided through; use infinity.
    steps_per_sec = step_delta / time_delta if time_delta > 0. else float("inf")
    rate_value = Summary.Value(tag=self._summary_tag,
                               simple_value=steps_per_sec)
    summary = Summary(value=[rate_value])
    if self._sv.summary_writer:
        self._sv.summary_writer.add_summary(summary, latest_step)
    logging.log_first_n(logging.INFO, "%s: %g", 10,
                        self._summary_tag, steps_per_sec)
Example #6
Source File: supervisor.py    From keras-lambda with MIT License 6 votes vote down vote up
def run_loop(self):
    """Compute and report the number of training steps per second.

    Reads the current global step, derives steps/sec since the previous
    call, writes the value as a scalar summary to the supervisor's summary
    writer (if any), and logs it for the first 10 invocations.
    """
    # Count the steps.
    current_step = training_util.global_step(self._sess, self._sv.global_step)
    added_steps = current_step - self._last_step
    self._last_step = current_step
    # Measure the elapsed time.
    current_time = time.time()
    elapsed_time = current_time - self._last_time
    self._last_time = current_time
    # Reports the number of steps done per second. Guard against a zero
    # elapsed interval, which would raise ZeroDivisionError; report
    # infinity instead (matches the other versions of this method).
    if elapsed_time > 0.:
      steps_per_sec = added_steps / elapsed_time
    else:
      steps_per_sec = float("inf")
    summary = Summary(value=[Summary.Value(tag=self._summary_tag,
                                           simple_value=steps_per_sec)])
    if self._sv.summary_writer:
      self._sv.summary_writer.add_summary(summary, current_step)
    logging.log_first_n(logging.INFO, "%s: %g", 10,
                        self._summary_tag, steps_per_sec)
Example #7
Source File: event_accumulator.py    From lambda-packs with MIT License 5 votes vote down vote up
def _ProcessHealthPillSummary(self, value, event):
    """Process summaries containing health pills.

    Health-pill summaries carry a Tensor field and a specially formatted
    node_name (a debugger watch key). Watch keys that do not match the
    expected pattern are reported once at ERROR level and skipped.

    Args:
      value: A summary_pb2.Summary.Value with a Tensor field.
      event: The event_pb2.Event containing that value.
    """
    elements = tensor_util.MakeNdarray(value.tensor)

    # value.node_name is a watch key of the form
    # "<node name>:<output slot>:DebugNumericSummary"; pull out the node
    # name and the output slot via a regular expression.
    watch_key_match = re.match(
        r'^(.*):(\d+):DebugNumericSummary$', value.node_name)
    if watch_key_match is None:
      logging.log_first_n(
          logging.ERROR,
          'Unsupported watch key %s for health pills; skipping this sequence.',
          1,
          value.node_name)
      return

    self._ProcessHealthPill(
        event.wall_time, event.step,
        watch_key_match.group(1),          # node name
        int(watch_key_match.group(2)),     # output slot
        elements)
Example #8
Source File: event_accumulator.py    From auto-alt-text-lambda-api with MIT License 4 votes vote down vote up
def _ProcessTensorSummary(self, value, event):
    """Process summaries generated by the TensorSummary op.

    Tensor summaries are not self-descriptive: the tag and summary type
    live in the NodeDef of the producing TensorSummary op, so a graph must
    be available. Any summary that cannot be interpreted is reported once
    at ERROR level and skipped.

    Args:
      value: A summary_pb2.Summary.Value with a Tensor field.
      event: The event_pb2.Event containing that value.
    """

    def _error_once(message):
      # Each distinct error message is emitted at most a single time.
      logging.log_first_n(logging.ERROR, message, 1)

    name = value.node_name

    # Without a graph there is no NodeDef to consult.
    if self._graph is None:
      _error_once('Attempting to process TensorSummary output, but '
                  'no graph is present, so processing is impossible. '
                  'All TensorSummary output will be ignored.')
      return

    if name not in self._tensor_summaries:
      _error_once('No node_def for TensorSummary {}; skipping this sequence.'.
                  format(name))
      return

    type_hint = self._tensor_summaries[name].type_hint
    if not type_hint:
      _error_once('No type_hint for TensorSummary {}; skipping this sequence.'.
                  format(name))
      return

    if type_hint != 'scalar':
      _error_once(
          'Unsupported type {} for TensorSummary {}; skipping this sequence.'.
          format(type_hint, name))
      return

    scalar = float(tensor_util.MakeNdarray(value.tensor))
    self._ProcessScalar(name, event.wall_time, event.step, scalar)
Example #9
Source File: event_accumulator.py    From deep_image_model with Apache License 2.0 4 votes vote down vote up
def _ProcessTensorSummary(self, value, event):
    """Process summaries generated by the TensorSummary op.

    A TensorSummary value only carries a Tensor; its tag and summary type
    must be looked up in the NodeDef of the corresponding op, which requires
    the graph. Unprocessable summaries are logged once at ERROR level and
    then ignored.

    Args:
      value: A summary_pb2.Summary.Value with a Tensor field.
      event: The event_pb2.Event containing that value.
    """

    def _report(message):
      # Log each distinct problem only the first time it occurs.
      logging.log_first_n(logging.ERROR, message, 1)

    name = value.node_name

    # Guard clauses: bail out early on every unprocessable case.
    if self._graph is None:
      _report('Attempting to process TensorSummary output, but '
              'no graph is present, so processing is impossible. '
              'All TensorSummary output will be ignored.')
      return

    if name not in self._tensor_summaries:
      _report('No node_def for TensorSummary {}; skipping this sequence.'.
              format(name))
      return

    type_hint = self._tensor_summaries[name].type_hint
    if not type_hint:
      _report('No type_hint for TensorSummary {}; skipping this sequence.'.
              format(name))
      return

    if type_hint != 'scalar':
      _report(
          'Unsupported type {} for TensorSummary {}; skipping this sequence.'.
          format(type_hint, name))
      return

    self._ProcessScalar(name, event.wall_time, event.step,
                        float(tensor_util.MakeNdarray(value.tensor)))
Example #10
Source File: event_accumulator.py    From keras-lambda with MIT License 4 votes vote down vote up
def _ProcessTensorSummary(self, value, event):
    """Process summaries generated by the TensorSummary op.

    These summaries carry only a Tensor field; interpreting one requires
    reading the NodeDef of the producing TensorSummary op (for the tag and
    summary type), so the graph must be present. Summaries that cannot be
    processed trigger a one-time ERROR log and are skipped.

    Args:
      value: A summary_pb2.Summary.Value with a Tensor field.
      event: The event_pb2.Event containing that value.
    """

    def _log_once(message):
      # De-duplicate: each distinct message is logged at most once.
      logging.log_first_n(logging.ERROR, message, 1)

    name = value.node_name

    if self._graph is None:
      _log_once('Attempting to process TensorSummary output, but '
                'no graph is present, so processing is impossible. '
                'All TensorSummary output will be ignored.')
      return

    if name not in self._tensor_summaries:
      _log_once('No node_def for TensorSummary {}; skipping this sequence.'.
                format(name))
      return

    type_hint = self._tensor_summaries[name].type_hint
    if not type_hint:
      _log_once('No type_hint for TensorSummary {}; skipping this sequence.'.
                format(name))
      return

    if type_hint == 'scalar':
      scalar_value = float(tensor_util.MakeNdarray(value.tensor))
      self._ProcessScalar(name, event.wall_time, event.step, scalar_value)
    else:
      _log_once(
          'Unsupported type {} for TensorSummary {}; skipping this sequence.'.
          format(type_hint, name))