Python tensorflow.python.platform.tf_logging.debug() Examples
The following are 30 code examples of tensorflow.python.platform.tf_logging.debug(). You may also want to check out all available functions/classes of the module tensorflow.python.platform.tf_logging.
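Before the examples, here is a minimal sketch of the call in context. tf_logging is TensorFlow's internal wrapper around Python's standard logging module, and debug-level messages are filtered out at the default verbosity, so the sketch raises the verbosity first. The record count and file name are made-up values for illustration.

from tensorflow.python.platform import tf_logging as logging

# Debug messages are suppressed at the default verbosity, so lower the
# threshold to DEBUG before emitting them.
logging.set_verbosity(logging.DEBUG)

# Arguments are interpolated printf-style, and only if the message is
# actually emitted; this is the lazy-formatting pattern used throughout
# the examples below.
logging.debug('Loaded %d events from %s', 42, '/tmp/events.out')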
Example #1
Source File: estimator.py From deep_image_model with Apache License 2.0
def _check_inputs(self, features, labels):
  if self._features_info is not None:
    logging.debug('Given features: %s, required signatures: %s.',
                  str(features), str(self._features_info))
    if not tensor_signature.tensors_compatible(features, self._features_info):
      raise ValueError('Features are incompatible with given information. '
                       'Given features: %s, required signatures: %s.' %
                       (str(features), str(self._features_info)))
  else:
    self._features_info = tensor_signature.create_signatures(features)
    logging.debug('Setting feature info to %s.', str(self._features_info))
  if labels is not None:
    if self._labels_info is not None:
      logging.debug('Given labels: %s, required signatures: %s.',
                    str(labels), str(self._labels_info))
      if not tensor_signature.tensors_compatible(labels, self._labels_info):
        raise ValueError('Labels are incompatible with given information. '
                         'Given labels: %s, required signatures: %s.' %
                         (str(labels), str(self._labels_info)))
    else:
      self._labels_info = tensor_signature.create_signatures(labels)
      logging.debug('Setting labels info to %s', str(self._labels_info))
Example #2
Source File: estimator.py From keras-lambda with MIT License
def _check_inputs(self, features, labels):
  if self._features_info is not None:
    logging.debug('Given features: %s, required signatures: %s.',
                  str(features), str(self._features_info))
    if not tensor_signature.tensors_compatible(features, self._features_info):
      raise ValueError('Features are incompatible with given information. '
                       'Given features: %s, required signatures: %s.' %
                       (str(features), str(self._features_info)))
  else:
    self._features_info = tensor_signature.create_signatures(features)
    logging.debug('Setting feature info to %s.', str(self._features_info))
  if labels is not None:
    if self._labels_info is not None:
      logging.debug('Given labels: %s, required signatures: %s.',
                    str(labels), str(self._labels_info))
      if not tensor_signature.tensors_compatible(labels, self._labels_info):
        raise ValueError('Labels are incompatible with given information. '
                         'Given labels: %s, required signatures: %s.' %
                         (str(labels), str(self._labels_info)))
    else:
      self._labels_info = tensor_signature.create_signatures(labels)
      logging.debug('Setting labels info to %s', str(self._labels_info))
Example #3
Source File: event_file_loader.py From keras-lambda with MIT License
def Load(self):
  """Loads all new values from disk.

  Calling Load multiple times in a row will not 'drop' events as long as the
  return value is not iterated over.

  Yields:
    All values that were written to disk that have not been yielded yet.
  """
  while True:
    try:
      with errors.raise_exception_on_not_ok_status() as status:
        self._reader.GetNext(status)
    except (errors.DataLossError, errors.OutOfRangeError):
      # We ignore partial read exceptions, because a record may be truncated.
      # PyRecordReader holds the offset prior to the failed read, so retrying
      # will succeed.
      break
    event = event_pb2.Event()
    event.ParseFromString(self._reader.record())
    yield event
  logging.debug('No more events in %s', self._file_path)
Example #4
Source File: directory_watcher.py From keras-lambda with MIT License
def _SetPath(self, path):
  """Sets the current path to watch for new events.

  This also records the size of the old path, if any. If the size can't be
  found, an error is logged.

  Args:
    path: The full path of the file to watch.
  """
  old_path = self._path
  if old_path and not io_wrapper.IsGCSPath(old_path):
    try:
      # We're done with the path, so store its size.
      size = gfile.Stat(old_path).length
      logging.debug('Setting latest size of %s to %d', old_path, size)
      self._finalized_sizes[old_path] = size
    except errors.OpError as e:
      logging.error('Unable to get size of %s: %s', old_path, e)
  self._path = path
  self._loader = self._loader_factory(path)
Example #5
Source File: registry.py From HumanRecognition with MIT License
def _Create(baseclass, subclass_name, *args, **kwargs):
  """Creates an instance of a named subclass.

  Args:
    baseclass: The expected base class.
    subclass_name: The fully-qualified type name of the subclass to create.
    *args: Passed to the subclass constructor.
    **kwargs: Passed to the subclass constructor.

  Returns:
    An instance of the named subclass, or None on error.
  """
  subclass = _GetClass(subclass_name)
  if subclass is None:
    return None  # _GetClass() already logged an error
  if not issubclass(subclass, baseclass):
    logging.debug('Class "%s" is not a subclass of "%s"', subclass_name,
                  baseclass.__name__)
    return None
  return subclass(*args, **kwargs)
Example #6
Source File: tpu_estimator.py From xlnet with Apache License 2.0
def _run_infeed(self, queue_ctx, session):
  logging.info('Starting infeed thread controller.')
  if self._initial_infeed_sleep_secs:
    logging.info('Infeed thread sleeping for %d seconds.',
                 self._initial_infeed_sleep_secs)
    time.sleep(self._initial_infeed_sleep_secs)
    logging.info('Infeed thread starting after sleep')

  with self._rendezvous.catch_errors(source='infeed', session=session):
    if self._run_infeed_loop_on_coordinator:
      for count, steps in enumerate(queue_ctx.read_iteration_counts()):
        for i in xrange(steps):
          logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
          session.run(self._enqueue_ops)
    else:
      for _ in queue_ctx.read_iteration_counts():
        session.run(self._enqueue_ops)
  logging.info('Infeed thread finished, shutting down.')
Example #7
Source File: registry.py From object_detection_kitti with Apache License 2.0
def _Create(baseclass, subclass_name, *args, **kwargs):
  """Creates an instance of a named subclass.

  Args:
    baseclass: The expected base class.
    subclass_name: The fully-qualified type name of the subclass to create.
    *args: Passed to the subclass constructor.
    **kwargs: Passed to the subclass constructor.

  Returns:
    An instance of the named subclass, or None on error.
  """
  subclass = _GetClass(subclass_name)
  if subclass is None:
    return None  # _GetClass() already logged an error
  if not issubclass(subclass, baseclass):
    logging.debug('Class "%s" is not a subclass of "%s"', subclass_name,
                  baseclass.__name__)
    return None
  return subclass(*args, **kwargs)
Example #8
Source File: registry.py From hands-detection with MIT License
def _Create(baseclass, subclass_name, *args, **kwargs):
  """Creates an instance of a named subclass.

  Args:
    baseclass: The expected base class.
    subclass_name: The fully-qualified type name of the subclass to create.
    *args: Passed to the subclass constructor.
    **kwargs: Passed to the subclass constructor.

  Returns:
    An instance of the named subclass, or None on error.
  """
  subclass = _GetClass(subclass_name)
  if subclass is None:
    return None  # _GetClass() already logged an error
  if not issubclass(subclass, baseclass):
    logging.debug('Class "%s" is not a subclass of "%s"', subclass_name,
                  baseclass.__name__)
    return None
  return subclass(*args, **kwargs)
Example #9
Source File: tpu_estimator.py From transformer-xl with Apache License 2.0
def _run_infeed(self, queue_ctx, session):
  logging.info('Starting infeed thread controller.')
  if self._initial_infeed_sleep_secs:
    logging.info('%s thread sleeping for %d seconds.', self._name,
                 self._initial_infeed_sleep_secs)
    time.sleep(self._initial_infeed_sleep_secs)
    logging.info('%s thread starting after sleep', self._name)

  with self._rendezvous.catch_errors(source='infeed', session=session):
    if self._run_infeed_loop_on_coordinator:
      for count, steps in enumerate(queue_ctx.read_iteration_counts()):
        for i in xrange(steps):
          logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
          session.run(self._enqueue_ops)
    else:
      for _ in queue_ctx.read_iteration_counts():
        session.run(self._enqueue_ops)
  logging.info('Infeed thread finished, shutting down.')
Example #10
Source File: tpu_estimator.py From embedding-as-service with MIT License
def _run_infeed(self, queue_ctx, session):
  logging.info('Starting infeed thread controller.')
  if self._initial_infeed_sleep_secs:
    logging.info('Infeed thread sleeping for %d seconds.',
                 self._initial_infeed_sleep_secs)
    time.sleep(self._initial_infeed_sleep_secs)
    logging.info('Infeed thread starting after sleep')

  with self._rendezvous.catch_errors(source='infeed', session=session):
    if self._run_infeed_loop_on_coordinator:
      for count, steps in enumerate(queue_ctx.read_iteration_counts()):
        for i in xrange(steps):
          logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
          session.run(self._enqueue_ops)
    else:
      for _ in queue_ctx.read_iteration_counts():
        session.run(self._enqueue_ops)
  logging.info('Infeed thread finished, shutting down.')
Example #11
Source File: directory_watcher.py From deep_image_model with Apache License 2.0
def _SetPath(self, path):
  """Sets the current path to watch for new events.

  This also records the size of the old path, if any. If the size can't be
  found, an error is logged.

  Args:
    path: The full path of the file to watch.
  """
  old_path = self._path
  if old_path and not io_wrapper.IsGCSPath(old_path):
    try:
      # We're done with the path, so store its size.
      size = gfile.Stat(old_path).length
      logging.debug('Setting latest size of %s to %d', old_path, size)
      self._finalized_sizes[old_path] = size
    except errors.OpError as e:
      logging.error('Unable to get size of %s: %s', old_path, e)
  self._path = path
  self._loader = self._loader_factory(path)
Example #12
Source File: tpu_estimator.py From Chinese-XLNet with Apache License 2.0
def _run_infeed(self, queue_ctx, session):
  logging.info('Starting infeed thread controller.')
  if self._initial_infeed_sleep_secs:
    logging.info('Infeed thread sleeping for %d seconds.',
                 self._initial_infeed_sleep_secs)
    time.sleep(self._initial_infeed_sleep_secs)
    logging.info('Infeed thread starting after sleep')

  with self._rendezvous.catch_errors(source='infeed', session=session):
    if self._run_infeed_loop_on_coordinator:
      for count, steps in enumerate(queue_ctx.read_iteration_counts()):
        for i in xrange(steps):
          logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
          session.run(self._enqueue_ops)
    else:
      for _ in queue_ctx.read_iteration_counts():
        session.run(self._enqueue_ops)
  logging.info('Infeed thread finished, shutting down.')
Example #13
Source File: estimator.py From auto-alt-text-lambda-api with MIT License
def _check_inputs(self, features, labels):
  if self._features_info is not None:
    logging.debug('Given features: %s, required signatures: %s.',
                  str(features), str(self._features_info))
    if not tensor_signature.tensors_compatible(features, self._features_info):
      raise ValueError('Features are incompatible with given information. '
                       'Given features: %s, required signatures: %s.' %
                       (str(features), str(self._features_info)))
  else:
    self._features_info = tensor_signature.create_signatures(features)
    logging.debug('Setting feature info to %s.', str(self._features_info))
  if labels is not None:
    if self._labels_info is not None:
      logging.debug('Given labels: %s, required signatures: %s.',
                    str(labels), str(self._labels_info))
      if not tensor_signature.tensors_compatible(labels, self._labels_info):
        raise ValueError('Labels are incompatible with given information. '
                         'Given labels: %s, required signatures: %s.' %
                         (str(labels), str(self._labels_info)))
    else:
      self._labels_info = tensor_signature.create_signatures(labels)
      logging.debug('Setting labels info to %s', str(self._labels_info))
Example #14
Source File: event_file_loader.py From auto-alt-text-lambda-api with MIT License
def Load(self):
  """Loads all new values from disk.

  Calling Load multiple times in a row will not 'drop' events as long as the
  return value is not iterated over.

  Yields:
    All values that were written to disk that have not been yielded yet.
  """
  while True:
    try:
      with errors.raise_exception_on_not_ok_status() as status:
        self._reader.GetNext(status)
    except (errors.DataLossError, errors.OutOfRangeError):
      # We ignore partial read exceptions, because a record may be truncated.
      # PyRecordReader holds the offset prior to the failed read, so retrying
      # will succeed.
      break
    event = event_pb2.Event()
    event.ParseFromString(self._reader.record())
    yield event
  logging.debug('No more events in %s', self._file_path)
Example #15
Source File: registry.py From DOTA_models with Apache License 2.0
def _Create(baseclass, subclass_name, *args, **kwargs):
  """Creates an instance of a named subclass.

  Args:
    baseclass: The expected base class.
    subclass_name: The fully-qualified type name of the subclass to create.
    *args: Passed to the subclass constructor.
    **kwargs: Passed to the subclass constructor.

  Returns:
    An instance of the named subclass, or None on error.
  """
  subclass = _GetClass(subclass_name)
  if subclass is None:
    return None  # _GetClass() already logged an error
  if not issubclass(subclass, baseclass):
    logging.debug('Class "%s" is not a subclass of "%s"', subclass_name,
                  baseclass.__name__)
    return None
  return subclass(*args, **kwargs)
Example #16
Source File: transform.py From lambda-packs with MIT License
def _copy_ops(self, info):
  """Copy ops without connecting them."""
  for op in info.sgv.ops:
    logging.debug("Copying op: %s", op.name)
    # TODO(fkp): return a subgraph?
    op_, op_outputs_ = self.transform_op_handler(info, op)
    if op is op_:
      raise ValueError("In-place transformation not allowed.")

    # Process op.
    info.transformed_ops[op] = op_
    self.assign_collections_handler(info, op, op_)

    # Process output tensors.
    for op_output, op_output_ in zip(op.outputs, op_outputs_):
      info.transformed_ts[op_output] = op_output_
      self.assign_collections_handler(info, op_output, op_output_)
Example #17
Source File: event_file_loader.py From lambda-packs with MIT License
def Load(self):
  """Loads all new values from disk.

  Calling Load multiple times in a row will not 'drop' events as long as the
  return value is not iterated over.

  Yields:
    All values that were written to disk that have not been yielded yet.
  """
  while True:
    try:
      with errors.raise_exception_on_not_ok_status() as status:
        self._reader.GetNext(status)
    except (errors.DataLossError, errors.OutOfRangeError):
      # We ignore partial read exceptions, because a record may be truncated.
      # PyRecordReader holds the offset prior to the failed read, so retrying
      # will succeed.
      break
    event = event_pb2.Event()
    event.ParseFromString(self._reader.record())
    yield event
  logging.debug('No more events in %s', self._file_path)
Example #18
Source File: transform.py From lambda-packs with MIT License
def _connect_ops(self, info):
  """Connect the previously copied ops."""
  for op in info.sgv.ops:
    logging.debug("Finalizing op: %s", op.name)
    op_ = info.transformed_ops[op]

    # pylint: disable=protected-access
    if op_.inputs:
      raise ValueError("The newly transformed op should not have "
                       "any inputs yet: {}".format(op_.name))
    inputs_ = [self._transformed_t(info, t) for t in op.inputs]
    for t in inputs_:
      op_._add_input(t)

    # Finalize control inputs:
    control_inputs_ = [self.transform_control_input_handler(info, ci)
                       for ci in op.control_inputs]
    control_inputs_ = [ci for ci in control_inputs_ if ci is not None]
    reroute.add_control_inputs(op_, control_inputs_)
Example #19
Source File: directory_watcher.py From lambda-packs with MIT License
def _SetPath(self, path):
  """Sets the current path to watch for new events.

  This also records the size of the old path, if any. If the size can't be
  found, an error is logged.

  Args:
    path: The full path of the file to watch.
  """
  old_path = self._path
  if old_path and not io_wrapper.IsGCSPath(old_path):
    try:
      # We're done with the path, so store its size.
      size = gfile.Stat(old_path).length
      logging.debug('Setting latest size of %s to %d', old_path, size)
      self._finalized_sizes[old_path] = size
    except errors.OpError as e:
      logging.error('Unable to get size of %s: %s', old_path, e)
  self._path = path
  self._loader = self._loader_factory(path)
Example #20
Source File: estimator.py From lambda-packs with MIT License
def _check_inputs(self, features, labels):
  if self._features_info is not None:
    logging.debug('Given features: %s, required signatures: %s.',
                  str(features), str(self._features_info))
    if not tensor_signature.tensors_compatible(features, self._features_info):
      raise ValueError('Features are incompatible with given information. '
                       'Given features: %s, required signatures: %s.' %
                       (str(features), str(self._features_info)))
  else:
    self._features_info = tensor_signature.create_signatures(features)
    logging.debug('Setting feature info to %s.', str(self._features_info))
  if labels is not None:
    if self._labels_info is not None:
      logging.debug('Given labels: %s, required signatures: %s.',
                    str(labels), str(self._labels_info))
      if not tensor_signature.tensors_compatible(labels, self._labels_info):
        raise ValueError('Labels are incompatible with given information. '
                         'Given labels: %s, required signatures: %s.' %
                         (str(labels), str(self._labels_info)))
    else:
      self._labels_info = tensor_signature.create_signatures(labels)
      logging.debug('Setting labels info to %s', str(self._labels_info))
Example #21
Source File: feature_column_ops.py From lambda-packs with MIT License
def transform(self, feature_column):
  """Returns a Tensor which represents given feature_column.

  Args:
    feature_column: An instance of FeatureColumn.

  Returns:
    A Tensor which represents given feature_column. It may create a new
    Tensor or re-use an existing one.

  Raises:
    ValueError: if FeatureColumn cannot be handled by this Transformer.
  """
  logging.debug('Transforming feature_column %s', feature_column)
  if feature_column in self._columns_to_tensors:
    # Feature_column is already transformed.
    return self._columns_to_tensors[feature_column]

  feature_column.insert_transformed_feature(self._columns_to_tensors)

  if feature_column not in self._columns_to_tensors:
    raise ValueError('Column {} is not supported.'.format(
        feature_column.name))

  return self._columns_to_tensors[feature_column]
Example #22
Source File: directory_watcher.py From auto-alt-text-lambda-api with MIT License
def _SetPath(self, path):
  """Sets the current path to watch for new events.

  This also records the size of the old path, if any. If the size can't be
  found, an error is logged.

  Args:
    path: The full path of the file to watch.
  """
  old_path = self._path
  if old_path and not io_wrapper.IsGCSPath(old_path):
    try:
      # We're done with the path, so store its size.
      size = gfile.Stat(old_path).length
      logging.debug('Setting latest size of %s to %d', old_path, size)
      self._finalized_sizes[old_path] = size
    except errors.OpError as e:
      logging.error('Unable to get size of %s: %s', old_path, e)
  self._path = path
  self._loader = self._loader_factory(path)
Example #23
Source File: event_file_loader.py From keras-lambda with MIT License
def __init__(self, file_path):
  if file_path is None:
    raise ValueError('A file path is required')
  file_path = resource_loader.readahead_file_path(file_path)
  logging.debug('Opening a record reader pointing at %s', file_path)
  with errors.raise_exception_on_not_ok_status() as status:
    self._reader = pywrap_tensorflow.PyRecordReader_New(
        compat.as_bytes(file_path), 0, compat.as_bytes(''), status)
  # Store it for logging purposes.
  self._file_path = file_path
  if not self._reader:
    raise IOError('Failed to open a record reader pointing to %s' % file_path)
Example #24
Source File: subscribe.py From lambda-packs with MIT License
def _subscribe(tensor, side_effects, control_cache):
  """Helper method that subscribes a single tensor to a list of side_effects.

  This method will check if the given tensor has already been subscribed or if
  it's a tensor returned by a previous call to `subscribe()` and, if so, will
  reuse the existing identity op, appending the given side effects to the list
  of existing ones.

  Args:
    tensor: The `tf.Tensor` to be subscribed.
    side_effects: List of side_effect functions, see subscribe for details.
    control_cache: `_ControlOutputCache` helper to get control_outputs faster.

  Returns:
    The modified replacement to the passed in tensor which triggers the side
    effects or the given tensor, if it has already been subscribed.
  """
  # Check if the given tensor has a numpy compatible type (see dtypes.py).
  # If not, we cannot subscribe it, so we just return the original tensor.
  if not tensor.dtype.is_numpy_compatible:
    logging.debug(('Tensor {} has an unsupported {} type and cannot be '
                   'subscribed.').format(tensor.name, tensor.dtype))
    return tensor

  if _is_subscribed_identity(tensor):
    return _subscribe_extend(tensor, side_effects)

  # Check if the given tensor has already been subscribed by inspecting its
  # outputs.
  name_scope = tensor.op.name + '/subscription/Identity'
  consumers = tensor.consumers()
  matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
  assert len(matching_ops) <= 1, ('Op {} must only have one subscription '
                                  'op connected to it').format(tensor.op.name)
  if len(matching_ops) == 1:
    candidate_tensor = matching_ops[0].outputs[0]
    if _is_subscribed_identity(candidate_tensor):
      return _subscribe_extend(candidate_tensor, side_effects)

  return _subscribe_new(tensor, side_effects, control_cache)
Example #25
Source File: subscribe.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _subscribe(tensor, side_effects, control_cache):
  """Helper method that subscribes a single tensor to a list of side_effects.

  This method will check if the given tensor has already been subscribed or if
  it's a tensor returned by a previous call to `subscribe()` and, if so, will
  reuse the existing identity op, appending the given side effects to the list
  of existing ones.

  Args:
    tensor: The `tf.Tensor` to be subscribed.
    side_effects: List of side_effect functions, see subscribe for details.
    control_cache: `_ControlOutputCache` helper to get control_outputs faster.

  Returns:
    The modified replacement to the passed in tensor which triggers the side
    effects or the given tensor, if it has already been subscribed.
  """
  # Check if the given tensor has a numpy compatible type (see dtypes.py).
  # If not, we cannot subscribe it, so we just return the original tensor.
  if not tensor.dtype.is_numpy_compatible:
    logging.debug(('Tensor {} has an unsupported {} type and cannot be '
                   'subscribed.').format(tensor.name, tensor.dtype))
    return tensor

  if _is_subscribed_identity(tensor):
    return _subscribe_extend(tensor, side_effects)

  # Check if the given tensor has already been subscribed by inspecting its
  # outputs.
  name_scope = tensor.op.name + '/subscription/Identity'
  consumers = tensor.consumers()
  matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
  assert len(matching_ops) <= 1, ('Op {} must only have one subscription '
                                  'op connected to it').format(tensor.op.name)
  if len(matching_ops) == 1:
    candidate_tensor = matching_ops[0].outputs[0]
    if _is_subscribed_identity(candidate_tensor):
      return _subscribe_extend(candidate_tensor, side_effects)

  return _subscribe_new(tensor, side_effects, control_cache)
Example #26
Source File: feature_column.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def get(self, key):
  """Returns a `Tensor` for the given key.

  A `str` key is used to access a base feature (not-transformed). When a
  `_FeatureColumn` is passed, the transformed feature is returned if it
  already exists, otherwise the given `_FeatureColumn` is asked to provide
  its transformed output, which is then cached.

  Args:
    key: a `str` or a `_FeatureColumn`.

  Returns:
    The transformed `Tensor` corresponding to the `key`.

  Raises:
    ValueError: if key is not found or a transformed `Tensor` cannot be
      computed.
  """
  if key in self._feature_tensors:
    # FeatureColumn is already transformed or converted.
    return self._feature_tensors[key]

  if key in self._features:
    feature_tensor = self._get_raw_feature_as_tensor(key)
    self._feature_tensors[key] = feature_tensor
    return feature_tensor

  if not isinstance(key, (str, _FeatureColumn)):
    raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
                    'Provided: {}'.format(key))

  if not isinstance(key, _FeatureColumn):
    raise ValueError('Feature {} is not in features dictionary.'.format(key))

  column = key
  logging.debug('Transforming feature_column %s.', column)
  transformed = column._transform_feature(self)  # pylint: disable=protected-access
  if transformed is None:
    raise ValueError('Column {} is not supported.'.format(column.name))
  self._feature_tensors[column] = transformed
  return transformed
Example #27
Source File: registry.py From HumanRecognition with MIT License
def _GetClass(name):
  """Looks up a class by name.

  Args:
    name: The fully-qualified type name of the class to return.

  Returns:
    The class associated with the |name|, or None on error.
  """
  elements = name.split('.')

  # Need at least "module.Class".
  if len(elements) < 2:
    logging.debug('Malformed type: "%s"', name)
    return None
  module_path = '.'.join(elements[:-1])
  class_name = elements[-1]

  # Import the module.
  try:
    __import__(module_path)
  except ImportError as e:
    logging.debug('Unable to find module "%s": "%s"', module_path, e)
    return None
  module = sys.modules[module_path]

  # Look up the class.
  if not hasattr(module, class_name):
    logging.debug('Name "%s" not found in module: "%s"', class_name,
                  module_path)
    return None
  class_obj = getattr(module, class_name)

  # Check that it is actually a class.
  if not inspect.isclass(class_obj):
    logging.debug('Name does not refer to a class: "%s"', name)
    return None
  return class_obj
Example #28
Source File: tpu_estimator.py From xlnet with Apache License 2.0
def _run_outfeed(self, queue_ctx, session):
  logging.info('Starting outfeed thread controller.')
  with self._rendezvous.catch_errors(source='outfeed', session=session):
    for count, steps in enumerate(queue_ctx.read_iteration_counts()):
      for i in xrange(steps):
        logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
        session.run(self._dequeue_ops)
  logging.info('Outfeed thread finished, shutting down.')
Example #29
Source File: event_file_loader.py From lambda-packs with MIT License
def __init__(self, file_path):
  if file_path is None:
    raise ValueError('A file path is required')
  file_path = resource_loader.readahead_file_path(file_path)
  logging.debug('Opening a record reader pointing at %s', file_path)
  with errors.raise_exception_on_not_ok_status() as status:
    self._reader = pywrap_tensorflow.PyRecordReader_New(
        compat.as_bytes(file_path), 0, compat.as_bytes(''), status)
  # Store it for logging purposes.
  self._file_path = file_path
  if not self._reader:
    raise IOError('Failed to open a record reader pointing to %s' % file_path)
Example #30
Source File: tpu_estimator.py From xlnet with Apache License 2.0
def read_iteration_counts(self):
  while True:
    iterations = self._queue.get(block=True)
    logging.debug('%s read iterations %s', self._name, iterations)
    if iterations == _SIGNAL.STOP:
      logging.info('%s received shutdown signal, stopping.', self._name)
      return
    yield iterations