Python absl.logging.exception() Examples
The following are 15 code examples of absl.logging.exception(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module absl.logging, or try the search function.
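
absl.logging.exception() mirrors the standard library's logging.exception(): it logs the given message at ERROR severity and automatically appends the traceback of the exception currently being handled, so it is meant to be called from inside an except block. The sketch below illustrates that basic pattern; the main() function and the deliberately failing division are illustrative stand-ins, not code from any of the projects listed below.

from absl import app
from absl import logging


def main(argv):
  del argv  # Unused.
  try:
    _ = 1 / 0  # A deliberately failing operation, as a stand-in.
  except ZeroDivisionError:
    # Logs at ERROR severity and appends the traceback of the exception
    # currently being handled.
    logging.exception('Computation failed')


if __name__ == '__main__':
  app.run(main)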
Example #1
Source File: versions_test.py From pysc2 with Apache License 2.0 | 6 votes

def test_version_numbers(self):
  run_config = run_configs.get()
  failures = []
  for game_version, version in sorted(run_config.get_versions().items()):
    try:
      self.assertEqual(game_version, version.game_version)
      log_center("starting version check: %s", game_version)
      run_config = run_configs.get(version=game_version)
      with run_config.start(want_rgb=False) as controller:
        ping = controller.ping()
        logging.info("expected: %s", version)
        logging.info("actual: %s", ", ".join(str(ping).strip().split("\n")))
        self.assertEqual(version.build_version, ping.base_build)
        if version.game_version != "latest":
          self.assertEqual(major_version(ping.game_version),
                           major_version(version.game_version))
          self.assertEqual(version.data_version.lower(),
                           ping.data_version.lower())
      log_center("success: %s", game_version)
    except:  # pylint: disable=bare-except
      log_center("failure: %s", game_version)
      logging.exception("Failed")
      failures.append(game_version)
  self.assertEmpty(failures)
Example #2
Source File: log_before_import_test.py From abseil-py with Apache License 2.0 | 6 votes

def test_captured_pre_init_warnings(self):
  with open(before_set_verbosity_filename) as stderr_capture_file:
    captured_stderr = stderr_capture_file.read()
  self.assertNotIn('Debug message at parse time.', captured_stderr)
  self.assertNotIn('Info message at parse time.', captured_stderr)

  traceback_re = re.compile(
      r'\nTraceback \(most recent call last\):.*?Error: Exception reason.',
      re.MULTILINE | re.DOTALL)
  if not traceback_re.search(captured_stderr):
    self.fail(
        'Cannot find traceback message from logging.exception '
        'in stderr:\n{}'.format(captured_stderr))
  # Remove the traceback so the rest of the stderr is deterministic.
  captured_stderr = traceback_re.sub('', captured_stderr)
  captured_stderr_lines = captured_stderr.splitlines()
  self.assertLen(captured_stderr_lines, 3)
  self.assertIn('Error message at parse time.', captured_stderr_lines[0])
  self.assertIn('Warning message at parse time.', captured_stderr_lines[1])
  self.assertIn('Exception message at parse time.', captured_stderr_lines[2])
Example #3
Source File: process_action.py From loaner with Apache License 2.0 | 5 votes

def post(self):
  """Process an async Action task with the correct Action class."""
  payload = pickle.loads(self.request.body)
  async_actions = payload.pop('async_actions')
  action_name = async_actions.pop(0)
  action_instance = self.actions['async'].get(action_name)
  if action_instance:
    try:
      action_instance.run(**payload)
    # pylint: disable=broad-except, because this logic, in which tasks are
    # responsible for spawning subsequent tasks, creates a chain that could be
    # interrupted by any conceivable exception in an action's run method. This
    # handling ensures any further tasks will run.
    except Exception as error:
      logging.exception(
          'Failed to run async Action %r due to error: %r',
          action_name, str(error))
    # pylint: enable=broad-except
  else:
    logging.error('No async Action named %s found.', action_name)
  if async_actions:
    payload['async_actions'] = async_actions
    taskqueue.add(
        queue_name='process-action',
        payload=pickle.dumps(payload),
        target='default')
Example #4
Source File: versions_test.py From pysc2 with Apache License 2.0 | 5 votes

def test_versions_create_game(self):
  run_config = run_configs.get()
  failures = []
  for game_version in sorted(run_config.get_versions().keys()):
    try:
      log_center("starting create game: %s", game_version)
      run_config = run_configs.get(version=game_version)
      with run_config.start(want_rgb=False) as controller:
        interface = sc_pb.InterfaceOptions()
        interface.raw = True
        interface.score = True
        interface.feature_layer.width = 24
        interface.feature_layer.resolution.x = 84
        interface.feature_layer.resolution.y = 84
        interface.feature_layer.minimap_resolution.x = 64
        interface.feature_layer.minimap_resolution.y = 64

        map_inst = maps.get("Simple64")
        create = sc_pb.RequestCreateGame(local_map=sc_pb.LocalMap(
            map_path=map_inst.path, map_data=map_inst.data(run_config)))
        create.player_setup.add(type=sc_pb.Participant)
        create.player_setup.add(type=sc_pb.Computer, race=sc_common.Terran,
                                difficulty=sc_pb.VeryEasy)
        join = sc_pb.RequestJoinGame(race=sc_common.Terran, options=interface)

        controller.create_game(create)
        controller.join_game(join)

        for _ in range(5):
          controller.step(16)
          controller.observe()

      log_center("success: %s", game_version)
    except:  # pylint: disable=bare-except
      logging.exception("Failed")
      log_center("failure: %s", game_version)
      failures.append(game_version)
  self.assertEmpty(failures)
Example #5
Source File: sc_process.py From pysc2 with Apache License 2.0 | 5 votes

def _launch(self, run_config, args, **kwargs):
  """Launch the process and return the process object."""
  del kwargs
  try:
    with sw("popen"):
      return subprocess.Popen(args, cwd=run_config.cwd, env=run_config.env)
  except OSError:
    logging.exception("Failed to launch")
    raise SC2LaunchError("Failed to launch: %s" % args)
Example #6
Source File: executor.py From tfx with Apache License 2.0 | 5 votes

def _PrepareModelPath(
    self, model_uri: Text,
    serving_spec: infra_validator_pb2.ServingSpec) -> Text:
  model_path = path_utils.serving_model_path(model_uri)
  serving_binary = serving_spec.WhichOneof('serving_binary')
  if serving_binary == _TENSORFLOW_SERVING:
    # TensorFlow Serving requires model to be stored in its own directory
    # structure flavor. If current model_path does not conform to the flavor,
    # we need to make a copy to the temporary path.
    try:
      # Check whether current model_path conforms to the tensorflow serving
      # model path flavor. (Parsed without exception)
      tf_serving_flavor.parse_model_path(
          model_path,
          expected_model_name=serving_spec.model_name)
    except ValueError:
      # Copy the model to comply with the tensorflow serving model path
      # flavor.
      temp_model_path = tf_serving_flavor.make_model_path(
          model_base_path=self._get_tmp_dir(),
          model_name=serving_spec.model_name,
          version=int(time.time()))
      io_utils.copy_dir(src=model_path, dst=temp_model_path)
      self._AddCleanup(io_utils.delete_dir, self._context.get_tmp_path())
      return temp_model_path
  return model_path
Example #7
Source File: executor.py From tfx with Apache License 2.0 | 5 votes

def _ValidateWithRetry(
    self, model_path: Text,
    serving_binary: serving_bins.ServingBinary,
    serving_spec: infra_validator_pb2.ServingSpec,
    validation_spec: infra_validator_pb2.ValidationSpec,
    requests: List[iv_types.Request]):
  for i in range(validation_spec.num_tries):
    logging.info('Starting infra validation (attempt %d/%d).', i + 1,
                 validation_spec.num_tries)
    try:
      self._ValidateOnce(
          model_path=model_path,
          serving_binary=serving_binary,
          serving_spec=serving_spec,
          validation_spec=validation_spec,
          requests=requests)
    except error_types.GracefulShutdown:
      # GracefulShutdown means infra validation aborted. No more retry and
      # escalate the error.
      raise
    except Exception as e:  # pylint: disable=broad-except
      # Other exceptions indicate validation failure. Log the error and
      # retry.
      logging.exception('Infra validation (attempt %d/%d) failed.', i + 1,
                        validation_spec.num_tries)
      if isinstance(e, error_types.DeadlineExceeded):
        logging.info('Consider increasing the value of '
                     'ValidationSpec.max_loading_time_seconds.')
    else:
      # If validation has passed without any exception, succeeded.
      return True

  # Every trial has failed. Marking model as not blessed.
  return False
Example #8
Source File: logging_functional_test_helper.py From abseil-py with Apache License 2.0 | 5 votes

def _test_unicode():
  """Tests unicode handling."""
  test_names = []

  def log(name, msg, *args):
    """Logs the message, and ensures the same name is not logged again."""
    assert name not in test_names, ('test_unicode expects unique names to work,'
                                    ' found existing name {}').format(name)
    test_names.append(name)

    # Add line separators so that tests can verify the output for each log
    # message.
    sys.stderr.write('-- begin {} --\n'.format(name))
    logging.info(msg, *args)
    sys.stderr.write('-- end {} --\n'.format(name))

  log('unicode', u'G\u00eete: Ch\u00e2tonnaye')
  log('unicode % unicode', u'G\u00eete: %s', u'Ch\u00e2tonnaye')
  log('bytes % bytes', u'G\u00eete: %s'.encode('utf-8'),
      u'Ch\u00e2tonnaye'.encode('utf-8'))
  log('unicode % bytes', u'G\u00eete: %s', u'Ch\u00e2tonnaye'.encode('utf-8'))
  log('bytes % unicode', u'G\u00eete: %s'.encode('utf-8'), u'Ch\u00e2tonnaye')
  log('unicode % iso8859-15', u'G\u00eete: %s',
      u'Ch\u00e2tonnaye'.encode('iso-8859-15'))
  log('str % exception', 'exception: %s', Exception(u'Ch\u00e2tonnaye'))
Example #9
Source File: logging_test.py From abseil-py with Apache License 2.0 | 5 votes

def test_exception_dict_format(self):
  # Just verify that this doesn't raise a TypeError.
  logging.exception('%(test)s', {'test': 'Hello world!'})
Example #10
Source File: _flagvalues_test.py From abseil-py with Apache License 2.0 | 5 votes

def test_logging_do_not_recurse(self):
  logging.info('test info')
  try:
    raise ValueError('test exception')
  except ValueError:
    logging.exception('test message')
Example #11
Source File: remote_executor.py From federated with Apache License 2.0 | 4 votes

def _lazy_init(self):
  """Lazily initialize the underlying gRPC stream."""
  if self._is_initialized:
    return

  logging.debug('Initializing bidi stream')

  self._request_queue = queue.Queue()
  self._response_event_dict = {}
  self._stream_closed_event = threading.Event()

  def request_iter():
    """Iterator that blocks on the request Queue."""
    for seq in itertools.count():
      logging.debug('Request thread: blocking for next request')
      val = self._request_queue.get()
      if val:
        py_typecheck.check_type(val[0], executor_pb2.ExecuteRequest)
        py_typecheck.check_type(val[1], threading.Event)
        req = val[0]
        req.sequence_number = seq
        logging.debug(
            'Request thread: processing request of type %s, seq_no %s',
            val[0].WhichOneof('request'), seq)
        self._response_event_dict[seq] = val[1]
        yield val[0]
      else:
        logging.debug(
            'Request thread: Final request received. Stream will close.')
        # None means we are done processing
        return

  response_iter = self._stub.Execute(request_iter())

  def response_thread_fn():
    """Consumes response iter and exposes the value on corresponding Event."""
    try:
      logging.debug('Response thread: blocking for next response')
      for response in response_iter:
        logging.debug(
            'Response thread: processing response of type %s, seq_no %s',
            response.WhichOneof('response'), response.sequence_number)
        # Get the corresponding response Event
        response_event = self._response_event_dict[response.sequence_number]
        # Attach the response as an attribute on the Event
        response_event.response = response
        response_event.set()
      # Set the event indicating the stream has been closed
      self._stream_closed_event.set()
    except grpc.RpcError as error:
      logging.exception('Error calling remote executor: %s', error)

  response_thread = threading.Thread(target=response_thread_fn)
  response_thread.daemon = True
  response_thread.start()

  self._is_initialized = True
Example #12
Source File: preprocessing.py From EfficientNet-PyTorch with Apache License 2.0 | 4 votes

def preprocess_for_train(image_bytes,
                         use_bfloat16,
                         image_size=IMAGE_SIZE,
                         augment_name=None,
                         randaug_num_layers=None,
                         randaug_magnitude=None):
  """Preprocesses the given image for training.

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.
    augment_name: `string` that is the name of the augmentation method to
      apply to the image. `autoaugment` if AutoAugment is to be used or
      `randaugment` if RandAugment is to be used. If the value is `None` no
      augmentation method will be applied. See autoaugment.py for more
      details.
    randaug_num_layers: 'int', if RandAug is used, what should the number of
      layers be. See autoaugment.py for detailed description.
    randaug_magnitude: 'int', if RandAug is used, what should the magnitude
      be. See autoaugment.py for detailed description.

  Returns:
    A preprocessed image `Tensor`.
  """
  image = _decode_and_random_crop(image_bytes, image_size)
  image = _flip(image)
  image = tf.reshape(image, [image_size, image_size, 3])
  image = tf.image.convert_image_dtype(
      image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)

  if augment_name:
    try:
      import autoaugment  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      logging.exception('Autoaugment is not supported in TF 2.x.')
      raise e

    logging.info('Apply AutoAugment policy %s', augment_name)
    input_image_type = image.dtype
    image = tf.clip_by_value(image, 0.0, 255.0)
    image = tf.cast(image, dtype=tf.uint8)

    if augment_name == 'autoaugment':
      logging.info('Apply AutoAugment policy %s', augment_name)
      image = autoaugment.distort_image_with_autoaugment(image, 'v0')
    elif augment_name == 'randaugment':
      image = autoaugment.distort_image_with_randaugment(
          image, randaug_num_layers, randaug_magnitude)
    else:
      raise ValueError('Invalid value for augment_name: %s' % (augment_name))

    image = tf.cast(image, dtype=input_image_type)
  return image
Example #13
Source File: executor.py From tfx with Apache License 2.0 | 4 votes

def _InstallGracefulShutdownHandler(self):
  # pylint: disable=g-doc-return-or-yield
  """Install graceful shutdown behavior.

  Caveat: InfraValidator currently only recognizes SIGTERM signal as a
  graceful shutdown. Furthermore, SIGTERM can be handled only if the executor
  is running on the MainThread (the thread that runs the python interpreter)
  due to the limitation of Python API.

  When the executor is running on Kubernetes, SIGTERM is a standard way to
  signal the graceful shutdown. Python default behavior for receiving SIGTERM
  is to terminate the process without raising any exception. By registering a
  handler that raises on signal, we can effectively transform the signal to
  an exception, and we can reuse our cleanup code inside "except" or
  "finally" block during the grace period.

  When the executor is run by the local Beam DirectRunner, the executor
  thread is one of the worker threads (not a MainThread) therefore SIGTERM
  cannot be recognized. If either of MainThread or worker thread receives
  SIGTERM, executor will die immediately without grace period.

  Even if the executor fails to shutdown gracefully, external resources that
  are created by model server runner can be cleaned up if the platform
  supports such mechanism (e.g. activeDeadlineSeconds in Kubernetes).
  """

  def _handler(signum, frame):
    del frame  # Unused.
    raise error_types.GracefulShutdown('Got signal {}.'.format(signum))

  try:
    old_handler = signal.signal(signal.SIGTERM, _handler)
  except ValueError:
    # If current thread is not a MainThread, it is not allowed to register
    # the signal handler (ValueError raised).
    logging.info('Unable to register signal handler for non-MainThread '
                 '(name=%s). SIGTERM will not be handled.',
                 threading.current_thread().name)
    old_handler = None

  try:
    yield
  finally:
    self._Cleanup()
    if old_handler:
      signal.signal(signal.SIGTERM, old_handler)
Example #14
Source File: runner_lib.py From compare_gan with Apache License 2.0 | 4 votes

def _run_eval(module_spec, checkpoints, task_manager, run_config, use_tpu,
              num_averaging_runs):
  """Evaluates the given checkpoints and add results to a result writer.

  Args:
    module_spec: `ModuleSpec` of the model.
    checkpoints: Generator for checkpoint paths.
    task_manager: `TaskManager`. init_eval() will be called before adding
      results.
    run_config: `RunConfig` to use. Values for master and tpu_config are
      currently ignored.
    use_tpu: Whether to use TPU for evaluation.
    num_averaging_runs: Determines how many times each metric is computed.
  """
  # By default, we compute FID and Inception scores. Other tasks defined in
  # the metrics folder (such as the one in metrics/kid_score.py) can be added
  # to this list if desired.
  eval_tasks = [
      inception_score_lib.InceptionScoreTask(),
      fid_score_lib.FIDScoreTask()
  ]
  logging.info("eval_tasks: %s", eval_tasks)

  for checkpoint_path in checkpoints:
    step = os.path.basename(checkpoint_path).split("-")[-1]
    if int(step) == 0:
      continue
    export_path = os.path.join(run_config.model_dir, "tfhub", str(step))
    if not tf.gfile.Exists(export_path):
      module_spec.export(export_path, checkpoint_path=checkpoint_path)
    default_value = -1.0
    try:
      result_dict = eval_gan_lib.evaluate_tfhub_module(
          export_path, eval_tasks, use_tpu=use_tpu,
          num_averaging_runs=num_averaging_runs)
    except ValueError as nan_found_error:
      result_dict = {}
      logging.exception(nan_found_error)
      default_value = eval_gan_lib.NAN_DETECTED
    logging.info("Evaluation result for checkpoint %s: %s (default value: %s)",
                 checkpoint_path, result_dict, default_value)
    task_manager.add_eval_result(checkpoint_path, result_dict, default_value)
Example #15
Source File: inputs_test.py From models with Apache License 2.0 | 4 votes

def test_use_labeled_classes(self, labeled_classes):

  def compute_fn(image, groundtruth_boxes, groundtruth_classes,
                 groundtruth_labeled_classes):
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: groundtruth_boxes,
        fields.InputDataFields.groundtruth_classes: groundtruth_classes,
        fields.InputDataFields.groundtruth_labeled_classes:
            groundtruth_labeled_classes
    }
    input_transformation_fn = functools.partial(
        inputs.transform_input_data,
        model_preprocess_fn=_fake_model_preprocessor_fn,
        image_resizer_fn=_fake_image_resizer_fn,
        num_classes=3)
    return input_transformation_fn(tensor_dict=tensor_dict)

  image = np.random.rand(4, 4, 3).astype(np.float32)
  groundtruth_boxes = np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)
  groundtruth_classes = np.array([1, 2], np.int32)
  groundtruth_labeled_classes = np.array(labeled_classes, np.int32)

  transformed_inputs = self.execute_cpu(compute_fn, [
      image, groundtruth_boxes, groundtruth_classes,
      groundtruth_labeled_classes
  ])

  if labeled_classes == [1, 2] or labeled_classes == [1, -1, 2]:
    transformed_labeled_classes = [1, 1, 0]
  elif not labeled_classes:
    transformed_labeled_classes = [1, 1, 1]
  else:
    logging.exception('Unexpected labeled_classes %r', labeled_classes)

  self.assertAllEqual(
      np.array(transformed_labeled_classes, np.float32),
      transformed_inputs[fields.InputDataFields.groundtruth_labeled_classes])