Python absl.logging.getLogger() Examples
The following are 6 code examples of absl.logging.getLogger(), drawn from open-source projects.
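As orientation before the examples, here is a minimal sketch (written for this page, not taken from any project below) of how absl.logging and the standard logging module interact: absl installs its handler on the root logger, so loggers retrieved with logging.getLogger() propagate records into it.

    import logging

    from absl import app
    from absl import logging as absl_logging


    def main(argv):
        del argv  # Unused.
        absl_logging.info("Logged through absl's handler.")
        # Standard-library loggers propagate to the root logger, so records
        # logged here are also emitted by absl's handler.
        logging.getLogger("my.module").warning("Also visible.")


    if __name__ == "__main__":
        app.run(main)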
Example #1
Source File: program.py From tensorboard with Apache License 2.0
    def _fix_werkzeug_logging(self):
        """Fix werkzeug logging setup so it inherits TensorBoard's log level.

        This addresses a change in werkzeug 0.15.0+ [1] that causes it to set
        its own log level to INFO regardless of the root logger configuration.
        We instead want werkzeug to inherit TensorBoard's root logger log level
        (set via absl to WARNING by default).

        [1]: https://github.com/pallets/werkzeug/commit/4cf77d25858ff46ac7e9d64ade054bf05b41ce12
        """
        # Log once at DEBUG to force werkzeug to initialize its singleton
        # logger, which sets the logger level to INFO if it is unset, and then
        # access that object via logging.getLogger('werkzeug') to durably
        # revert the level to unset (and thus make messages logged to it
        # inherit the root logger level).
        self.log(
            "debug", "Fixing werkzeug logger to inherit TensorBoard log level"
        )
        logging.getLogger("werkzeug").setLevel(logging.NOTSET)
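The key trick above is logging.NOTSET: a logger whose level is unset defers to its ancestors' effective level. A standalone sketch of that behavior, using only the standard library (no TensorBoard or werkzeug required):

    import logging

    logging.basicConfig(level=logging.WARNING)

    noisy = logging.getLogger("werkzeug")
    noisy.setLevel(logging.INFO)    # what werkzeug 0.15+ does on first use
    noisy.info("emitted: the logger's own level is INFO")

    noisy.setLevel(logging.NOTSET)  # revert to inheriting from the root logger
    noisy.info("suppressed: the effective level is now the root's WARNING")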
Example #2
Source File: base_learner.py From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License
    def create_logger(self, log_path=None):
        if log_path is None:
            return None
        check_and_create_dir(log_path)
        handler = logging.handlers.RotatingFileHandler(
            log_path, mode="a", maxBytes=100000000, backupCount=200)
        # These two lines remove duplicated logging caused by absl's handler.
        logging.root.removeHandler(absl.logging._absl_handler)
        absl.logging._warn_preinit_stderr = False
        formatter = RequestFormatter("[%(asctime)s] %(levelname)s: %(message)s")
        handler.setFormatter(formatter)
        logger = logging.getLogger(log_path)
        logger.setLevel(logging.INFO)
        for hdlr in logger.handlers[:]:  # remove old handlers
            logger.removeHandler(hdlr)
        logger.addHandler(handler)
        self.logger = logger
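This example reaches into abseil's private attributes (_absl_handler, _warn_preinit_stderr) to stop records from being emitted twice. If your abseil-py version exposes the public accessor get_absl_handler(), a sketch of the same handler removal without private names:

    import logging

    from absl import logging as absl_logging

    # Detach absl's handler from the root logger; this is a no-op if the
    # handler was never attached.
    logging.root.removeHandler(absl_logging.get_absl_handler())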
Example #3
Source File: ncf_common.py From models with Apache License 2.0
    def get_v1_distribution_strategy(params):
        """Returns the distribution strategy to use."""
        if params["use_tpu"]:
            # Some of the networking libraries are quite chatty.
            for name in ["googleapiclient.discovery",
                         "googleapiclient.discovery_cache",
                         "oauth2client.transport"]:
                logging.getLogger(name).setLevel(logging.ERROR)

            tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
                tpu=params["tpu"],
                zone=params["tpu_zone"],
                project=params["tpu_gcp_project"],
                coordinator_name="coordinator"
            )

            logging.info("Issuing reset command to TPU to ensure a clean state.")
            tf.Session.reset(tpu_cluster_resolver.get_master())

            # Estimator looks at the master it connects to for MonitoredTrainingSession
            # by reading the `TF_CONFIG` environment variable, and the coordinator
            # is used by StreamingFilesDataset.
            tf_config_env = {
                "session_master": tpu_cluster_resolver.get_master(),
                "eval_session_master": tpu_cluster_resolver.get_master(),
                "coordinator": tpu_cluster_resolver.cluster_spec().as_dict()["coordinator"]
            }
            os.environ["TF_CONFIG"] = json.dumps(tf_config_env)

            distribution = tf.distribute.experimental.TPUStrategy(
                tpu_cluster_resolver, steps_per_run=100)
        else:
            distribution = distribution_utils.get_distribution_strategy(
                num_gpus=params["num_gpus"])

        return distribution
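The first few lines show a generally useful pattern: silencing chatty third-party libraries by raising the threshold of their named loggers. A self-contained sketch with illustrative logger names (substitute whatever is noisy in your own program):

    import logging

    for name in ["urllib3.connectionpool", "googleapiclient.discovery"]:
        logging.getLogger(name).setLevel(logging.ERROR)  # drop INFO/WARNING chatter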
Example #4
Source File: log.py From reinvent-randomized with MIT License
    def get_logger(name, level=logging.INFO, with_tqdm=True):
        if with_tqdm:
            handler = TQDMHandler()
        else:
            handler = logging.StreamHandler(stream=sys.stderr)
        formatter = logging.Formatter(
            fmt="%(asctime)s: %(module)s.%(funcName)s +%(lineno)s: %(levelname)-8s %(message)s",
            datefmt="%H:%M:%S"
        )
        handler.setFormatter(formatter)
        logger = logging.getLogger(name)
        logger.setLevel(level)
        logger.addHandler(handler)
        return logger
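Hypothetical usage of this helper (the logger name and message are made up for illustration):

    logger = get_logger("training", with_tqdm=False)
    logger.info("processed %d items", 128)

Note that each call attaches a fresh handler to the logger it returns, and logging.getLogger() returns the same object for the same name; calling get_logger() twice with one name would therefore duplicate every message, so callers presumably create each named logger once.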
Example #5
Source File: __init__.py From abseil-py with Apache License 2.0
    def _initialize():
        """Initializes loggers and handlers."""
        global _absl_logger, _absl_handler

        if _absl_logger:
            return

        original_logger_class = logging.getLoggerClass()
        logging.setLoggerClass(ABSLLogger)
        _absl_logger = logging.getLogger('absl')
        logging.setLoggerClass(original_logger_class)

        python_logging_formatter = PythonFormatter()
        _absl_handler = ABSLHandler(python_logging_formatter)
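The temporary setLoggerClass() swap is what lets abseil give only the 'absl' logger a custom class while every other module's loggers keep the default. A minimal sketch of the same pattern with a hypothetical subclass:

    import logging


    class PrefixedLogger(logging.Logger):
        """Hypothetical Logger subclass, used only to illustrate the swap."""

        def info(self, msg, *args, **kwargs):
            super().info("[custom] " + msg, *args, **kwargs)


    # Temporarily install the subclass so exactly one named logger uses it,
    # then restore the original class so later getLogger() calls are unaffected.
    original = logging.getLoggerClass()
    logging.setLoggerClass(PrefixedLogger)
    custom_logger = logging.getLogger("needs.custom.class")
    logging.setLoggerClass(original)

    assert isinstance(custom_logger, PrefixedLogger)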
Example #6
Source File: ncf_common.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
    def get_v1_distribution_strategy(params):
        """Returns the distribution strategy to use."""
        if params["use_tpu"]:
            # Some of the networking libraries are quite chatty.
            for name in ["googleapiclient.discovery",
                         "googleapiclient.discovery_cache",
                         "oauth2client.transport"]:
                logging.getLogger(name).setLevel(logging.ERROR)

            tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
                tpu=params["tpu"],
                zone=params["tpu_zone"],
                project=params["tpu_gcp_project"],
                coordinator_name="coordinator"
            )

            logging.info("Issuing reset command to TPU to ensure a clean state.")
            tf.Session.reset(tpu_cluster_resolver.get_master())

            # Estimator looks at the master it connects to for MonitoredTrainingSession
            # by reading the `TF_CONFIG` environment variable, and the coordinator
            # is used by StreamingFilesDataset.
            tf_config_env = {
                "session_master": tpu_cluster_resolver.get_master(),
                "eval_session_master": tpu_cluster_resolver.get_master(),
                "coordinator": tpu_cluster_resolver.cluster_spec().as_dict()["coordinator"]
            }
            os.environ["TF_CONFIG"] = json.dumps(tf_config_env)

            distribution = tf.distribute.experimental.TPUStrategy(
                tpu_cluster_resolver, steps_per_run=100)
        else:
            distribution = distribution_utils.get_distribution_strategy(
                num_gpus=params["num_gpus"])

        return distribution