Python logging.handlers.QueueHandler() Examples
The following are 17 code examples of logging.handlers.QueueHandler().
Each example notes the project and source file it was taken from. You may also want to check out all available functions and classes of the module logging.handlers.
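
Before the examples, it helps to see the pattern they all build on: a QueueHandler is attached to a logger so that logging calls only enqueue records, while a QueueListener (often in another thread or in the parent process) drains the queue and dispatches the records to the real handlers. A minimal self-contained sketch of that pairing, using only the standard library (not taken from any of the projects below):

import logging
import queue
from logging.handlers import QueueHandler, QueueListener

log_queue = queue.Queue(-1)  # unbounded; records wait here until drained

# The logger only sees the QueueHandler, so logging calls return quickly
logger = logging.getLogger("example")
logger.addHandler(QueueHandler(log_queue))
logger.setLevel(logging.INFO)

# The listener performs the slow I/O on its own background thread
listener = QueueListener(log_queue, logging.StreamHandler(),
                         respect_handler_level=True)
listener.start()

logger.info("hello from the queue")
listener.stop()  # flushes remaining records before exiting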
Example #1
Source File: worker.py From doufen with MIT License
def __call__(self, *args, **kwargs):
    queue_in = self.queue_in
    queue_out = self.queue_out

    logger = logging.getLogger()
    logger.addHandler(QueueHandler(queue_out))
    logger.setLevel(logging.DEBUG if self._debug else logging.INFO)

    db.init(self._settings['db_path'], False)
    self._ready()

    heartbeat_sequence = 1
    while True:
        try:
            task = queue_in.get(timeout=HEARTBEAT_INTERVAL)
            if isinstance(task, tasks.Task):
                self._work(str(task))
                self._done(task(**self._settings))
        except queues.Empty:
            self._heartbeat(heartbeat_sequence)
            heartbeat_sequence += 1
        except Exception as e:
            self._error(e, traceback.format_exc())
        except KeyboardInterrupt:
            break
Example #2
Source File: loci_processer.py From mikado with GNU Lesser General Public License v3.0
def __setstate__(self, state):
    self.__dict__.update(state)
    self.handler = logging_handlers.QueueHandler(self.logging_queue)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self.json_conf["log_settings"]["log_level"])
    self.logger.propagate = False
    self.engine = dbutils.connect(self.json_conf, self.logger)
    self.analyse_locus = functools.partial(analyse_locus,
                                           json_conf=self.json_conf,
                                           engine=self.engine,
                                           logging_queue=self.logging_queue)
    # self.dump_db, self.dump_conn, self.dump_cursor = self._create_temporary_store(self._tempdir, self.identifier)
    # Note: the original went on to create and attach a second QueueHandler here,
    # which would enqueue every record twice; the redundant attachment is dropped.
Example #3
Source File: multiprocessing_logger.py From seedsync with Apache License 2.0
def get_process_safe_logger(self) -> logging.Logger:
    """
    Returns a process-safe logger
    This logger sends all records to the main process
    :return:
    """
    queue_handler = QueueHandler(self.__queue)
    root_logger = logging.getLogger()

    # The fork may have happened after the root logger was setup by the main process
    # Remove all handlers from the root logger for this process
    handlers = root_logger.handlers[:]
    for handler in handlers:
        handler.close()
        root_logger.removeHandler(handler)

    root_logger.addHandler(queue_handler)
    root_logger.setLevel(self.__logger_level)
    return root_logger
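
The example above covers only the child-process side; the main process still needs something to drain the shared queue and write the records out. A minimal sketch of that listener side, assuming a multiprocessing.Queue shared with the workers (start_main_process_listener and log_queue are illustrative names, not part of seedsync):

import logging
import multiprocessing
from logging.handlers import QueueListener

def start_main_process_listener(log_queue: multiprocessing.Queue) -> QueueListener:
    # Handlers that actually write records live in the main process
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter("%(processName)s %(levelname)s %(message)s"))
    # QueueListener drains the queue on a background thread
    listener = QueueListener(log_queue, console, respect_handler_level=True)
    listener.start()
    return listener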
Example #4
Source File: autotune.py From scVI with MIT License
def configure_asynchronous_logging(logging_queue: multiprocessing.Queue):
    """Helper for asynchronous logging - Writes all logs to a queue."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    queue_handler = QueueHandler(logging_queue)
    queue_handler.setLevel(logging.DEBUG)
    root_logger.addHandler(queue_handler)
    logger_all.debug("Asynchronous logging has been set.")
Example #5
Source File: accountant.py From mikado with GNU Lesser General Public License v3.0
def __setup_logger(self):
    """
    Private method to set up the logger using indications in the args namespace.
    """

    if hasattr(self.args, "log_queue"):
        # noinspection PyUnresolvedReferences
        self.queue_handler = log_handlers.QueueHandler(self.args.log_queue)
    else:
        # The original assigned the class itself; it must be instantiated
        self.queue_handler = logging.NullHandler()

    if self._counter is None:
        self.logger = logging.getLogger("stat_logger")
    else:
        self.logger = logging.getLogger("stat_logger-{}".format(self._counter))
    self.logger.addHandler(self.queue_handler)

    # noinspection PyUnresolvedReferences
    if self.args.verbose:
        self.logger.setLevel(logging.DEBUG)
    else:
        self.logger.setLevel(logging.INFO)
    self.logger.propagate = False
    return

# pylint: disable=too-many-locals
Example #6
Source File: test_concurrent_futures.py From android_universal with MIT License
def init_fail(log_queue=None):
    if log_queue is not None:
        logger = logging.getLogger('concurrent.futures')
        logger.addHandler(QueueHandler(log_queue))
        logger.setLevel('CRITICAL')
        logger.propagate = False
    time.sleep(0.1)  # let some futures be scheduled
    raise ValueError('error in initializer')
Example #7
Source File: server.py From coriolis with GNU Affero General Public License v3.0
def _setup_task_process(mp_log_q):
    # Setting up logging and cfg, needed since this is a new process
    cfg.CONF(sys.argv[1:], project='coriolis', version="1.0.0")
    utils.setup_logging()

    # Log events need to be handled in the parent process
    log_root = logging.getLogger(None).logger
    # Iterate over a copy: removing while iterating would skip handlers
    for handler in list(log_root.handlers):
        log_root.removeHandler(handler)
    log_root.addHandler(handlers.QueueHandler(mp_log_q))
Example #8
Source File: logging.py From flambe with MIT License
def __init__(self,
             log_dir: str,
             verbose: bool = False,
             root_log_level: Optional[int] = None,
             capture_warnings: bool = True,
             console_prefix: Optional[str] = None,
             hyper_params: Optional[Dict] = None) -> None:
    self.log_dir = log_dir
    self.verbose = verbose
    self.log_level = logging.NOTSET
    self.capture_warnings = capture_warnings
    self.listener: handlers.QueueListener
    self.console_prefix = console_prefix
    self.handlers: List[logging.Handler] = []
    self.queue_handler: handlers.QueueHandler
    self.old_root_log_level: int = logging.NOTSET
    self.hyper_params: Dict = hyper_params or {}
Example #9
Source File: queue.py From concurrent-log-handler with Apache License 2.0
def queuify_logger(logger, queue_handler, queue_listener):
    """Replace logger's handlers with a queue handler while adding existing
    handlers to a queue listener.

    This is useful when you want to use a default logging config but then
    optionally add a logger's handlers to a queue during runtime.

    Args:
        logger (mixed): Logger instance or string name of logger to queue-ify handlers.
        queue_handler (QueueHandler): Instance of a ``QueueHandler``.
        queue_listener (QueueListener): Instance of a ``QueueListener``.
    """
    if isinstance(logger, str):
        logger = logging.getLogger(logger)

    # Get handlers that aren't being listened for.
    handlers = [handler for handler in logger.handlers
                if handler not in queue_listener.handlers]

    if handlers:
        # The default QueueListener stores handlers as a tuple.
        queue_listener.handlers = tuple(list(queue_listener.handlers) + handlers)

    # Remove logger's handlers and replace with single queue handler.
    del logger.handlers[:]
    logger.addHandler(queue_handler)
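
As a usage sketch (not from the concurrent-log-handler sources, and assuming queuify_logger is imported from the module above), an existing logger with a StreamHandler attached can be switched to queue-based logging like this:

import logging
import queue
from logging.handlers import QueueHandler, QueueListener

log_queue = queue.Queue(-1)  # unbounded queue between logger and listener
queue_handler = QueueHandler(log_queue)
queue_listener = QueueListener(log_queue, respect_handler_level=True)

logger = logging.getLogger("app")
logger.addHandler(logging.StreamHandler())  # the existing "real" handler

# Moves the StreamHandler onto the listener, leaving only the queue handler
queuify_logger(logger, queue_handler, queue_listener)
queue_listener.start()
logger.warning("handled on the listener's background thread")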
Example #10
Source File: queue.py From concurrent-log-handler with Apache License 2.0
def setup_logging_queues():
    if sys.version_info.major < 3:
        raise RuntimeError("This feature requires Python 3.")

    queue_listeners = []

    # Q: What about loggers created after this is called?
    # A: if they don't attach their own handlers they should be fine
    for logger in get_all_logger_names(include_root=True):
        logger = logging.getLogger(logger)
        if logger.handlers:
            log_queue = queue.Queue(-1)  # No limit on size

            queue_handler = QueueHandler(log_queue)
            queue_listener = QueueListener(log_queue, respect_handler_level=True)

            queuify_logger(logger, queue_handler, queue_listener)
            # print("Replaced logger %s with queue listener: %s" % (
            #     logger, queue_listener
            # ))
            queue_listeners.append(queue_listener)

    for listener in queue_listeners:
        listener.start()

    atexit.register(stop_queue_listeners, *queue_listeners)
    return
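
In practice this is meant to be called once, right after the usual logging configuration; a minimal usage sketch, assuming the module path concurrent_log_handler.queue (the basicConfig setup is illustrative):

import logging
from concurrent_log_handler.queue import setup_logging_queues

# Configure handlers as usual, then swap them behind queues in one call
logging.basicConfig(level=logging.INFO, filename="app.log")
setup_logging_queues()
logging.getLogger(__name__).info("written via a background QueueListener")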
Example #11
Source File: __init__.py From udata with GNU Affero General Public License v3.0
def _extract_output(q):
    '''Extract log output from a QueueHandler queue'''
    out = []
    while not q.empty():
        record = q.get()
        # Use list instead of tuple to have the same data before and after mongo persist
        out.append([record.levelname.lower(), record.getMessage()])
    return out
Example #12
Source File: logging.py From quart with MIT License
def _setup_logging_queue(*handlers: Handler) -> QueueHandler:
    """Create a new LocalQueueHandler and start an associated QueueListener."""
    queue: Queue = Queue()
    queue_handler = LocalQueueHandler(queue)

    serving_listener = QueueListener(queue, *handlers, respect_handler_level=True)
    serving_listener.start()

    return queue_handler
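
LocalQueueHandler here is Quart's own subclass rather than a standard-library class. The usual point of such a subclass is that, with an in-process queue, records need none of the pickling-oriented work QueueHandler.prepare() does. A sketch of how this kind of subclass is commonly written (an assumption about Quart's implementation, not copied from it):

from logging import LogRecord
from logging.handlers import QueueHandler

class LocalQueueHandler(QueueHandler):
    def emit(self, record: LogRecord) -> None:
        # Skip self.prepare(): the queue never leaves this process,
        # so the record can be enqueued as-is.
        try:
            self.enqueue(record)
        except Exception:
            self.handleError(record)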
Example #13
Source File: __init__.py From udata with GNU Affero General Public License v3.0
def execute(self, recordonly=False, dryrun=False):
    '''
    Execute a migration

    If recordonly is True, the migration is only recorded.
    If dryrun is True, the migration is neither executed nor recorded.
    '''
    q = queue.Queue(-1)  # no limit on size
    handler = QueueHandler(q)
    handler.setFormatter(MigrationFormatter())
    logger = getattr(self.module, 'log', logging.getLogger(self.module.__name__))
    logger.propagate = False
    # Iterate over a copy: removing while iterating would skip handlers
    for h in logger.handlers[:]:
        logger.removeHandler(h)
    logger.addHandler(handler)

    if not hasattr(self.module, 'migrate'):
        error = SyntaxError('A migration should at least have a migrate(db) function')
        raise MigrationError('Error while executing migration', exc=error)

    out = [['info', 'Recorded only']] if recordonly else []
    state = {}

    if not recordonly and not dryrun:
        db = get_db()
        db._state = state
        try:
            self.module.migrate(db)
            out = _extract_output(q)
        except Exception as e:
            out = _extract_output(q)
            self.add_record('migrate', out, db._state, False)
            fe = MigrationError('Error while executing migration',
                                output=out, exc=e)
            if hasattr(self.module, 'rollback'):
                try:
                    self.module.rollback(db)
                    out = _extract_output(q)
                    self.add_record('rollback', out, db._state, True)
                    msg = 'Error while executing migration, rollback has been applied'
                    fe = RollbackError(msg, output=out, migrate_exc=fe)
                except Exception as re:
                    out = _extract_output(q)
                    self.add_record('rollback', out, db._state, False)
                    msg = 'Error while executing migration rollback'
                    fe = RollbackError(msg, output=out, exc=re, migrate_exc=fe)
            raise fe

    if not dryrun:
        self.add_record('migrate', out, state, True)

    return out
Example #14
Source File: loci_processer.py From mikado with GNU Lesser General Public License v3.0
def __init__(self,
             json_conf,
             locus_queue,
             logging_queue,
             status_queue,
             identifier,
             tempdir="mikado_pick_tmp"):

    # current_counter, gene_counter, current_chrom = shared_values
    super(LociProcesser, self).__init__()
    json_conf = msgpack.loads(json_conf, raw=False)
    self.logging_queue = logging_queue
    self.status_queue = status_queue
    self.__identifier = identifier  # Property directly unsettable
    self.name = "LociProcesser-{0}".format(self.identifier)
    self.json_conf = json_conf
    self.engine = None
    self.handler = logging_handlers.QueueHandler(self.logging_queue)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self.json_conf["log_settings"]["log_level"])
    self.logger.propagate = False
    self._tempdir = tempdir
    self.locus_queue = locus_queue
    self.regressor = None
    # self.dump_db, self.dump_conn, self.dump_cursor = self._create_temporary_store(self._tempdir, self.identifier)

    if self.json_conf["pick"]["scoring_file"].endswith((".pickle", ".model")):
        with open(self.json_conf["pick"]["scoring_file"], "rb") as forest:
            self.regressor = pickle.load(forest)
        from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
        if not isinstance(self.regressor["scoring"],
                          (RandomForestRegressor, RandomForestClassifier)):
            exc = TypeError("Invalid regressor provided, type: %s" % type(self.regressor))
            self.logger.critical(exc)
            self.exitcode = 9
            self.join()

    self.logger.debug("Starting Process %s", self.name)
    self.logger.debug("Starting the pool for {0}".format(self.name))

    try:
        self.engine = dbutils.connect(self.json_conf, self.logger)
    except KeyboardInterrupt:
        raise
    except EOFError:
        raise
    except Exception as exc:
        self.logger.exception(exc)
        return

    self.analyse_locus = functools.partial(analyse_locus,
                                           json_conf=self.json_conf,
                                           engine=self.engine,
                                           logging_queue=self.logging_queue)
Example #15
Source File: workers.py From pymeasure with MIT License
def run(self):
    global log
    log = logging.getLogger()
    log.setLevel(self.log_level)

    # log.handlers = []  # Remove all other handlers
    # log.addHandler(TopicQueueHandler(self.monitor_queue))
    # log.addHandler(QueueHandler(self.log_queue))
    log.info("Worker thread started")

    self.procedure = self.results.procedure

    self.recorder = Recorder(self.results, self.recorder_queue)
    self.recorder.start()

    # locals()[self.procedures_file] = __import__(self.procedures_file)

    # route Procedure methods & log
    self.procedure.should_stop = self.should_stop
    self.procedure.emit = self.emit

    if self.port is not None and zmq is not None:
        try:
            self.context = zmq.Context()
            log.debug("Worker ZMQ Context: %r" % self.context)
            self.publisher = self.context.socket(zmq.PUB)
            self.publisher.bind('tcp://*:%d' % self.port)
            log.info("Worker connected to tcp://*:%d" % self.port)
            time.sleep(0.01)
        except Exception:
            log.exception("couldn't connect to ZMQ context")

    log.info("Worker started running an instance of %r",
             self.procedure.__class__.__name__)
    self.update_status(Procedure.RUNNING)
    self.emit('progress', 0.)

    try:
        self.procedure.startup()
        self.procedure.execute()
    except (KeyboardInterrupt, SystemExit):
        self.handle_abort()
    except Exception:
        self.handle_error()
    finally:
        self.shutdown()
        self.stop()
Example #16
Source File: compare.py From mikado with GNU Lesser General Public License v3.0
def setup_logger(args):
    """
    Function to setup the logger for the compare function.
    :param args:
    :return:
    """

    args.log_queue = mp.Queue(-1)
    args.queue_handler = log_handlers.QueueHandler(args.log_queue)

    if args.log is not None:
        _log_folder = os.path.dirname(args.log)
        if _log_folder and not os.path.exists(_log_folder):
            os.makedirs(_log_folder)
        handler = logging.FileHandler(args.log, mode="w")
        logger = logging.getLogger("main_compare")
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.propagate = False
    else:
        logger = create_default_logger("main_compare")
        handler = logger.handlers[0]

    if args.verbose is False:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.DEBUG)
    logger.propagate = False

    log_queue_listener = log_handlers.QueueListener(args.log_queue, logger)
    log_queue_listener.propagate = False
    log_queue_listener.start()

    queue_logger = logging.getLogger("main_queue")
    # Iterate over a copy: removing while iterating would skip handlers
    for handler in queue_logger.handlers[:]:
        queue_logger.removeHandler(handler)
    if args.verbose is False:
        queue_logger.setLevel(logging.INFO)
    else:
        queue_logger.setLevel(logging.DEBUG)
    main_queue_handler = log_handlers.QueueHandler(args.log_queue)
    queue_logger.propagate = False
    queue_logger.addHandler(main_queue_handler)

    return args, handler, logger, log_queue_listener, queue_logger
Example #17
Source File: bed12.py From mikado with GNU Lesser General Public License v3.0
def run(self, *args, **kwargs):
    self.handler = logging_handlers.QueueHandler(self.logging_queue)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self._level)
    self.logger.propagate = False
    self.logger.info("Started %s", self.__identifier)
    if self.rec_queue is None:
        self.return_queue.put(b"FINISHED")
        raise ValueError
    while True:
        if self.rec_queue.empty():
            sleep(0.1)
            continue
        line = self.rec_queue.get()
        if line in ("EXIT", b"EXIT"):
            self.rec_queue.put(b"EXIT")
            self.return_queue.put(b"FINISHED")
            break
        try:
            num, line, seq = line
            if seq is not None:
                seq = zlib.decompress(seq).decode()
            if not self._is_bed12:
                row = self.gff_next(line, seq)
            else:
                row = self.bed_next(line, seq)
            if not row or row.header is True:
                continue
            if row.invalid is True:
                self.logger.warning("Invalid entry, reason: %s\n%s",
                                    row.invalid_reason, row)
                continue
            # self.cache[num] =
            self.return_queue.put((num, msgpack.dumps(row.as_simple_dict())))
        except AttributeError:
            pass
        except ValueError:
            raise ValueError(line)