Python daiquiri.getLogger() Examples

The following are 18 code examples of daiquiri.getLogger(), collected from open source projects. The originating project, source file, and license are noted above each example.
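Before any of these loggers emit output, daiquiri.setup() must be called once at program startup. A minimal sketch of the typical pattern (the subsystem keyword is illustrative, not taken from any project below):

import logging

import daiquiri

# Configure logging once; by default, records go to stderr.
daiquiri.setup(level=logging.INFO)

# getLogger() returns a logging adapter; extra keyword arguments become
# structured context attached to every record this logger emits.
logger = daiquiri.getLogger(__name__, subsystem="demo")
logger.info("ready")  # rendered with the extras, e.g. "... INFO __main__ [subsystem: demo]: ready"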
Example #1
Source File: utils.py    From mergify-engine with Apache License 2.0
def get_pull_logger(pull):
    return daiquiri.getLogger(
        __name__,
        gh_owner=pull["base"]["user"]["login"] if "base" in pull else "<unknown-yet>",
        gh_repo=(pull["base"]["repo"]["name"] if "base" in pull else "<unknown-yet>"),
        gh_private=(
            pull["base"]["repo"]["private"] if "base" in pull else "<unknown-yet>"
        ),
        gh_branch=pull["base"]["ref"] if "base" in pull else "<unknown-yet>",
        gh_pull=pull["number"],
        gh_pull_url=pull.get("html_url", "<unknown-yet>"),
        gh_pull_state=(
            "merged"
            if pull.get("merged")
            else (pull.get("mergeable_state", "unknown") or "none")
        ),
    ) 
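Note how every keyword argument passed to daiquiri.getLogger() above is bound to the returned logger as structured context: each record about this pull request carries the gh_* fields without repeating them at every call site. The same pattern in miniature (the request_id field is purely illustrative):

import logging

import daiquiri

daiquiri.setup(level=logging.INFO)

log = daiquiri.getLogger(__name__, request_id="abc123")  # illustrative field
log.info("handling request")  # the record is rendered with [request_id: abc123]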
Example #2
Source File: config.py    From matrix-capsules-with-em-routing with Apache License 2.0
def setup_logger(logger_dir, name="logger"):
  os.environ['TZ'] = 'Africa/Johannesburg'
  time.tzset()
  daiquiri_formatter = daiquiri.formatter.ColorFormatter(
      fmt="%(asctime)s %(color)s%(levelname)s: %(message)s%(color_stop)s",
      datefmt="%Y-%m-%d %H:%M:%S")
  logger_path = os.path.join(logger_dir, name)
  daiquiri.setup(level=logging.INFO, outputs=(
      daiquiri.output.Stream(formatter=daiquiri_formatter),
      daiquiri.output.File(logger_path, formatter=daiquiri_formatter),
  ))
  # To access the logger from other files, just put this line at the top:
  # logger = daiquiri.getLogger(__name__)

  
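As the comment in setup_logger() notes, once daiquiri.setup() has run, any other module in the project can obtain a configured logger with a single line. A sketch of such a consumer module (train.py is a hypothetical file name):

# train.py -- hypothetical module elsewhere in the project
import daiquiri

logger = daiquiri.getLogger(__name__)

def train():
    # Records go to both outputs configured by setup_logger():
    # the colored stream and the log file.
    logger.info("starting training")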
Example #3
Source File: processes.py    From crontabs with MIT License
def wrapped_target(target, q_stdout, q_stderr, q_error, robust, name, *args, **kwargs):  # pragma: no cover
    """
    Wraps a target with queues replacing stdout and stderr
    """
    import sys
    sys.stdout = IOQueue(q_stdout)
    sys.stderr = IOQueue(q_stderr)

    try:
        target(*args, **kwargs)

    except:  # noqa
        if not robust:
            s = 'Error in tab\n' + traceback.format_exc()
            logger = daiquiri.getLogger(name)
            logger.error(s)
        else:
            raise

        if not robust:
            q_error.put(name)
        raise 
Example #4
Source File: verification.py    From msprime with GNU General Public License v3.0
def setup_logging(args):
    log_level = "WARN"
    if args.verbose == 1:
        log_level = "INFO"
    elif args.verbose >= 2:
        log_level = "DEBUG"

    daiquiri.setup(level=log_level)
    msprime_logger = daiquiri.getLogger("msprime")
    msprime_logger.setLevel("WARN") 
Example #5
Source File: worker.py    From mergify-engine with Apache License 2.0
def run_engine(owner, repo, pull_number, sources):
    logger = daiquiri.getLogger(
        __name__, gh_repo=repo, gh_owner=owner, gh_pull=pull_number
    )
    logger.debug("engine in thread start")
    try:
        result = asyncio.run(get_pull_for_engine(owner, repo, pull_number, logger))
        if result:
            subscription, pull = result
            with github.get_client(owner, repo) as client:
                engine.run(client, pull, subscription, sources)
    finally:
        logger.debug("engine in thread end") 
Example #6
Source File: test_formatter.py    From daiquiri with Apache License 2.0
def setUpClass(cls):
        cls.logger = daiquiri.getLogger('my_module')
        cls.logger.setLevel(logging.INFO)
        cls.stream = six.moves.StringIO()
        cls.handler = daiquiri.handlers.TTYDetectorStreamHandler(cls.stream)
        cls.logger.logger.addHandler(cls.handler)
        super(TestColorExtrasFormatter, cls).setUpClass() 
Example #7
Source File: test_daiquiri.py    From daiquiri with Apache License 2.0
def test_extra_with_two_loggers():
    stream = six.moves.StringIO()
    daiquiri.setup(outputs=(
        daiquiri.output.Stream(stream),
    ))
    log1 = daiquiri.getLogger("foobar")
    log1.error("argh")
    log2 = daiquiri.getLogger("foobar", key="value")
    log2.warning("boo")
    lines = stream.getvalue().strip().split("\n")
    assert lines[0].endswith("ERROR    foobar: argh")
    assert lines[1].endswith("WARNING  foobar [key: value]: boo") 
Example #8
Source File: test_daiquiri.py    From daiquiri with Apache License 2.0
def test_get_logger_set_level(self):
        logger = daiquiri.getLogger(__name__)
        logger.setLevel(logging.DEBUG) 
Example #9
Source File: test_daiquiri.py    From daiquiri with Apache License 2.0
def test_setup_json_formatter_with_extras(self):
        stream = six.moves.StringIO()
        daiquiri.setup(outputs=(
            daiquiri.output.Stream(
                stream, formatter=daiquiri.formatter.JSON_FORMATTER),
        ))
        daiquiri.getLogger(__name__).warning("foobar", foo="bar")
        self.assertEqual({"message": "foobar", "foo": "bar"},
                         json.loads(stream.getvalue())) 
Example #10
Source File: test_daiquiri.py    From daiquiri with Apache License 2.0
def test_setup_json_formatter(self):
        stream = six.moves.StringIO()
        daiquiri.setup(outputs=(
            daiquiri.output.Stream(
                stream, formatter=daiquiri.formatter.JSON_FORMATTER),
        ))
        daiquiri.getLogger(__name__).warning("foobar")
        self.assertEqual({"message": "foobar"},
                         json.loads(stream.getvalue())) 
Example #11
Source File: processes.py    From crontabs with MIT License
def process_error_queue(self, error_queue):
        try:
            error_name = error_queue.get(timeout=self.TIMEOUT_SECONDS)
            if error_name:
                error_name = error_name.strip()
                self._subprocesses = [s for s in self._subprocesses if s._name != error_name]
                logger = daiquiri.getLogger(error_name)
                logger.info('Will not auto-restart because it\'s not robust')

        except Empty:
            pass 
Example #12
Source File: processes.py    From crontabs with MIT License
def expired(self):
        expired = False
        if self._until is not None and self._until < datetime.datetime.now():
            expired = True
            if not self._has_logged_expiration:
                self._has_logged_expiration = True
                logger = daiquiri.getLogger(self._name)
                logger.info('Process expired and will no longer run')
        return expired 
Example #13
Source File: crontabs.py    From crontabs with MIT License
def _log(self, msg):
        if self._verbose and not self._SILENCE_LOGGER:  # pragma: no cover
            logger = daiquiri.getLogger(self._name)
            logger.info(msg) 
Example #14
Source File: crontabs.py    From crontabs with MIT License
def get_logger(self, name='crontab_log'):
        logger = daiquiri.getLogger(name)
        return logger 
Example #15
Source File: save_engine.py    From raisetheempires with GNU General Public License v3.0
def exception_handler(exc_type, exc_value, exc_traceback):
    if issubclass(exc_type, KeyboardInterrupt):
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return

    logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
    if crash_log:
        text = editor.edit(filename=os.path.join(log_path(), "log.txt"))

# logger = logging.getLogger(__name__)
# handler = logging.StreamHandler(stream=sys.stdout)
# logger.addHandler(handler) 
Example #16
Source File: queue.py    From mergify-engine with Apache License 2.0
def log(self):
        return daiquiri.getLogger(
            __name__, gh_owner=self.owner, gh_repo=self.repo, gh_branch=self.ref
        ) 
Example #17
Source File: worker.py    From mergify-engine with Apache License 2.0
async def _consume_pulls(self, stream_name, pulls):
        LOG.debug("stream contains %d pulls", len(pulls), stream_name=stream_name)
        for (owner, repo, pull_number), (message_ids, sources) in pulls.items():
            statsd.histogram("engine.streams.batch-size", len(sources))
            logger = daiquiri.getLogger(
                __name__, gh_repo=repo, gh_owner=owner, gh_pull=pull_number
            )

            try:
                logger.debug("engine start with %s sources", len(sources))
                start = time.monotonic()
                await self._run_engine_and_translate_exception_to_retries(
                    stream_name, owner, repo, pull_number, sources
                )
                await self.redis.execute_command("XDEL", stream_name, *message_ids)
                end = time.monotonic()
                logger.debug("engine finished in %s sec", end - start)
            except IgnoredException:
                await self.redis.execute_command("XDEL", stream_name, *message_ids)
                logger.debug("failed to process pull request, ignoring", exc_info=True)
            except MaxPullRetry as e:
                await self.redis.execute_command("XDEL", stream_name, *message_ids)
                logger.error(
                    "failed to process pull request, abandoning",
                    attempts=e.attempts,
                    exc_info=True,
                )
            except PullRetry as e:
                logger.info(
                    "failed to process pull request, retrying",
                    attempts=e.attempts,
                    exc_info=True,
                )
            except StreamRetry:
                raise
            except StreamUnused:
                raise
            except Exception:
                # Ignore it, it will be retried later
                logger.error("failed to process pull request", exc_info=True) 
Example #18
Source File: worker.py    From mergify-engine with Apache License 2.0
async def _extract_pulls_from_stream(self, stream_name):
        messages = await self.redis.xrange(stream_name, count=config.STREAM_MAX_BATCH)
        LOG.debug("read stream", stream_name=stream_name, messages_count=len(messages))
        statsd.histogram("engine.streams.size", len(messages))

        # Group stream messages by pull request
        pulls = collections.OrderedDict()
        for message_id, message in messages:
            data = msgpack.unpackb(message[b"event"], raw=False)
            owner = data["owner"]
            repo = data["repo"]
            source = data["source"]
            if data["pull_number"] is not None:
                key = (owner, repo, data["pull_number"])
                group = pulls.setdefault(key, ([], []))
                group[0].append(message_id)
                group[1].append(source)
            else:
                logger = daiquiri.getLogger(
                    __name__, gh_repo=repo, gh_owner=owner, source=source
                )
                logger.debug("unpacking event")
                try:
                    converted_messages = await self._convert_event_to_messages(
                        stream_name, owner, repo, source
                    )
                except IgnoredException:
                    converted_messages = []
                    logger.debug("ignored error", exc_info=True)
                except StreamRetry:
                    raise
                except StreamUnused:
                    raise
                except Exception:
                    # Ignore it, it will be retried later
                    logger.error("failed to process incomplete event", exc_info=True)
                    continue

                logger.debug("event unpacked into %s messages", len(converted_messages))
                messages.extend(converted_messages)
                deleted = await self.redis.xdel(stream_name, message_id)
                if deleted != 1:
                    # FIXME(sileht): During shutdown, heroku may have already started
                    # another worker that has already taken the lead on this stream_name.
                    # This can create duplicate events in the streams, but that should
                    # not be a big deal, as the engine will not be run by the worker
                    # that is shutting down.
                    contents = await self.redis.xrange(
                        stream_name, start=message_id, end=message_id
                    )
                    if contents:
                        logger.error(
                            "message `%s` have not been deleted has expected, "
                            "(result: %s), content of current message id: %s",
                            message_id,
                            deleted,
                            contents,
                        )
        return pulls