Python logbook.WARNING Examples

The following are 19 code examples of logbook.WARNING. You can go to the original project or source file via the Source File line above each example. You may also want to check out all available functions/classes of the module logbook, or try the search function.
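Before diving in: logbook.WARNING is not a function but an integer level constant (13) used as a threshold for loggers and handlers. A minimal sketch of the pattern the examples below share:

import sys
import logbook

# Records below the logger's WARNING threshold are dropped before dispatch.
log = logbook.Logger('demo', level=logbook.WARNING)
logbook.StreamHandler(sys.stdout).push_application()

log.info('dropped: below WARNING')
log.warning('emitted: at or above WARNING')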
Example #1
Source File: derivative.py    From btgym with GNU Lesser General Public License v3.0
def __init__(
            self,
            filename=None,
            parsing_params=None,
            sampling_params=None,
            name=None,
            data_names=('default_asset',),
            task=0,
            log_level=WARNING,
            _config_stack=None,
    ):

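        # Note: regardless of the arguments received, the call below pins
        # sampling_params=None and name='episode' for episode objects.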
        super(BTgymEpisode, self).__init__(
            filename=filename,
            parsing_params=parsing_params,
            sampling_params=None,
            name='episode',
            task=task,
            data_names=data_names,
            log_level=log_level,
            _config_stack=_config_stack
        ) 
Example #2
Source File: memory.py    From btgym with GNU Lesser General Public License v3.0
def __init__(self, history_size, max_sample_size, priority_sample_size, log_level=WARNING,
                 rollout_provider=None, task=-1, reward_threshold=0.1, use_priority_sampling=False):
        """

        Args:
            history_size:           number of experiences stored;
            max_sample_size:        maximum allowed sample size (e.g. off-policy rollout length);
            priority_sample_size:   sample size of priority_sample() method
            log_level:              int, logbook.level;
            rollout_provider:       callable returning a list of Rollouts (currently not used);
            task:                   parent worker id;
            reward_threshold:       if |experience.reward| > reward_threshold: experience is saved as 'prioritized';
        """
        self._history_size = history_size
        self._frames = deque(maxlen=history_size)
        self.reward_threshold = reward_threshold
        self.max_sample_size = int(max_sample_size)
        self.priority_sample_size = int(priority_sample_size)
        self.rollout_provider = rollout_provider
        self.task = task
        self.log_level = log_level
        StreamHandler(sys.stdout).push_application()
        self.log = Logger('ReplayMemory_{}'.format(self.task), level=self.log_level)
        self.use_priority_sampling = use_priority_sampling
        # Indices for non-priority frames:
        self._zero_reward_indices = deque()
        # Indices for priority frames:
        self._non_zero_reward_indices = deque()
        self._top_frame_index = 0

        if use_priority_sampling:
            self.sample_priority = self._sample_priority

        else:
            self.sample_priority = self._sample_dummy 
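A hedged construction sketch; the class name and import path are assumptions, not shown in the snippet above:

from logbook import WARNING
from btgym.algorithms.memory import Memory   # hypothetical import path

memory = Memory(
    history_size=2000,
    max_sample_size=20,
    priority_sample_size=10,
    log_level=WARNING,        # keep the replay memory's logger quiet below WARNING
    use_priority_sampling=True,
)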
Example #3
Source File: log.py    From code-review with Mozilla Public License 2.0
def setup_sentry(name, channel, dsn):
    """
    Set up the Sentry client using Taskcluster secrets
    """

    # Detect environment
    task_id = os.environ.get("TASK_ID")
    if task_id is not None:
        site = "taskcluster"
    elif "DYNO" in os.environ:
        site = "heroku"
    else:
        site = "unknown"

    sentry_client = raven.Client(
        dsn=dsn,
        site=site,
        name=name,
        environment=channel,
        release=raven.fetch_package_version(f"code-review-{name}"),
    )

    if task_id is not None:
        # Add a Taskcluster task id when available
        # It will be shown in the Additional Data section on the dashboard
        sentry_client.context.merge({"extra": {"task_id": task_id}})

    sentry_handler = raven.handlers.logbook.SentryHandler(
        sentry_client, level=logbook.WARNING, bubble=True
    )
    sentry_handler.push_application() 
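A hedged call sketch; the name, channel, and DSN values are illustrative only:

setup_sentry(
    name='bot',                                    # looks up package 'code-review-bot'
    channel='production',
    dsn='https://key@sentry.example.com/project',  # illustrative DSN
)

Because the handler is created with level=logbook.WARNING, only records at WARNING level or above are forwarded to Sentry, and bubble=True lets them continue to any other pushed handlers as well.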
Example #4
Source File: log.py    From code-coverage with Mozilla Public License 2.0
def setup_sentry(name, channel, dsn):
    """
    Set up the Sentry client using Taskcluster secrets
    """

    # Detect environment
    task_id = os.environ.get("TASK_ID")
    if task_id is not None:
        site = "taskcluster"
    elif "DYNO" in os.environ:
        site = "heroku"
    else:
        site = "unknown"

    sentry_client = raven.Client(
        dsn=dsn,
        site=site,
        name=name,
        environment=channel,
        release=raven.fetch_package_version(f"code-coverage-{name}"),
    )

    if task_id is not None:
        # Add a Taskcluster task id when available
        # It will be shown in the Additional Data section on the dashboard
        sentry_client.context.merge({"extra": {"task_id": task_id}})

    sentry_handler = raven.handlers.logbook.SentryHandler(
        sentry_client, level=logbook.WARNING, bubble=True
    )
    sentry_handler.push_application() 
Example #5
Source File: log_service.py    From cookiecutter-course with GNU General Public License v2.0
def __get_logbook_logging_level(level_str):
        # logbook levels:
        # CRITICAL = 15
        # ERROR = 14
        # WARNING = 13
        # NOTICE = 12
        # INFO = 11
        # DEBUG = 10
        # TRACE = 9
        # NOTSET = 0

        level_str = level_str.upper().strip()

        if level_str == 'CRITICAL':
            return logbook.CRITICAL
        elif level_str == 'ERROR':
            return logbook.ERROR
        elif level_str == 'WARNING':
            return logbook.WARNING
        elif level_str == 'NOTICE':
            return logbook.NOTICE
        elif level_str == 'INFO':
            return logbook.INFO
        elif level_str == 'DEBUG':
            return logbook.DEBUG
        elif level_str == 'TRACE':
            return logbook.TRACE
        elif level_str == 'NOTSET':
            return logbook.NOTSET
        else:
            raise ValueError("Unknown logbook log level: {}".format(level_str)) 
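The if/elif chain is explicit but verbose; logbook ships an equivalent helper, logbook.lookup_level (used in Example #6 below). A compact sketch built on it:

import logbook

def get_logbook_level(level_str):
    try:
        return logbook.lookup_level(level_str.upper().strip())
    except LookupError:
        raise ValueError('Unknown logbook log level: {}'.format(level_str))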
Example #6
Source File: util.py    From threema-msgapi-sdk-python with MIT License
def enable_logging(level=logbook.WARNING, asyncio_level=None, aiohttp_level=None):
    # Determine levels
    level = logbook.lookup_level(level)
    converted_level = _convert_level(level)
    if asyncio_level is None:
        asyncio_level = converted_level
    else:
        asyncio_level = _convert_level(asyncio_level)
    if aiohttp_level is None:
        aiohttp_level = converted_level
    else:
        aiohttp_level = _convert_level(aiohttp_level)

    # Enable logger group
    _logger_group.disabled = False

    # Enable asyncio debug logging
    os.environ['PYTHONASYNCIODEBUG'] = '1'

    # Redirect asyncio logger
    logger = logging.getLogger('asyncio')
    logger.setLevel(asyncio_level)
    logger.addHandler(_logger_redirect_handler)

    # Redirect aiohttp logger
    logger = logging.getLogger('aiohttp')
    logger.setLevel(aiohttp_level)
    logger.addHandler(_logger_redirect_handler) 
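A hedged usage sketch: keep the SDK quiet at WARNING while surfacing asyncio's INFO messages:

import logbook

enable_logging(level=logbook.WARNING, asyncio_level=logbook.INFO)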
Example #7
Source File: custom_logger.py    From WindAdapter with MIT License
def set_level(self, log_level):
        if log_level.lower() == LogLevel.INFO:
            self.logger.level = logbook.INFO
        elif log_level.lower() == LogLevel.WARNING:
            self.logger.level = logbook.WARNING
        elif log_level.lower() == LogLevel.CRITICAL:
            self.logger.level = logbook.CRITICAL
        elif log_level.lower() == LogLevel.NOTSET:
            self.logger.level = logbook.NOTSET 
Example #8
Source File: log_service.py    From cookiecutter-pyramid-talk-python-starter with MIT License
def __get_logbook_logging_level(level_str):
        # logbook levels:
        # CRITICAL = 15
        # ERROR = 14
        # WARNING = 13
        # NOTICE = 12
        # INFO = 11
        # DEBUG = 10
        # TRACE = 9
        # NOTSET = 0

        level_str = level_str.upper().strip()

        if level_str == 'CRITICAL':
            return logbook.CRITICAL
        elif level_str == 'ERROR':
            return logbook.ERROR
        elif level_str == 'WARNING':
            return logbook.WARNING
        elif level_str == 'NOTICE':
            return logbook.NOTICE
        elif level_str == 'INFO':
            return logbook.INFO
        elif level_str == 'DEBUG':
            return logbook.DEBUG
        elif level_str == 'TRACE':
            return logbook.TRACE
        elif level_str == 'NOTSET':
            return logbook.NOTSET
        else:
            raise ValueError("Unknown logbook log level: {}".format(level_str)) 
Example #9
Source File: config.py    From pantalaimon with Apache License 2.0
def parse_log_level(value):
    # type: (str) -> int
    value = value.lower()

    if value == "info":
        return logbook.INFO
    elif value == "warning":
        return logbook.WARNING
    elif value == "error":
        return logbook.ERROR
    elif value == "debug":
        return logbook.DEBUG

    return logbook.WARNING 
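Two illustrative calls; note that an unrecognized string falls back silently rather than raising:

assert parse_log_level('debug') == logbook.DEBUG
assert parse_log_level('nonsense') == logbook.WARNING   # silent fallback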
Example #10
Source File: bin.py    From saltyrtc-server-python with MIT License
def _get_logging_level(verbosity: int) -> LogbookLevel:
    import logbook
    return LogbookLevel({
        1: logbook.CRITICAL,
        2: logbook.ERROR,
        3: logbook.WARNING,
        4: logbook.NOTICE,
        5: logbook.INFO,
        6: logbook.DEBUG,
        7: logbook.TRACE,
    }[verbosity]) 
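Note that verbosity is expected to be in the range 1-7 (3 selects logbook.WARNING); any other value raises a KeyError from the dictionary lookup.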
Example #11
Source File: util.py    From saltyrtc-server-python with MIT License
def enable_logging(
        level: Optional[LogbookLevel] = None,
        redirect_loggers: Optional[Mapping[str, LogbookLevel]] = None,
) -> None:
    """
    Enable logging for the *saltyrtc* logger group.

    Arguments:
        - `level`: A :mod:`logbook` logging level. Defaults to
          ``WARNING``.
        - `redirect_loggers`: A dictionary containing :mod:`logging`
          logger names as key and the targeted :mod:`logbook` logging
          level as value. Each logger will be looked up and redirected
          to :mod:`logbook`. Defaults to an empty dictionary.

    Raises :class:`ImportError` in case :mod:`logbook` is not
    installed.
    """
    if _logger_convert_level_handler is None:
        _logging_error()

    # At this point, logbook is either available or an ImportError has been raised
    if level is None:
        level = logbook.WARNING
    logger_group.disabled = False
    logger_group.level = level
    if redirect_loggers is not None:
        _redirect_logging_loggers(redirect_loggers, remove=False) 
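Example #17 below shows this function called from a pytest fixture; a minimal standalone call mirroring it:

import logbook

enable_logging(level=logbook.DEBUG, redirect_loggers={
    'asyncio': logbook.WARNING,
    'websockets': logbook.WARNING,
})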
Example #12
Source File: derivative.py    From btgym with GNU Lesser General Public License v3.0
def __init__(
            self,
            filename=None,
            parsing_params=None,
            sampling_params=None,
            name=None,
            data_names=('default_asset',),
            frozen_time_split=None,
            task=0,
            log_level=WARNING,
            _config_stack=None,
    ):
        """
        Args:
            filename:           not used;
            sampling_params:    dict, sample retrieving options, see base class description for details;
            task:               int, optional;
            parsing_params:     csv parsing options, see base class description for details;
            log_level:          int, optional, logbook.level;
            _config_stack:      dict, holding configuration for nested child samples;
        """

        super(BTgymDataTrial, self).__init__(
            filename=filename,
            parsing_params=parsing_params,
            sampling_params=sampling_params,
            name='Trial',
            data_names=data_names,
            frozen_time_split=frozen_time_split,
            task=task,
            log_level=log_level,
            _config_stack=_config_stack
        ) 
Example #13
Source File: log_service.py    From python-for-entrepreneurs-course-demos with MIT License
def __get_logbook_logging_level(level_str):
        # logbook levels:
        # CRITICAL = 15
        # ERROR = 14
        # WARNING = 13
        # NOTICE = 12
        # INFO = 11
        # DEBUG = 10
        # TRACE = 9
        # NOTSET = 0

        level_str = level_str.upper().strip()

        if level_str == 'CRITICAL':
            return logbook.CRITICAL
        elif level_str == 'ERROR':
            return logbook.ERROR
        elif level_str == 'WARNING':
            return logbook.WARNING
        elif level_str == 'NOTICE':
            return logbook.NOTICE
        elif level_str == 'INFO':
            return logbook.INFO
        elif level_str == 'DEBUG':
            return logbook.DEBUG
        elif level_str == 'TRACE':
            return logbook.TRACE
        elif level_str == 'NOTSET':
            return logbook.NOTSET
        else:
            raise ValueError("Unknown logbook log level: {}".format(level_str)) 
Example #14
Source File: test_suite_algo.py    From catalyst with Apache License 2.0
def test_run_examples(self):
        # folder = join('..', '..', '..', 'catalyst', 'examples')
        HERE = os.path.dirname(os.path.abspath(__file__))
        folder = os.path.join(HERE, '..', '..', '..', 'catalyst', 'examples')

        files = [f for f in os.listdir(folder)
                 if os.path.isfile(os.path.join(folder, f))]

        algo_list = []
        for filename in files:
            name = os.path.basename(filename)
            if filter_algos and name not in filter_algos:
                continue

            module_name = 'catalyst.examples.{}'.format(
                name.replace('.py', '')
            )
            algo_list.append(module_name)

        exchanges = ['poloniex', 'bittrex', 'binance']
        asset_name = 'btc_usdt'
        quote_currency = 'usdt'
        capital_base = 10000
        data_freq = 'daily'
        start_date = pd.to_datetime('2017-10-01', utc=True)
        end_date = pd.to_datetime('2017-12-01', utc=True)

        for exchange_name in exchanges:
            ingest_exchange_bundles(exchange_name, data_freq, asset_name)

            for module_name in algo_list:
                algo = importlib.import_module(module_name)
                # namespace = module_name.replace('.', '_')

                log_catcher = TestHandler()
                with log_catcher:
                    run_algorithm(
                        capital_base=capital_base,
                        data_frequency=data_freq,
                        initialize=algo.initialize,
                        handle_data=algo.handle_data,
                        analyze=TestSuiteAlgo.analyze,
                        exchange_name=exchange_name,
                        algo_namespace='test_{}'.format(exchange_name),
                        quote_currency=quote_currency,
                        start=start_date,
                        end=end_date,
                        # output=out
                    )
                    warnings = [record for record in log_catcher.records if
                                record.level == WARNING]

                    assert(len(warnings) == 1)
                    assert (warnings[0].message == ALPHA_WARNING_MESSAGE)
                    assert (not log_catcher.has_errors)
                    assert (not log_catcher.has_criticals)

            clean_exchange_bundles(exchange_name, data_freq) 
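The test filters log_catcher.records by level to count warnings; the same pattern in isolation, as a minimal self-contained sketch:

import logbook
from logbook import TestHandler, WARNING

log = logbook.Logger('demo')
with TestHandler() as log_catcher:
    log.warning('alpha warning')

warnings = [r for r in log_catcher.records if r.level == WARNING]
assert len(warnings) == 1
assert not log_catcher.has_errors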
Example #15
Source File: test_algorithm.py    From catalyst with Apache License 2.0
def _test_eod_order_cancel_minute(self, direction, minute_emission):
        """
        Test that EOD order cancel works in minute mode for both shorts and
        longs, and both daily emission and minute emission
        """
        # order 1000 shares of asset1.  the volume is only 1 share per bar,
        # so the order should be cancelled at the end of the day.
        algo = self.prep_algo(
            "set_cancel_policy(cancel_policy.EODCancel())",
            amount=np.copysign(1000, direction),
            minute_emission=minute_emission
        )

        log_catcher = TestHandler()
        with log_catcher:
            results = algo.run(self.data_portal)

            for daily_positions in results.positions:
                self.assertEqual(1, len(daily_positions))
                self.assertEqual(
                    np.copysign(389, direction),
                    daily_positions[0]["amount"],
                )
                self.assertEqual(1, results.positions[0][0]["sid"])

            # should be an order on day1, but no more orders afterwards
            np.testing.assert_array_equal([1, 0, 0],
                                          list(map(len, results.orders)))

            # should be 389 txns on day 1, but no more afterwards
            np.testing.assert_array_equal([389, 0, 0],
                                          list(map(len, results.transactions)))

            the_order = results.orders[0][0]

            self.assertEqual(ORDER_STATUS.CANCELLED, the_order["status"])
            self.assertEqual(np.copysign(389, direction), the_order["filled"])

            warnings = [record for record in log_catcher.records if
                        record.level == WARNING]

            self.assertEqual(1, len(warnings))

            if direction == 1:
                self.assertEqual(
                    "Your order for 1000 shares of ASSET1 has been partially "
                    "filled. 389 shares were successfully purchased. "
                    "611 shares were not filled by the end of day and "
                    "were canceled.",
                    str(warnings[0].message)
                )
            elif direction == -1:
                self.assertEqual(
                    "Your order for -1000 shares of ASSET1 has been partially "
                    "filled. 389 shares were successfully sold. "
                    "611 shares were not filled by the end of day and "
                    "were canceled.",
                    str(warnings[0].message)
                ) 
Example #16
Source File: test_algorithm.py    From catalyst with Apache License 2.0
def _test_order_in_quiet_period(self, name, sid):
        asset = self.asset_finder.retrieve_asset(sid)

        algo_code = dedent("""
        from catalyst.api import (
            sid,
            order,
            order_value,
            order_percent,
            order_target,
            order_target_percent,
            order_target_value
        )

        def initialize(context):
            pass

        def handle_data(context, data):
            order(sid({sid}), 1)
            order_value(sid({sid}), 100)
            order_percent(sid({sid}), 0.5)
            order_target(sid({sid}), 50)
            order_target_percent(sid({sid}), 0.5)
            order_target_value(sid({sid}), 50)
        """).format(sid=sid)

        # run algo from 1/6 to 1/7
        algo = TradingAlgorithm(
            script=algo_code,
            env=self.env,
            sim_params=SimulationParameters(
                start_session=pd.Timestamp("2016-01-06", tz='UTC'),
                end_session=pd.Timestamp("2016-01-07", tz='UTC'),
                trading_calendar=self.trading_calendar,
                data_frequency="minute"
            )
        )

        with make_test_handler(self) as log_catcher:
            algo.run(self.data_portal)

            warnings = [r for r in log_catcher.records
                        if r.level == logbook.WARNING]

            # one warning per order on the second day
            self.assertEqual(6 * 390, len(warnings))

            for w in warnings:
                expected_message = (
                    'Cannot place order for ASSET{sid}, as it has de-listed. '
                    'Any existing positions for this asset will be liquidated '
                    'on {date}.'.format(sid=sid, date=asset.auto_close_date)
                )
                self.assertEqual(expected_message, w.message) 
Example #17
Source File: conftest.py    From saltyrtc-server-python with MIT License
def server_factory(request, event_loop, server_permanent_keys):
    """
    Return a factory to create :class:`saltyrtc.Server` instances.
    """
    # Enable asyncio debug logging
    event_loop.set_debug(True)

    # Enable logging
    util.enable_logging(level=logbook.DEBUG, redirect_loggers={
        'asyncio': logbook.WARNING,
        'websockets': logbook.WARNING,
    })

    # Push handlers
    logging_handler = logbook.StderrHandler(bubble=True)
    logging_handler.push_application()

    _server_instances = []

    def _server_factory(permanent_keys=None):
        if permanent_keys is None:
            permanent_keys = server_permanent_keys

        # Setup server
        port = unused_tcp_port()
        coroutine = serve(
            util.create_ssl_context(
                pytest.saltyrtc.cert, keyfile=pytest.saltyrtc.key,
                dh_params_file=pytest.saltyrtc.dh_params),
            permanent_keys,
            host=pytest.saltyrtc.host,
            port=port,
            loop=event_loop,
            server_class=TestServer,
        )
        server_ = event_loop.run_until_complete(coroutine)
        # Inject timeout and address (little bit of a hack but meh...)
        server_.timeout = _get_timeout(request=request)
        server_.address = (pytest.saltyrtc.host, port)

        _server_instances.append(server_)

        def fin():
            server_.close()
            event_loop.run_until_complete(server_.wait_closed())
            _server_instances.remove(server_)
            if len(_server_instances) == 0:
                logging_handler.pop_application()

        request.addfinalizer(fin)
        return server_
    return _server_factory 
Example #18
Source File: log.py    From ClusterRunner with Apache License 2.0
def configure_logging(log_level=None, log_file=None, simplified_console_logs=False):
    """
    This should be called once as early as possible in app startup to configure logging handlers and formatting.

    :param log_level: The level at which to record log messages (DEBUG|INFO|NOTICE|WARNING|ERROR|CRITICAL)
    :type log_level: str
    :param log_file: The file to write logs to, or None to disable logging to a file
    :type log_file: str | None
    :param simplified_console_logs: Whether or not to use the simplified logging format and coloring
    :type simplified_console_logs: bool
    """
    # Set datetimes in log messages to be local timezone instead of UTC
    logbook.set_datetime_format('local')

    # Redirect standard lib logging to capture third-party logs in our log files (e.g., tornado, requests)
    logging.root.setLevel(logging.WARNING)  # don't include DEBUG/INFO/NOTICE-level logs from third parties
    logbook.compat.redirect_logging(set_root_logger_level=False)

    # Add a NullHandler to suppress all log messages lower than our desired log_level. (Otherwise they go to stderr.)
    NullHandler().push_application()

    log_level = log_level or Configuration['log_level']
    format_string, log_colors = _LOG_FORMAT_STRING, _LOG_COLORS
    if simplified_console_logs:
        format_string, log_colors = _SIMPLIFIED_LOG_FORMAT_STRING, _SIMPLIFIED_LOG_COLORS

    # handler for stdout
    log_handler = _ColorizingStreamHandler(
        stream=sys.stdout,
        level=log_level,
        format_string=format_string,
        log_colors=log_colors,
        bubble=True,
    )
    log_handler.push_application()

    # handler for log file
    if log_file:
        fs.create_dir(os.path.dirname(log_file))
        previous_log_file_exists = os.path.exists(log_file)

        event_handler = _ColorizingRotatingFileHandler(
            filename=log_file,
            level=log_level,
            format_string=_LOG_FORMAT_STRING,
            log_colors=_LOG_COLORS,
            bubble=True,
            max_size=Configuration['max_log_file_size'],
            backup_count=Configuration['max_log_file_backups'],
        )
        event_handler.push_application()
        if previous_log_file_exists:
            # Force application to create a new log file on startup.
            event_handler.perform_rollover(increment_logfile_counter=False)
        else:
            event_handler.log_application_summary() 
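A hedged call sketch; the log file path is illustrative, and Configuration supplies any omitted defaults:

configure_logging(log_level='WARNING', log_file='/var/log/clusterrunner/app.log')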
Example #19
Source File: threadrunner.py    From btgym with GNU Lesser General Public License v3.0
def __init__(self,
                 env,
                 policy,
                 task,
                 rollout_length,
                 episode_summary_freq,
                 env_render_freq,
                 test,
                 ep_summary,
                 runner_fn_ref=BaseEnvRunnerFn,
                 memory_config=None,
                 log_level=WARNING,
                 **kwargs):
        """

        Args:
            env:                    environment instance
            policy:                 policy instance
            task:                   int
            rollout_length:         int
            episode_summary_freq:   int
            env_render_freq:        int
            test:                   Atari or BTgym;
            ep_summary:             tf.summary
            runner_fn_ref:          callable defining runner execution logic
            memory_config:          replay memory configuration dictionary
            log_level:              int, logbook.level
        """
        threading.Thread.__init__(self)
        self.queue = queue.Queue(5)
        self.rollout_length = rollout_length
        self.env = env
        self.last_features = None
        self.policy = policy
        self.runner_fn_ref = runner_fn_ref
        self.daemon = True
        self.sess = None
        self.summary_writer = None
        self.episode_summary_freq = episode_summary_freq
        self.env_render_freq = env_render_freq
        self.task = task
        self.test = test
        self.ep_summary = ep_summary
        self.memory_config = memory_config
        self.log_level = log_level
        StreamHandler(sys.stdout).push_application()
        self.log = Logger('ThreadRunner_{}'.format(self.task), level=self.log_level)