Python logging.critical() Examples
The following are 30 code examples of logging.critical(), drawn from open-source projects. Each example lists its source file, the project it comes from, and the project's license. You may also want to check out the other available functions and classes of the logging module.
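As a quick orientation before the project examples: logging.critical(msg, *args) records msg at severity CRITICAL, the highest of the standard levels, on the root logger. Below is a minimal, self-contained sketch; the message text and the simulated error are illustrative, not taken from any of the projects that follow.

import logging

# Configure the root logger once; CRITICAL always passes the default WARNING level.
logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s")

try:
    raise RuntimeError("simulated fatal condition")
except RuntimeError as error:
    # Extra positional arguments are interpolated lazily into %-style placeholders.
    logging.critical("Unrecoverable error, shutting down: %s", error)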
Example #1
Source File: WopiValidatorExecutor.py From wopi-validator-cli-python with MIT License
def get_wopi_test_endpoint(wopi_discovery_service_url):
    logging.info("WOPI Discovery Service Url: " + wopi_discovery_service_url)
    discovery_service_response = requests.get(wopi_discovery_service_url)

    try:
        discovery_service_response.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        print(Fore.RED + "Failed to retrieve WOPI Discovery Service XML: Check Logs for more information")
        logging.critical("Failed to retrieve WOPI Discovery Service XML - HTTP ErrorCode: %s", exception.Code)
        sys.exit(1)

    try:
        discovery_xml = ElementTree.fromstring(discovery_service_response.content)
        wopi_test_endpoint_url = discovery_xml.find(WOPITESTAPPLICATION_NODE_PATH).attrib[
            WOPITESTAPPLICATION_URLSRC_ATTRIBUTE]
    except Exception as exception:
        print(Fore.RED + "Failed to parse WOPI Discovery Service XML: Check Logs for more information")
        logging.critical("Failed to parse WOPI Discovery Service XML - Exception Details: %s", exception)
        sys.exit(1)

    return wopi_test_endpoint_url[:wopi_test_endpoint_url.find('?')]
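A note on the two logging.critical() calls above: the logging module interpolates extra positional arguments into %-style placeholders in the message string. The original calls passed an argument with no matching placeholder, so it was silently dropped from the output; the "%s" placeholders above are the usual fix. A minimal sketch of the pattern, where status_code is a hypothetical stand-in for exception.Code:

import logging

status_code = 503  # hypothetical value for illustration
# Interpolation is deferred: the argument is formatted only if the record is emitted.
logging.critical("Failed to retrieve WOPI Discovery Service XML - HTTP ErrorCode: %s", status_code)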
Example #2
Source File: bot.py From wukong-itchat with MIT License
def convert_mp3_to_wav(mp3_path):
    """ Convert an mp3 file to wav.
    :param mp3_path: path of the mp3 file
    :returns: path of the wav file
    """
    target = mp3_path.replace(".mp3", ".wav")
    if not os.path.exists(mp3_path):
        logging.critical("File error {}".format(mp3_path))
        return None
    AudioSegment.from_mp3(mp3_path).export(target, format="wav")
    return target
Example #3
Source File: chclient.py From clickhouse-mysql-data-reader with MIT License
def verify_connection_settings(self, connection_settings):
    if not connection_settings:
        logging.critical("Need CH connection settings")
        sys.exit(0)

    if 'host' not in connection_settings:
        logging.critical("Need CH host in connection settings")
        sys.exit(0)

    if not connection_settings['host']:
        logging.critical("Need CH host in connection settings")
        sys.exit(0)

    if 'port' not in connection_settings:
        logging.critical("Need CH port in connection settings")
        sys.exit(0)

    if not connection_settings['port']:
        logging.critical("Need CH port in connection settings")
        sys.exit(0)

    # self.client = CHClient(connection_settings)
    # self.client.execute(sql, rows)
Example #4
Source File: unicorn_binance_websocket_api_connection.py From unicorn-binance-websocket-api with MIT License
async def __aexit__(self, *args, **kwargs):
    try:
        await self._conn.__aexit__(*args, **kwargs)
    except AttributeError as error_msg:
        logging.error("binance_websocket_api_connection->__aexit__(*args, **kwargs): "
                      "AttributeError - " + str(error_msg))
    except websockets.exceptions.ConnectionClosed as error_msg:
        logging.critical("binance_websocket_api_connection->__aexit__(*args, **kwargs): "
                         "ConnectionClosed - " + str(error_msg))
        self.handler_binance_websocket_api_manager.stream_is_stopping(self.stream_id)
        if self.handler_binance_websocket_api_manager.is_stop_request(self.stream_id) is False and \
                self.handler_binance_websocket_api_manager.is_stop_as_crash_request is False:
            self.handler_binance_websocket_api_manager.set_restart_request(self.stream_id)
        sys.exit(1)
Example #5
Source File: unicorn_binance_websocket_api_manager.py From unicorn-binance-websocket-api with MIT License
def _start_monitoring_api_thread(self, host, port, warn_on_update):
    """ Threaded method that serves the monitoring API

    :param host: IP or hostname to use
    :type host: str
    :param port: Port to use
    :type port: int
    :param warn_on_update: Should the monitoring system report available updates?
    :type warn_on_update: bool
    """
    logging.info("Starting monitoring API service ...")
    app = Flask(__name__)

    @app.route('/')
    @app.route('/status/')
    def redirect_to_wiki():
        logging.debug("Visit https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/UNICORN-"
                      "Monitoring-API-Service for further information!")
        return redirect("https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/"
                        "UNICORN-Monitoring-API-Service", code=302)

    api = Api(app)
    api.add_resource(BinanceWebSocketApiRestServer,
                     "/status/<string:statusformat>/",
                     "/status/<string:statusformat>/<string:checkcommandversion>",
                     resource_class_kwargs={'handler_binance_websocket_api_manager': self,
                                            'warn_on_update': warn_on_update})
    try:
        dispatcher = wsgi.PathInfoDispatcher({'/': app})
        self.monitoring_api_server = wsgi.WSGIServer((host, port), dispatcher)
        self.monitoring_api_server.start()
    except RuntimeError as error_msg:
        logging.critical("Monitoring API service is going down! - Info: " + str(error_msg))
    except OSError as error_msg:
        logging.critical("Monitoring API service is going down! - Info: " + str(error_msg))
Example #6
Source File: unicorn_binance_websocket_api_manager.py From unicorn-binance-websocket-api with MIT License
def stream_is_crashing(self, stream_id, error_msg=False):
    """ If a stream cannot heal itself because of a wrong parameter (wrong market, channel type), it calls this method

    :param stream_id: id of a stream
    :type stream_id: uuid
    :param error_msg: Error msg to add to the stream status!
    :type error_msg: str
    """
    logging.critical("BinanceWebSocketApiManager->stream_is_crashing(" + str(stream_id) + ")")
    self.stream_list[stream_id]['has_stopped'] = time.time()
    self.stream_list[stream_id]['status'] = "crashed"
    if error_msg:
        self.stream_list[stream_id]['status'] += " - " + str(error_msg)
Example #7
Source File: unicorn_binance_websocket_api_connection.py From unicorn-binance-websocket-api with MIT License
async def receive(self):
    self.handler_binance_websocket_api_manager.set_heartbeat(self.stream_id)
    try:
        received_data_json = await self.handler_binance_websocket_api_manager.websocket_list[self.stream_id].recv()
        try:
            if self.handler_binance_websocket_api_manager.restart_requests[self.stream_id]['status'] == "restarted":
                self.handler_binance_websocket_api_manager.increase_reconnect_counter(self.stream_id)
                del self.handler_binance_websocket_api_manager.restart_requests[self.stream_id]
        except KeyError:
            pass
        if received_data_json is not None:
            size = sys.getsizeof(received_data_json)
            self.handler_binance_websocket_api_manager.increase_processed_receives_statistic(self.stream_id)
            self.handler_binance_websocket_api_manager.add_total_received_bytes(size)
            self.handler_binance_websocket_api_manager.increase_received_bytes_per_second(self.stream_id, size)
        return received_data_json
    except RuntimeError as error_msg:
        logging.debug("binance_websocket_api_connection->receive(" + str(self.stream_id) +
                      ") - RuntimeError - error_msg: " + str(error_msg))
        sys.exit(1)
    except ssl.SSLError as error_msg:
        logging.debug("binance_websocket_api_connection->receive(" + str(self.stream_id) +
                      ") - ssl.SSLError - error_msg: " + str(error_msg))
    except KeyError as error_msg:
        logging.debug("binance_websocket_api_connection->receive(" + str(self.stream_id) +
                      ") - KeyError - error_msg: " + str(error_msg))
        self.handler_binance_websocket_api_manager.stream_is_stopping(self.stream_id)
        if self.handler_binance_websocket_api_manager.is_stop_request(self.stream_id) is False:
            self.handler_binance_websocket_api_manager.set_restart_request(self.stream_id)
        sys.exit(1)
    except asyncio.base_futures.InvalidStateError as error_msg:
        logging.critical("binance_websocket_api_connection->receive(" + str(self.stream_id) +
                         ") - asyncio.base_futures.InvalidStateError - error_msg: " + str(error_msg) +
                         " - Extra info: https://github.com/oliver-zehentleitner/unicorn-binance-"
                         "websocket-api/issues/18 - open an own issue if needed!")
        self.handler_binance_websocket_api_manager.stream_is_stopping(self.stream_id)
        if self.handler_binance_websocket_api_manager.is_stop_request(self.stream_id) is False:
            self.handler_binance_websocket_api_manager.set_restart_request(self.stream_id)
        sys.exit(1)
Example #8
Source File: object_utils.py From ffn with Apache License 2.0
def load_equivalences(paths):
    """Loads equivalences from a text file.

    Args:
      paths: sequence of paths to the text files of equivalences;
        id0,id1 per line, or id0,id1,x,y,z.

    Returns:
      NX graph object representing the equivalences
    """
    equiv_graph = nx.Graph()

    for path in paths:
        with open(path, "r") as f:
            reader = pd.read_csv(
                f, sep=",", engine="c", comment="#", chunksize=4096, header=None)
            for chunk in reader:
                if len(chunk.columns) not in (2, 5):
                    logging.critical("Unexpected # of columns (%d), want 2 or 5",
                                     len(chunk.columns))
                edges = chunk.values[:, :2]
                equiv_graph.add_edges_from(edges)

    return equiv_graph
Example #9
Source File: throttler.py From rucio with Apache License 2.0
def __schedule_requests():
    """
    Schedule requests
    """
    try:
        throttler_mode = config_core.get('throttler', 'mode', default='DEST_PER_ACT', use_cache=False)
        direction, all_activities = get_parsed_throttler_mode(throttler_mode)
        result_dict = __get_request_stats(all_activities, direction)
        if direction == 'destination' or direction == 'source':
            for rse_id in result_dict:
                rse_name = result_dict[rse_id]['rse']
                availability = get_rse(rse_id).availability
                # dest_rse is not blacklisted for write or src_rse is not blacklisted for read
                if (direction == 'destination' and availability & 2) or (direction == 'source' and availability & 4):
                    if all_activities:
                        __release_all_activities(result_dict[rse_id], direction, rse_name, rse_id)
                    else:
                        __release_per_activity(result_dict[rse_id], direction, rse_name, rse_id)
    except Exception:
        logging.critical("Failed to schedule requests, error: %s" % (traceback.format_exc()))
Example #10
Source File: transmogrifier.py From rucio with Apache License 2.0
def _retrial(func, *args, **kwargs):
    """
    Retrial method
    """
    delay = 0
    while True:
        try:
            return func(*args, **kwargs)
        except DataIdentifierNotFound as error:
            logging.warning(error)
            return 1
        except DatabaseException as error:
            logging.error(error)
            if exp(delay) > 600:
                logging.error('Cannot execute %s after %i attempts. Failing the job.' % (func.__name__, delay))
                raise
            else:
                logging.error('Failure to execute %s. Retrial will be done in %d seconds' % (func.__name__, exp(delay)))
                time.sleep(exp(delay))
                delay += 1
        except Exception:
            exc_type, exc_value, exc_traceback = exc_info()
            logging.critical(''.join(format_exception(exc_type, exc_value, exc_traceback)).strip())
            raise
Example #11
Source File: localexecution.py From benchexec with Apache License 2.0
def run(self):
    while not STOPPED_BY_INTERRUPT:
        try:
            currentRun = _Worker.working_queue.get_nowait()
        except queue.Empty:
            return

        try:
            logging.debug('Executing run "%s"', currentRun.identifier)
            self.execute(currentRun)
            logging.debug('Finished run "%s"', currentRun.identifier)
        except SystemExit as e:
            logging.critical(e)
        except BenchExecException as e:
            logging.critical(e)
        except BaseException:
            logging.exception("Exception during run execution")
        self.run_finished_callback()
        _Worker.working_queue.task_done()
Example #12
Source File: baseexecutor.py From benchexec with Apache License 2.0
def _wait_for_process(self, pid, name):
    """Wait for the given process to terminate.
    @return tuple of exit code and resource usage
    """
    try:
        logging.debug("Waiting for process %s with pid %s", name, pid)
        unused_pid, exitcode, ru_child = os.wait4(pid, 0)
        return exitcode, ru_child
    except OSError as e:
        if self.PROCESS_KILLED and e.errno == errno.EINTR:
            # Interrupted system call seems always to happen
            # if we killed the process ourselves after Ctrl+C was pressed.
            # We can try again to get exitcode and resource usage.
            logging.debug(
                "OSError %s while waiting for termination of %s (%s): %s.",
                e.errno,
                name,
                pid,
                e.strerror,
            )
            try:
                unused_pid, exitcode, ru_child = os.wait4(pid, 0)
                return exitcode, ru_child
            except OSError:
                pass  # original error will be handled and this ignored

        logging.critical(
            "OSError %s while waiting for termination of %s (%s): %s.",
            e.errno,
            name,
            pid,
            e.strerror,
        )
        return 0, None
Example #13
Source File: runexecutor.py From benchexec with Apache License 2.0
def _setup_cgroup_memory_limit(self, memlimit, cgroups, pid_to_kill):
    """Start memory-limit handler.
    @return None or the memory-limit handler for calling cancel()
    """
    if memlimit is not None:
        try:
            oomThread = oomhandler.KillProcessOnOomThread(
                cgroups=cgroups,
                pid_to_kill=pid_to_kill,
                callbackFn=self._set_termination_reason,
            )
            oomThread.start()
            return oomThread
        except OSError as e:
            logging.critical(
                "OSError %s during setup of OomEventListenerThread: %s.",
                e.errno,
                e.strerror,
            )
    return None
Example #14
Source File: test_ace.py From ACE with Apache License 2.0
def tearDown(self, *args, **kwargs):
    if self.cli_process is not None:
        try:
            self.cli_process.terminate()
            self.cli_process.wait(5)
        except TimeoutExpired:
            try:
                self.cli_process.kill()
                self.cli_process.wait(5)
            except Exception as e:
                logging.critical("cannot stop subprocess {}: {}".format(self.cli_process, e))

        if self.cli_process.returncode != 0:
            self.fail("subprocess {} returned exit code {}".format(' '.join(self.cli_args),
                                                                   self.cli_process.returncode))

    if self.stdout_reader_thread is not None:
        self.stdout_reader_thread.join(5)
        if self.stdout_reader_thread.is_alive():
            logging.error("reader thread not stopping...")

    if self.stderr_reader_thread is not None:
        self.stderr_reader_thread.join(5)
        if self.stderr_reader_thread.is_alive():
            logging.error("reader thread not stopping...")
Example #15
Source File: __init__.py From ACE with Apache License 2.0
def stop_threaded_execution(self):
    if not self.is_threaded:
        return

    logging.info("stopping threaded execution for {}".format(self))
    self.threaded_execution_stop_event.set()

    start = datetime.datetime.now()
    while True:
        self.threaded_execution_thread.join(5)
        if not self.threaded_execution_thread.is_alive():
            break

        logging.error("thread {} is not stopping".format(self.threaded_execution_thread))

        # have we been waiting for a really long time?
        if (datetime.datetime.now() - start).total_seconds() >= saq.EXECUTION_THREAD_LONG_TIMEOUT:
            logging.critical("execution thread {} is failing to stop - process dying".format(
                self.threaded_execution_thread))
            # suicide
            os._exit(1)

    logging.debug("threaded execution module {} has stopped ({})".format(self, self.threaded_execution_thread))
Example #16
Source File: __init__.py From ACE with Apache License 2.0
def create_analysis(self, observable):
    """Initializes and adds the generated Analysis for this module to the given Observable.
    Returns the generated Analysis."""
    # have we already created analysis for this observable?
    if self.generated_analysis_type is None:
        logging.critical("called create_analysis on {} which does not actually create Analysis".format(self))
        return None

    analysis = observable.get_analysis(self.generated_analysis_type)
    if analysis:
        logging.debug("returning existing analysis {} in call to create analysis from {} for {}".format(
            analysis, self, observable))
        return analysis

    # otherwise we create and initialize a new one
    analysis = self.generated_analysis_type()
    analysis.initialize_details()
    observable.add_analysis(analysis)
    return analysis

# XXX this is not supported at all
Example #17
Source File: url.py From ACE with Apache License 2.0
def verify_environment(self):
    self.verify_config_exists('whitelist_path')
    self.verify_path_exists(self.config['whitelist_path'])
    self.verify_config_exists('regex_path')
    self.verify_path_exists(self.config['regex_path'])
    self.verify_config_exists('blacklist_path')
    self.verify_path_exists(self.config['blacklist_path'])
    self.verify_config_exists('uncommon_network_threshold')
    self.verify_config_exists('user-agent')
    self.verify_config_exists('timeout')
    self.verify_config_exists('max_download_size')
    self.verify_config_exists('max_file_name_length')
    self.verify_config_exists('cooldown_period')
    self.verify_config_exists('update_brocess')
    self.verify_config_exists('proxies')

    for name in self.config['proxies'].split(','):
        if name == 'GLOBAL':
            continue

        if 'proxy_{}'.format(name) not in saq.CONFIG:
            logging.critical("invalid proxy name {} in crawlphish config".format(name))
Example #18
Source File: validate_config.py From borgmatic with GNU General Public License v3.0
def main():  # pragma: no cover
    args = parse_arguments(*sys.argv[1:])
    logging.basicConfig(level=logging.INFO, format='%(message)s')

    config_filenames = tuple(collect.collect_config_filenames(args.config_paths))
    if len(config_filenames) == 0:
        logger.critical('No files to validate found')
        sys.exit(1)

    found_issues = False
    for config_filename in config_filenames:
        try:
            validate.parse_configuration(config_filename, validate.schema_filename())
        except (ValueError, OSError, validate.Validation_error) as error:
            logging.critical('{}: Error parsing configuration file'.format(config_filename))
            logging.critical(error)
            found_issues = True

    if found_issues:
        sys.exit(1)
    else:
        logger.info(
            'All given configuration files are valid: {}'.format(', '.join(config_filenames))
        )
Example #19
Source File: jwauth.py From pyArango with Apache License 2.0
def __get_auth_token(self):
    request_data = '{"username":"%s","password":"%s"}' % (self.username, self.password)
    for connection_url in self.urls:
        try:
            response = self.session.post('%s/_open/auth' % connection_url, data=request_data)
            if response.ok:
                json_data = response.content
                if json_data:
                    data_dict = json_mod.loads(json_data.decode("utf-8"))
                    return data_dict.get('jwt')
        except requests_exceptions.ConnectionError:
            if connection_url is not self.urls[-1]:
                logging.critical("Unable to connect to %s, trying another", connection_url)
            else:
                logging.critical("Unable to connect to any of the urls: %s", self.urls)
                raise
Example #20
Source File: test_plugin.py From exopy with BSD 3-Clause "New" or "Revised" License
def test_handling_crash_of_watchdog(instr_workbench, caplog):
    """Test that we can close even if the observer fails to join.

    """
    instr_workbench.register(InstrContributor1())

    # Test starting
    p = instr_workbench.get_plugin('exopy.instruments')

    o = p._observer
    j = o.join

    def false_join():
        import logging
        logging.critical('Crash')
        raise RuntimeError()

    o.join = false_join

    p.stop()
    j()

    assert any(r.levelname == 'CRITICAL' for r in caplog.records)
Example #21
Source File: events.py From botoflow with Apache License 2.0
def swf_event_to_object(event_dict):
    """
    takes an event dictionary from botocore and converts it
    into a specific event instance.
    """
    try:
        event_class = _event_type_name_to_class[event_dict['eventType']]
    except KeyError:
        # we cannot guarantee we do the right thing in the decider if there's an unsupported event type.
        logging.critical("Event type '%s' is not implemented. Cannot continue processing decisions!",
                         event_dict['eventType'])
        raise NotImplementedError(
            "Event type '{}' is not implemented. Cannot continue processing decisions!".format(
                event_dict['eventType']))

    return event_class(event_dict['eventId'], event_dict['eventTimestamp'],
                       event_dict[event_class.attribute_key])
Example #22
Source File: heater.py From PyCNC with MIT License
def run(self):
    """ Thread worker implementation. There is a loop for PID control. """
    last_error = None
    while True:
        self._mutex.acquire()
        if not self._is_run:
            break
        try:
            current_temperature = self._measure()
        except (IOError, OSError):
            self._control(0)
            if last_error is None:
                last_error = time.time()
            else:
                if time.time() - last_error > self.SENSOR_TIMEOUT_S:
                    logging.critical("No data from temperature sensor."
                                     " Stop heating.")
                    break
            continue
        last_error = None
        self._current_power = self._pid.update(current_temperature) * 100
        self._control(self._current_power)
        self._mutex.release()
        time.sleep(self.LOOP_INTERVAL_S)
Example #23
Source File: train.py From dket with GNU General Public License v3.0
def _validate_params(self, params):
    decay_steps = params[self.DECAY_STEPS_PK]
    if decay_steps <= 0:
        msg = '{} must be a positive integer.'.format(self.DECAY_STEPS_PK)
        logging.critical(msg)
        raise ValueError(msg)

    decay_rate = params[self.DECAY_RATE_PK]
    if decay_rate <= 0.0 or decay_rate > 1.0:
        msg = '{} must be a float between 0.0 and 1.0'.format(self.DECAY_RATE_PK)
        logging.critical(msg)
        raise ValueError(msg)

    logging.debug('decay rate: %f', decay_rate)
    logging.debug('decay steps: %d', decay_steps)
    logging.debug('staircase: %s', str(params[self.STAIRCASE_PK]))
    return params
Example #24
Source File: train.py From dket with GNU General Public License v3.0
def _validate_params(self, params):
    min_value = params[self.MIN_VALUE_PK]
    max_value = params[self.MAX_VALUE_PK]

    msg = '{} min value cannot be `None`.'
    if min_value is None:
        msg = msg.format(self.MIN_VALUE_PK)
        logging.critical(msg)
        raise ValueError(msg)
    if max_value is None:
        msg = msg.format(self.MAX_VALUE_PK)
        logging.critical(msg)
        raise ValueError(msg)
    if min_value >= max_value:
        msg = '{} should be less than {}, found {} and {} instead.'\
            .format(self.MIN_VALUE_PK, self.MAX_VALUE_PK, min_value, max_value)
        logging.critical(msg)
        raise ValueError(msg)
    return params
Example #25
Source File: Logger.py From URS with MIT License
def log_login(function):
    def wrapper(parser, reddit):
        print("\nLogging in...")

        try:
            function(parser, reddit)
            logging.info("Successfully logged in as u/%s." % reddit.user.me())
            logging.info("")
        except PrawcoreException as error:
            Titles.Titles.p_title(error)
            logging.critical("LOGIN FAILED.")
            logging.critical("PRAWCORE EXCEPTION: %s.\n" % error)
            parser.exit()

    return wrapper

### Wrapper for logging rate limit errors.
Example #26
Source File: ansistrm.py From ACE with Apache License 2.0
def main():
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(ColorizingStreamHandler())
    logging.debug('DEBUG')
    logging.info('INFO')
    logging.warning('WARNING')
    logging.error('ERROR')
    logging.critical('CRITICAL')
Example #27
Source File: basic02.py From Python24 with MIT License
def test01():
    """Logging can record the order in which a user visits different pages, which is useful for analyzing user preferences."""
    # logging only needs to be configured once; there are 5 levels, from low to high:
    # DEBUG     debugging information
    # INFO      general information
    # WARNING   warnings, e.g. "only 100 SMS messages left to send"
    # ERROR     error information
    # CRITICAL  a very serious problem, e.g. "the disk is almost full"

    # The level here is WARNING, so only records with level >= WARNING are emitted
    logging.basicConfig(level=logging.WARNING,
                        filename="./log.txt",  # write to a file instead of the terminal
                        filemode="w",
                        format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')

    # start using the log functions
    logging.debug('this is a logging debug message')
    logging.info('this is a logging info message')
    logging.warning('this is a logging warning message')
    logging.error('this is a logging error message')
    logging.critical('this is a logging critical message')

    # asctime filename lineno levelname message
    # 2019-02-25 20:15:40,053 - basic02.py[line:24] - WARNING: this is a logging warning message
    # 2019-02-25 20:15:40,053 - basic02.py[line:25] - ERROR: this is a logging error message
    # 2019-02-25 20:15:40,053 - basic02.py[line:26] - CRITICAL: this is a logging critical message
Example #28
Source File: gevent_session.py From pyArango with Apache License 2.0
def _run(self, req):
    """Run the request."""
    if not self.use_jwt_authentication and self.verify is not None:
        if isinstance(self.verify, CA_Certificate):
            req.kwargs['verify'] = self.verify.get_file_path()
        else:
            req.kwargs['verify'] = self.verify

    for _ in range(self.max_retries):
        gevent.joinall([gevent.spawn(req.send)])
        if self.use_jwt_authentication:
            if hasattr(req, 'exception'):
                logging.critical("%s is raised, will try to reset the auth and request again.", req.exception)
                self.__reset_auth()
            elif req.response.status_code == 401:
                logging.critical("Invalid authentication token provided, will try to reset the auth and request again.")
                self.__reset_auth()
            else:
                return req.response
        else:
            if hasattr(req, 'exception'):
                logging.critical("%s is raised, will try to request again", req.exception)
            elif req.response.status_code == 401:
                logging.critical("Unauthorized access, you must supply a (username, password) with the correct credentials")
            else:
                return req.response

    logging.critical("Tried to send the request max number of times.")
    return req.response
Example #29
Source File: sspy.py From ssmanager with MIT License
def _start_process(self):
    if self._print_ss_log:
        output = None  # inherited from self
    else:
        output = DEVNULL
    args = [self._ss_bin, '--manager-address', self._manager_addr, '-s', '127.0.1.2', '-p', '0']
    self._ss_proc = Popen(args, stdout=output, stderr=output)

    self._sock = socket(AF_UNIX, SOCK_DGRAM)
    self._sock.bind(self._client_addr)

    # Wait for ssserver to start.
    connected = False
    for t in 0.01, 0.1, 0.2, 0.4, 0.8, 1, 2, 4:
        time.sleep(t)
        try:
            self._sock.connect(self._manager_addr)
        except (FileNotFoundError, ConnectionRefusedError):
            pass
        else:
            connected = True
            break

    if not connected:
        logging.critical('Cannot connect to ssserver process on %s.', self._manager_addr)
        raise SSServerConnectionError()

    if not self._recv_thread.is_alive():
        self._recv_thread = Thread(target=self._receiving, daemon=True)
        self._recv_thread.start()

    self._send('remove: {"server": "127.0.1.2"}')
    logging.info('Shadowsocks process started.')
Example #30
Source File: utils.py From courseraprogramming with Apache License 2.0
def set_logging_level(args):
    "Computes and sets the logging level from the parsed arguments."
    root_logger = logging.getLogger()
    level = logging.INFO
    logging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)

    if "verbose" in args and args.verbose is not None:
        logging.getLogger('requests.packages.urllib3').setLevel(0)  # Unset
        if args.verbose > 1:
            level = 5  # "Trace" level
        elif args.verbose > 0:
            level = logging.DEBUG
        else:
            logging.critical("verbose is an unexpected value. (%s) exiting.", args.verbose)
            sys.exit(2)
    elif "quiet" in args and args.quiet is not None:
        if args.quiet > 1:
            level = logging.ERROR
        elif args.quiet > 0:
            level = logging.WARNING
        else:
            logging.critical("quiet is an unexpected value. (%s) exiting.", args.quiet)

    if level is not None:
        root_logger.setLevel(level)

    if args.silence_urllib3:
        # See: https://urllib3.readthedocs.org/en/latest/security.html
        requests.packages.urllib3.disable_warnings()