Python logging.config.get() Examples
The following are 30 code examples of logging.config.get(), collected from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out the other functions and classes available in the logging.config module.
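
Before diving into the examples, here is a minimal, self-contained sketch of the pattern most of them build on: reading a setting with configparser's get() and applying a dict-based logging configuration via logging.config.dictConfig(). The section and option names below are made up for illustration.

import logging
import logging.config
import configparser

# Hypothetical INI content; real projects would load this from a file.
cp = configparser.ConfigParser()
cp.read_string("[log]\nlevel = INFO\n")

logging.config.dictConfig({
    'version': 1,
    'handlers': {'console': {'class': 'logging.StreamHandler'}},
    'root': {'handlers': ['console'], 'level': cp.get('log', 'level')},
})
logging.getLogger(__name__).info("logging configured")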
Example #1
Source File: assembly.py From assembly with MIT License
def _get_action_endpoint(action):
    """
    Return the endpoint based on the view's action
    :param action:
    :return:
    """
    _endpoint = None
    if inspect.ismethod(action) and hasattr(action, "_rule_cache"):
        rc = action._rule_cache
        if rc:
            k = list(rc.keys())[0]
            rules = rc[k]
            len_rules = len(rules)
            if len_rules == 1:
                rc_kw = rules[0][1]
                _endpoint = rc_kw.get("endpoint", None)
                if not _endpoint:
                    _endpoint = _make_routename_from_endpoint(action)
            elif len_rules > 1:
                _prefix = _make_routename_from_endpoint(action)
                for r in Assembly._app.url_map.iter_rules():
                    if ('GET' in r.methods or 'POST' in r.methods) and _prefix in r.endpoint:
                        _endpoint = r.endpoint
                        break
    return _endpoint
Example #2
Source File: config.py From ckanext-extractor with GNU Affero General Public License v3.0
def get(setting):
    """
    Get configuration setting.

    ``setting`` is the setting without the ``ckanext.extractor.`` prefix.

    Handles defaults and transformations.
    """
    setting = 'ckanext.extractor.' + setting
    value = config.get(setting, DEFAULTS[setting])
    for transformation in TRANSFORMATIONS[setting]:
        value = transformation(value)
    return value

# Adapted from ckanext-archiver
Example #3
Source File: mapreduce.py From edx-analytics-pipeline with GNU Affero General Public License v3.0
def __init__(self, libjars_in_hdfs=None, input_format=None):
    libjars_in_hdfs = libjars_in_hdfs or []
    config = configuration.get_config()
    streaming_jar = config.get('hadoop', 'streaming-jar', '/tmp/hadoop-streaming.jar')
    if config.has_section('job-conf'):
        job_confs = dict(config.items('job-conf'))
    else:
        job_confs = {}
    super(MapReduceJobRunner, self).__init__(
        streaming_jar,
        input_format=input_format,
        libjars_in_hdfs=libjars_in_hdfs,
        jobconfs=job_confs,
    )
Example #4
Source File: worker.py From SecPi with GNU General Public License v3.0
def got_init_config(self, ch, method, properties, body):
    logging.info("Received initial config %r" % (body))
    if self.corr_id == properties.correlation_id:  # we got the right config
        try:
            # TODO: add check if response is empty...
            new_conf = json.loads(body)
            new_conf["rabbitmq"] = config.get("rabbitmq")
        except Exception as e:
            logging.exception("Wasn't able to read JSON config from manager:\n%s" % e)
            time.sleep(60)  # sleep for X seconds and then ask again
            self.fetch_init_config()
            return

        logging.info("Trying to apply config and reconnect")
        self.apply_config(new_conf)
        self.connection_cleanup()
        self.connect()  # hope this is the right spot
        logging.info("Initial config activated")
        self.start()
    else:
        logging.info("This config isn't meant for us")

# Create a zip of all the files which were collected while actions were executed
Example #5
Source File: convert_torch_onnx.py From yolo2-pytorch with GNU Lesser General Public License v3.0
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    height, width = tuple(map(int, config.get('image', 'size').split()))
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), anchors, len(category))
    inference = model.Inference(config, dnn, anchors)
    inference.eval()
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in inference.state_dict().values())))
    dnn.load_state_dict(state_dict)
    image = torch.autograd.Variable(torch.randn(args.batch_size, 3, height, width), volatile=True)
    path = model_dir + '.onnx'
    logging.info('save ' + path)
    torch.onnx.export(dnn, image, path, export_params=True, verbose=args.verbose)  # PyTorch's bug
Example #6
Source File: detect.py From yolo2-pytorch with GNU Lesser General Public License v3.0
def __init__(self, args, config):
    self.args = args
    self.config = config
    self.cache_dir = utils.get_cache_dir(config)
    self.model_dir = utils.get_model_dir(config)
    self.category = utils.get_category(config, self.cache_dir if os.path.exists(self.cache_dir) else None)
    self.draw_bbox = utils.visualize.DrawBBox(self.category, colors=args.colors, thickness=args.thickness)
    self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    self.height, self.width = tuple(map(int, config.get('image', 'size').split()))
    self.path, self.step, self.epoch = utils.train.load_model(self.model_dir)
    state_dict = torch.load(self.path, map_location=lambda storage, loc: storage)
    self.dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), self.anchors, len(self.category))
    self.dnn.load_state_dict(state_dict)
    self.inference = model.Inference(config, self.dnn, self.anchors)
    self.inference.eval()
    if torch.cuda.is_available():
        self.inference.cuda()
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.inference.state_dict().values())))
    self.cap = self.create_cap()
    self.keys = set(args.keys)
    self.resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    self.transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    self.transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
Example #7
Source File: analyzer.py From quay with Apache License 2.0
def _analyze_recursively(self, layer, force_parents=False):
    # Check if there is a parent layer that needs to be analyzed.
    if layer.parent_id and (
        force_parents or layer.parent.security_indexed_engine < self._target_version
    ):
        try:
            base_query = get_image_with_storage_and_parent_base()
            parent_layer = base_query.where(Image.id == layer.parent_id).get()
        except Image.DoesNotExist:
            logger.warning(
                "Image %s has Image %s as parent but doesn't exist.", layer.id, layer.parent_id
            )
            raise AnalyzeLayerException("Parent image not found")

        self._analyze_recursively(parent_layer, force_parents=force_parents)

    # Analyze the layer itself.
    self._analyze(layer, force_parents=force_parents)
Example #8
Source File: logsetup.py From psyplot with GNU General Public License v2.0
def _get_home():
    """Find user's home directory if possible.
    Otherwise, returns None.

    :see:
        http://mail.python.org/pipermail/python-list/2005-February/325395.html

    This function is copied from matplotlib version 1.4.3, Jan 2016
    """
    try:
        if six.PY2 and sys.platform == 'win32':
            path = os.path.expanduser(b"~").decode(sys.getfilesystemencoding())
        else:
            path = os.path.expanduser("~")
    except ImportError:
        # This happens on Google App Engine (pwd module is not present).
        pass
    else:
        if os.path.isdir(path):
            return path
    for evar in ('HOME', 'USERPROFILE', 'TMP'):
        path = os.environ.get(evar)
        if path is not None and os.path.isdir(path):
            return path
    return None
Example #9
Source File: eval.py From yolo2-pytorch with GNU Lesser General Public License v3.0
def get_loader(self):
    paths = [os.path.join(self.cache_dir, phase + '.pkl') for phase in self.config.get('eval', 'phase').split()]
    dataset = utils.data.Dataset(utils.data.load_pickles(paths))
    logging.info('num_examples=%d' % len(dataset))
    size = tuple(map(int, self.config.get('image', 'size').split()))
    try:
        workers = self.config.getint('data', 'workers')
    except configparser.NoOptionError:
        workers = multiprocessing.cpu_count()
    collate_fn = utils.data.Collate(
        transform.parse_transform(self.config, self.config.get('transform', 'resize_eval')),
        [size],
        transform_image=transform.get_transform(self.config, self.config.get('transform', 'image_test').split()),
        transform_tensor=transform.get_transform(self.config, self.config.get('transform', 'tensor').split()),
    )
    return torch.utils.data.DataLoader(dataset, batch_size=self.args.batch_size, num_workers=workers, collate_fn=collate_fn)
Example #10
Source File: utils.py From mead-baseline with Apache License 2.0
def remove_extra_keys(config, keys=KEYS):
    """Remove config items that don't affect the model.

    We base most things off of the sha1 hash of the model configs, but there
    is a problem: some things in the config file don't affect the model, such
    as the name of the `conll_output` file or whether you are using `visdom`
    reporting. This strips out those kinds of things so that as long as the
    model parameters match, the sha1 will too.

    :param config: dict, The json data.
    :param keys: Set[Tuple[str]], The keys to remove.

    :returns: dict, The config with certain keys removed.
    """
    c = deepcopy(config)
    for key in keys:
        x = c
        for k in key[:-1]:
            x = x.get(k)
            if x is None:
                break
        else:
            _ = x.pop(key[-1], None)
    return c
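
A hypothetical usage sketch, assuming the function above is in scope; the config contents and the ('conll_output',) key path are made up for illustration:

config = {
    'model': {'hsz': 100},
    'conll_output': 'results.conll',  # reporting-only, doesn't affect the model
}
clean = remove_extra_keys(config, keys={('conll_output',)})
# clean == {'model': {'hsz': 100}}, so the sha1 fingerprint ignores the output file name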
Example #11
Source File: demo_lr.py From yolo2-pytorch with GNU Lesser General Public License v3.0
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    category = utils.get_category(config)
    anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config), anchors, len(category))
    inference = model.Inference(config, dnn, anchors)
    inference.train()
    optimizer = eval(config.get('train', 'optimizer'))(filter(lambda p: p.requires_grad, inference.parameters()), args.learning_rate)
    scheduler = eval(config.get('train', 'scheduler'))(optimizer)
    for epoch in range(args.epoch):
        scheduler.step(epoch)
        lr = scheduler.get_lr()
        print('\t'.join(map(str, [epoch] + lr)))
Example #12
Source File: utils.py From mead-baseline with Apache License 2.0
def order_json(data):
    """Sort json to a consistent order.

    When you hash json that has the same content but in different orders, you
    get different fingerprints.

        In:  hashlib.sha1(json.dumps({'a': 12, 'b': 14}).encode('utf-8')).hexdigest()
        Out: '647aa7508f72ece3f8b9df986a206d95fd9a2caf'

        In:  hashlib.sha1(json.dumps({'b': 14, 'a': 12}).encode('utf-8')).hexdigest()
        Out: 'a22215982dc0e53617be08de7ba9f1a80d232b23'

    This function sorts json by key so that hashes are consistent.

    Note:
        In our configs we only have lists where the order doesn't matter, so
        we can sort them for consistency. If we ever add a config field whose
        order matters, this will need to be refactored.

    :param data: dict, The json data.

    :returns:
        collections.OrderedDict: The data in a consistent order (keys sorted
        alphabetically).
    """
    new = OrderedDict()
    for (key, value) in sorted(data.items(), key=lambda x: x[0]):
        if isinstance(value, dict):
            value = order_json(value)
        new[key] = value
    return new
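
A quick sketch of the fix in action, reusing the inputs from the docstring above (hashlib and json are standard library):

import hashlib
import json

a = order_json({'b': 14, 'a': 12})
b = order_json({'a': 12, 'b': 14})
# Once ordered, both serialize identically, so the fingerprints match
# (both should equal the first hash shown in the docstring).
assert json.dumps(a) == json.dumps(b)
print(hashlib.sha1(json.dumps(a).encode('utf-8')).hexdigest())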
Example #13
Source File: train.py From yolo2-pytorch with GNU Lesser General Public License v3.0
def __init__(self, env):
    super(SummaryWorker, self).__init__()
    self.env = env
    self.config = env.config
    self.queue = multiprocessing.Queue()
    try:
        self.timer_scalar = utils.train.Timer(env.config.getfloat('summary', 'scalar'))
    except configparser.NoOptionError:
        self.timer_scalar = lambda: False
    try:
        self.timer_image = utils.train.Timer(env.config.getfloat('summary', 'image'))
    except configparser.NoOptionError:
        self.timer_image = lambda: False
    try:
        self.timer_histogram = utils.train.Timer(env.config.getfloat('summary', 'histogram'))
    except configparser.NoOptionError:
        self.timer_histogram = lambda: False
    with open(os.path.expanduser(os.path.expandvars(env.config.get('summary_histogram', 'parameters'))), 'r') as f:
        self.histogram_parameters = utils.RegexList([line.rstrip() for line in f])
    self.draw_bbox = utils.visualize.DrawBBox(env.category)
    self.draw_feature = utils.visualize.DrawFeature()
Example #14
Source File: train.py From yolo2-pytorch with GNU Lesser General Public License v3.0
def run(self):
    self.writer = SummaryWriter(os.path.join(self.env.model_dir, self.env.args.run))
    try:
        height, width = tuple(map(int, self.config.get('image', 'size').split()))
        tensor = torch.randn(1, 3, height, width)
        step, epoch, dnn = self.env.load()
        self.writer.add_graph(dnn, (torch.autograd.Variable(tensor),))
    except:
        traceback.print_exc()
    while True:
        name, kwargs = self.queue.get()
        if name is None:
            break
        func = getattr(self, 'summary_' + name)
        try:
            func(**kwargs)
        except:
            traceback.print_exc()
Example #15
Source File: train.py From yolo2-pytorch with GNU Lesser General Public License v3.0
def get_loader(self):
    paths = [os.path.join(self.cache_dir, phase + '.pkl') for phase in self.config.get('train', 'phase').split()]
    dataset = utils.data.Dataset(
        utils.data.load_pickles(paths),
        transform=transform.augmentation.get_transform(self.config, self.config.get('transform', 'augmentation').split()),
        one_hot=None if self.config.getboolean('train', 'cross_entropy') else len(self.category),
        shuffle=self.config.getboolean('data', 'shuffle'),
        dir=os.path.join(self.model_dir, 'exception'),
    )
    logging.info('num_examples=%d' % len(dataset))
    try:
        workers = self.config.getint('data', 'workers')
        if torch.cuda.is_available():
            workers = workers * torch.cuda.device_count()
    except configparser.NoOptionError:
        workers = multiprocessing.cpu_count()
    collate_fn = utils.data.Collate(
        transform.parse_transform(self.config, self.config.get('transform', 'resize_train')),
        utils.train.load_sizes(self.config),
        maintain=self.config.getint('data', 'maintain'),
        transform_image=transform.get_transform(self.config, self.config.get('transform', 'image_train').split()),
        transform_tensor=transform.get_transform(self.config, self.config.get('transform', 'tensor').split()),
        dir=os.path.join(self.model_dir, 'exception'),
    )
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=self.args.batch_size * torch.cuda.device_count() if torch.cuda.is_available() else self.args.batch_size,
        shuffle=True,
        num_workers=workers,
        collate_fn=collate_fn,
        pin_memory=torch.cuda.is_available(),
    )
Example #16
Source File: utils.py From mead-baseline with Apache License 2.0
def unflatten(dictionary, sep: str = "."):
    """Turn a flattened dict into a nested dict.

    :param dictionary: The dict to unflatten.
    :param sep: This character represents a nesting level in a flattened key.

    :returns: The nested dict.
    """
    nested = {}
    for k, v in dictionary.items():
        keys = k.split(sep)
        it = nested
        for key in keys[:-1]:
            # If key is in `it` we get the value otherwise we get a new dict.
            # .setdefault will also set the new dict to the value of `it[key]`.
            # Assigning to `it` will move us a step deeper into the nested dict.
            it = it.setdefault(key, {})
        it[keys[-1]] = v
    return nested
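
A short usage sketch; the flattened keys below are hypothetical:

nested = unflatten({"optim.lr": 0.001, "optim.mom": 0.9, "model": "lstm"})
# nested == {"optim": {"lr": 0.001, "mom": 0.9}, "model": "lstm"}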
Example #17
Source File: jlog.py From falsy with MIT License
def trace(self, kwargs):
    exc_type, exc_value, exc_traceback = sys.exc_info()
    stack = traceback.extract_tb(exc_traceback)
    lines = []
    for i, s in enumerate(stack):
        filename = s.filename
        l = len(filename)
        shortfile = kwargs.get('shortfile', 40)
        if l > shortfile:
            filename = filename[filename.find('/', l - shortfile):]
        line = '%-40s:%-4s %s' % (
            blue() + filename,
            yellow() + str(s.lineno),
            '|' + '-' * (i * 4) + cyan() + s.name + ':' + red() + s.line)
        lines.append(line)
    lines = '\n\t'.join(lines)
    kwargs['extra'] = {
        'trace': magenta() + str(exc_type) + ' ' + bold() + magenta() + str(exc_value) + '\n\t' + lines}
Example #18
Source File: manager.py From xqueue-watcher with GNU Affero General Public License v3.0
def enable_codejail(self, codejail_config):
    """
    Enable codejail for the process.

    codejail_config is a dict like this:
        {
            "name": "python",
            "bin_path": "/path/to/python",
            "user": "sandbox_username",
            "limits": {
                "CPU": 1,
                ...
            }
        }

    limits are optional
    user defaults to the current user
    """
    name = codejail_config["name"]
    bin_path = codejail_config['bin_path']
    user = codejail_config.get('user', getpass.getuser())
    jail_code.configure(name, bin_path, user=user)
    limits = codejail_config.get("limits", {})
    # Use a distinct loop variable so `name` (logged and returned below)
    # isn't clobbered by the last limit name.
    for limit_name, value in limits.items():
        jail_code.set_limit(limit_name, value)
    self.log.info("configured codejail -> %s %s %s", name, bin_path, user)
    return name
Example #19
Source File: prt.py From Plex-Remote-Transcoder with MIT License
def et_get(node, attrib, default=None):
    if node is not None:
        return node.attrib.get(attrib, default)
    return default
Example #20
Source File: config.py From ckanext-extractor with GNU Affero General Public License v3.0
def is_field_indexed(field):
    """
    Check if a metadata field is configured to be indexed.
    """
    return _any_match(field.lower(), get('indexed_fields'))
Example #21
Source File: prt.py From Plex-Remote-Transcoder with MIT License
def build_env(host=None):
    # TODO: This really should be done in a way that is specific to the target
    # in the case that the target is a different architecture than the host
    ffmpeg_path = os.environ.get("FFMPEG_EXTERNAL_LIBS", "")
    backslashcheck = re.search(r'\\', ffmpeg_path)
    if backslashcheck is not None:
        ffmpeg_path_fixed = ffmpeg_path.replace('\\', '')
        os.environ["FFMPEG_EXTERNAL_LIBS"] = str(ffmpeg_path_fixed)
    envs = ["export %s=%s" % (k, pipes.quote(v)) for k, v in os.environ.items()]
    envs.append("export PRT_ID=%s" % uuid.uuid1().hex)
    return ";".join(envs)

# def check_gracenote_tmp():
Example #22
Source File: ansible_runner_service.py From ansible-runner-service with Apache License 2.0
def get_mode():
    """ get the runtime mode """
    # set the mode based on where this is running from
    if os.path.dirname(__file__).startswith("/usr"):
        return 'prod'
    else:
        return 'dev'
Example #23
Source File: ansible_runner_service.py From ansible-runner-service with Apache License 2.0
def setup_logging():
    """ Setup logging """
    logging_config = configuration.settings.logging_conf
    pfx = configuration.settings.log_path

    if os.path.exists(logging_config):
        try:
            config = yaml.safe_load(fread(logging_config))
        except yaml.YAMLError as _e:
            print("ERROR: logging configuration error: {}".format(_e))
            sys.exit(12)

        fname = config.get('handlers').get('file_handler')['filename']
        full_path = os.path.join(pfx, fname)
        config.get('handlers').get('file_handler')['filename'] = full_path

        logging.config.dictConfig(config)
        logging.info("Loaded logging configuration from "
                     "{}".format(logging_config))
    else:
        logging.basicConfig(level=logging.DEBUG)
        logging.warning("Logging configuration file ({}) not found, using "
                        "basic logging".format(logging_config))
Example #24
Source File: __init__.py From autosuspend with GNU General Public License v2.0
def get_notify_and_suspend_func(config: configparser.ConfigParser) -> Callable:
    return functools.partial(
        notify_and_suspend,
        config.get("general", "suspend_cmd"),
        config.get(
            "general",  # type: ignore # python/typeshed#2093
            "notify_cmd_wakeup",
            fallback=None,
        ),
        config.get(
            "general",  # type: ignore # python/typeshed#2093
            "notify_cmd_no_wakeup",
            fallback=None,
        ),
    )
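
Since several examples on this page depend on it, here is a minimal standalone sketch of configparser's fallback behavior; the section and option values below are made up:

import configparser

cp = configparser.ConfigParser()
cp.read_string("[general]\nsuspend_cmd = systemctl suspend\n")
print(cp.get("general", "suspend_cmd"))                       # 'systemctl suspend'
print(cp.get("general", "notify_cmd_wakeup", fallback=None))  # None: option is absent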
Example #25
Source File: logger.py From OpenDeep with Apache License 2.0
def delete_root_logger():
    """
    Deletes the root logger (returned from get_root_logger()). This removes all
    existing handlers for the logger, which effectively renders it useless.
    """
    # get rid of all the existing handlers - effectively renders the logger useless
    root_logger = get_root_logger()
    while root_logger.handlers:
        root_logger.handlers.pop()
Example #26
Source File: prt.py From Plex-Remote-Transcoder with MIT License
def printf(message, *args, **kwargs):
    color = kwargs.get('color')
    attrs = kwargs.get('attrs')
    sys.stdout.write(colored(message % args, color, attrs=attrs))
    sys.stdout.flush()
Example #27
Source File: config.py From cointrader with MIT License
def __init__(self, configfile=None):
    self.verbose = False
    self.market = "poloniex"
    self.api_key = None
    self.api_secret = None
    if configfile:
        logging.config.fileConfig(configfile.name)
        config = configparser.ConfigParser()
        config.readfp(configfile)
        exchange = config.get("DEFAULT", "exchange")
        self.api_key = config.get(exchange, "api_key")
        self.api_secret = config.get(exchange, "api_secret")
Example #28
Source File: InverterServer.py From Inverter-Data-Logger with GNU General Public License v3.0
def build_logger(self, config):  # Build logger
    """ Build logger for this program

    Args:
        config: configparser with settings from file
    """
    log_levels = dict(notset=0, debug=10, info=20, warning=30, error=40, critical=50)
    log_dict = {
        'version': 1,
        'formatters': {
            'f': {'format': '%(asctime)s %(levelname)s %(message)s'}
        },
        'handlers': {
            'none': {'class': 'logging.NullHandler'},
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'f'
            },
            'file': {
                'class': 'logging.FileHandler',
                'filename': InverterLib.expand_path(config.get('log', 'filename')),
                'formatter': 'f'
            },
        },
        'loggers': {
            'InverterLogger': {
                'handlers': config.get('log', 'type').split(','),
                'level': log_levels[config.get('log', 'level')]
            }
        }
    }
    logging.config.dictConfig(log_dict)
    self.logger = logging.getLogger('InverterLogger')
Example #29
Source File: InverterExport.py From Inverter-Data-Logger with GNU General Public License v3.0
def build_logger(self, config):  # Build logger
    """ Build logger for this program

    Args:
        config: configparser with settings from file
    """
    log_levels = dict(notset=0, debug=10, info=20, warning=30, error=40, critical=50)
    log_dict = {
        'version': 1,
        'formatters': {
            'f': {'format': '%(asctime)s %(levelname)s %(message)s'}
        },
        'handlers': {
            'none': {'class': 'logging.NullHandler'},
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'f'
            },
            'file': {
                'class': 'logging.FileHandler',
                'filename': InverterLib.expand_path(config.get('log', 'filename')),
                'formatter': 'f'
            },
        },
        'loggers': {
            'InverterLogger': {
                'handlers': config.get('log', 'type').split(','),
                'level': log_levels[config.get('log', 'level')]
            }
        }
    }
    logging.config.dictConfig(log_dict)
    self.logger = logging.getLogger('InverterLogger')
Example #30
Source File: utils.py From mead-baseline with Apache License 2.0
def print_dataset_info(dataset):
    logger.info("[train file]: {}".format(dataset['train_file']))
    logger.info("[valid file]: {}".format(dataset['valid_file']))
    if 'test_file' in dataset:
        logger.info("[test file]: {}".format(dataset['test_file']))
    vocab_file = dataset.get('vocab_file')
    if vocab_file is not None:
        logger.info("[vocab file]: {}".format(vocab_file))
    label_file = dataset.get('label_file')
    if label_file is not None:
        logger.info("[label file]: {}".format(label_file))