Python loguru.logger.debug() Examples
The following are 30 code examples of loguru.logger.debug(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module loguru.logger, or try the search function.
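As a quick orientation before the project examples, here is a minimal sketch of typical logger.debug() usage. The sink path, format string, rotation/retention values, and message are illustrative assumptions, not code from any example below.

import sys
from loguru import logger

# Replace the default sink so DEBUG records are shown on stderr.
logger.remove()
logger.add(sys.stderr, level="DEBUG", format="{time} | {level} | {message}")

# Optional file sink; "app.log" and the rotation/retention values are placeholders.
logger.add("app.log", level="DEBUG", rotation="10 MB", retention="7 days")

# loguru fills braces from positional or keyword arguments.
logger.debug("connected to {host}:{port}", host="localhost", port=5432)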
Example #1
Source File: run_deploy.py From keras-bert-ner with MIT License | 6 votes |
def log_init(log_path):
    log_file_path = os.path.join(log_path, "info.log")
    err_file_path = os.path.join(log_path, "error.log")
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    logger.add(sys.stderr, format="{time} {level} {message}", filter="my_module", level="INFO")
    logger.add(log_file_path, rotation="12:00", retention="14 days", encoding="utf-8")
    logger.add(err_file_path, rotation="100 MB", retention="14 days", encoding="utf-8", level="ERROR")
    logger.debug("logger initialized")
    return logger
Example #2
Source File: video.py From stagesepx with MIT License | 6 votes |
def load_frames(self):
    # TODO full frames list can be very huge, for some devices
    logger.info(f"start loading {self.path} to memory ...")

    data: typing.List[VideoFrame] = []
    with toolbox.video_capture(self.path) as cap:
        success, frame = cap.read()
        while success:
            frame_object = VideoFrame.init(cap, frame)
            data.append(frame_object)
            success, frame = cap.read()

    # calculate memory cost
    each_cost = data[0].data.nbytes
    logger.debug(f"single frame cost: {each_cost} bytes")
    total_cost = each_cost * self.frame_count
    logger.debug(f"total frame cost: {total_cost} bytes")
    logger.info(
        f"frames loaded. frame count: {self.frame_count}. memory cost: {total_cost} bytes"
    )

    # lock the order
    self.data = tuple(data)
    # fix the length ( the last frame may be broken sometimes )
    self.frame_count = len(data)
Example #3
Source File: video.py From stagesepx with MIT License | 6 votes |
def __init__(
    self,
    path: typing.Union[bytes, str, os.PathLike],
    pre_load: bool = None,
    fps: int = None,
    *_,
    **__,
):
    assert os.path.isfile(path), f"video [{path}] not existed"
    self.path: str = str(path)
    self.data: typing.Optional[typing.Tuple[VideoFrame]] = tuple()

    self.fps: int = fps
    if fps:
        video_path = os.path.join(tempfile.mkdtemp(), f"tmp_{fps}.mp4")
        logger.debug(f"convert video, and bind path to {video_path}")
        toolbox.fps_convert(fps, self.path, video_path, constants.FFMPEG)
        self.path = video_path

    with toolbox.video_capture(self.path) as cap:
        self.frame_count = toolbox.get_frame_count(cap)
        self.frame_size = toolbox.get_frame_size(cap)

    if pre_load:
        self.load_frames()
Example #4
Source File: __main__.py From sqlitebiter with MIT License | 6 votes |
def configure(ctx):
    """
    Configure the following application settings:

    (1) Default encoding to load files.
    (2) HTTP/HTTPS proxy server URI (for url sub-command).

    Configurations are written to '~/.sqlitebiter'.
    You can remove these settings by deleting '~/.sqlitebiter'.
    """

    initialize_logger("{:s} file".format(PROGRAM_NAME), ctx.obj[Context.LOG_LEVEL])

    logger.debug("{} configuration file existence: {}".format(PROGRAM_NAME, app_config_mgr.exists))

    sys.exit(app_config_mgr.configure())
Example #5
Source File: toolbox.py From stagesepx with MIT License | 6 votes |
def fps_convert(
    target_fps: int, source_path: str, target_path: str, ffmpeg_exe: str = None
) -> int:
    # for portable ffmpeg
    if not ffmpeg_exe:
        ffmpeg_exe = r"ffmpeg"
    command: typing.List[str] = [
        ffmpeg_exe,
        "-i",
        source_path,
        "-r",
        str(target_fps),
        target_path,
    ]
    logger.debug(f"convert video: {command}")
    return subprocess.check_call(command)
Example #6
Source File: toolbox.py From stagesepx with MIT License | 6 votes |
def match_template_with_object(
    template: np.ndarray,
    target: np.ndarray,
    engine_template_cv_method_name: str = None,
    **kwargs,
) -> typing.Dict[str, typing.Any]:
    # change the default method
    if not engine_template_cv_method_name:
        engine_template_cv_method_name = "cv2.TM_CCOEFF_NORMED"

    fi = FindIt(
        engine=["template"],
        engine_template_cv_method_name=engine_template_cv_method_name,
        **kwargs,
    )
    # load template
    fi_template_name = "default"
    fi.load_template(fi_template_name, pic_object=template)

    result = fi.find(target_pic_name="", target_pic_object=target, **kwargs)
    logger.debug(f"findit result: {result}")
    return result["data"][fi_template_name]["TemplateEngine"]
Example #7
Source File: reporter.py From stagesepx with MIT License | 6 votes |
def _draw_bar(result: ClassifierResult) -> Bar:
    # draw bar chart
    bar = Bar(init_opts=opts.InitOpts(bg_color=constants.BACKGROUND_COLOR))
    x_axis = sorted(list(result.get_stage_set()))
    y_axis = list()
    offset = result.get_offset()
    for each_stage_name in x_axis:
        ranges = result.get_specific_stage_range(each_stage_name)
        time_cost: float = 0.0
        for each in ranges:
            # last frame - first frame
            time_cost += each[-1].timestamp - each[0].timestamp + offset
        y_axis.append(time_cost)

    bar.add_xaxis(x_axis)
    bar.add_yaxis("time cost", y_axis)
    bar.set_global_opts(
        title_opts=opts.TitleOpts(title="Time Cost", subtitle="... of each stages"),
        toolbox_opts=opts.ToolboxOpts(is_show=True),
    )
    logger.debug(f"time cost: {dict(zip(x_axis, y_axis))}")
    return bar
Example #8
Source File: hook.py From stagesepx with MIT License | 6 votes |
def __init__(
    self,
    size: typing.Tuple[typing.Union[int, float], typing.Union[int, float]],
    offset: typing.Tuple[typing.Union[int, float], typing.Union[int, float]] = None,
    *_,
    **__,
):
    """
    init crop hook, (height, width)

    :param size:
    :param offset:
    :param _:
    :param __:
    """
    super().__init__(*_, **__)

    self.size = size
    self.offset = offset or (0, 0)

    logger.debug(f"size: {self.size}")
    logger.debug(f"offset: {self.offset}")
Example #9
Source File: svm.py From stagesepx with MIT License | 6 votes |
def save_model(self, model_path: str, overwrite: bool = None):
    """
    save trained model

    :param model_path:
    :param overwrite:
    :return:
    """
    logger.debug(f"save model to {model_path}")
    # assert model file
    if os.path.isfile(model_path) and not overwrite:
        raise FileExistsError(
            f"model file {model_path} already existed, you can set `overwrite` True to cover it"
        )
    # assert model data is not empty
    assert self._model, "model is empty"
    with open(model_path, "wb") as f:
        pickle.dump(self._model, f)
Example #10
Source File: svm.py From stagesepx with MIT License | 6 votes |
def load_model(self, model_path: str, overwrite: bool = None):
    """
    load trained model

    :param model_path:
    :param overwrite:
    :return:
    """
    logger.debug(f"load model from {model_path}")
    # assert model file
    assert os.path.isfile(model_path), f"model file {model_path} not existed"
    # assert model data is empty
    if self._model and not overwrite:
        raise RuntimeError(
            f"model is not empty, you can set `overwrite` True to cover it"
        )

    # joblib raise an error ( i have no idea about how to fix it ) here, so use pickle instead
    with open(model_path, "rb") as f:
        self._model = pickle.load(f)
Example #11
Source File: cut_result.py From stagesepx with MIT License | 6 votes |
def _prune(
    threshold: float,
    stages: typing.List[typing.Tuple[str, typing.List[VideoFrame]]],
) -> typing.List[typing.Tuple[str, typing.List[VideoFrame]]]:
    logger.debug(
        f"start pruning ranges, origin length is {len(stages)}, threshold is {threshold}"
    )
    after = list()
    for i in range(len(stages)):
        index, frames = stages[i]
        for j in range(i + 1, len(stages)):
            next_index, next_frames = stages[j]
            ssim_list = toolbox.multi_compare_ssim(frames, next_frames)
            min_ssim = min(ssim_list)
            logger.debug(f"compare {index} with {next_index}: {ssim_list}")
            if min_ssim > threshold:
                logger.debug(f"stage {index} has been pruned")
                break
        else:
            after.append(stages[i])
    return after
Example #12
Source File: cut_range.py From stagesepx with MIT License | 6 votes |
def pick(
    self, frame_count: int = None, is_random: bool = None, *_, **__
) -> typing.List[int]:
    if not frame_count:
        frame_count = 3
    logger.debug(
        f"pick {frame_count} frames "
        f"from {self.start}({self.start_time}) "
        f"to {self.end}({self.end_time}) "
        f"on video {self.video.path}"
    )
    result = list()
    if is_random:
        return random.sample(range(self.start, self.end), frame_count)

    length = self.get_length()

    # https://github.com/williamfzc/stagesepx/issues/37
    frame_count += 1

    for _ in range(1, frame_count):
        cur = int(self.start + length / frame_count * _)
        result.append(cur)
    return result
Example #13
Source File: Base_Logging.py From makemework with MIT License | 6 votes |
def write(self, msg, level='info'):
    "Write out a message"
    fname = inspect.stack()[2][3]  # May be use a entry-exit decorator instead
    d = {'caller_func': fname}
    if level.lower() == 'debug':
        logger.debug("{module} | {msg}", module=d['caller_func'], msg=msg)
    elif level.lower() == 'info':
        logger.info("{module} | {msg}", module=d['caller_func'], msg=msg)
    elif level.lower() == 'warn' or level.lower() == 'warning':
        logger.warning("{module} | {msg}", module=d['caller_func'], msg=msg)
    elif level.lower() == 'error':
        logger.error("{module} | {msg}", module=d['caller_func'], msg=msg)
    elif level.lower() == 'critical':
        logger.critical("{module} | {msg}", module=d['caller_func'], msg=msg)
    else:
        logger.critical("Unknown level passed for the msg: {}", msg)
Example #14
Source File: cmd.py From Projectors with GNU General Public License v3.0 | 6 votes |
def release(self):
    """Create a zipfile release with the current version number defined in bl_info dict in __init__.py"""
    # Builds dir
    builds = Path('.', 'builds')
    if not builds.exists():
        builds.mkdir()

    # Extract the version number from the __init__.py file.
    regex = r"\"version\":\s*(\(\d\,\s*\d\,\s*\d\))"
    with Path('__init__.py').open('r') as f:
        string = f.read().replace("\n", '')
        match = re.findall(regex, string, re.MULTILINE)[0]
        log.debug(match)
        log.info(f'Create release version: {match}')

    postfix = '.'.join([str(x) for x in eval(match)])

    # Zip all needed file into a realease.
    zip_file = builds / f'Projectors {postfix}.zip'
    with zipfile.ZipFile(zip_file, 'w') as zf:
        for f in Path('.').glob('*.py'):
            zf.write(f)
        zf.write('README.md')
        zf.write('LICENSE')

    return f'A realease zipfile was created: {zip_file}'
Example #15
Source File: utils.py From fitch with MIT License | 6 votes |
def is_device_connected(device_id: str):
    """ return True if device connected, else return False """
    _ADB = config.ADB_EXECUTOR
    try:
        device_name = subprocess.check_output(
            [_ADB, "-s", device_id, "shell", "getprop", "ro.product.model"]
        )
        device_name = (
            device_name.decode(config.DEFAULT_CHARSET)
            .replace("\n", "")
            .replace("\r", "")
        )
        logger.debug("Device [{}] available".format(device_name))
    except subprocess.CalledProcessError:
        return False
    return True
Example #16
Source File: device.py From fitch with MIT License | 6 votes |
def screen_shot(self, save_to: str = None) -> str:
    """ screen shot and return its path (YOU SHOULD REMOVE IT BY YOURSELF!) """
    self.mnc.screen_shot()

    # save to specific place
    if save_to:
        if os.path.isdir(save_to):
            pic_name = "{}.png".format(uuid.uuid1())
            final_path = os.path.join(save_to, pic_name)
        else:
            final_path = save_to
    # use tempfile
    else:
        temp_pic = tempfile.NamedTemporaryFile("w+", delete=False, suffix=".png")
        temp_pic_name = temp_pic.name
        final_path = temp_pic_name

    self.mnc.export_screen(final_path)
    logger.debug("Screenshot saved in [{}]".format(final_path))
    return final_path
Example #17
Source File: conftest.py From httprunner with Apache License 2.0 | 6 votes |
def session_fixture(request):
    """setup and teardown each task"""
    total_testcases_num = request.node.testscollected
    testcases = []
    for item in request.node.items:
        testcase = {
            "name": item.cls.config.name,
            "path": item.cls.config.path,
            "node_id": item.nodeid,
        }
        testcases.append(testcase)

    logger.debug(f"collected {total_testcases_num} testcases: {testcases}")

    yield

    logger.debug(f"teardown task fixture")
    # teardown task
    # TODO: upload task summary
Example #18
Source File: values.py From vivarium with GNU General Public License v3.0 | 6 votes |
def _register_value_producer(self, value_name: str, source: Callable,
                             preferred_combiner: Callable, preferred_post_processor: Callable):
    """Configure the named value pipeline with a source, combiner, and post-processor."""
    logger.debug(f"Registering value pipeline {value_name}")
    pipeline = self._pipelines[value_name]
    if pipeline.source:
        raise DynamicValueError(f'A second component is attempting to set the source for pipeline {value_name} '
                                f'with {source}, but it already has a source: {pipeline.source}.')
    pipeline.name = value_name
    pipeline.source = source
    pipeline.combiner = preferred_combiner
    pipeline.post_processor = preferred_post_processor
    pipeline.manager = self
    return pipeline
Example #19
Source File: keras.py From stagesepx with MIT License | 6 votes |
def load_model(self, model_path: str, overwrite: bool = None):
    """
    load trained model

    :param model_path:
    :param overwrite:
    :return:
    """
    logger.debug(f"load model from {model_path}")
    # assert model file
    assert os.path.isfile(model_path), f"model file {model_path} not existed"
    # assert model data is empty
    if self._model and not overwrite:
        raise RuntimeError(
            f"model is not empty, you can set `overwrite` True to cover it"
        )
    self._model = self.create_model()
    self._model.load_weights(model_path)
Example #20
Source File: ssim.py From stagesepx with MIT License | 6 votes |
def _classify_frame(
    self, frame: VideoFrame, threshold: float = None, *_, **__
) -> str:
    if not threshold:
        threshold = 0.85

    result = list()
    for each_stage_name, each_stage_pic_list in self.read():
        each_result = list()
        for target_pic in each_stage_pic_list:
            # apply hooks
            target_pic = self._apply_hook(VideoFrame(-1, -1.0, target_pic))
            target_pic = target_pic.data

            each_pic_ssim = toolbox.compare_ssim(frame.data, target_pic)
            each_result.append(each_pic_ssim)
        ssim = max(each_result)
        result.append((each_stage_name, ssim))
        logger.debug(f"stage [{each_stage_name}]: {ssim}")

    result = max(result, key=lambda x: x[1])
    if result[1] < threshold:
        logger.debug("not a known stage, set it -1")
        result = ("-1", result[1])
    return result[0]
Example #21
Source File: config.py From ThreatIngestor with GNU General Public License v2.0 | 5 votes |
def sources(self):
    """Return a list of (name, Source class, {kwargs}) tuples.

    :raises: threatingestor.exceptions.PluginError
    """
    sources = []

    for source in self.config['sources']:
        kwargs = {}
        for key, value in source.items():
            if key not in INTERNAL_OPTIONS:
                kwargs[key] = value

            elif key == 'credentials':
                # Grab these named credentials
                credential_name = value
                for credential_key, credential_value in self.credentials(credential_name).items():
                    if credential_key != NAME:
                        kwargs[credential_key] = credential_value

        # load and initialize the plugin
        logger.debug(f"Found source '{source[NAME]}'")
        sources.append((source[NAME], self._load_plugin(SOURCE, source['module']), kwargs))

    logger.debug(f"Found {len(sources)} total sources")
    return sources
Example #22
Source File: cut_range.py From stagesepx with MIT License | 5 votes |
def __init__(
    self,
    # TODO why can it be a dict?
    video: typing.Union[VideoObject, typing.Dict],
    start: int,
    end: int,
    # TODO need refactored ?
    ssim: typing.List[float],
    mse: typing.List[float],
    psnr: typing.List[float],
    start_time: float,
    end_time: float,
):
    if isinstance(video, dict):
        self.video = VideoObject(**video)
    else:
        self.video = video

    self.start = start
    self.end = end
    self.ssim = ssim
    self.mse = mse
    self.psnr = psnr
    self.start_time = start_time
    self.end_time = end_time

    # if length is 1
    # https://github.com/williamfzc/stagesepx/issues/9
    if start > end:
        self.start, self.end = self.end, self.start
        self.start_time, self.end_time = self.end_time, self.start_time

    logger.debug(
        f"new a range: {self.start}({self.start_time}) - {self.end}({self.end_time})"
    )
Example #23
Source File: api.py From stagesepx with MIT License | 5 votes |
def keras_train(
    train_data_path: str,
    model_path: str,
    # options
    epochs: int = 10,
    target_size: str = "600x800",
    overwrite: bool = False,
    **kwargs,
):
    from stagesepx.classifier.keras import KerasClassifier

    # handle args
    target_size: typing.Sequence[int] = [int(each) for each in target_size.split("x")]
    cl = KerasClassifier(
        # number of epochs
        epochs=epochs,
        # keep the dataset resolution consistent
        target_size=target_size,
        **kwargs,
    )
    cl.train(train_data_path)

    # file existed
    while os.path.isfile(model_path):
        logger.warning(f"file {model_path} already existed")
        model_path = f"{uuid.uuid4()}.h5"
        logger.debug(f"trying to save it to {model_path}")
    cl.save_model(model_path, overwrite=overwrite)
Example #24
Source File: cmd.py From Projectors with GNU General Public License v3.0 | 5 votes |
def test(self, versions_dir=None):
    """ This function allows running the test suite agains different version of Blender.
    !!MacOS only!!
    """
    versions_dir = versions_dir if versions_dir else blender_versions_dir
    binaries = blender_binaries(versions_dir)

    # 1) Mimic the Blender User Script directory.
    # 2) Copy the addon into the temporally created structure.
    # 3) Use the BLENDER_USER_SCRIPTS environment variable to point Blender to the created scripts directory.
    with tempfile.TemporaryDirectory() as tempdir:
        tempdir = Path(tempdir)
        addon_dir = tempdir / 'scripts' / 'addons' / 'Projectors'
        addon_dir.mkdir(parents=True)
        scripts_dir = addon_dir.parent.parent

        # Copy addon into temp dir.
        copy_tree(str(Path(__file__).parent), str(addon_dir))

        # Set the environment variable to the temp scripts dir.
        os.environ['BLENDER_USER_SCRIPTS'] = str(scripts_dir)
        log.debug(
            f'BLENDER_USER_SCRIPTS: {os.environ.get("BLENDER_USER_SCRIPTS")}')

        # Run the tests against all Blender versions.
        for name, path in binaries.items():
            print('\n' * 3)
            log.info(f'Testing against: {name}')
            print('==' * 50)
            subprocess.run([str(path.resolve()), '--addons', 'Projectors',
                            '--factory-startup', '-noaudio', '-b', '-P', 'tests.py'])

    log.debug(f'Temp dir { tempdir } was deleted: {not tempdir.exists()}')
    return 'Finished Testing'
Example #25
Source File: board.py From seagull with MIT License | 5 votes |
def clear(self):
    """Clear the board and remove all lifeforms"""
    logger.debug("Board cleared!")
    self.state = np.zeros(self.size, dtype=bool)
Example #26
Source File: __init__.py From ThreatIngestor with GNU General Public License v2.0 | 5 votes |
def run(self):
    """Run once, or forever, depending on config."""
    if self.config.daemon():
        logger.debug("Running forever, in a loop")
        self.run_forever()
    else:
        logger.debug("Running once, to completion")
        with self.statsd.timer('run_once'):
            self.run_once()
Example #27
Source File: detector.py From fitch with MIT License | 5 votes |
def detect(
    template: typing.Sequence, target: typing.Union[str, np.ndarray]
) -> typing.Dict[str, typing.List]:
    """ return a point list """
    if isinstance(target, str):
        f = fi_client.get_target_point_with_path
    elif isinstance(target, np.ndarray):
        f = fi_client.get_target_point_with_object
    else:
        raise TypeError(f"error type of target: {type(target)}")

    result = f(target, template, threshold=config.CV_THRESHOLD)
    logger.debug("Detect result: {}".format(json.dumps(result)))
    return dict(zip(template, result))
Example #28
Source File: device.py From fitch with MIT License | 5 votes |
def start(self, simple_mode: bool = None):
    """ start device """
    self.mnc = MNCDevice(self.device_id)
    self.toolkit = PYAToolkit(self.device_id)

    if not simple_mode:
        self.player = ActionPlayer(self.device_id)
        self.adb_utils = adb.device(serial=self.device_id)
    else:
        self.player = AdbPlayer(self.device_id)

    logger.debug("FDevice [{}] started".format(self.device_id))
Example #29
Source File: device.py From fitch with MIT License | 5 votes |
def stop(self):
    """ stop device, and clean up """
    self.player and self.player.stop()
    self.mnc = None
    self.player = None
    self.toolkit = None

    logger.debug("FDevice [{}] stopped".format(self.device_id))
Example #30
Source File: __init__.py From ThreatIngestor with GNU General Public License v2.0 | 5 votes |
def run_once(self):
    """Run each source once, passing artifacts to each operator."""
    # Track some statistics about artifacts in a summary object.
    summary = collections.Counter()

    for source in self.sources:
        # Run the source to collect artifacts.
        logger.debug(f"Running source '{source}'")
        try:
            with self.statsd.timer(f'source.{source}'):
                saved_state, artifacts = self.sources[source].run(self.statedb.get_state(source))
        except Exception:
            self.statsd.incr(f'error.source.{source}')
            logger.exception(f"Unknown error in source '{source}'")
            continue

        # Save the source state.
        self.statedb.save_state(source, saved_state)

        # Process artifacts with each operator.
        for operator in self.operators:
            logger.debug(f"Processing {len(artifacts)} artifacts from source '{source}' with operator '{operator}'")
            try:
                with self.statsd.timer(f'operator.{operator}'):
                    self.operators[operator].process(artifacts)
            except Exception:
                self.statsd.incr(f'error.operator.{operator}')
                logger.exception(f"Unknown error in operator '{operator}'")
                continue

        # Record stats and update the summary.
        types = artifact_types(artifacts)
        summary.update(types)
        for artifact_type in types:
            self.statsd.incr(f'source.{source}.{artifact_type}', types[artifact_type])
            self.statsd.incr(f'artifacts.{artifact_type}', types[artifact_type])

    # Log the summary.
    logger.log('NOTIFY', f"New artifacts: {dict(summary)}")