Python tensorpack.utils.logger.set_logger_dir() Examples
The following are 14 code examples of tensorpack.utils.logger.set_logger_dir(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorpack.utils.logger, or try the search function.
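Before the examples, a minimal usage sketch, assuming a recent tensorpack (the directory name 'train_log/my_experiment' is illustrative): set_logger_dir() points tensorpack's global logger, and the files written by its callbacks, at a single directory.

from tensorpack.utils import logger

# Send all tensorpack log output (plus a 'log.log' file) to one directory.
# action='k' keeps an existing directory; 'd' deletes it first; with no
# action, the function prompts interactively if the directory is nonempty.
logger.set_logger_dir('train_log/my_experiment', action='k')
logger.info('logging to {}'.format(logger.get_logger_dir()))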
Example #1
Source File: train.py From ADL with MIT License | 6 votes |
def main():
    args = get_args()
    nr_gpu = get_nr_gpu()
    args.batch_size = args.batch_size // nr_gpu
    model = Model(args)

    if args.evaluate:
        evaluate_wsol(args, model, interval=False)
        sys.exit()

    # ospj is an alias for os.path.join in this project
    logger.set_logger_dir(ospj('train_log', args.log_dir))

    config = get_config(model, args)
    if args.use_pretrained_model:
        config.session_init = get_model_loader(_CKPT_NAMES[args.arch_name])

    launch_train_with_config(config,
                             SyncMultiGPUTrainerParameterServer(nr_gpu))
    evaluate_wsol(args, model, interval=True)
Example #2
Source File: logger.py From dataflow with Apache License 2.0 | 5 votes |
def auto_set_dir(action=None, name=None):
    """
    Use :func:`logger.set_logger_dir` to set log directory to
    "./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running
    """
    mod = sys.modules['__main__']
    basename = os.path.basename(mod.__file__)
    auto_dirname = os.path.join('train_log', basename[:basename.rfind('.')])
    if name:
        auto_dirname += '_%s' % name if os.name == 'nt' else ':%s' % name
    set_logger_dir(auto_dirname, action=action)
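For context, a hypothetical call site for the helper above (the experiment name 'my_run' is illustrative; assuming the main script is train.py, on non-Windows systems the resulting directory would be ./train_log/train:my_run):

from tensorpack.utils import logger

# Derive the log directory from the running script's filename plus an
# optional suffix, then delegate to set_logger_dir(); action='k' keeps
# a pre-existing directory instead of prompting.
logger.auto_set_dir(action='k', name='my_run')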
Example #3
Source File: local_crawler.py From petridishnn with MIT License | 5 votes |
def local_crawler_main(
        auto_dir, nr_gpu, launch_log_dir,
        n_parallel=10000, num_init_use_all_gpu=2):
    """
    Args:
        auto_dir (str): dir to look in for xxx.sh to run
        nr_gpu (int): number of gpus on the local container
        launch_log_dir (str): where the launcher logs stuff and holds tmp scripts
        n_parallel (int): maximum number of parallel jobs
        num_init_use_all_gpu (int): number of init jobs that will use all gpus
    """
    logger.set_logger_dir(launch_log_dir, action='d')
    launcher = os.path.basename(os.path.normpath(launch_log_dir))
    crawl_local_auto_scripts_and_launch(
        auto_dir, nr_gpu, launcher, n_parallel, num_init_use_all_gpu)
Example #4
Source File: logger.py From petridishnn with MIT License | 5 votes |
def auto_set_dir(action=None, name=None):
    """
    Use :func:`logger.set_logger_dir` to set log directory to
    "./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running
    """
    mod = sys.modules['__main__']
    basename = os.path.basename(mod.__file__)
    auto_dirname = os.path.join(LOG_ROOT, basename[:basename.rfind('.')])
    if name:
        auto_dirname += '_%s' % name if os.name == 'nt' else ':%s' % name
    set_logger_dir(auto_dirname, action=action)
Example #5
Source File: logger.py From ADL with MIT License | 5 votes |
def auto_set_dir(action=None, name=None):
    """
    Use :func:`logger.set_logger_dir` to set log directory to
    "./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running
    """
    mod = sys.modules['__main__']
    basename = os.path.basename(mod.__file__)
    auto_dirname = os.path.join('train_log', basename[:basename.rfind('.')])
    if name:
        auto_dirname += '_%s' % name if os.name == 'nt' else ':%s' % name
    set_logger_dir(auto_dirname, action=action)
Example #6
Source File: train1.py From deep-voice-conversion with MIT License | 5 votes |
def train(args, logdir):
    # model
    model = Net1()

    # dataflow
    df = Net1DataFlow(hp.train1.data_path, hp.train1.batch_size)

    # set logger for event and model saver
    logger.set_logger_dir(logdir)

    session_conf = tf.ConfigProto(
        gpu_options=tf.GPUOptions(
            allow_growth=True,
        ),
    )

    train_conf = TrainConfig(
        model=model,
        data=QueueInput(df(n_prefetch=1000, n_thread=4)),
        callbacks=[
            ModelSaver(checkpoint_dir=logdir),
            # TODO EvalCallback()
        ],
        max_epoch=hp.train1.num_epochs,
        steps_per_epoch=hp.train1.steps_per_epoch,
        # session_config=session_conf
    )

    ckpt = '{}/{}'.format(logdir, args.ckpt) if args.ckpt else tf.train.latest_checkpoint(logdir)
    if ckpt:
        train_conf.session_init = SaverRestore(ckpt)

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        train_conf.nr_tower = len(args.gpu.split(','))

    trainer = SyncMultiGPUTrainerReplicated(hp.train1.num_gpu)

    launch_train_with_config(train_conf, trainer=trainer)
Example #7
Source File: logger.py From tensorpack with Apache License 2.0 | 5 votes |
def auto_set_dir(action=None, name=None):
    """
    Use :func:`logger.set_logger_dir` to set log directory to
    "./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running
    """
    mod = sys.modules['__main__']
    basename = os.path.basename(mod.__file__)
    auto_dirname = os.path.join('train_log', basename[:basename.rfind('.')])
    if name:
        auto_dirname += '_%s' % name if os.name == 'nt' else ':%s' % name
    set_logger_dir(auto_dirname, action=action)
Example #8
Source File: logger.py From dataflow with Apache License 2.0 | 4 votes |
def set_logger_dir(dirname, action=None):
    """
    Set the directory for global logging.

    Args:
        dirname(str): log directory
        action(str): an action of ["k", "d", "q"] to be performed
            when the directory exists. Will ask user by default.

            "d": delete the directory. Note that the deletion may fail when
            the directory is used by tensorboard.

            "k": keep the directory. This is useful when you resume from a
            previous training and want the directory to look as if the
            training was not interrupted.
            Note that this option does not load old models or any other
            old states for you. It simply does nothing.
    """
    dirname = os.path.normpath(dirname)
    global LOG_DIR, _FILE_HANDLER
    if _FILE_HANDLER:
        # unload and close the old file handler, so that we may safely delete the logger directory
        _logger.removeHandler(_FILE_HANDLER)
        del _FILE_HANDLER

    def dir_nonempty(dirname):
        # If directory exists and nonempty (ignore hidden files), prompt for action
        return os.path.isdir(dirname) and len([x for x in os.listdir(dirname) if x[0] != '.'])

    if dir_nonempty(dirname):
        if not action:
            _logger.warning("""\
Log directory {} exists! Use 'd' to delete it. """.format(dirname))
            _logger.warning("""\
If you're resuming from a previous run, you can choose to keep it.
Press any other key to exit. """)
        while not action:
            action = input("Select Action: k (keep) / d (delete) / q (quit):").lower().strip()
        act = action
        # 'b' (back up) and 'n' (new timestamped dir) are also accepted,
        # although the docstring does not list them.
        if act == 'b':
            backup_name = dirname + _get_time_str()
            shutil.move(dirname, backup_name)
            info("Directory '{}' backed up to '{}'".format(dirname, backup_name))   # noqa: F821
        elif act == 'd':
            shutil.rmtree(dirname, ignore_errors=True)
            if dir_nonempty(dirname):
                shutil.rmtree(dirname, ignore_errors=False)
        elif act == 'n':
            dirname = dirname + _get_time_str()
            info("Use a new log directory {}".format(dirname))   # noqa: F821
        elif act == 'k':
            pass
        else:
            raise OSError("Directory {} exists!".format(dirname))
    LOG_DIR = dirname
    from .fs import mkdir_p
    mkdir_p(dirname)
    _set_file(os.path.join(dirname, 'log.log'))
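One practical note on the action argument shown above: unattended jobs (e.g. under a cluster scheduler) have no usable stdin, so the interactive prompt would block; passing an explicit action sidesteps it. A sketch, with the directory name illustrative:

from tensorpack.utils import logger

# 'd' removes a stale directory so a fresh run never mixes its event
# files with an earlier run's; use 'k' instead when resuming training.
logger.set_logger_dir('train_log/batch_job', action='d')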
Example #9
Source File: train.py From parallel-wavenet-vocoder with MIT License | 4 votes |
def train(case='default', ckpt=None, gpu=None, r=False):
    '''
    :param case: experiment case name
    :param ckpt: checkpoint to load model
    :param gpu: comma separated list of GPU(s) to use
    :param r: start from the beginning.
    '''
    hp.set_hparam_yaml(case)
    if r:
        remove_all_files(hp.logdir)

    # model
    model = IAFVocoder(batch_size=hp.train.batch_size, length=hp.signal.length)

    # dataset
    dataset = Dataset(hp.data_path, hp.train.batch_size, length=hp.signal.length)
    print('dataset size is {}'.format(len(dataset.wav_files)))

    # set logger for event and model saver
    logger.set_logger_dir(hp.logdir)

    train_conf = TrainConfig(
        model=model,
        data=TFDatasetInput(dataset()),
        callbacks=[
            ModelSaver(checkpoint_dir=hp.logdir),
            RunUpdateOps()  # for batch norm, exponential moving average
            # TODO GenerateCallback()
        ],
        max_epoch=hp.train.num_epochs,
        steps_per_epoch=hp.train.steps_per_epoch,
    )

    ckpt = '{}/{}'.format(hp.logdir, ckpt) if ckpt else tf.train.latest_checkpoint(hp.logdir)
    if ckpt:
        train_conf.session_init = SaverRestore(ckpt)

    if gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, gpu))
        train_conf.nr_tower = len(gpu)

    if hp.train.num_gpu <= 1:
        trainer = SimpleTrainer()
    else:
        trainer = SyncMultiGPUTrainerReplicated(gpus=hp.train.num_gpu)

    launch_train_with_config(train_conf, trainer=trainer)
Example #10
Source File: train_tf.py From imgclsmob with MIT License | 4 votes |
def main(): """ Main body of script. """ args = parse_args() args.seed = init_rand(seed=args.seed) _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) logger.set_logger_dir(args.save_dir) batch_size = prepare_tf_context( num_gpus=args.num_gpus, batch_size=args.batch_size) net, inputs_desc = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), data_format=args.data_format) train_dataflow = get_data( is_train=True, batch_size=batch_size, data_dir_path=args.data_dir, input_image_size=net.image_size, resize_inv_factor=args.resize_inv_factor) val_dataflow = get_data( is_train=False, batch_size=batch_size, data_dir_path=args.data_dir, input_image_size=net.image_size, resize_inv_factor=args.resize_inv_factor) train_net( net=net, session_init=inputs_desc, batch_size=batch_size, num_epochs=args.num_epochs, train_dataflow=train_dataflow, val_dataflow=val_dataflow)
Example #11
Source File: logger.py From petridishnn with MIT License | 4 votes |
def set_logger_dir(dirname, action=None):
    """
    Set the directory for global logging.

    Args:
        dirname(str): log directory
        action(str): an action of ["k", "d", "q"] to be performed
            when the directory exists. Will ask user by default.

            "d": delete the directory. Note that the deletion may fail when
            the directory is used by tensorboard.

            "k": keep the directory. This is useful when you resume from a
            previous training and want the directory to look as if the
            training was not interrupted.
            Note that this option does not load old models or any other
            old states for you. It simply does nothing.
    """
    global LOG_ROOT, LOG_DIR, _FILE_HANDLER
    if _FILE_HANDLER:
        # unload and close the old file handler, so that we may safely delete the logger directory
        _logger.removeHandler(_FILE_HANDLER)
        del _FILE_HANDLER

    def dir_nonempty(dirname):
        # If directory exists and nonempty (ignore hidden files), prompt for action
        return os.path.isdir(dirname) and len([x for x in os.listdir(dirname) if x[0] != '.'])

    if dir_nonempty(dirname):
        if not action:
            _logger.warn("""\
Log directory {} exists! Use 'd' to delete it. """.format(dirname))
            _logger.warn("""\
If you're resuming from a previous run, you can choose to keep it.
Press any other key to exit. """)
        while not action:
            action = input("Select Action: k (keep) / d (delete) / q (quit):").lower().strip()
        act = action
        # 'b' (back up) and 'n' (new timestamped dir) are also accepted,
        # although the docstring does not list them.
        if act == 'b':
            backup_name = dirname + _get_time_str()
            shutil.move(dirname, backup_name)
            info("Directory '{}' backed up to '{}'".format(dirname, backup_name))   # noqa: F821
        elif act == 'd':
            shutil.rmtree(dirname, ignore_errors=True)
            if dir_nonempty(dirname):
                shutil.rmtree(dirname, ignore_errors=False)
        elif act == 'n':
            dirname = dirname + _get_time_str()
            info("Use a new log directory {}".format(dirname))   # noqa: F821
        elif act == 'k':
            pass
        else:
            raise OSError("Directory {} exists!".format(dirname))
    LOG_DIR = dirname
    from .fs import mkdir_p
    mkdir_p(dirname)
    _set_file(os.path.join(dirname, 'log.log'))
Example #12
Source File: logger.py From ADL with MIT License | 4 votes |
def set_logger_dir(dirname, action=None):
    """
    Set the directory for global logging.

    Args:
        dirname(str): log directory
        action(str): an action of ["k", "d", "q"] to be performed
            when the directory exists. Will ask user by default.

            "d": delete the directory. Note that the deletion may fail when
            the directory is used by tensorboard.

            "k": keep the directory. This is useful when you resume from a
            previous training and want the directory to look as if the
            training was not interrupted.
            Note that this option does not load old models or any other
            old states for you. It simply does nothing.
    """
    dirname = os.path.normpath(dirname)
    global LOG_DIR, _FILE_HANDLER
    if _FILE_HANDLER:
        # unload and close the old file handler, so that we may safely delete the logger directory
        _logger.removeHandler(_FILE_HANDLER)
        del _FILE_HANDLER

    def dir_nonempty(dirname):
        # If directory exists and nonempty (ignore hidden files), prompt for action
        return os.path.isdir(dirname) and len([x for x in os.listdir(dirname) if x[0] != '.'])

    if dir_nonempty(dirname):
        if not action:
            _logger.warning("""\
Log directory {} exists! Use 'd' to delete it. """.format(dirname))
            _logger.warning("""\
If you're resuming from a previous run, you can choose to keep it.
Press any other key to exit. """)
        while not action:
            action = input("Select Action: k (keep) / d (delete) / q (quit):").lower().strip()
        act = action
        # 'b' (back up) and 'n' (new timestamped dir) are also accepted,
        # although the docstring does not list them.
        if act == 'b':
            backup_name = dirname + _get_time_str()
            shutil.move(dirname, backup_name)
            info("Directory '{}' backed up to '{}'".format(dirname, backup_name))   # noqa: F821
        elif act == 'd':
            shutil.rmtree(dirname, ignore_errors=True)
            if dir_nonempty(dirname):
                shutil.rmtree(dirname, ignore_errors=False)
        elif act == 'n':
            dirname = dirname + _get_time_str()
            info("Use a new log directory {}".format(dirname))   # noqa: F821
        elif act == 'k':
            pass
        else:
            raise OSError("Directory {} exists!".format(dirname))
    LOG_DIR = dirname
    from .fs import mkdir_p
    mkdir_p(dirname)
    _set_file(os.path.join(dirname, 'log.log'))
Example #13
Source File: train2.py From deep-voice-conversion with MIT License | 4 votes |
def train(args, logdir1, logdir2):
    # model
    model = Net2()

    # dataflow
    df = Net2DataFlow(hp.train2.data_path, hp.train2.batch_size)

    # set logger for event and model saver
    logger.set_logger_dir(logdir2)

    # session_conf = tf.ConfigProto(
    #     gpu_options=tf.GPUOptions(
    #         allow_growth=True,
    #         per_process_gpu_memory_fraction=0.6,
    #     ),
    # )

    session_inits = []
    ckpt2 = '{}/{}'.format(logdir2, args.ckpt) if args.ckpt else tf.train.latest_checkpoint(logdir2)
    if ckpt2:
        session_inits.append(SaverRestore(ckpt2))
    ckpt1 = tf.train.latest_checkpoint(logdir1)
    if ckpt1:
        session_inits.append(SaverRestore(ckpt1, ignore=['global_step']))

    train_conf = TrainConfig(
        model=model,
        data=QueueInput(df(n_prefetch=1000, n_thread=4)),
        callbacks=[
            # TODO save on prefix net2
            ModelSaver(checkpoint_dir=logdir2),
            # ConvertCallback(logdir2, hp.train2.test_per_epoch),
        ],
        max_epoch=hp.train2.num_epochs,
        steps_per_epoch=hp.train2.steps_per_epoch,
        session_init=ChainInit(session_inits)
    )

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        train_conf.nr_tower = len(args.gpu.split(','))

    trainer = SyncMultiGPUTrainerReplicated(hp.train2.num_gpu)

    launch_train_with_config(train_conf, trainer=trainer)

# def get_cyclic_lr(step):
#     lr_margin = hp.train2.lr_cyclic_margin * math.sin(2. * math.pi / hp.train2.lr_cyclic_steps * step)
#     lr = hp.train2.lr + lr_margin
#     return lr
Example #14
Source File: logger.py From tensorpack with Apache License 2.0 | 4 votes |
def set_logger_dir(dirname, action=None):
    """
    Set the directory for global logging.

    Args:
        dirname(str): log directory
        action(str): an action of ["k", "d", "q"] to be performed
            when the directory exists. Will ask user by default.

            "d": delete the directory. Note that the deletion may fail when
            the directory is used by tensorboard.

            "k": keep the directory. This is useful when you resume from a
            previous training and want the directory to look as if the
            training was not interrupted.
            Note that this option does not load old models or any other
            old states for you. It simply does nothing.
    """
    dirname = os.path.normpath(dirname)
    global LOG_DIR, _FILE_HANDLER
    if _FILE_HANDLER:
        # unload and close the old file handler, so that we may safely delete the logger directory
        _logger.removeHandler(_FILE_HANDLER)
        del _FILE_HANDLER

    def dir_nonempty(dirname):
        # If directory exists and nonempty (ignore hidden files), prompt for action
        return os.path.isdir(dirname) and len([x for x in os.listdir(dirname) if x[0] != '.'])

    if dir_nonempty(dirname):
        if not action:
            _logger.warning("""\
Log directory {} exists! Use 'd' to delete it. """.format(dirname))
            _logger.warning("""\
If you're resuming from a previous run, you can choose to keep it.
Press any other key to exit. """)
        while not action:
            action = input("Select Action: k (keep) / d (delete) / q (quit):").lower().strip()
        act = action
        # 'b' (back up) and 'n' (new timestamped dir) are also accepted,
        # although the docstring does not list them.
        if act == 'b':
            backup_name = dirname + _get_time_str()
            shutil.move(dirname, backup_name)
            info("Directory '{}' backed up to '{}'".format(dirname, backup_name))   # noqa: F821
        elif act == 'd':
            shutil.rmtree(dirname, ignore_errors=True)
            if dir_nonempty(dirname):
                shutil.rmtree(dirname, ignore_errors=False)
        elif act == 'n':
            dirname = dirname + _get_time_str()
            info("Use a new log directory {}".format(dirname))   # noqa: F821
        elif act == 'k':
            pass
        else:
            raise OSError("Directory {} exists!".format(dirname))
    LOG_DIR = dirname
    from .fs import mkdir_p
    mkdir_p(dirname)
    _set_file(os.path.join(dirname, 'log.log'))