Python tensorboard_logger.configure() Examples
The following are 7 code examples of tensorboard_logger.configure().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the
module tensorboard_logger, or try the search function.
Example #1
Source File: LogMetric.py From nmp_qc with MIT License | 5 votes |
def __init__(self, log_dir):
    """Set up tensorboard logging under *log_dir* and reset the step counter.

    An existing directory of the same name has its previously logged
    data removed; a missing directory is created first.
    """
    if os.path.isdir(log_dir):
        # wipe data logged earlier under the same directory name
        self._remove(log_dir)
    else:
        # the directory does not exist yet, so create it
        os.makedirs(log_dir)
    # point tensorboard_logger at the (now clean) directory
    configure(log_dir)
    self.global_step = 0
Example #2
Source File: fit_harness.py From ibeis with Apache License 2.0 | 5 votes |
def run(harn):
    """Run the full fitting procedure for *harn* (a fit harness).

    Sequence: optional tensorboard setup, move the model to GPU/CPU,
    move class weights to the chosen device, build the optimizer with
    the scheduled learning rate, then loop train/validate/snapshot
    epochs until the harness signals termination.
    """
    harn.log('Begin training')
    if False:
        # Disabled idea: launch tensorboard ourselves.
        # TODO: can we run this as a subprocess that dies when we die?
        # or do we need to run externally?
        # tensorboard --logdir runs
        # http://aretha:6006
        pass
    if tensorboard_logger:
        # tensorboard_logger is presumably None when the optional import
        # failed elsewhere in the file — TODO confirm against module top.
        harn.log('Initializing tensorboard')
        tensorboard_logger.configure("runs/ibeis", flush_secs=2)
    if harn.use_cuda:
        harn.log('Fitting model on GPU({})'.format(harn.gpu_num))
        harn.model.cuda(harn.gpu_num)
    else:
        harn.log('Fitting model on the CPU')
    if harn.class_weights is not None:
        # _to_xpu returns a tuple; the trailing comma unpacks the single item
        harn.class_weights, = harn._to_xpu(harn.class_weights)
    # initial learning rate comes from the scheduler at the current epoch
    lr = harn.lr_scheduler(harn.epoch)
    harn.optimizer = harn.optimizer_cls(harn.model.parameters(), lr=lr)
    # train loop
    while not harn.check_termination():
        harn.train_epoch()
        if harn.vali_loader:
            harn.validation_epoch()
        harn.save_snapshot()
        harn.epoch += 1
Example #3
Source File: logger.py From DeepSpeaker-pytorch with MIT License | 5 votes |
def __init__(self, log_dir):
    """Start a fresh tensorboard logging session rooted at *log_dir*.

    Any data previously logged under the same directory name is
    removed before the logger is configured.
    """
    # drop stale results from an earlier run of the same experiment
    self._remove(log_dir)
    # bind the module-level tensorboard logger to this directory
    configure(log_dir)
    self.global_step = 0
Example #4
Source File: logger.py From skiprnn_pytorch with MIT License | 5 votes |
def __init__(self, log_dir, remove_previous_files=False):
    """Configure tensorboard logging in *log_dir*.

    When *remove_previous_files* is true, data previously logged under
    the same directory name is deleted before configuring the logger.
    """
    if remove_previous_files:
        # caller explicitly asked for a clean slate
        self._remove(log_dir)
    configure(log_dir)
    self.global_step = 0
Example #5
Source File: Loggers.py From hardnet with MIT License | 5 votes |
def __init__(self, log_dir):
    """Initialise tensorboard logging under *log_dir* at global step 0.

    Clears any previously logged data found under the same directory
    name before configuring the logger.
    """
    # remove leftovers of an earlier session with this directory name
    self._remove(log_dir)
    # attach tensorboard_logger's default logger to the directory
    configure(log_dir)
    self.global_step = 0
Example #6
Source File: logger.py From FewShotLearning with MIT License | 5 votes |
def __init__(self, log_dir):
    """Create a logger writing tensorboard events into *log_dir*.

    Previously logged data under the same directory name is discarded
    so the new run starts clean; the global step counter starts at 0.
    """
    # discard old event files sharing this directory name
    self._remove(log_dir)
    # configure the shared tensorboard_logger for this run
    configure(log_dir)
    self.global_step = 0
Example #7
Source File: train_node.py From Auto-PyTorch with Apache License 2.0 | 5 votes |
def tensorboard_log(self, budget, epoch, log, logdir):
    """Log budget, epoch and every metric in *log* to tensorboard.

    Lazily configures tensorboard_logger on first use: if logging the
    first value fails because no default logger is configured yet, the
    logger is configured with *logdir* and the write is retried.

    :param budget: training budget for this run (coerced to float)
    :param epoch: zero-based epoch index; logged as ``epoch + 1``
    :param log: mapping of metric name -> value (values coerced to float)
    :param logdir: directory used to configure the logger on first use
    """
    import tensorboard_logger as tl
    worker_path = 'Train/'
    # One timestamp for the whole call so every value logged here shares
    # the same x-axis position (the original re-read time.time() per value).
    timestamp = int(time.time())
    try:
        tl.log_value(worker_path + 'budget', float(budget), timestamp)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit. An unconfigured logger raises an ordinary Exception,
        # so catching Exception is sufficient and safer.
        tl.configure(logdir)
        tl.log_value(worker_path + 'budget', float(budget), timestamp)
    tl.log_value(worker_path + 'epoch', float(epoch + 1), timestamp)
    for name, value in log.items():
        tl.log_value(worker_path + name, float(value), timestamp)