Python tensorpack.logger.set_logger_dir() Examples
The following is 1 code example of tensorpack.logger.set_logger_dir().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorpack.logger, or try the search function.
Example #1
Source File: train.py From hover_net with MIT License | 4 votes |
def run_once(self, opt, sess_init=None, save_dir=None):
    """Run a single training session with the given options.

    Builds train/valid dataflows, configures logging and callbacks,
    then launches multi-GPU training via tensorpack.

    Args:
        opt: dict of run options; keys read here are 'train_batch_size',
            'infer_batch_size', 'model_flags', 'nr_epochs', and
            'manual_parameters' (mapping param_name -> (initial_value,
            schedule) — presumed from usage below; verify against caller).
        sess_init: optional tensorpack session initializer (e.g. restored
            weights) assigned to the TrainConfig before launch.
        save_dir: directory for logs/checkpoints; falls back to
            self.save_dir when None.

    Returns:
        None. Training runs as a side effect; the TF default graph is
        reset afterwards.
    """
    # Build the training and validation dataflows from the model's options.
    train_datagen = self.get_datagen(opt['train_batch_size'], mode='train')
    valid_datagen = self.get_datagen(opt['infer_batch_size'], mode='valid')

    # The logger directory must be set BEFORE ModelSaver is constructed,
    # since ModelSaver writes checkpoints into the logger directory.
    if save_dir is None:
        logger.set_logger_dir(self.save_dir)
    else:
        logger.set_logger_dir(save_dir)

    # Instantiate the model class with its configuration flags.
    model_flags = opt['model_flags']
    model = self.get_model()(**model_flags)

    # Checkpointing: keep one checkpoint per epoch.
    callbacks=[
        ModelSaver(max_to_keep=opt['nr_epochs']),
    ]

    # Register manually scheduled hyper-parameters: param_info[0] is the
    # initial value, param_info[1] the epoch schedule — presumed from how
    # ScheduledHyperParamSetter is fed here; confirm against opt's producer.
    for param_name, param_info in opt['manual_parameters'].items():
        model.add_manual_variable(param_name, param_info[0])
        callbacks.append(ScheduledHyperParamSetter(param_name, param_info[1]))

    # multi-GPU inference (with mandatory queue prefetch)
    # NOTE(review): `nr_gpus` is not defined in this method — it appears to
    # be a module-level global in the original file; confirm before reuse.
    infs = [StatCollector()]
    callbacks.append(DataParallelInferenceRunner(
        valid_datagen, infs, list(range(nr_gpus))))
    # Keep the checkpoint that maximizes the 'valid_dice' statistic.
    callbacks.append(MaxSaver('valid_dice'))

    # Each GPU consumes one batch per step, so an "epoch" spans the
    # dataflow size divided across GPUs.
    steps_per_epoch = train_datagen.size() // nr_gpus
    config = TrainConfig(
        model           = model,
        callbacks       = callbacks,
        dataflow        = train_datagen,
        steps_per_epoch = steps_per_epoch,
        max_epoch       = opt['nr_epochs'],
    )
    config.session_init = sess_init
    launch_train_with_config(config, SyncMultiGPUTrainerParameterServer(nr_gpus))
    tf.reset_default_graph()  # remove the entire graph in case of multiple runs
    return