Python tensorpack.tfutils.sessinit.SaverRestore() Examples
The following are 10 code examples of tensorpack.tfutils.sessinit.SaverRestore().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorpack.tfutils.sessinit, or try the search function.
Example #1
Source File: critic.py From petridishnn with MIT License | 6 votes |
def critic_predictor(ctrl, model_dir, vs_name):
    """ Create an OfflinePredictorWithSaver for test-time use. """
    model = critic_factory(ctrl, is_train=False, vs_name=vs_name)
    # The critic's prediction tensor lives under the variable scope vs_name.
    output_names = ['{}/predicted_accuracy:0'.format(vs_name)]
    session_config=None
    if ctrl.critic_type == CriticTypes.LSTM:
        # LSTM critic: make no GPUs visible to the session (CPU only).
        session_config = tf.ConfigProto(device_count = {'GPU': 0})
    pred_config = PredictConfig(
        model=model,
        input_names=model.input_names,
        output_names=output_names,
        session_creator=NewSessionCreator(config=session_config)
    )
    if model_dir:
        # Restore the latest checkpoint from model_dir, if one exists.
        ckpt = tf.train.latest_checkpoint(model_dir)
        logger.info("Loading {} predictor from {}".format(vs_name, ckpt))
        if ckpt:
            pred_config.session_init = SaverRestore(ckpt)
    predictor = OfflinePredictorWithSaver(pred_config)
    return predictor
Example #2
Source File: eval.py From voice-vector with MIT License | 5 votes |
def compute_accuracy(model, mel_spec, speaker_id, ckpt=None):
    """Run the model's accuracy output on one batch.

    Restores weights from *ckpt* when given; otherwise the predictor
    runs with default-initialized variables.
    """
    restore = SaverRestore(ckpt) if ckpt else None
    config = PredictConfig(
        model=model,
        input_names=get_eval_input_names(),
        output_names=get_eval_output_names(),
        session_init=restore)
    run_accuracy = OfflinePredictor(config)
    accuracy, = run_accuracy(mel_spec, speaker_id)
    return accuracy
Example #3
Source File: critic.py From petridishnn with MIT License | 5 votes |
def critic_predict_dataflow(ctrl, data, log_dir, model_dir, vs_name):
    """ Prediction on a dataflow, used for testing a large batch of data """
    ckpt = tf.train.latest_checkpoint(model_dir)
    if not ckpt:
        # No trained critic yet: return one zero score per sample so the
        # caller can skip sorting.
        outputs = [0] * len(data[0])
        logger.info("No model exists. Do not sort")
        return outputs
    model = critic_factory(ctrl, is_train=False, vs_name=vs_name)
    ds_val = critic_dataflow_factory(ctrl, data, is_train=False)
    output_names = ['{}/predicted_accuracy:0'.format(vs_name)]
    session_config=None
    if ctrl.critic_type == CriticTypes.LSTM:
        # LSTM critic: make no GPUs visible to the session (CPU only).
        session_config = tf.ConfigProto(device_count = {'GPU': 0})
    pred_config = PredictConfig(
        model=model,
        input_names=model.input_names,
        output_names=output_names,
        session_creator=NewSessionCreator(config=session_config),
        session_init=SaverRestore(ckpt)
    )
    #with tf.Graph().as_default():
    predictor = SimpleDatasetPredictor(pred_config, ds_val)
    outputs = []
    for o in predictor.get_result():
        # o[0] is the batch of predicted accuracies; flatten across batches.
        outputs.extend(o[0])
    return outputs
Example #4
Source File: eval2.py From deep-voice-conversion with MIT License | 5 votes |
def eval(logdir1, logdir2):
    """Evaluate Net2 on one test batch and write the loss summary to logdir2."""
    # Load graph
    model = Net2()

    # dataflow
    df = Net2DataFlow(hp.test2.data_path, hp.test2.batch_size)

    ckpt1 = tf.train.latest_checkpoint(logdir1)
    ckpt2 = tf.train.latest_checkpoint(logdir2)
    # Chain both restores: Net2's own checkpoint plus Net1's weights,
    # skipping Net1's global_step variable.
    session_inits = []
    if ckpt2:
        session_inits.append(SaverRestore(ckpt2))
    if ckpt1:
        session_inits.append(SaverRestore(ckpt1, ignore=['global_step']))
    pred_conf = PredictConfig(
        model=model,
        input_names=get_eval_input_names(),
        output_names=get_eval_output_names(),
        session_init=ChainInit(session_inits))
    predictor = OfflinePredictor(pred_conf)

    # Run a single batch through the predictor; it returns a loss summary.
    x_mfccs, y_spec, _ = next(df().get_data())
    summ_loss, = predictor(x_mfccs, y_spec)

    writer = tf.summary.FileWriter(logdir2)
    writer.add_summary(summ_loss)
    writer.close()
Example #5
Source File: train1.py From deep-voice-conversion with MIT License | 5 votes |
def train(args, logdir):
    """Train Net1, resuming from args.ckpt or the latest checkpoint in logdir."""
    # model
    model = Net1()

    # dataflow
    df = Net1DataFlow(hp.train1.data_path, hp.train1.batch_size)

    # set logger for event and model saver
    logger.set_logger_dir(logdir)

    # NOTE(review): session_conf is built but never used -- the
    # session_config= argument below is commented out. Confirm intent.
    session_conf = tf.ConfigProto(
        gpu_options=tf.GPUOptions(
            allow_growth=True,
        ),)

    train_conf = TrainConfig(
        model=model,
        data=QueueInput(df(n_prefetch=1000, n_thread=4)),
        callbacks=[
            ModelSaver(checkpoint_dir=logdir),
            # TODO EvalCallback()
        ],
        max_epoch=hp.train1.num_epochs,
        steps_per_epoch=hp.train1.steps_per_epoch,
        # session_config=session_conf
    )

    # Prefer an explicitly requested checkpoint; otherwise resume from the
    # latest one found in logdir (None when the directory is empty).
    ckpt = '{}/{}'.format(logdir, args.ckpt) if args.ckpt else tf.train.latest_checkpoint(logdir)
    if ckpt:
        train_conf.session_init = SaverRestore(ckpt)

    if args.gpu:
        # args.gpu is a comma-separated string of device ids.
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        train_conf.nr_tower = len(args.gpu.split(','))

    trainer = SyncMultiGPUTrainerReplicated(hp.train1.num_gpu)
    launch_train_with_config(train_conf, trainer=trainer)
Example #6
Source File: eval1.py From deep-voice-conversion with MIT License | 5 votes |
def eval(logdir):
    """Evaluate Net1 on one test batch and write loss/accuracy/confusion
    summaries to *logdir*."""
    # Build the graph and the evaluation dataflow.
    net = Net1()
    flow = Net1DataFlow(hp.test1.data_path, hp.test1.batch_size)

    checkpoint = tf.train.latest_checkpoint(logdir)
    conf = PredictConfig(
        model=net,
        input_names=get_eval_input_names(),
        output_names=get_eval_output_names())
    if checkpoint:
        conf.session_init = SaverRestore(checkpoint)
    run = OfflinePredictor(conf)

    mfccs, ppg_targets = next(flow().get_data())
    y_ppg_1d, pred_ppg_1d, summ_loss, summ_acc = run(mfccs, ppg_targets)

    # plot confusion matrix over phoneme labels
    _, idx2phn = load_vocab()
    y_ppg_1d = [idx2phn[i] for i in y_ppg_1d]
    pred_ppg_1d = [idx2phn[i] for i in pred_ppg_1d]
    summ_cm = plot_confusion_matrix(y_ppg_1d, pred_ppg_1d, phns)

    writer = tf.summary.FileWriter(logdir)
    writer.add_summary(summ_loss)
    writer.add_summary(summ_acc)
    writer.add_summary(summ_cm)
    writer.close()
Example #7
Source File: train.py From parallel-wavenet-vocoder with MIT License | 4 votes |
def train(case='default', ckpt=None, gpu=None, r=False):
    '''
    :param case: experiment case name
    :param ckpt: checkpoint to load model
    :param gpu: comma separated list of GPU(s) to use
        (NOTE(review): the code below does ``','.join(map(str, gpu))`` and
        ``len(gpu)``, which expects a list of ids, not a string -- confirm.)
    :param r: start from the beginning.
    '''
    hp.set_hparam_yaml(case)
    if r:
        # Fresh run: wipe previous logs/checkpoints for this case.
        remove_all_files(hp.logdir)

    # model
    model = IAFVocoder(batch_size=hp.train.batch_size, length=hp.signal.length)

    # dataset
    dataset = Dataset(hp.data_path, hp.train.batch_size, length=hp.signal.length)
    print('dataset size is {}'.format(len(dataset.wav_files)))

    # set logger for event and model saver
    logger.set_logger_dir(hp.logdir)

    train_conf = TrainConfig(
        model=model,
        data=TFDatasetInput(dataset()),
        callbacks=[
            ModelSaver(checkpoint_dir=hp.logdir),
            RunUpdateOps()  # for batch norm, exponential moving average
            # TODO GenerateCallback()
        ],
        max_epoch=hp.train.num_epochs,
        steps_per_epoch=hp.train.steps_per_epoch,
    )
    # Prefer the explicitly requested checkpoint; otherwise resume from the
    # latest one in the log dir (None when the directory is empty).
    ckpt = '{}/{}'.format(hp.logdir, ckpt) if ckpt else tf.train.latest_checkpoint(hp.logdir)
    if ckpt:
        train_conf.session_init = SaverRestore(ckpt)

    if gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, gpu))
        train_conf.nr_tower = len(gpu)

    if hp.train.num_gpu <= 1:
        trainer = SimpleTrainer()
    else:
        trainer = SyncMultiGPUTrainerReplicated(gpus=hp.train.num_gpu)

    launch_train_with_config(train_conf, trainer=trainer)
Example #8
Source File: critic.py From petridishnn with MIT License | 4 votes |
def critic_train(ctrl, data, log_dir, model_dir, prev_dir, vs_name, split_train_val=False):
    """Train a critic model on *data*, checkpointing into *model_dir*.

    Warm-starts from the latest checkpoint in *prev_dir* when given,
    otherwise from *model_dir* itself.
    """
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    # Learning-rate schedule: start at critic_init_lr, multiply by 0.9
    # once per epoch.
    lr_schedule = []
    max_epoch = ctrl.critic_train_epoch
    lr = ctrl.critic_init_lr
    for epoch in range(0, max_epoch):
        if epoch % 1 == 0:
            lr_schedule.append((epoch+1, lr))
            lr *= 0.9

    # Shuffle sample indices, then carve out train/val column-wise
    # (data is a list of parallel columns).
    ds_size = len(data[0])
    idxs = list(range(ds_size))
    np.random.shuffle(idxs)
    if split_train_val:
        # Disjoint split: first 90% train, last 10% val (fall back to the
        # full set when 90% rounds down to zero samples).
        train_size = ds_size * 9 // 10
        if train_size == 0:
            train_size = ds_size
        val_start = train_size
    else:
        # Overlapping split: train on everything, validate on the last 10%
        # (or everything when that tail would be empty).
        train_size = ds_size
        val_start = ds_size * 9 // 10
        if ds_size - val_start == 0:
            val_start = 0
    data_train = [ [col[k] for k in idxs[:train_size]] for col in data ]
    data_val = [ [col[k] for k in idxs[val_start:]] for col in data ]

    model = critic_factory(ctrl, is_train=True, vs_name=vs_name)
    ds_train = critic_dataflow_factory(ctrl, data_train, is_train=True)
    ds_val = critic_dataflow_factory(ctrl, data_val, is_train=False)
    session_config = None
    device = 0
    if ctrl.critic_type == CriticTypes.LSTM:
        # LSTM critic: make no GPUs visible; device -1 means CPU for the
        # inference runner below.
        session_config = tf.ConfigProto(device_count = {'GPU': 0})
        device = -1
    # Drop the progress bar from the default callbacks.
    extra_callbacks = DEFAULT_CALLBACKS()
    extra_callbacks = list(filter(lambda x : not isinstance(x, ProgressBar), extra_callbacks))
    logger.info("Extra callbacks are {}".format(list(map(lambda x : x.__class__, extra_callbacks))))

    # Put this into callbacks for in-training validation/inferencing
    # NOTE(review): inference_callback is constructed but never added to the
    # callbacks list below -- in-training validation is currently disabled.
    inference_callback = InferenceRunner(ds_val,
        [ScalarStats('{}/cost'.format(vs_name))], device=device)

    config = TrainConfig(
        dataflow=ds_train,
        callbacks=[
            ModelSaver(checkpoint_dir=model_dir, max_to_keep=1,
                       keep_checkpoint_every_n_hours=100),
            ScheduledHyperParamSetter('learning_rate', lr_schedule)
        ],
        extra_callbacks=extra_callbacks,
        model=model,
        monitors=[JSONWriter(), ScalarPrinter()], #, TFEventWriter()],
        steps_per_epoch=ds_train.size(),
        max_epoch=max_epoch,
        session_config=session_config
    )
    # Warm-start from the previous critic's checkpoint if available.
    ckpt = tf.train.latest_checkpoint(prev_dir if prev_dir else model_dir)
    if ckpt:
        config.session_init = SaverRestore(ckpt)
    launch_train_with_config(config, SimpleTrainer())
Example #9
Source File: convert.py From deep-voice-conversion with MIT License | 4 votes |
def do_convert(args, logdir1, logdir2):
    """Run voice conversion with Net2 and write audio/PPG summaries to logdir2."""
    # Load graph
    model = Net2()

    df = Net2DataFlow(hp.convert.data_path, hp.convert.batch_size)

    ckpt1 = tf.train.latest_checkpoint(logdir1)
    # An explicitly requested checkpoint beats the latest one in logdir2.
    ckpt2 = '{}/{}'.format(logdir2, args.ckpt) if args.ckpt else tf.train.latest_checkpoint(logdir2)
    # Chain both restores: Net2's checkpoint, then Net1's weights with its
    # global_step variable skipped.
    session_inits = []
    if ckpt2:
        session_inits.append(SaverRestore(ckpt2))
    if ckpt1:
        session_inits.append(SaverRestore(ckpt1, ignore=['global_step']))
    pred_conf = PredictConfig(
        model=model,
        input_names=get_eval_input_names(),
        output_names=get_eval_output_names(),
        session_init=ChainInit(session_inits))
    predictor = OfflinePredictor(pred_conf)

    audio, y_audio, ppgs = convert(predictor, df)

    # Write the result
    tf.summary.audio('A', y_audio, hp.default.sr, max_outputs=hp.convert.batch_size)
    tf.summary.audio('B', audio, hp.default.sr, max_outputs=hp.convert.batch_size)

    # Visualize PPGs
    heatmap = np.expand_dims(ppgs, 3)  # channel=1
    tf.summary.image('PPG', heatmap, max_outputs=ppgs.shape[0])

    writer = tf.summary.FileWriter(logdir2)
    with tf.Session() as sess:
        # Evaluate all summaries declared above in one merged run.
        summ = sess.run(tf.summary.merge_all())
    writer.add_summary(summ)
    writer.close()

    # session_conf = tf.ConfigProto(
    #     allow_soft_placement=True,
    #     device_count={'CPU': 1, 'GPU': 0},
    #     gpu_options=tf.GPUOptions(
    #         allow_growth=True,
    #         per_process_gpu_memory_fraction=0.6
    #     ),
    # )
Example #10
Source File: train2.py From deep-voice-conversion with MIT License | 4 votes |
def train(args, logdir1, logdir2):
    """Train Net2 in logdir2, restoring Net2's checkpoint and Net1's weights."""
    # model
    model = Net2()

    # dataflow
    df = Net2DataFlow(hp.train2.data_path, hp.train2.batch_size)

    # set logger for event and model saver
    logger.set_logger_dir(logdir2)

    # session_conf = tf.ConfigProto(
    #     gpu_options=tf.GPUOptions(
    #         allow_growth=True,
    #         per_process_gpu_memory_fraction=0.6,
    #     ),
    # )

    # Chain both restores: Net2's own checkpoint (explicit --ckpt beats the
    # latest in logdir2), then Net1's weights with global_step skipped.
    session_inits = []
    ckpt2 = '{}/{}'.format(logdir2, args.ckpt) if args.ckpt else tf.train.latest_checkpoint(logdir2)
    if ckpt2:
        session_inits.append(SaverRestore(ckpt2))
    ckpt1 = tf.train.latest_checkpoint(logdir1)
    if ckpt1:
        session_inits.append(SaverRestore(ckpt1, ignore=['global_step']))
    train_conf = TrainConfig(
        model=model,
        data=QueueInput(df(n_prefetch=1000, n_thread=4)),
        callbacks=[
            # TODO save on prefix net2
            ModelSaver(checkpoint_dir=logdir2),
            # ConvertCallback(logdir2, hp.train2.test_per_epoch),
        ],
        max_epoch=hp.train2.num_epochs,
        steps_per_epoch=hp.train2.steps_per_epoch,
        session_init=ChainInit(session_inits)
    )
    if args.gpu:
        # args.gpu is a comma-separated string of device ids.
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        train_conf.nr_tower = len(args.gpu.split(','))

    trainer = SyncMultiGPUTrainerReplicated(hp.train2.num_gpu)
    launch_train_with_config(train_conf, trainer=trainer)

    # def get_cyclic_lr(step):
    #     lr_margin = hp.train2.lr_cyclic_margin * math.sin(2. * math.pi / hp.train2.lr_cyclic_steps * step)
    #     lr = hp.train2.lr + lr_margin
    #     return lr