Python datasets.dataset_factory.dataset_factory() Examples
The following are 9 code examples of datasets.dataset_factory.dataset_factory(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module datasets.dataset_factory, or try the search function.
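Before the examples, note the two usage styles they illustrate: the CenterNet-style projects index dataset_factory as a registry dict that maps a dataset name to a dataset class (Dataset = dataset_factory[opt.dataset]), while the self-attention-tacotron project calls dataset_factory(source, target, hparams) as a function that wraps source and target TFRecord datasets. The minimal sketch of the registry pattern below is not taken from any of these projects; the dataset classes and names are placeholders invented for illustration.

class COCODataset:
    """Placeholder standing in for a real dataset class (hypothetical)."""
    def __init__(self, opt, split):
        self.opt, self.split = opt, split

class PascalDataset:
    """Placeholder standing in for a real dataset class (hypothetical)."""
    def __init__(self, opt, split):
        self.opt, self.split = opt, split

# The factory is simply a dict from a configuration string to a dataset class.
dataset_factory = {
    'coco': COCODataset,
    'pascal': PascalDataset,
}

# Lookup and instantiation mirror the CenterNet-style examples below:
#   Dataset = dataset_factory[opt.dataset]
#   dataset = Dataset(opt, split)
Dataset = dataset_factory['coco']
dataset = Dataset(opt=None, split='val')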
Example #1
Source File: evaluate.py From centerpose with MIT License | 5 votes |
def test(cfg):
    Dataset = dataset_factory[cfg.SAMPLE_METHOD]
    Logger(cfg)
    Detector = detector_factory[cfg.TEST.TASK]

    dataset = Dataset(cfg, 'val')
    detector = Detector(cfg)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(cfg.EXP_ID), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind in range(num_iters):
        img_id = dataset.images[ind]
        img_info = dataset.coco.loadImgs(ids=[img_id])[0]
        img_path = os.path.join(dataset.img_dir, img_info['file_name'])
        #img_path = '/home/tensorboy/data/coco/images/val2017/000000004134.jpg'
        ret = detector.run(img_path)
        results[img_id] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(t, avg_time_stats[t].avg)
        bar.next()
    bar.finish()
    dataset.run_eval(results, cfg.OUTPUT_DIR)
Example #2
Source File: test.py From CenterNet-CondInst with MIT License | 5 votes |
def prefetch_test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    data_loader = torch.utils.data.DataLoader(
        PrefetchDataset(opt, dataset, detector.pre_process),
        batch_size=1, shuffle=False, num_workers=1, pin_memory=True)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind, (img_id, pre_processed_images) in enumerate(data_loader):
        ret = detector.run(pre_processed_images)
        results[img_id.numpy().astype(np.int32)[0]] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
                t, tm=avg_time_stats[t])
        bar.next()
    bar.finish()
    dataset.run_eval(results, opt.save_dir)
Example #3
Source File: test.py From CenterNet-CondInst with MIT License | 5 votes |
def test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    results = {}
    num_iters = len(dataset)
    for ind in tqdm(range(num_iters)):
        img_id = dataset.images[ind]
        img_info = dataset.coco.loadImgs(ids=[img_id])[0]
        img_path = os.path.join(dataset.img_dir, img_info['file_name'])
        if opt.task == 'ddd':
            ret = detector.run(img_path, img_info['calib'])
        else:
            ret = detector.run(img_path)
        results[img_id] = ret['results']
    dataset.run_eval(results, opt.save_dir)
Example #4
Source File: test.py From centerNet-deep-sort with GNU General Public License v3.0 | 5 votes |
def prefetch_test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    data_loader = torch.utils.data.DataLoader(
        PrefetchDataset(opt, dataset, detector.pre_process),
        batch_size=1, shuffle=False, num_workers=1, pin_memory=True)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind, (img_id, pre_processed_images) in enumerate(data_loader):
        ret = detector.run(pre_processed_images)
        results[img_id.numpy().astype(np.int32)[0]] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
                t, tm=avg_time_stats[t])
        bar.next()
    bar.finish()
    dataset.run_eval(results, opt.save_dir)
Example #5
Source File: test.py From centerNet-deep-sort with GNU General Public License v3.0 | 5 votes |
def test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind in range(num_iters):
        img_id = dataset.images[ind]
        img_info = dataset.coco.loadImgs(ids=[img_id])[0]
        img_path = os.path.join(dataset.img_dir, img_info['file_name'])
        if opt.task == 'ddd':
            ret = detector.run(img_path, img_info['calib'])
        else:
            ret = detector.run(img_path)
        results[img_id] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(t, avg_time_stats[t].avg)
        bar.next()
    bar.finish()
    dataset.run_eval(results, opt.save_dir)
Example #6
Source File: predict_mel.py From self-attention-tacotron with BSD 3-Clause "New" or "Revised" License | 5 votes |
def predict(hparams, model_dir, checkpoint_path, output_dir, test_source_files, test_target_files):
    def predict_input_fn():
        source = tf.data.TFRecordDataset(list(test_source_files))
        target = tf.data.TFRecordDataset(list(test_target_files))
        dataset = dataset_factory(source, target, hparams)
        batched = dataset.prepare_and_zip().group_by_batch(
            batch_size=1).merge_target_to_source()
        return batched.dataset

    estimator = tacotron_model_factory(hparams, model_dir, None)

    predictions = map(
        lambda p: PredictedMel(p["id"], p["key"], p["mel"], p.get("mel_postnet"),
                               p["mel"].shape[1], p["mel"].shape[0],
                               p["ground_truth_mel"], p["alignment"],
                               p.get("alignment2"), p.get("alignment3"),
                               p.get("alignment4"), p.get("alignment5"),
                               p.get("alignment6"), p["source"], p["text"],
                               p.get("accent_type")),
        estimator.predict(predict_input_fn, checkpoint_path=checkpoint_path))

    for v in predictions:
        key = v.key.decode('utf-8')
        mel_filename = f"{key}.{hparams.predicted_mel_extension}"
        mel_filepath = os.path.join(output_dir, mel_filename)
        mel = v.predicted_mel_postnet if hparams.use_postnet_v2 else v.predicted_mel
        assert mel.shape[1] == hparams.num_mels
        mel.tofile(mel_filepath, format='<f4')
        text = v.text.decode("utf-8")
        plot_filename = f"{key}.png"
        plot_filepath = os.path.join(output_dir, plot_filename)
        alignments = list(filter(lambda x: x is not None,
                                 [v.alignment, v.alignment2, v.alignment3,
                                  v.alignment4, v.alignment5, v.alignment6]))
        plot_predictions(alignments, v.ground_truth_mel, v.predicted_mel,
                         v.predicted_mel_postnet, text, v.key, plot_filepath)
        prediction_filename = f"{key}.tfrecord"
        prediction_filepath = os.path.join(output_dir, prediction_filename)
        write_prediction_result(v.id, key, alignments, mel, v.ground_truth_mel,
                                text, v.source, v.accent_type, prediction_filepath)
Example #7
Source File: test.py From CenterNet with MIT License | 5 votes |
def prefetch_test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    data_loader = torch.utils.data.DataLoader(
        PrefetchDataset(opt, dataset, detector.pre_process),
        batch_size=1, shuffle=False, num_workers=1, pin_memory=True)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind, (img_id, pre_processed_images) in enumerate(data_loader):
        ret = detector.run(pre_processed_images)
        results[img_id.numpy().astype(np.int32)[0]] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
                t, tm=avg_time_stats[t])
        bar.next()
    bar.finish()
    dataset.run_eval(results, opt.save_dir)
Example #8
Source File: test.py From CenterNet with MIT License | 5 votes |
def test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind in range(num_iters):
        img_id = dataset.images[ind]
        img_info = dataset.coco.loadImgs(ids=[img_id])[0]
        img_path = os.path.join(dataset.img_dir, img_info['file_name'])
        if opt.task == 'ddd':
            ret = detector.run(img_path, img_info['calib'])
        else:
            ret = detector.run(img_path)
        results[img_id] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(t, avg_time_stats[t].avg)
        bar.next()
    bar.finish()
    dataset.run_eval(results, opt.save_dir)
Example #9
Source File: train.py From self-attention-tacotron with BSD 3-Clause "New" or "Revised" License | 4 votes |
def train_and_evaluate(hparams, model_dir, train_source_files, train_target_files,
                       eval_source_files, eval_target_files, use_multi_gpu):
    interleave_parallelism = get_parallelism(hparams.interleave_cycle_length_cpu_factor,
                                             hparams.interleave_cycle_length_min,
                                             hparams.interleave_cycle_length_max)

    tf.logging.info("Interleave parallelism is %d.", interleave_parallelism)

    def train_input_fn():
        source_and_target_files = list(zip(train_source_files, train_target_files))
        shuffle(source_and_target_files)
        source = [s for s, _ in source_and_target_files]
        target = [t for _, t in source_and_target_files]

        dataset = create_from_tfrecord_files(source, target, hparams,
                                             cycle_length=interleave_parallelism,
                                             buffer_output_elements=hparams.interleave_buffer_output_elements,
                                             prefetch_input_elements=hparams.interleave_prefetch_input_elements)

        zipped = dataset.prepare_and_zip()
        zipped = zipped.cache(hparams.cache_file_name) if hparams.use_cache else zipped
        batched = zipped.filter_by_max_output_length().repeat(count=None).shuffle(
            hparams.suffle_buffer_size).group_by_batch().prefetch(hparams.prefetch_buffer_size)
        return batched.dataset

    def eval_input_fn():
        source_and_target_files = list(zip(eval_source_files, eval_target_files))
        shuffle(source_and_target_files)
        source = tf.data.TFRecordDataset([s for s, _ in source_and_target_files])
        target = tf.data.TFRecordDataset([t for _, t in source_and_target_files])

        dataset = dataset_factory(source, target, hparams)
        zipped = dataset.prepare_and_zip()
        dataset = zipped.filter_by_max_output_length().repeat().group_by_batch(batch_size=1)
        return dataset.dataset

    distribution = tf.contrib.distribute.MirroredStrategy() if use_multi_gpu else None

    run_config = tf.estimator.RunConfig(save_summary_steps=hparams.save_summary_steps,
                                        save_checkpoints_steps=hparams.save_checkpoints_steps,
                                        keep_checkpoint_max=hparams.keep_checkpoint_max,
                                        log_step_count_steps=hparams.log_step_count_steps,
                                        train_distribute=distribution)

    ws = tf.estimator.WarmStartSettings(
        ckpt_to_initialize_from=hparams.ckpt_to_initialize_from,
        vars_to_warm_start=hparams.vars_to_warm_start) if hparams.warm_start else None

    estimator = tacotron_model_factory(hparams, model_dir, run_config, ws)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn)

    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
                                      steps=hparams.num_evaluation_steps,
                                      throttle_secs=hparams.eval_throttle_secs,
                                      start_delay_secs=hparams.eval_start_delay_secs)

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)