Python datasets.load_dataset() Examples
The following are 5 code examples of datasets.load_dataset(), drawn from open-source projects. The source file, project, and license are noted above each example.
Example #1
Source File: run.py From Hydra with MIT License
def import_data_loaders(config, n_workers, verbose=1):
    """Import datasets and wrap them into DataLoaders from configuration."""
    train_loaders, test_loaders = dict(), dict()
    for dataset_config in config['datasets']:
        train_data, test_data = datasets.load_dataset(
            dataset_config['name'], dataset_config['kwargs'])
        train_loader = torch.utils.data.DataLoader(
            train_data, batch_size=config['batch_size'],
            shuffle=True, num_workers=n_workers)
        test_loader = torch.utils.data.DataLoader(
            test_data, batch_size=config['batch_size'],
            shuffle=False, num_workers=n_workers)
        train_loaders[dataset_config['task_id']] = train_loader
        test_loaders[dataset_config['task_id']] = test_loader
    log_utils.print_datasets_info(train_loaders, test_loaders, verbose)
    return train_loaders, test_loaders
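import_data_loaders only reads a handful of keys from its config argument, so a minimal configuration driving it could look like the sketch below. The dataset names, task_id values, and empty kwargs are hypothetical placeholders; Hydra's project-local datasets.load_dataset(name, kwargs) decides what those names actually resolve to.

# Hypothetical configuration sketch: only the keys the function above actually reads.
config = {
    'batch_size': 64,
    'datasets': [
        {'name': 'mnist',   'task_id': 'digits',  'kwargs': {}},   # placeholder entries
        {'name': 'fashion', 'task_id': 'fashion', 'kwargs': {}},
    ],
}

# Inside Hydra's run.py, where datasets, log_utils and torch are imported:
# train_loaders, test_loaders = import_data_loaders(config, n_workers=4)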
Example #2
Source File: estrain.py From geoseg with MIT License
def main(args):
    if args.cuda and not torch.cuda.is_available():
        raise ValueError("GPUs are not available, please run at cpu mode")

    # initialize datasets
    train_set, val_set = load_dataset(args.root, 'IM')
    print("Dataset : {} ==> Train : {} ; Val : {} .".format(
        args.dataset, len(train_set), len(val_set)))

    # initialize network
    args.src_ch = train_set.src_ch
    args.tar_ch = train_set.tar_ch
    net = load_model(args)
    print("Model : {} ==> (Src_ch : {} ; Tar_ch : {} ; Base_Kernel : {})".format(
        net.symbol, args.src_ch, args.tar_ch, args.base_kernel))

    # initialize runner
    method = "{}-{}".format(net.symbol, args.dataset)
    run = set_trainer(args, method)

    print("Start training ...")
    run.training(net, [train_set, val_set])
    run.save_log()
    run.learning_curve()
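The excerpt only shows how the returned splits are consumed: load_dataset(args.root, 'IM') is expected to yield a (train_set, val_set) pair whose elements expose src_ch and tar_ch attributes on top of the usual PyTorch dataset interface. A minimal stand-in satisfying that contract might look like the sketch below; the class name, tensor shapes, and random data are purely illustrative assumptions, not geoseg's actual implementation.

import torch
from torch.utils.data import Dataset

class ToyGeoDataset(Dataset):
    """Hypothetical stand-in exposing what the excerpt above reads from each split."""

    def __init__(self, n_samples=8, src_ch=3, tar_ch=1, size=224):
        self.src_ch, self.tar_ch = src_ch, tar_ch   # channel counts read by main()
        self.images = torch.rand(n_samples, src_ch, size, size)
        self.masks = torch.randint(0, 2, (n_samples, tar_ch, size, size)).float()

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        return self.images[idx], self.masks[idx]

def load_dataset_stub(root, mode):
    # Illustrative replacement for geoseg's load_dataset(root, mode):
    # ignores its arguments and returns a (train_set, val_set) pair.
    return ToyGeoDataset(n_samples=32), ToyGeoDataset(n_samples=8)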
Example #3
Source File: trainIE.py From geoseg with MIT License
def main(args):
    if args.cuda and not torch.cuda.is_available():
        raise ValueError("GPUs are not available, please run at cpu mode")

    # initialize datasets
    if "MCFCN" in args.net:
        mode = 'IMS'
    elif "BRNet" in args.net:
        mode = 'IME'
    else:
        mode = 'IE'
    train_set, val_set = load_dataset(args.root, mode)
    print("Dataset : {} ==> Train : {} ; Val : {}".format(
        args.root, len(train_set), len(val_set)))

    # initialize network
    args.src_ch = train_set.src_ch
    args.tar_ch = train_set.tar_ch
    net = load_model(args)
    print("Model : {} ==> (Src_ch : {} ; Tar_ch : {} ; Base_Kernel : {})".format(
        args.net, args.src_ch, args.tar_ch, args.base_kernel))

    # initialize runner
    method = "{}-{}*{}*{}-{}{}-{}".format(args.net, args.src_ch, args.tar_ch,
                                          args.base_kernel, args.root, mode, args.loss)
    run = set_trainer(args, method)

    print("Start training ...")
    run.training(net, [train_set, val_set])
    run.save_log()
    run.learning_curve()
Example #4
Source File: train.py From geoseg with MIT License
def main(args):
    if args.cuda and not torch.cuda.is_available():
        raise ValueError("GPUs are not available, please run at cpu mode")

    # initialize datasets
    if "MCFCN" in args.net:
        mode = 'IMS'
    elif "BRNet" in args.net:
        mode = 'IME'
    else:
        mode = 'IM'
    train_set, val_set = load_dataset(args.root, mode)
    print("Dataset : {} ==> Train : {} ; Val : {}".format(
        args.root, len(train_set), len(val_set)))

    # initialize network
    args.src_ch = train_set.src_ch
    args.tar_ch = train_set.tar_ch
    net = load_model(args)
    print("Model : {} ==> (Src_ch : {} ; Tar_ch : {} ; Base_Kernel : {})".format(
        args.net, args.src_ch, args.tar_ch, args.base_kernel))

    # initialize runner
    method = "{}-{}*{}*{}-{}".format(args.net, args.src_ch, args.tar_ch,
                                     args.base_kernel, args.root)
    run = set_trainer(args, method)

    print("Start training ...")
    run.training(net, [train_set, val_set])
    run.save_log()
    run.learning_curve()
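Examples #2 through #4 all drive main(args) with an argparse-style namespace. The attributes read directly in the excerpt above are cuda, net, root and base_kernel (Example #2 also reads args.dataset, Example #3 also args.loss), while load_model and set_trainer consume further options not shown here. A hypothetical minimal namespace could be sketched as follows; every value is a placeholder, not a real geoseg configuration.

from argparse import Namespace

# Hypothetical sketch: only the attributes the excerpt above reads directly.
args = Namespace(
    cuda=True,            # requires a visible GPU, otherwise the check above raises ValueError
    net="BRNet",          # "BRNet" selects mode 'IME' in the if/elif chain above
    root="dataset-root",  # placeholder dataset root/name
    base_kernel=24,       # placeholder base channel width
)
# main(args)  # would run inside geoseg's train.py, where load_dataset/load_model/set_trainer exist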
Example #5
Source File: train.py From shake-drop_pytorch with MIT License
def main(args):
    train_loader, test_loader = load_dataset(args.label, args.batch_size)
    model = ShakePyramidNet(depth=args.depth, alpha=args.alpha, label=args.label)
    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True

    opt = optim.SGD(model.parameters(),
                    lr=args.lr,
                    momentum=0.9,
                    weight_decay=args.weight_decay,
                    nesterov=args.nesterov)
    scheduler = optim.lr_scheduler.MultiStepLR(opt, [args.epochs // 2, args.epochs * 3 // 4])
    loss_func = nn.CrossEntropyLoss().cuda()

    headers = ["Epoch", "LearningRate", "TrainLoss", "TestLoss", "TrainAcc.", "TestAcc."]
    logger = utils.Logger(args.checkpoint, headers)

    for e in range(args.epochs):
        scheduler.step()
        model.train()
        train_loss, train_acc, train_n = 0, 0, 0
        bar = tqdm(total=len(train_loader), leave=False)
        for x, t in train_loader:
            x, t = Variable(x.cuda()), Variable(t.cuda())
            y = model(x)
            loss = loss_func(y, t)
            opt.zero_grad()
            loss.backward()
            opt.step()
            train_acc += utils.accuracy(y, t).item()
            train_loss += loss.item() * t.size(0)
            train_n += t.size(0)
            bar.set_description("Loss: {:.4f}, Accuracy: {:.2f}".format(
                train_loss / train_n, train_acc / train_n * 100), refresh=True)
            bar.update()
        bar.close()

        model.eval()
        test_loss, test_acc, test_n = 0, 0, 0
        for x, t in tqdm(test_loader, total=len(test_loader), leave=False):
            with torch.no_grad():
                x, t = Variable(x.cuda()), Variable(t.cuda())
                y = model(x)
                loss = loss_func(y, t)
                test_loss += loss.item() * t.size(0)
                test_acc += utils.accuracy(y, t).item()
                test_n += t.size(0)

        if (e + 1) % args.snapshot_interval == 0:
            torch.save({
                "state_dict": model.state_dict(),
                "optimizer": opt.state_dict()
            }, os.path.join(args.checkpoint, "{}.tar".format(e + 1)))

        lr = opt.param_groups[0]["lr"]
        logger.write(e + 1, lr, train_loss / train_n, test_loss / test_n,
                     train_acc / train_n * 100, test_acc / test_n * 100)
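Two details of this loop are tied to older PyTorch versions: Variable wrappers have been unnecessary since PyTorch 0.4 (tensors are used directly), and since PyTorch 1.1 learning-rate schedulers are meant to be stepped after the optimizer updates rather than at the top of the epoch. A sketch of the adjusted epoch skeleton, keeping the excerpt's variable names, could look like this:

for e in range(args.epochs):
    model.train()
    for x, t in train_loader:
        x, t = x.cuda(), t.cuda()   # plain tensors, no Variable wrapper needed
        loss = loss_func(model(x), t)
        opt.zero_grad()
        loss.backward()
        opt.step()
    scheduler.step()                # step the LR schedule once per epoch, after the updates
    # ... evaluation, checkpointing and logging as in the excerpt above ...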