Python dataset.DatasetFromHdf5() Examples
The following are 4 code examples of dataset.DatasetFromHdf5(), taken from open-source projects. You can go to the original project or source file by following the link above each example, or browse the other functions and classes available in the dataset module.
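The DatasetFromHdf5 class itself is not included in the excerpts below. As a rough sketch of what it usually looks like in these super-resolution projects, the following wraps an HDF5 file with h5py; the key names "data" and "label" and the 4-D patch layout are assumptions that may differ between repositories.

import h5py
import torch
import torch.utils.data as data


class DatasetFromHdf5(data.Dataset):
    """Sketch only: serves (input, target) patch pairs stored in an HDF5 file.

    Assumes the file holds two arrays under the keys "data" (inputs) and
    "label" (targets); adjust the key names to match your .h5 file.
    """

    def __init__(self, file_path):
        super(DatasetFromHdf5, self).__init__()
        hf = h5py.File(file_path, "r")
        self.data = hf.get("data")
        self.target = hf.get("label")

    def __getitem__(self, index):
        # Convert one patch pair to float tensors for the DataLoader.
        return (torch.from_numpy(self.data[index, :, :, :]).float(),
                torch.from_numpy(self.target[index, :, :, :]).float())

    def __len__(self):
        return self.data.shape[0]

With such a class in place, DatasetFromHdf5("path_to_dataset.h5") can be handed directly to a DataLoader, as each of the examples below does.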
Example #1
Source File: main_edsr.py, from pytorch-edsr (MIT License)
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("path_to_dataset.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads,
                                      batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = nn.L1Loss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=opt.lr,
                           weight_decay=opt.weight_decay, betas=(0.9, 0.999), eps=1e-08)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
Example #2
Source File: main.py, from DRRN-pytorch (MIT License)
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/train_291_32_x234.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads,
                                      batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = DRRN()
    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("===> loading checkpoint: {}".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("===> no checkpoint found at {}".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("===> load model {}".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("===> no model found at {}".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
        # os.system("python eval.py --cuda --model=model/model_epoch_{}.pth".format(epoch))
Example #3
Source File: main_lapsrn.py, from pytorch-LapSRN (MIT License)
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/lap_pry_x4_small.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads,
                                      batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = L1_Charbonnier_loss()

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
    else:
        model = model.cpu()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
Example #4
Source File: main.py, from pytorch-SRDenseNet (MIT License)
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("/path/to/your/dataset/like/imagenet_50K.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads,
                                      batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = L1_Charbonnier_loss()

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
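The train() and save_checkpoint() helpers called in every example are not part of these excerpts. A minimal sketch of what they typically do is given below; the logging interval, the "checkpoint/" output directory, and the use of torch.cuda.is_available() in place of opt.cuda are assumptions, but the checkpoint layout matches the checkpoint["epoch"] / checkpoint["model"] access shown above.

import os
import torch


def train(training_data_loader, optimizer, model, criterion, epoch):
    # One pass over the HDF5-backed dataset; sketch only, details vary per project.
    model.train()
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target = batch[0], batch[1]
        if torch.cuda.is_available():  # assumption: the originals check opt.cuda instead
            input = input.cuda()
            target = target.cuda()

        loss = criterion(model(input), target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if iteration % 100 == 0:  # assumed logging interval
            print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(
                epoch, iteration, len(training_data_loader), loss.item()))


def save_checkpoint(model, epoch):
    # Store the whole model object so checkpoint["model"].state_dict() works on resume.
    model_out_path = "checkpoint/model_epoch_{}.pth".format(epoch)  # assumed output directory
    if not os.path.exists("checkpoint"):
        os.makedirs("checkpoint")
    torch.save({"epoch": epoch, "model": model}, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))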