Python torchvision.datasets.ImageFolder() Examples
The following are 30 code examples of torchvision.datasets.ImageFolder().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module torchvision.datasets, or try the search function.
Example #1
Source File: data_loader.py From transferlearning with MIT License | 10 votes |
def load_data(root_path, dir, batch_size, phase):
    """Build a DataLoader over the ImageFolder at ``root_path + dir``.

    ``phase`` selects the preprocessing: 'src' applies train-style
    augmentation, 'tar' a plain resize. Note the dataset root is built by
    plain string concatenation, so ``root_path`` must already carry its
    trailing path separator.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform_dict = {
        'src': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
        'tar': transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            normalize,
        ]),
    }
    dataset = datasets.ImageFolder(root=root_path + dir,
                                   transform=transform_dict[phase])
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=True, drop_last=False,
                                       num_workers=4)
Example #2
Source File: 48_fine_tune_hotdog.py From deep-learning-note with MIT License | 9 votes |
def train_fine_tuning(net, optimizer, batch_size=128, num_epochs=4):
    """Fine-tune ``net`` on the 'train'/'test' ImageFolder splits under ``data_dir``.

    Uses the module-level ``train_augs``/``test_augs`` transforms and
    delegates the training loop to ``utils.train``.
    """
    train_loader = DataLoader(
        ImageFolder(os.path.join(data_dir, 'train'), transform=train_augs),
        batch_size, shuffle=True)
    test_loader = DataLoader(
        ImageFolder(os.path.join(data_dir, 'test'), transform=test_augs),
        batch_size)
    criterion = torch.nn.CrossEntropyLoss()
    utils.train(train_loader, test_loader, net, criterion, optimizer,
                device, num_epochs)
Example #3
Source File: data_loader.py From transferlearning with MIT License | 8 votes |
def load_training(root_path, dir, batch_size, kwargs):
    """Training DataLoader over the ImageFolder at ``root_path + dir``.

    Applies resize + random-crop + flip augmentation; shuffles and drops
    the last incomplete batch. ``kwargs`` is forwarded to DataLoader.
    """
    augmentation = transforms.Compose([
        transforms.Resize([256, 256]),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    # NOTE(review): the root is plain string concatenation — root_path is
    # expected to end with a path separator.
    dataset = datasets.ImageFolder(root=root_path + dir, transform=augmentation)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=True, drop_last=True, **kwargs)
Example #4
Source File: dataloader.py From imagenet18_old with The Unlicense | 7 votes |
def get_loaders(traindir, valdir, sz, bs, fp16=True, val_bs=None, workers=8,
                rect_val=False, min_scale=0.08, distributed=False):
    """Build fp16-capable train/val loaders for ImageNet-style training.

    Returns ``(train_loader, val_loader, train_sampler, val_sampler)``.
    Both loaders use ``fast_collate`` and are wrapped in
    ``BatchTransformDataLoader`` which handles normalization / fp16 casting.
    """
    val_bs = val_bs or bs

    train_augs = [
        transforms.RandomResizedCrop(sz, scale=(min_scale, 1.0)),
        transforms.RandomHorizontalFlip(),
    ]
    train_ds = datasets.ImageFolder(traindir, transforms.Compose(train_augs))
    if distributed:
        train_sampler = DistributedSampler(train_ds,
                                           num_replicas=env_world_size(),
                                           rank=env_rank())
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_ds, batch_size=bs, shuffle=(train_sampler is None),
        num_workers=workers, pin_memory=True, collate_fn=fast_collate,
        sampler=train_sampler)

    # Validation set/sampler creation (incl. rectangular validation) is
    # delegated; its sampler is a batch sampler.
    val_ds, val_sampler = create_validation_set(valdir, val_bs, sz,
                                                rect_val=rect_val,
                                                distributed=distributed)
    val_loader = torch.utils.data.DataLoader(
        val_ds, num_workers=workers, pin_memory=True,
        collate_fn=fast_collate, batch_sampler=val_sampler)

    train_loader = BatchTransformDataLoader(train_loader, fp16=fp16)
    val_loader = BatchTransformDataLoader(val_loader, fp16=fp16)
    return train_loader, val_loader, train_sampler, val_sampler
Example #5
Source File: data_loader.py From transferlearning with MIT License | 7 votes |
def load_data(data_folder, batch_size, train, kwargs):
    """DataLoader over ``data_folder`` with train or test preprocessing.

    ``train`` (truthy) selects the augmenting pipeline and enables
    drop_last; either way the loader shuffles. ``kwargs`` is forwarded to
    DataLoader.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = {
        'train': transforms.Compose([
            transforms.Resize([256, 256]),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
        'test': transforms.Compose([
            transforms.Resize([224, 224]),
            transforms.ToTensor(),
            normalize,
        ]),
    }
    split = 'train' if train else 'test'
    dataset = datasets.ImageFolder(root=data_folder, transform=transform[split])
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=True, **kwargs,
                                       drop_last=bool(train))
Example #6
Source File: data_loader.py From ImageNet with MIT License | 7 votes |
def data_loader(root, batch_size=256, workers=1, pin_memory=True):
    """Build ImageNet train/val DataLoaders rooted at ``root``.

    Expects the standard 'ILSVRC2012_img_train' / 'ILSVRC2012_img_val'
    subdirectories. Returns ``(train_loader, val_loader)``.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    train_set = datasets.ImageFolder(os.path.join(root, 'ILSVRC2012_img_train'),
                                     train_transform)
    val_set = datasets.ImageFolder(os.path.join(root, 'ILSVRC2012_img_val'),
                                   val_transform)
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True,
        num_workers=workers, pin_memory=pin_memory, sampler=None)
    val_loader = torch.utils.data.DataLoader(
        val_set, batch_size=batch_size, shuffle=False,
        num_workers=workers, pin_memory=pin_memory)
    return train_loader, val_loader
Example #7
Source File: align_mtcnn.py From face-recognition with BSD 3-Clause "New" or "Revised" License | 6 votes |
def main():
    """Align every face image in the input folder with MTCNN.

    Walks an ImageFolder tree, mirrors its class structure under the output
    folder, and writes an aligned crop per image unless one already exists.
    """
    args = parse_args()
    trans = transforms.Compose([
        preprocessing.ExifOrientationNormalize(),
        transforms.Resize(1024),
    ])
    images = datasets.ImageFolder(root=args.input_folder)
    # Reverse map: label index -> class name, for building output paths.
    images.idx_to_class = {v: k for k, v in images.class_to_idx.items()}
    create_dirs(args.output_folder, images.classes)
    mtcnn = MTCNN(prewhiten=False)

    for idx, (path, y) in enumerate(images.imgs):
        print("Aligning {} {}/{} ".format(path, idx + 1, len(images)), end='')
        aligned_path = (args.output_folder + os.path.sep
                        + images.idx_to_class[y] + os.path.sep
                        + os.path.basename(path))
        if os.path.exists(aligned_path):
            print('Already aligned')
            continue
        # MTCNN saves the crop itself via save_path; it returns None when no
        # face was detected.
        img = mtcnn(img=trans(Image.open(path).convert('RGB')),
                    save_path=aligned_path)
        print("No face found" if img is None else '')
Example #8
Source File: pytorch_dvc_cnn_hvd.py From intro-to-dl with MIT License | 6 votes |
def get_train_loader(batch_size=25):
    """Horovod-aware training loader over ``datapath + '/train'``.

    Shards the dataset across workers with DistributedSampler; only rank 0
    prints progress. Returns ``(loader, sampler)``.
    """
    is_root = hvd.rank() == 0
    if is_root:
        print('Train: ', end="")
    dataset = datasets.ImageFolder(root=datapath + '/train',
                                   transform=data_transform)
    sampler = torch.utils.data.distributed.DistributedSampler(
        dataset, num_replicas=hvd.size(), rank=hvd.rank())
    loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler,
                        num_workers=4, pin_memory=True)
    if is_root:
        print('Found', len(dataset), 'images belonging to',
              len(dataset.classes), 'classes')
    return loader, sampler
Example #9
Source File: acc_under_attack.py From RobGAN with MIT License | 6 votes |
def make_dataset():
    """Build the evaluation DataLoader for the dataset named in ``opt.dataset``.

    Supports the ImageFolder-backed datasets ('imagenet',
    'dog_and_cat_64/128') and 'cifar10'; raises ValueError otherwise.
    """
    # Both branches used an identical preprocessing pipeline — build it once.
    trans = tfs.Compose([
        tfs.Resize(opt.img_width),
        tfs.ToTensor(),
        tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5]),
    ])
    if opt.dataset in ("imagenet", "dog_and_cat_64", "dog_and_cat_128"):
        data = ImageFolder(opt.root, transform=trans)
        # Deterministic order: attack accuracy is evaluated sample-by-sample.
        loader = DataLoader(data, batch_size=100, shuffle=False,
                            num_workers=opt.workers)
    elif opt.dataset == "cifar10":
        data = CIFAR10(root=opt.root, train=True, download=False,
                       transform=trans)
        loader = DataLoader(data, batch_size=100, shuffle=True,
                            num_workers=opt.workers)
    else:
        raise ValueError(f"Unknown dataset: {opt.dataset}")
    return loader
Example #10
Source File: imagenet_utils.py From BigLittleNet with Apache License 2.0 | 6 votes |
def get_imagenet_dataflow(is_train, data_dir, batch_size, augmentor,
                          workers=18, is_distributed=False):
    """Build an ImageNet DataLoader over ``data_dir`` with ``augmentor``.

    Train mode shuffles (or shards via DistributedSampler when
    ``is_distributed``); eval mode iterates in order. ``workers`` is capped
    at the machine's CPU count.
    """
    workers = min(workers, multiprocessing.cpu_count())
    # The dataset was constructed identically in both branches; only the
    # sampling policy differs, so build it once.
    dataset = datasets.ImageFolder(data_dir, augmentor)
    sampler = None
    shuffle = False
    if is_train:
        sampler = (torch.utils.data.distributed.DistributedSampler(dataset)
                   if is_distributed else None)
        # DistributedSampler shuffles internally; only shuffle when absent.
        shuffle = sampler is None
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=shuffle, num_workers=workers,
                                       pin_memory=True, sampler=sampler)
Example #11
Source File: dataloaders.py From HBONet with Apache License 2.0 | 6 votes |
def get_pytorch_train_loader(data_path, batch_size, workers=5,
                             _worker_init_fn=None, input_size=224):
    """Prefetched training loader over ``data_path + '/train'``.

    Returns ``(PrefetchedWrapper(loader), num_batches)``. ToTensor is
    intentionally absent: ``fast_collate`` consumes PIL images directly.
    """
    train_dir = os.path.join(data_path, 'train')
    augment = transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
    ])
    dataset = datasets.ImageFolder(train_dir, augment)
    sampler = (torch.utils.data.distributed.DistributedSampler(dataset)
               if torch.distributed.is_initialized() else None)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=(sampler is None),
        num_workers=workers, worker_init_fn=_worker_init_fn,
        pin_memory=True, sampler=sampler, collate_fn=fast_collate)
    return PrefetchedWrapper(loader), len(loader)
Example #12
Source File: dataloaders.py From HBONet with Apache License 2.0 | 6 votes |
def get_pytorch_val_loader(data_path, batch_size, workers=5,
                           _worker_init_fn=None, input_size=224):
    """Prefetched validation loader over ``data_path + '/val'``.

    Resizes to input_size/0.875 then center-crops (the standard ImageNet
    eval protocol). Returns ``(PrefetchedWrapper(loader), num_batches)``.
    ToTensor is intentionally absent: ``fast_collate`` handles PIL images.
    """
    val_dir = os.path.join(data_path, 'val')
    preprocess = transforms.Compose([
        transforms.Resize(int(input_size / 0.875)),
        transforms.CenterCrop(input_size),
    ])
    dataset = datasets.ImageFolder(val_dir, preprocess)
    sampler = (torch.utils.data.distributed.DistributedSampler(dataset)
               if torch.distributed.is_initialized() else None)
    loader = torch.utils.data.DataLoader(
        dataset, sampler=sampler, batch_size=batch_size, shuffle=False,
        num_workers=workers, worker_init_fn=_worker_init_fn,
        pin_memory=True, collate_fn=fast_collate)
    return PrefetchedWrapper(loader), len(loader)
Example #13
Source File: eval_mobilenet_torch.py From amc-models with Apache License 2.0 | 6 votes |
def get_dataset():
    """Build the ImageNet validation loader. Returns ``(val_loader, n_class)``."""
    # Lazy imports keep torchvision off the import path until needed.
    import torchvision.datasets as datasets
    import torchvision.transforms as transforms
    if not args.imagenet_path:
        # ValueError is more precise than the original bare Exception, and
        # remains backward compatible (handlers catching Exception still fire).
        raise ValueError('Please provide valid ImageNet path!')
    print('=> Preparing data..')
    valdir = os.path.join(args.imagenet_path, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    input_size = 224
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            # 224 / 0.875 = 256: resize then center-crop, standard eval protocol.
            transforms.Resize(int(input_size / 0.875)),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.n_worker, pin_memory=True)
    n_class = 1000
    return val_loader, n_class
Example #14
Source File: data_load.py From transferlearning with MIT License | 6 votes |
def load_data(data_folder, batch_size, phase='train', train_val_split=True, train_ratio=.8):
    """Build loader(s) over an ImageFolder at ``data_folder``.

    phase='train' with train_val_split=True returns
    ``[train_loader, val_loader]`` (random split by ``train_ratio``);
    phase='train' without splitting returns a single shuffled loader;
    any other phase returns an in-order test loader.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform_dict = {
        'train': transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
        'test': transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            normalize,
        ]),
    }
    dataset = datasets.ImageFolder(root=data_folder,
                                   transform=transform_dict[phase])
    if phase != 'train':
        return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                           shuffle=False, drop_last=False,
                                           num_workers=4)
    if not train_val_split:
        return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                           shuffle=True, drop_last=True,
                                           num_workers=4)
    train_size = int(train_ratio * len(dataset))
    val_size = len(dataset) - train_size
    data_train, data_val = torch.utils.data.random_split(
        dataset, [train_size, val_size])
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size,
                                               shuffle=True, drop_last=True,
                                               num_workers=4)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size,
                                             shuffle=False, drop_last=False,
                                             num_workers=4)
    return [train_loader, val_loader]

## Below are for ImageCLEF datasets
Example #15
Source File: data.py From MobileNetV2-pytorch with MIT License | 5 votes |
def get_loaders(dataroot, val_batch_size, train_batch_size, input_size, workers):
    """Build train/val DataLoaders over 'train'/'val' subfolders of ``dataroot``.

    Transforms come from the module-level ``get_transform`` helper
    (augmenting for train, deterministic for val).
    """
    train_set = datasets.ImageFolder(
        root=os.path.join(dataroot, 'train'),
        transform=get_transform(input_size=input_size))
    val_set = datasets.ImageFolder(
        root=os.path.join(dataroot, 'val'),
        transform=get_transform(False, input_size))
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=train_batch_size, shuffle=True,
        num_workers=workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_set, batch_size=val_batch_size, shuffle=False,
        num_workers=workers, pin_memory=True)
    return train_loader, val_loader
Example #16
Source File: data.py From cyclegan-qp with MIT License | 5 votes |
def load_data():
    """Build shuffled DataLoaders for the CycleGAN trainA/trainB image folders.

    Both domains share one augmenting transform; returns ``(X_set, Y_set)``.
    """
    shared_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Resize(LOAD_DIM),
        transforms.RandomCrop(CROP_DIM),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3),
    ])
    X_folder = dsets.ImageFolder(DATASET_PATH["trainA"], transform=shared_transform)
    Y_folder = dsets.ImageFolder(DATASET_PATH["trainB"], transform=shared_transform)
    X_set = DataLoader(X_folder, batch_size=BATCH_SIZE, shuffle=True)
    Y_set = DataLoader(Y_folder, batch_size=BATCH_SIZE, shuffle=True)
    # Report how many batches each domain yields.
    print("Dataset Details")
    print(f"X_set batches: {len(X_set)}")
    print(f"Y_set batches: {len(Y_set)}")
    print("")
    return X_set, Y_set
Example #17
Source File: prepare_data.py From MetaFGNet with MIT License | 5 votes |
def generate_dataloader(args):
    """Build train/val DataLoaders over the splited_image tree under ``args.data_path``.

    Materializes the train/val split on first use. Returns
    ``(train_loader, val_loader)``.
    """
    traindir = os.path.join(args.data_path, 'splited_image/train')
    valdir = os.path.join(args.data_path, 'splited_image/val')
    if not os.path.isdir(traindir):
        # The split has not been created yet — generate it from the raw data.
        split_train_test_images(args.data_path)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_set = datasets.ImageFolder(traindir, transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop(224),
        # transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]))
    val_set = datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]))
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, sampler=None)
    val_loader = torch.utils.data.DataLoader(
        val_set, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    return train_loader, val_loader
Example #18
Source File: masks.py From pixel-constrained-cnn-pytorch with Apache License 2.0 | 5 votes |
def __init__(self, img_size, mask_descriptor):
    """Configure mask generation.

    img_size is (channels, height, width); mask_descriptor is a
    ``(mask_type, mask_attribute)`` pair. For 'random_blob_cache' the
    attribute is ``(blob_image_folder, batch_size)`` and a grayscale
    DataLoader over that folder is prepared.
    """
    self.img_size = img_size
    # Pixels per channel plane: height * width.
    self.num_pixels = img_size[1] * img_size[2]
    self.mask_type, self.mask_attribute = mask_descriptor
    if self.mask_type == 'random_blob_cache':
        blob_transform = transforms.Compose([transforms.Grayscale(),
                                             transforms.ToTensor()])
        blob_dset = datasets.ImageFolder(self.mask_attribute[0],
                                         transform=blob_transform)
        self.data_loader = DataLoader(blob_dset,
                                      batch_size=self.mask_attribute[1],
                                      shuffle=True)
Example #19
Source File: eval.py From PeleeNet with Apache License 2.0 | 5 votes |
def main():
    """Evaluate the selected model on the ImageNet validation split.

    Parses CLI args, builds the val loader, infers the class count from the
    folder structure, then dispatches to the torch or caffe validator.
    """
    global args, best_prec1
    args = parser.parse_args()
    print('args:', args)

    # Validation data.
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
        # Resize with a 32px margin before the center crop.
        transforms.Resize(args.input_dim + 32),
        transforms.CenterCrop(args.input_dim),
        transforms.ToTensor(),
        normalize,
    ]))
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    num_classes = len(val_dataset.classes)
    print('Total classes: ', num_classes)

    print("=> creating {} model '{}'".format(args.engine, args.arch))
    model = create_model(num_classes, args.engine)
    if args.engine == 'torch':
        validate_torch(val_loader, model)
    else:
        validate_caffe(val_loader, model)
Example #20
Source File: utils.py From pytorch-Conditional-image-to-image-translation with MIT License | 5 votes |
def data_load(path, subfolder, transform, batch_size, shuffle=False, drop_last=False):
    """Return a DataLoader restricted to one class subfolder of ``path``.

    Loads the whole ImageFolder, then keeps only the samples whose label
    matches ``subfolder``.
    """
    dset = datasets.ImageFolder(path, transform)
    keep = dset.class_to_idx[subfolder]
    # Filter in place via slice assignment: ImageFolder.imgs and .samples
    # alias the same list, so mutating it (like the original's del loop did)
    # keeps both views consistent. This replaces the original O(n^2)
    # delete-while-iterating index juggling with a single O(n) pass.
    dset.imgs[:] = [item for item in dset.imgs if item[1] == keep]
    return torch.utils.data.DataLoader(dset, batch_size=batch_size,
                                       shuffle=shuffle, drop_last=drop_last)
Example #21
Source File: pytorch_gtsrb_cnn.py From intro-to-dl with MIT License | 5 votes |
def get_train_loader(batch_size=50):
    """Shuffled training loader over ``datapath + '/train'``.

    Uses the module-level ``data_transform`` and prints the dataset summary.
    """
    print('Train: ', end="")
    dataset = datasets.ImageFolder(root=datapath + '/train',
                                   transform=data_transform)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                        num_workers=4)
    print('Found', len(dataset), 'images belonging to',
          len(dataset.classes), 'classes')
    return loader
Example #22
Source File: celeba_fid.py From misgan with MIT License | 5 votes |
def complete_data(self):
    """Build the un-masked CelebA loader used for FID statistics.

    Returns ``(data_loader, num_images)``. Normalization is deliberately
    left out (commented) — FID is computed on [0, 1] tensors here.
    """
    preprocess = transforms.Compose([
        transforms.CenterCrop(108),
        transforms.Resize(size=64, interpolation=Image.BICUBIC),
        transforms.ToTensor(),
        # transforms.Normalize(mean=(.5, .5, .5), std=(.5, .5, .5)),
    ])
    dataset = datasets.ImageFolder('celeba', preprocess)
    loader = DataLoader(dataset, batch_size=self.batch_size,
                        num_workers=self.workers)
    return loader, len(dataset)
Example #23
Source File: imagenet.py From nn_tools with MIT License | 5 votes |
def Imagenet_LMDB_generate(imagenet_dir, output_dir, make_val=False, make_train=False):
    """Convert raw ImageNet 'train'/'val' ImageFolder trees into LMDB datasets.

    ``imagenet_dir`` must contain 'train' and/or 'val' directories with the
    usual 1000 class subfolders of raw JPEGs. Images are stored as uint8
    CHW arrays; labels pass through unchanged.
    """
    train_name = 'imagenet_train_lmdb'
    val_name = 'imagenet_val_lmdb'

    def target_trans(target):
        # Labels are written as-is.
        return target

    if make_val:
        val_lmdb = lmdb_datasets.LMDB_generator(osp.join(output_dir, val_name))

        def trans_val_data(img):
            # Renamed from the misleading `dir` (it receives a PIL image).
            # transforms.Scale was the deprecated alias of Resize and has
            # been removed from current torchvision — use Resize.
            tensor = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
            ])(img)
            return (tensor.numpy() * 255).astype(np.uint8)

        val = datasets.ImageFolder(osp.join(imagenet_dir, 'val'),
                                   trans_val_data, target_trans)
        val_lmdb.write_classification_lmdb(val, num_per_dataset=DATASET_SIZE)

    if make_train:
        train_lmdb = lmdb_datasets.LMDB_generator(osp.join(output_dir, train_name))

        def trans_train_data(img):
            tensor = transforms.Compose([
                transforms.Resize(256),
                transforms.ToTensor(),
            ])(img)
            return (tensor.numpy() * 255).astype(np.uint8)

        train = datasets.ImageFolder(osp.join(imagenet_dir, 'train'),
                                     trans_train_data, target_trans)
        # NOTE(review): np.random.permutation converts train.imgs into a
        # numpy array; the LMDB writer apparently tolerates this — confirm.
        train.imgs = np.random.permutation(train.imgs)
        train_lmdb.write_classification_lmdb(train, num_per_dataset=DATASET_SIZE,
                                             write_shape=True)
Example #24
Source File: data.py From ganzo with Apache License 2.0 | 5 votes |
def __init__(self, options):
    """Build the training DataLoader named by ``options.dataset``.

    Known torchvision datasets (mnist, emnist, fashion-mnist, lsun,
    cifar10, cifar100) are downloaded/loaded directly; any other name is
    treated as an ImageFolder rooted at ``options.data_dir``.
    """
    transform_steps = []
    if options.image_size is not None:
        transform_steps.append(
            transforms.Resize((options.image_size, options.image_size)))
        # transform_steps.append(transforms.CenterCrop(options.image_size))
    transform_steps.append(transforms.ToTensor())
    if options.image_colors == 1:
        transform_steps.append(transforms.Normalize(mean=[0.5], std=[0.5]))
    elif options.image_colors == 3:
        transform_steps.append(
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
    transform = transforms.Compose(transform_steps)

    name = options.dataset
    if name == 'mnist':
        dataset = datasets.MNIST(options.data_dir, train=True, download=True,
                                 transform=transform)
    elif name == 'emnist':
        # Updated URL from https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist
        datasets.EMNIST.url = 'https://cloudstor.aarnet.edu.au/plus/s/ZNmuFiuQTqZlu9W/download'
        dataset = datasets.EMNIST(options.data_dir, split=options.image_class,
                                  train=True, download=True, transform=transform)
    elif name == 'fashion-mnist':
        dataset = datasets.FashionMNIST(options.data_dir, train=True,
                                        download=True, transform=transform)
    elif name == 'lsun':
        dataset = datasets.LSUN(options.data_dir,
                                classes=[options.image_class + '_train'],
                                transform=transform)
    elif name == 'cifar10':
        dataset = datasets.CIFAR10(options.data_dir, train=True, download=True,
                                   transform=transform)
    elif name == 'cifar100':
        dataset = datasets.CIFAR100(options.data_dir, train=True, download=True,
                                    transform=transform)
    else:
        # Fallback: class-per-subfolder image tree.
        dataset = datasets.ImageFolder(root=options.data_dir, transform=transform)

    self.dataloader = DataLoader(
        dataset,
        batch_size=options.batch_size,
        num_workers=options.loader_workers,
        shuffle=True,
        drop_last=True,
        pin_memory=options.pin_memory,
    )
    self.iterator = iter(self.dataloader)
Example #25
Source File: utils.py From self-attention-GAN-pytorch with MIT License | 5 votes |
def make_dataloader(batch_size, dataset_type, data_path, shuffle=True, drop_last=True,
                    dataloader_args=None,
                    resize=True, imsize=128, centercrop=False, centercrop_size=128,
                    totensor=True,
                    normalize=False, norm_mean=(0.5, 0.5, 0.5), norm_std=(0.5, 0.5, 0.5)):
    """Build a DataLoader for the named dataset type.

    Supported types: 'folder'/'imagenet'/'lfw' (ImageFolder tree), 'lsun',
    'cifar10' (auto-download), 'fake'. Returns ``(dataloader, num_of_classes)``.
    ``dataloader_args`` are extra keyword args forwarded to DataLoader.
    """
    # Fixed mutable-default-argument bug: the original `dataloader_args={}`
    # dict was shared across all calls. None defaults to a fresh empty dict.
    if dataloader_args is None:
        dataloader_args = {}
    # Make transform
    transform = make_transform(resize=resize, imsize=imsize,
                               centercrop=centercrop, centercrop_size=centercrop_size,
                               totensor=totensor,
                               normalize=normalize, norm_mean=norm_mean, norm_std=norm_std)
    # Make dataset
    if dataset_type in ['folder', 'imagenet', 'lfw']:
        # folder dataset
        assert os.path.exists(data_path), "data_path does not exist! Given: " + data_path
        dataset = dset.ImageFolder(root=data_path, transform=transform)
    elif dataset_type == 'lsun':
        assert os.path.exists(data_path), "data_path does not exist! Given: " + data_path
        dataset = dset.LSUN(root=data_path, classes=['bedroom_train'], transform=transform)
    elif dataset_type == 'cifar10':
        if not os.path.exists(data_path):
            print("data_path does not exist! Given: {}\nDownloading CIFAR10 dataset...".format(data_path))
        dataset = dset.CIFAR10(root=data_path, download=True, transform=transform)
    elif dataset_type == 'fake':
        dataset = dset.FakeData(image_size=(3, centercrop_size, centercrop_size),
                                transform=transforms.ToTensor())
    # NOTE(review): an unknown dataset_type leaves `dataset` unbound and the
    # assert below raises NameError — preserved from the original.
    assert dataset
    num_of_classes = len(dataset.classes)
    print("Data found! # of images =", len(dataset),
          ", # of classes =", num_of_classes, ", classes:", dataset.classes)
    # Make dataloader from dataset
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                             shuffle=shuffle, drop_last=drop_last,
                                             **dataloader_args)
    return dataloader, num_of_classes
Example #26
Source File: test_first_block.py From kinetics_i3d_pytorch with MIT License | 5 votes |
def test_input_block():
    """Check the inflated 3D DenseNet stem matches the 2D stem frame-by-frame.

    Each 2D image is repeated along time; the 3D output must equal the 2D
    output replicated over the temporal axis (up to numerical tolerance).
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    dataset = datasets.ImageFolder(
        '/sequoia/data1/yhasson/datasets/test-dataset',
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    densenet = torchvision.models.densenet121(pretrained=True)
    features = densenet.features
    # Original 2D stem: conv -> bn -> relu -> pool.
    seq2d = torch.nn.Sequential(
        features.conv0, features.norm0, features.relu0, features.pool0)
    # Inflated 3D counterpart of the same stem.
    seq3d = torch.nn.Sequential(
        inflate.inflate_conv(features.conv0, 3),
        inflate.inflate_batch_norm(features.norm0),
        features.relu0,
        inflate.inflate_pool(features.pool0, 1))

    loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=False)
    frame_nb = 4
    for i, (input_2d, target) in enumerate(loader):
        target = target.cuda()
        target_var = torch.autograd.Variable(target)
        input_2d_var = torch.autograd.Variable(input_2d)
        out2d = seq2d(input_2d_var)
        time_pad = torch.nn.ReplicationPad3d((0, 0, 0, 0, 1, 1))
        # Fake a clip by repeating the image along the temporal dimension.
        input_3d = input_2d.unsqueeze(2).repeat(1, 1, frame_nb, 1, 1)
        input_3d_var = time_pad(input_3d)
        out3d = seq3d(input_3d_var)
        expected_out_3d = out2d.data.unsqueeze(2).repeat(1, 1, frame_nb, 1, 1)
        out_diff = expected_out_3d - out3d.data
        print(out_diff.max())
        assert out_diff.max() < 0.0001
Example #27
Source File: i3d_pt_profiling.py From kinetics_i3d_pytorch with MIT License | 5 votes |
def run_profile(args):
    """Profile one training pass of the pytorch I3D model on a dummy dataset.

    Each image from the ImageFolder is duplicated along the temporal
    dimension to act as a proxy spatio-temporal video input.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    dataset = datasets.ImageFolder(
        'data/dummy-dataset',
        transforms.Compose([
            transforms.CenterCrop(args.im_size),
            transforms.ToTensor(),
            normalize,
        ]))

    batch_size = 2
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=False)

    # Load pretrained RGB weights, then switch to train mode on GPU.
    i3nception_pt = I3D(num_classes=400)
    i3nception_pt.eval()
    i3nception_pt.load_state_dict(torch.load(args.rgb_weights_path))
    i3nception_pt.train()
    i3nception_pt.cuda()

    l1_loss = torch.nn.L1Loss()
    sgd = torch.optim.SGD(i3nception_pt.parameters(), lr=0.001, momentum=0.9)
    run(i3nception_pt, loader, l1_loss, sgd, frame_nb=args.frame_nb)
Example #28
Source File: augmented_cycle_gan.py From torchsupport with MIT License | 5 votes |
def __init__(self, path, mode=0):
    """Wrap an ImageFolder, keeping only sample indices whose label == ``mode``."""
    self.data = ImageFolder(path)
    kept = []
    for position, (_, label) in enumerate(self.data.samples):
        if label == mode:
            kept.append(position)
    self.indices = kept
Example #29
Source File: pix2pix.py From torchsupport with MIT License | 5 votes |
def __init__(self, path, mode=0):
    """Restrict an ImageFolder to the samples labelled ``mode``.

    Stores the full dataset plus the positions of the matching samples.
    """
    self.data = ImageFolder(path)
    self.indices = [position
                    for position, (_, label) in enumerate(self.data.samples)
                    if label == mode]
Example #30
Source File: cycle_gan.py From torchsupport with MIT License | 5 votes |
def __init__(self, path, mode=0):
    """Index one class ('mode' label) of an ImageFolder for later lookup."""
    self.data = ImageFolder(path)
    matches = (pos for pos, entry in enumerate(self.data.samples)
               if entry[1] == mode)
    self.indices = list(matches)