Python torchvision.transforms.RandomHorizontalFlip() Examples
The following are 30 code examples of torchvision.transforms.RandomHorizontalFlip().
RandomHorizontalFlip(p) flips its input horizontally with probability p (0.5 by default) and is almost always composed with other transforms as training-time data augmentation. Each example below notes its source file, project, and license.
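Before the project examples, here is a minimal self-contained sketch of the transform in isolation. The image path is a placeholder, and p=1.0 is used only to make the flip deterministic for illustration.

from PIL import Image
from torchvision import transforms

# placeholder path -- substitute any RGB image
img = Image.open('example.jpg').convert('RGB')

# p is the flip probability (default 0.5); p=1.0 flips every time,
# which makes the effect easy to inspect
flip = transforms.RandomHorizontalFlip(p=1.0)
flipped = flip(img)

# in practice the transform is composed with others, as in the examples below
augment = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),   # flips with probability 0.5
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
tensor = augment(img)                    # shape: [3, 224, 224]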
Example #1
Source File: data_loader.py From transferlearning with MIT License | 10 votes |
def load_data(root_path, dir, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.RandomResizedCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = datasets.ImageFolder(root=root_path + dir, transform=transform_dict[phase])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True,
                                              drop_last=False, num_workers=4)
    return data_loader
Example #2
Source File: data_loader.py From transferlearning with MIT License | 8 votes |
def load_training(root_path, dir, batch_size, kwargs):
    transform = transforms.Compose(
        [transforms.Resize([256, 256]),
         transforms.RandomCrop(224),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor()])
    data = datasets.ImageFolder(root=root_path + dir, transform=transform)
    train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size,
                                               shuffle=True, drop_last=True, **kwargs)
    return train_loader
Example #3
Source File: data_loader.py From ImageNet with MIT License | 7 votes |
def data_loader(root, batch_size=256, workers=1, pin_memory=True):
    traindir = os.path.join(root, 'ILSVRC2012_img_train')
    valdir = os.path.join(root, 'ILSVRC2012_img_val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize
        ])
    )
    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize
        ])
    )

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=workers,
        pin_memory=pin_memory,
        sampler=None
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=workers,
        pin_memory=pin_memory
    )
    return train_loader, val_loader
Example #4
Source File: train.py From pytorch-multigpu with MIT License | 7 votes |
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True,
                            transform=transforms_train)
    train_loader = DataLoader(dataset_train, batch_size=args.batch_size,
                              shuffle=True, num_workers=args.num_worker)

    # CIFAR-10 has 10 classes
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')
    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr,
    #                       momentum=0.9, weight_decay=1e-4)

    train(net, criterion, optimizer, train_loader, device)
Example #5
Source File: data_loader.py From self-supervised-da with MIT License | 7 votes |
def get_rot_train_transformers(args):
    size = args.img_transform.random_resize_crop.size
    scale = args.img_transform.random_resize_crop.scale
    img_tr = [transforms.RandomResizedCrop((int(size[0]), int(size[1])), (scale[0], scale[1]))]
    if args.img_transform.random_horiz_flip > 0.0:
        img_tr.append(transforms.RandomHorizontalFlip(args.img_transform.random_horiz_flip))
    if args.img_transform.jitter > 0.0:
        # note: the original read args.jitter for saturation/hue, which is
        # inconsistent with the surrounding accesses; args.img_transform.jitter is used here
        img_tr.append(transforms.ColorJitter(
            brightness=args.img_transform.jitter,
            contrast=args.img_transform.jitter,
            saturation=args.img_transform.jitter,
            hue=min(0.5, args.img_transform.jitter)))

    mean = args.normalize.mean
    std = args.normalize.std
    img_tr += [transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]

    return transforms.Compose(img_tr)
Example #6
Source File: data_loader.py From transferlearning with MIT License | 7 votes |
def load_train(root_path, dir, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.RandomResizedCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = datasets.ImageFolder(root=root_path + dir, transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size,
                                               shuffle=True, drop_last=False, num_workers=4)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size,
                                             shuffle=True, drop_last=False, num_workers=4)
    return train_loader, val_loader
Example #7
Source File: data_loader.py From transferlearning with MIT License | 7 votes |
def load_data(data_folder, batch_size, train, kwargs):
    transform = {
        'train': transforms.Compose(
            [transforms.Resize([256, 256]),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])]),
        'test': transforms.Compose(
            [transforms.Resize([224, 224]),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])])
    }
    data = datasets.ImageFolder(root=data_folder,
                                transform=transform['train' if train else 'test'])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True,
                                              drop_last=True if train else False, **kwargs)
    return data_loader
Example #8
Source File: data_load.py From transferlearning with MIT License | 6 votes |
def load_imageclef_test(root_path, domain, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = ImageCLEF(root_dir=root_path, domain=domain, transform=transform_dict[phase])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True,
                                              drop_last=False, num_workers=4)
    return data_loader
Example #9
Source File: imsitu_loader.py From verb-attributes with MIT License | 6 votes |
def transform(is_train=True, normalize=True):
    """
    Returns a transform object
    """
    filters = []
    filters.append(Scale(256))  # Scale is the pre-0.2 torchvision name for Resize

    if is_train:
        filters.append(RandomCrop(224))
    else:
        filters.append(CenterCrop(224))

    if is_train:
        filters.append(RandomHorizontalFlip())

    filters.append(ToTensor())
    if normalize:
        filters.append(Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]))
    return Compose(filters)
Example #10
Source File: utils.py From NAO_pytorch with GNU General Public License v3.0 | 6 votes |
def _data_transforms_cifar10(cutout_size):
    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    if cutout_size is not None:
        train_transform.transforms.append(Cutout(cutout_size))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    return train_transform, valid_transform
Example #11
Source File: imagenet.py From nasnet-pytorch with MIT License | 6 votes |
def preprocess(self):
    if self.train:
        return transforms.Compose([
            transforms.RandomResizedCrop(self.image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
            transforms.ToTensor(),
            transforms.Normalize(self.mean, self.std),
        ])
    else:
        return transforms.Compose([
            transforms.Resize((int(self.image_size / 0.875), int(self.image_size / 0.875))),
            transforms.CenterCrop(self.image_size),
            transforms.ToTensor(),
            transforms.Normalize(self.mean, self.std),
        ])
Example #12
Source File: bbox_aug.py From DetNAS with MIT License | 6 votes |
def im_detect_bbox_hflip(model, images, target_scale, target_max_size, device):
    """
    Performs bbox detection on the horizontally flipped image.
    Function signature is the same as for im_detect_bbox.
    """
    transform = TT.Compose([
        T.Resize(target_scale, target_max_size),
        TT.RandomHorizontalFlip(1.0),
        TT.ToTensor(),
        T.Normalize(
            mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD,
            to_bgr255=cfg.INPUT.TO_BGR255
        )
    ])
    images = [transform(image) for image in images]
    images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY)
    boxlists = model(images.to(device))

    # Invert the detections computed on the flipped image
    boxlists_inv = [boxlist.transpose(0) for boxlist in boxlists]
    return boxlists_inv
Example #13
Source File: data_loader.py From real-world-sr with MIT License | 6 votes |
def __init__(self, noisy_dir, crop_size, upscale_factor=4, cropped=False,
             flips=False, rotations=False, **kwargs):
    super(TrainDataset, self).__init__()
    # get all directories used for training
    if isinstance(noisy_dir, str):
        noisy_dir = [noisy_dir]
    self.files = []
    for n_dir in noisy_dir:
        self.files += [join(n_dir, x) for x in listdir(n_dir) if utils.is_image_file(x)]
    # initialize image transformations and variables
    self.input_transform = T.Compose([
        T.RandomVerticalFlip(0.5 if flips else 0.0),
        T.RandomHorizontalFlip(0.5 if flips else 0.0),
        T.RandomCrop(crop_size)
    ])
    self.crop_transform = T.RandomCrop(crop_size // upscale_factor)
    self.upscale_factor = upscale_factor
    self.cropped = cropped
    self.rotations = rotations
Example #14
Source File: image_folder.py From DGP with MIT License | 6 votes |
def __init__(self, path, classes, stage='train'):
    self.data = []
    for i, c in enumerate(classes):
        cls_path = osp.join(path, c)
        images = os.listdir(cls_path)
        for image in images:
            self.data.append((osp.join(cls_path, image), i))

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if stage == 'train':
        self.transforms = transforms.Compose([transforms.RandomResizedCrop(224),
                                              transforms.RandomHorizontalFlip(),
                                              transforms.ToTensor(),
                                              normalize])
    if stage == 'test':
        self.transforms = transforms.Compose([transforms.Resize(256),
                                              transforms.CenterCrop(224),
                                              transforms.ToTensor(),
                                              normalize])
Example #15
Source File: dataset.py From sepconv with MIT License | 6 votes |
def __init__(self, patches, use_cache, augment_data):
    super(PatchDataset, self).__init__()
    self.patches = patches
    self.crop = CenterCrop(config.CROP_SIZE)
    if augment_data:
        self.random_transforms = [RandomRotation((90, 90)), RandomVerticalFlip(1.0),
                                  RandomHorizontalFlip(1.0), (lambda x: x)]
        self.get_aug_transform = (lambda: random.sample(self.random_transforms, 1)[0])
    else:
        # Transform does nothing. Not sure if horrible or very elegant...
        self.get_aug_transform = (lambda: (lambda x: x))
    if use_cache:
        self.load_patch = data_manager.load_cached_patch
    else:
        self.load_patch = data_manager.load_patch
    print('Dataset ready with {} tuples.'.format(len(patches)))
Example #16
Source File: data_load.py From transferlearning with MIT License | 6 votes |
def load_imageclef_train(root_path, domain, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = ImageCLEF(root_dir=root_path, domain=domain, transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size,
                                               shuffle=True, drop_last=False, num_workers=4)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size,
                                             shuffle=True, drop_last=False, num_workers=4)
    return train_loader, val_loader
Example #17
Source File: data_load.py From transferlearning with MIT License | 6 votes |
def load_data(data_folder, batch_size, phase='train', train_val_split=True, train_ratio=.8):
    transform_dict = {
        'train': transforms.Compose(
            [transforms.Resize(256),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'test': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}

    data = datasets.ImageFolder(root=data_folder, transform=transform_dict[phase])
    if phase == 'train':
        if train_val_split:
            train_size = int(train_ratio * len(data))
            test_size = len(data) - train_size
            data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
            train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size,
                                                       shuffle=True, drop_last=True,
                                                       num_workers=4)
            val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size,
                                                     shuffle=False, drop_last=False,
                                                     num_workers=4)
            return [train_loader, val_loader]
        else:
            train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size,
                                                       shuffle=True, drop_last=True,
                                                       num_workers=4)
            return train_loader
    else:
        test_loader = torch.utils.data.DataLoader(data, batch_size=batch_size,
                                                  shuffle=False, drop_last=False,
                                                  num_workers=4)
        return test_loader

## Below are for ImageCLEF datasets
Example #18
Source File: train.py From pytorch_deephash with MIT License | 6 votes |
def init_dataset():
    transform_train = transforms.Compose(
        [transforms.Resize(256),
         transforms.RandomCrop(227),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor(),
         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    transform_test = transforms.Compose(
        [transforms.Resize(227),
         transforms.ToTensor(),
         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    trainset = datasets.CIFAR10(root='./data', train=True, download=True,
                                transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
                                              shuffle=True, num_workers=0)
    testset = datasets.CIFAR10(root='./data', train=False, download=True,
                               transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100,
                                             shuffle=True, num_workers=0)
    return trainloader, testloader
Example #19
Source File: data_loader.py From self-supervised-da with MIT License | 6 votes |
def get_jig_train_transformers(args):
    size = args.img_transform.random_resize_crop.size
    scale = args.img_transform.random_resize_crop.scale
    img_tr = [transforms.RandomResizedCrop((int(size[0]), int(size[1])), (scale[0], scale[1]))]
    if args.img_transform.random_horiz_flip > 0.0:
        img_tr.append(transforms.RandomHorizontalFlip(args.img_transform.random_horiz_flip))
    if args.img_transform.jitter > 0.0:
        # note: the original read args.jitter for saturation/hue; args.img_transform.jitter
        # is used here for consistency with the surrounding accesses
        img_tr.append(transforms.ColorJitter(
            brightness=args.img_transform.jitter,
            contrast=args.img_transform.jitter,
            saturation=args.img_transform.jitter,
            hue=min(0.5, args.img_transform.jitter)))

    tile_tr = []
    if args.jig_transform.tile_random_grayscale:
        tile_tr.append(transforms.RandomGrayscale(args.jig_transform.tile_random_grayscale))
    mean = args.normalize.mean
    std = args.normalize.std
    tile_tr = tile_tr + [transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]

    return transforms.Compose(img_tr), transforms.Compose(tile_tr)
Example #20
Source File: outlier.py From sgd-influence with MIT License | 6 votes |
def cifar10():
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = MyCIFAR10.CIFAR10(root='./data', train=True, download=True,
                                 transform=transform_train, seed=0)
    valset = MyCIFAR10.CIFAR10(root='./data', train=True, download=True,
                               transform=transform_test, seed=0)
    testset = MyCIFAR10.CIFAR10(root='./data', train=False, download=True,
                                transform=transform_test, seed=0)
    net_func = MyNet.CifarAE
    return net_func, trainset, valset, testset
Example #21
Source File: train.py From sgd-influence with MIT License | 6 votes |
def cifar10():
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = MyCIFAR10.CIFAR10(root='./data', train=True, download=True,
                                 transform=transform_train, seed=0)
    valset = MyCIFAR10.CIFAR10(root='./data', train=True, download=True,
                               transform=transform_test, seed=0)
    testset = MyCIFAR10.CIFAR10(root='./data', train=False, download=True,
                                transform=transform_test, seed=0)
    net_func = MyNet.CifarNet
    return net_func, trainset, valset, testset
Example #22
Source File: cifar10_cls_dataset.py From imgclsmob with MIT License | 6 votes |
def cifar10_train_transform(ds_metainfo,
                            mean_rgb=(0.4914, 0.4822, 0.4465),
                            std_rgb=(0.2023, 0.1994, 0.2010),
                            jitter_param=0.4):
    assert (ds_metainfo is not None)
    assert (ds_metainfo.input_image_size[0] == 32)
    return transforms.Compose([
        transforms.RandomCrop(
            size=32,
            padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(
            brightness=jitter_param,
            contrast=jitter_param,
            saturation=jitter_param),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb)
    ])
Example #23
Source File: transform.py From metric-learning-divide-and-conquer with GNU Lesser General Public License v3.0 | 6 votes |
def make(sz_resize=256, sz_crop=227, mean=[104, 117, 128],
         std=[1, 1, 1], rgb_to_bgr=True, is_train=True,
         intensity_scale=None):
    return transforms.Compose([
        RGBToBGR() if rgb_to_bgr else Identity(),
        transforms.RandomResizedCrop(sz_crop) if is_train else Identity(),
        transforms.Resize(sz_resize) if not is_train else Identity(),
        transforms.CenterCrop(sz_crop) if not is_train else Identity(),
        transforms.RandomHorizontalFlip() if is_train else Identity(),
        transforms.ToTensor(),
        ScaleIntensities(*intensity_scale) if intensity_scale is not None else Identity(),
        transforms.Normalize(
            mean=mean,
            std=std,
        )
    ])
Example #24
Source File: util.py From ClassyVision with MIT License | 6 votes |
def __init__(
    self,
    crop_size: int = ImagenetConstants.CROP_SIZE,
    mean: List[float] = ImagenetConstants.MEAN,
    std: List[float] = ImagenetConstants.STD,
):
    """The constructor method of ImagenetAugmentTransform class.

    Args:
        crop_size: expected output size per dimension after random cropping
        mean: a 3-tuple denoting the pixel RGB mean
        std: a 3-tuple denoting the pixel RGB standard deviation
    """
    self.transform = transforms.Compose(
        [
            transforms.RandomResizedCrop(crop_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ]
    )
Example #25
Source File: bbox_aug.py From Clothing-Detection with GNU General Public License v3.0 | 6 votes |
def im_detect_bbox_hflip(model, images, target_scale, target_max_size, device):
    """
    Performs bbox detection on the horizontally flipped image.
    Function signature is the same as for im_detect_bbox.
    """
    transform = TT.Compose([
        T.Resize(target_scale, target_max_size),
        TT.RandomHorizontalFlip(1.0),
        TT.ToTensor(),
        T.Normalize(
            mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD,
            to_bgr255=cfg.INPUT.TO_BGR255
        )
    ])
    images = [transform(image) for image in images]
    images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY)
    boxlists = model(images.to(device))

    # Invert the detections computed on the flipped image
    boxlists_inv = [boxlist.transpose(0) for boxlist in boxlists]
    return boxlists_inv
Example #26
Source File: base_dataset.py From Recycle-GAN with MIT License | 6 votes |
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        # Scale is the pre-0.2 torchvision name for Resize
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Example #27
Source File: data.py From ResNet50-Pytorch-Face-Recognition with MIT License | 6 votes |
def __init__(self, root_path="CACD2000/", label_path="data/label.npy",
             name_path="data/name.npy", train_mode="train"):
    """
    Initialize some variables
    Load labels & names
    Define transforms
    """
    self.root_path = root_path
    self.image_labels = np.load(label_path)
    self.image_names = np.load(name_path)
    self.train_mode = train_mode
    self.transform = {
        'train': transforms.Compose([
            transforms.Resize(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # transforms.Normalize([0.656, 0.487, 0.411], [1., 1., 1.])
        ]),
        'val': transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            # transforms.Normalize([0.656, 0.487, 0.411], [1., 1., 1.])
        ]),
    }
Example #28
Source File: sun_dataset.py From dogTorch with MIT License | 5 votes |
def __init__(self, args, train=True):
    self.root_dir = args.data
    root_dir = self.root_dir
    if train:
        self.data_set_list = os.path.join(root_dir, args.trainset_image_list)
    else:
        self.data_set_list = os.path.join(root_dir, args.testset_image_list)

    self.categ_dict = get_class_names(os.path.join(root_dir, 'ClassName.txt'))
    self.data_set_list = parse_file(self.data_set_list, self.categ_dict)
    self.args = args
    self.read_features = args.read_features
    self.features_dir = args.features_dir
    if train:
        # RandomSizedCrop and Scale are the pre-0.2 torchvision names for
        # RandomResizedCrop and Resize
        self.transform = transforms.Compose([
            transforms.RandomSizedCrop(args.image_size),
            transforms.RandomHorizontalFlip(),
            transforms.Scale((args.image_size, args.image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
    else:
        self.transform = transforms.Compose([
            transforms.Scale((args.image_size, args.image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
Example #29
Source File: datasets.py From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License | 5 votes |
def cifar100(n_labels, data_root='./data-local/cifar100/'):
    channel_stats = dict(mean=[0.5071, 0.4867, 0.4408],
                         std=[0.2675, 0.2565, 0.2761])
    train_transform = transforms.Compose([
        transforms.Pad(2, padding_mode='reflect'),
        transforms.ColorJitter(brightness=0.4, contrast=0.4,
                               saturation=0.4, hue=0.1),
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    trainset = tv.datasets.CIFAR100(data_root, train=True, download=True,
                                    transform=train_transform)
    evalset = tv.datasets.CIFAR100(data_root, train=False, download=True,
                                   transform=eval_transform)
    num_classes = 100
    label_per_class = n_labels // num_classes
    labeled_idxs, unlabed_idxs = split_relabel_data(
        np.array(trainset.train_labels),
        trainset.train_labels,
        label_per_class,
        num_classes)
    return {
        'trainset': trainset,
        'evalset': evalset,
        'labeled_idxs': labeled_idxs,
        'unlabeled_idxs': unlabed_idxs,
        'num_classes': num_classes
    }
Example #30
Source File: main.py From alibabacloud-quantization-networks with Apache License 2.0 | 5 votes |
def get_data(split_id, data_dir, img_size, scale_size, batch_size,
             workers, train_list, val_list):
    root = data_dir
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])  # RGB imagenet

    # with data augmentation
    train_transformer = T.Compose([
        T.RandomResizedCrop(img_size),
        T.RandomHorizontalFlip(),
        T.ToTensor(),  # [0, 255] to [0.0, 1.0]
        normalizer,    # normalize each channel of the input
    ])

    test_transformer = T.Compose([
        T.Resize(scale_size),
        T.CenterCrop(img_size),
        T.ToTensor(),
        normalizer,
    ])

    train_loader = DataLoader(
        Preprocessor(train_list, root=root, transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomSampler(train_list),
        pin_memory=True, drop_last=False)

    val_loader = DataLoader(
        Preprocessor(val_list, root=root, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    return train_loader, val_loader