Python torchvision.transforms.RandomResizedCrop() Examples
The following are 30 code examples of torchvision.transforms.RandomResizedCrop().
The original project and source file are noted above each example.
You may also want to check out all available functions and classes of the module torchvision.transforms.
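Before the examples, a quick orientation: RandomResizedCrop(size, scale, ratio, interpolation) crops a random region of the input, with the crop area sampled from the scale range (default 8% to 100% of the image area) and the aspect ratio sampled from the ratio range (default 3/4 to 4/3), then resizes the crop to size. A minimal sketch of typical usage, with a hypothetical input path:

from PIL import Image
from torchvision import transforms

# Defaults written out explicitly: area in [8%, 100%] of the image,
# aspect ratio in [3/4, 4/3], output resized to 224x224.
crop = transforms.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))

img = Image.open('example.jpg')  # hypothetical input image
out = crop(img)                  # PIL image of size 224x224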
Example #1
Source File: data_loader.py From transferlearning with MIT License
def load_data(root_path, dir, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ]),
        'tar': transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])}
    data = datasets.ImageFolder(root=root_path + dir, transform=transform_dict[phase])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True,
                                              drop_last=False, num_workers=4)
    return data_loader
Example #2
Source File: data_loader.py From self-supervised-da with MIT License
def get_rot_train_transformers(args):
    size = args.img_transform.random_resize_crop.size
    scale = args.img_transform.random_resize_crop.scale
    img_tr = [transforms.RandomResizedCrop((int(size[0]), int(size[1])), (scale[0], scale[1]))]
    if args.img_transform.random_horiz_flip > 0.0:
        img_tr.append(transforms.RandomHorizontalFlip(args.img_transform.random_horiz_flip))
    if args.img_transform.jitter > 0.0:
        img_tr.append(transforms.ColorJitter(
            brightness=args.img_transform.jitter,
            contrast=args.img_transform.jitter,
            # fixed: the original read args.jitter here and below, which matches
            # no other access in this function and would fail on typical configs
            saturation=args.img_transform.jitter,
            hue=min(0.5, args.img_transform.jitter)))
    mean = args.normalize.mean
    std = args.normalize.std
    img_tr += [transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]
    return transforms.Compose(img_tr)
Example #3
Source File: preprocess.py From pytorch_quantization with MIT License
def get_imagenet_iter_torch(type, image_dir, batch_size, num_threads, device_id,
                            num_gpus, crop, val_size=256, world_size=1, local_rank=0):
    if type == 'train':
        transform = transforms.Compose([
            transforms.RandomResizedCrop(crop, scale=(0.08, 1.25)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        dataset = datasets.ImageFolder(image_dir + '/train', transform)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,
                                                 num_workers=num_threads, pin_memory=True)
    else:
        transform = transforms.Compose([
            transforms.Resize(val_size),
            transforms.CenterCrop(crop),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        dataset = datasets.ImageFolder(image_dir + '/val', transform)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False,
                                                 num_workers=num_threads, pin_memory=True)
    return dataloader
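A caveat on Example #3: scale=(0.08, 1.25) lets the sampler request a crop covering up to 125% of the source area. Depending on the torchvision version, oversized candidates are rejected and, after ten failed attempts, the transform falls back to a roughly central crop. If that behavior is not intended, capping the upper bound at 1.0 is the conventional choice; a minimal sketch:

from torchvision import transforms

# Same as the train branch above, with the area range capped at the full image.
safe_crop = transforms.RandomResizedCrop(224, scale=(0.08, 1.0))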
Example #4
Source File: data_loader.py From ImageNet with MIT License
def data_loader(root, batch_size=256, workers=1, pin_memory=True):
    traindir = os.path.join(root, 'ILSVRC2012_img_train')
    valdir = os.path.join(root, 'ILSVRC2012_img_val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize
        ])
    )
    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize
        ])
    )

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, shuffle=True,
        num_workers=workers, pin_memory=pin_memory, sampler=None
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=batch_size, shuffle=False,
        num_workers=workers, pin_memory=pin_memory
    )
    return train_loader, val_loader
Example #5
Source File: dataloader.py From imagenet18_old with The Unlicense
def get_loaders(traindir, valdir, sz, bs, fp16=True, val_bs=None, workers=8,
                rect_val=False, min_scale=0.08, distributed=False):
    val_bs = val_bs or bs
    train_tfms = [
        transforms.RandomResizedCrop(sz, scale=(min_scale, 1.0)),
        transforms.RandomHorizontalFlip()
    ]  # no ToTensor here: fast_collate converts PIL images to tensors per batch
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose(train_tfms))
    train_sampler = (DistributedSampler(train_dataset, num_replicas=env_world_size(), rank=env_rank())
                     if distributed else None)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=bs, shuffle=(train_sampler is None),
        num_workers=workers, pin_memory=True, collate_fn=fast_collate,
        sampler=train_sampler)

    val_dataset, val_sampler = create_validation_set(valdir, val_bs, sz,
                                                     rect_val=rect_val, distributed=distributed)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, num_workers=workers, pin_memory=True,
        collate_fn=fast_collate, batch_sampler=val_sampler)

    train_loader = BatchTransformDataLoader(train_loader, fp16=fp16)
    val_loader = BatchTransformDataLoader(val_loader, fp16=fp16)
    return train_loader, val_loader, train_sampler, val_sampler
Example #6
Source File: data_loader.py From transferlearning with MIT License
def load_train(root_path, dir, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ]),
        'tar': transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])}
    data = datasets.ImageFolder(root=root_path + dir, transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True,
                                               drop_last=False, num_workers=4)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=True,
                                             drop_last=False, num_workers=4)
    return train_loader, val_loader
Example #7
Source File: utils.py From proxy-nca with MIT License
def make_transform(sz_resize=256, sz_crop=227, mean=[104, 117, 128],
                   std=[1, 1, 1], rgb_to_bgr=True, is_train=True,
                   intensity_scale=None):
    return transforms.Compose([
        RGBToBGR() if rgb_to_bgr else Identity(),
        transforms.RandomResizedCrop(sz_crop) if is_train else Identity(),
        transforms.Resize(sz_resize) if not is_train else Identity(),
        transforms.CenterCrop(sz_crop) if not is_train else Identity(),
        transforms.RandomHorizontalFlip() if is_train else Identity(),
        transforms.ToTensor(),
        ScaleIntensities(*intensity_scale) if intensity_scale is not None else Identity(),
        transforms.Normalize(mean=mean, std=std),
    ])
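RGBToBGR, Identity, and ScaleIntensities in this example (and in the identical Example #8 below) are helpers defined in the source projects, not part of torchvision. A minimal Identity that makes the conditional-transform pattern runnable could look like this; it is an assumed sketch, not the projects' exact code:

class Identity:
    # No-op transform: returns its input unchanged, so it can stand in for
    # any pipeline stage disabled by the is_train / rgb_to_bgr flags.
    def __call__(self, img):
        return img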
Example #8
Source File: transform.py From metric-learning-divide-and-conquer with GNU Lesser General Public License v3.0
def make(sz_resize=256, sz_crop=227, mean=[104, 117, 128],
         std=[1, 1, 1], rgb_to_bgr=True, is_train=True,
         intensity_scale=None):
    return transforms.Compose([
        RGBToBGR() if rgb_to_bgr else Identity(),
        transforms.RandomResizedCrop(sz_crop) if is_train else Identity(),
        transforms.Resize(sz_resize) if not is_train else Identity(),
        transforms.CenterCrop(sz_crop) if not is_train else Identity(),
        transforms.RandomHorizontalFlip() if is_train else Identity(),
        transforms.ToTensor(),
        ScaleIntensities(*intensity_scale) if intensity_scale is not None else Identity(),
        transforms.Normalize(mean=mean, std=std),
    ])
Example #9
Source File: datasets.py From amdim-public with MIT License
def __init__(self):
    # image augmentation functions
    self.flip_lr = transforms.RandomHorizontalFlip(p=0.5)
    rand_crop = transforms.RandomResizedCrop(128, scale=(0.3, 1.0), ratio=(0.7, 1.4),
                                             interpolation=INTERP)
    col_jitter = transforms.RandomApply([
        transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8)
    rnd_gray = transforms.RandomGrayscale(p=0.25)
    post_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    self.test_transform = transforms.Compose([
        transforms.Resize(146, interpolation=INTERP),
        transforms.CenterCrop(128),
        post_transform
    ])
    self.train_transform = transforms.Compose([
        rand_crop, col_jitter, rnd_gray, post_transform
    ])
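Example #9 references a module-level INTERP constant that this excerpt does not define. It presumably names a PIL interpolation mode; a plausible definition, labeled as an assumption:

from PIL import Image

INTERP = Image.BICUBIC  # assumed value; the source project may use a different mode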
Example #10
Source File: util.py From ClassyVision with MIT License
def __init__(
    self,
    crop_size: int = ImagenetConstants.CROP_SIZE,
    mean: List[float] = ImagenetConstants.MEAN,
    std: List[float] = ImagenetConstants.STD,
):
    """The constructor method of ImagenetAugmentTransform class.

    Args:
        crop_size: expected output size per dimension after random cropping
        mean: a 3-tuple denoting the pixel RGB mean
        std: a 3-tuple denoting the pixel RGB standard deviation
    """
    self.transform = transforms.Compose(
        [
            transforms.RandomResizedCrop(crop_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ]
    )
Example #11
Source File: dataloaders.py From HBONet with Apache License 2.0
def get_pytorch_train_loader(data_path, batch_size, workers=5, _worker_init_fn=None, input_size=224):
    traindir = os.path.join(data_path, 'train')
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(input_size),
            transforms.RandomHorizontalFlip(),
        ]))  # no ToTensor here: fast_collate converts PIL images to tensors per batch

    if torch.distributed.is_initialized():
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, shuffle=(train_sampler is None),
        num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True,
        sampler=train_sampler, collate_fn=fast_collate)

    return PrefetchedWrapper(train_loader), len(train_loader)
Example #12
Source File: dataloaders.py From mobilenetv2.pytorch with Apache License 2.0
def get_pytorch_train_loader(data_path, batch_size, workers=5, _worker_init_fn=None, input_size=224):
    traindir = os.path.join(data_path, 'train')
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(input_size),
            transforms.RandomHorizontalFlip(),
        ]))  # no ToTensor here: fast_collate converts PIL images to tensors per batch

    if torch.distributed.is_initialized():
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, shuffle=(train_sampler is None),
        num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True,
        sampler=train_sampler, collate_fn=fast_collate)

    return PrefetchedWrapper(train_loader), len(train_loader)
Example #13
Source File: fakeego.py From actor-observer with GNU General Public License v3.0
def get(cls, args):
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = torchvision.datasets.FakeData(
        transform=transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
            transforms.Lambda(lambda x: [x, x, x])
        ]))
    val_dataset = torchvision.datasets.FakeData(
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
            transforms.Lambda(lambda x: [x, x, x])
        ]))
    return train_dataset, val_dataset, val_dataset
Example #14
Source File: preprocess.py From pytorch_quantization with MIT License
def imgnet_transform(is_training=True):
    if is_training:
        transform_list = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.3),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])
    else:
        transform_list = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])
    return transform_list
Example #15
Source File: data_loader.py From self-supervised-da with MIT License
def get_jig_train_transformers(args):
    size = args.img_transform.random_resize_crop.size
    scale = args.img_transform.random_resize_crop.scale
    img_tr = [transforms.RandomResizedCrop((int(size[0]), int(size[1])), (scale[0], scale[1]))]
    if args.img_transform.random_horiz_flip > 0.0:
        img_tr.append(transforms.RandomHorizontalFlip(args.img_transform.random_horiz_flip))
    if args.img_transform.jitter > 0.0:
        img_tr.append(transforms.ColorJitter(
            brightness=args.img_transform.jitter,
            contrast=args.img_transform.jitter,
            saturation=args.img_transform.jitter,  # fixed: was args.jitter, as in Example #2
            hue=min(0.5, args.img_transform.jitter)))

    tile_tr = []
    if args.jig_transform.tile_random_grayscale:
        tile_tr.append(transforms.RandomGrayscale(args.jig_transform.tile_random_grayscale))
    mean = args.normalize.mean
    std = args.normalize.std
    tile_tr = tile_tr + [transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]
    return transforms.Compose(img_tr), transforms.Compose(tile_tr)
Example #16
Source File: imagenet.py From nasnet-pytorch with MIT License
def preprocess(self):
    if self.train:
        return transforms.Compose([
            transforms.RandomResizedCrop(self.image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
            transforms.ToTensor(),
            transforms.Normalize(self.mean, self.std),
        ])
    else:
        return transforms.Compose([
            transforms.Resize((int(self.image_size / 0.875), int(self.image_size / 0.875))),
            transforms.CenterCrop(self.image_size),
            transforms.ToTensor(),
            transforms.Normalize(self.mean, self.std),
        ])
Example #17
Source File: image_folder.py From DGP with MIT License
def __init__(self, path, classes, stage='train'):
    self.data = []
    for i, c in enumerate(classes):
        cls_path = osp.join(path, c)
        images = os.listdir(cls_path)
        for image in images:
            self.data.append((osp.join(cls_path, image), i))

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if stage == 'train':
        self.transforms = transforms.Compose([transforms.RandomResizedCrop(224),
                                              transforms.RandomHorizontalFlip(),
                                              transforms.ToTensor(),
                                              normalize])
    if stage == 'test':
        self.transforms = transforms.Compose([transforms.Resize(256),
                                              transforms.CenterCrop(224),
                                              transforms.ToTensor(),
                                              normalize])
Example #18
Source File: utils.py From NAS-Benchmark with GNU General Public License v3.0
def data_transforms_food101():
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(128),  # default bilinear interpolation
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
        transforms.ToTensor(),
        normalize,
    ])
    valid_transform = transforms.Compose([
        transforms.Resize(128),
        transforms.CenterCrop(128),
        transforms.ToTensor(),
        normalize,
    ])
    return train_transform, valid_transform
Example #19
Source File: utils.py From NAS-Benchmark with GNU General Public License v3.0
def data_transforms_imagenet():
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(128),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
        transforms.ToTensor(),
        normalize,
    ])
    valid_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(128),
        transforms.ToTensor(),
        normalize,
    ])
    return train_transform, valid_transform
Example #20
Source File: datasets.py From garbageClassifier with MIT License
def __init__(self, data_dir, image_size, is_train=True, **kwargs):
    self.image_size = image_size
    self.image_paths = []
    self.image_labels = []
    self.classes = sorted(os.listdir(data_dir))
    for idx, cls_ in enumerate(self.classes):
        self.image_paths += glob.glob(os.path.join(data_dir, cls_, '*.*'))
        self.image_labels += [idx] * len(glob.glob(os.path.join(data_dir, cls_, '*.*')))
    self.indexes = list(range(len(self.image_paths)))
    if is_train:
        random.shuffle(self.indexes)
        self.transform = transforms.Compose([
            transforms.RandomResizedCrop(image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=1, contrast=1, saturation=0.5, hue=0.5),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])
    else:
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])
Example #21
Source File: image.py From mmfashion with Apache License 2.0
def get_img_tensor(img_path, use_cuda, get_size=False):
    img = Image.open(img_path)
    original_w, original_h = img.size

    img_size = (224, 224)  # crop image to (224, 224)
    img.thumbnail(img_size, Image.ANTIALIAS)  # note: ANTIALIAS is named LANCZOS in newer Pillow
    img = img.convert('RGB')
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.RandomResizedCrop(img_size[0]),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    img_tensor = transform(img)
    img_tensor = torch.unsqueeze(img_tensor, 0)
    if use_cuda:
        img_tensor = img_tensor.cuda()
    if get_size:
        # fixed: the original returned original_w twice instead of (w, h)
        return img_tensor, original_w, original_h
    else:
        return img_tensor
Example #22
Source File: datasets.py From nni with MIT License
def build_train_transform(self, distort_color, resize_scale):
    print('Color jitter: %s' % distort_color)
    if distort_color == 'strong':
        color_transform = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
    elif distort_color == 'normal':
        color_transform = transforms.ColorJitter(brightness=32. / 255., saturation=0.5)
    else:
        color_transform = None

    if color_transform is None:
        train_transforms = transforms.Compose([
            transforms.RandomResizedCrop(self.image_size, scale=(resize_scale, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            self.normalize,
        ])
    else:
        train_transforms = transforms.Compose([
            transforms.RandomResizedCrop(self.image_size, scale=(resize_scale, 1.0)),
            transforms.RandomHorizontalFlip(),
            color_transform,
            transforms.ToTensor(),
            self.normalize,
        ])
    return train_transforms
Example #23
Source File: preprocessing.py From pytorch_DoReFaNet with MIT License
def imgnet_transform(is_training=True):
    if is_training:
        transform_list = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.3),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])
    else:
        transform_list = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])
    return transform_list
Example #24
Source File: imagenet_data.py From DenseNAS with Apache License 2.0
def _getTransformList(self, aug_type):
    # 'week_train' is presumably a typo for 'weak_train'; kept as-is since
    # callers pass this exact string
    assert aug_type in ['rand_scale', 'random_sized', 'week_train', 'validation']
    list_of_transforms = []

    if aug_type == 'validation':
        list_of_transforms.append(transforms.Resize(self.scaled_size))
        list_of_transforms.append(transforms.CenterCrop(self.size_images))
    elif aug_type == 'week_train':
        list_of_transforms.append(transforms.Resize(256))
        list_of_transforms.append(transforms.RandomCrop(self.size_images))
        list_of_transforms.append(transforms.RandomHorizontalFlip())
    else:
        if aug_type == 'rand_scale':
            list_of_transforms.append(transforms_extension.RandomScale(256, 480))
            list_of_transforms.append(transforms.RandomCrop(self.size_images))
            list_of_transforms.append(transforms.RandomHorizontalFlip())
        elif aug_type == 'random_sized':
            list_of_transforms.append(transforms.RandomResizedCrop(
                self.size_images,
                scale=(self.data_config.random_sized.min_scale, 1.0)))
            list_of_transforms.append(transforms.RandomHorizontalFlip())
        if self.data_config.color:
            list_of_transforms.append(transforms.ColorJitter(brightness=0.4,
                                                             contrast=0.4,
                                                             saturation=0.4))
    return transforms.Compose(list_of_transforms)
Example #25
Source File: dataset_1.py From DriverPostureClassification with MIT License
def __init__(self, root, transform=None, target_transform=None, train=True, test=False):
    self.root = root
    self.transform = transform
    self.target_transform = target_transform
    self.train = train
    self.test = test

    if self.test:
        with open(os.path.join(self.root, 'test.csv'), 'r') as f:
            lines = f.readlines()[1:]
        dataset = []
        for line in lines:
            dataset.append(line.strip().split(','))
    else:
        with open(os.path.join(self.root, 'train.csv'), 'r') as f:
            lines = f.readlines()[1:]
        dataset = []
        for line in lines:
            dataset.append(line.strip().split(','))
    dataset = np.array(dataset)
    self.imgs = list(map(lambda x: os.path.join(self.root, x), dataset[:, 0]))
    self.target = list(map(int, dataset[:, 1]))

    if transform is None:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        if self.test:
            self.transform = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize
            ])
        else:
            self.transform = transforms.Compose([
                transforms.Resize(256),
                transforms.RandomResizedCrop(224, scale=(0.25, 1)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize
            ])
Example #26
Source File: load_data.py From Deep-Expander-Networks with GNU General Public License v3.0
def __init__(self, opt):
    # note: shuffle=False for train and shuffle=True for val looks inverted
    # relative to common practice; kept as in the source
    kwargstrain = {
        'num_workers': opt.workers,
        'batch_size': opt.batch_size,
        'shuffle': False,
        'pin_memory': True}
    kwargstest = {
        'num_workers': opt.workers,
        'batch_size': opt.batch_size,
        'shuffle': True,
        'pin_memory': True}
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(opt.inpsize),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(opt.inpsize),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }
    data_dir = opt.data_dir
    dtsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
              for x in ['train', 'val']}
    self.train_loader = torch.utils.data.DataLoader(dtsets["train"], **kwargstrain)
    self.val_loader = torch.utils.data.DataLoader(dtsets["val"], **kwargstest)
Example #27
Source File: charadesrgb.py From actor-observer with GNU General Public License v3.0
def get(cls, args, scale=(0.08, 1.0)):
    """ Entry point. Call this function to get all Charades dataloaders """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_file = args.train_file
    val_file = args.val_file
    train_dataset = cls(
        args.data, 'train', train_file, args.cache, args.cache_buster,
        transform=transforms.Compose([
            transforms.RandomResizedCrop(args.inputsize, scale),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),  # missing PCA lighting jitter
            normalize,
        ]))
    val_dataset = cls(
        args.data, 'val', val_file, args.cache, args.cache_buster,
        transform=transforms.Compose([
            transforms.Resize(int(256. / 224 * args.inputsize)),
            transforms.CenterCrop(args.inputsize),
            transforms.ToTensor(),
            normalize,
        ]))
    valvideo_dataset = cls(
        args.data, 'val_video', val_file, args.cache, args.cache_buster,
        transform=transforms.Compose([
            transforms.Resize(int(256. / 224 * args.inputsize)),
            transforms.CenterCrop(args.inputsize),
            transforms.ToTensor(),
            normalize,
        ]))
    return train_dataset, val_dataset, valvideo_dataset
Example #28
Source File: utils.py From inplace_abn with BSD 3-Clause "New" or "Revised" License
def create_transforms(input_config):
    """Create transforms from configuration

    Parameters
    ----------
    input_config : dict
        Dictionary containing the configuration options for input pre-processing.

    Returns
    -------
    train_transforms : list
        List of transforms to be applied to the input during training.
    val_transforms : list
        List of transforms to be applied to the input during validation.
    """
    normalize = transforms.Normalize(mean=input_config["mean"], std=input_config["std"])

    train_transforms = []
    if input_config["scale_train"] != -1:
        train_transforms.append(transforms.Scale(input_config["scale_train"]))
    train_transforms += [
        transforms.RandomResizedCrop(input_config["crop_train"]),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ]
    if input_config["color_jitter_train"]:
        train_transforms.append(ColorJitter())
    if input_config["lighting_train"]:
        train_transforms.append(Lighting())
    train_transforms.append(normalize)

    val_transforms = []
    if input_config["scale_val"] != -1:
        val_transforms.append(transforms.Resize(input_config["scale_val"]))
    val_transforms += [
        transforms.CenterCrop(input_config["crop_val"]),
        transforms.ToTensor(),
        normalize,
    ]
    return train_transforms, val_transforms
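Note that Example #28 mixes transforms.Scale (the old name) with transforms.Resize. Scale was deprecated in favor of Resize and has been removed from recent torchvision releases, so on current versions the training branch needs the drop-in replacement:

from torchvision import transforms

# Drop-in replacement for the deprecated transforms.Scale; the integer 256
# stands in for input_config["scale_train"].
resize = transforms.Resize(256)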
Example #29
Source File: data.py From vsepp with Apache License 2.0
def get_transform(data_name, split_name, opt):
    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
    t_list = []
    if split_name == 'train':
        t_list = [transforms.RandomResizedCrop(opt.crop_size),
                  transforms.RandomHorizontalFlip()]
    elif split_name == 'val':
        t_list = [transforms.Resize(256), transforms.CenterCrop(224)]
    elif split_name == 'test':
        t_list = [transforms.Resize(256), transforms.CenterCrop(224)]

    t_end = [transforms.ToTensor(), normalizer]
    transform = transforms.Compose(t_list + t_end)
    return transform
Example #30
Source File: trainer.py From advex-uar with Apache License 2.0
def _init_loaders(self):
    allreduce_batch_size = self.batch_size * self.batches_per_allreduce
    traindir = os.path.join(self.dataset_path, 'train')
    valdir = os.path.join(self.dataset_path, 'val')
    self.train_dataset = StridedImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            self.normalize,
        ]),
        stride=self.stride)
    self.val_dataset = StridedImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            self.normalize,
        ]),
        stride=self.stride)
    self.train_sampler = torch.utils.data.distributed.DistributedSampler(
        self.train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    self.val_sampler = torch.utils.data.distributed.DistributedSampler(
        self.val_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    self.train_loader = torch.utils.data.DataLoader(
        self.train_dataset, batch_size=allreduce_batch_size,
        sampler=self.train_sampler, num_workers=8, pin_memory=True)
    self.val_loader = torch.utils.data.DataLoader(
        self.val_dataset, batch_size=allreduce_batch_size,
        sampler=self.val_sampler, num_workers=8, pin_memory=True,
        shuffle=False)