Python torchvision.transforms.RandomAffine() Examples
The following are 10 code examples of torchvision.transforms.RandomAffine(), drawn from open-source projects. RandomAffine applies a random affine transformation to an image (rotation, translation, scaling, and shear) while keeping the image center invariant, and is commonly used for data augmentation. Each example is attributed to its original project and source file. You may also want to check out all available functions/classes of the module torchvision.transforms.
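Before the project examples, here is a minimal, self-contained sketch of how RandomAffine is typically used on its own. The parameter values and the input file name are illustrative assumptions, not taken from any project below:

from PIL import Image
from torchvision import transforms

# Illustrative parameter values (assumptions, not from the examples below):
affine = transforms.RandomAffine(
    degrees=15,            # rotate by a random angle in [-15, 15]
    translate=(0.1, 0.1),  # shift by up to 10% of width/height
    scale=(0.9, 1.1),      # zoom between 90% and 110%
    shear=10,              # shear by a random angle in [-10, 10]
)

img = Image.open('example.png')  # hypothetical input image
augmented = affine(img)          # returns a randomly transformed PIL image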
Example #1
Source File: data_loader.py From DeepFake-Detection with MIT License
def get_transforms():
    pre_trained_mean, pre_trained_std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    train_transforms = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomAffine(degrees=40, scale=(.9, 1.1), shear=0),
        transforms.RandomPerspective(distortion_scale=0.2),
        transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
        transforms.ToTensor(),
        # RandomErasing operates on tensors, so it must come after ToTensor()
        transforms.RandomErasing(scale=(0.02, 0.16), ratio=(0.3, 1.6)),
        transforms.Normalize(mean=pre_trained_mean, std=pre_trained_std),
    ])
    val_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=pre_trained_mean, std=pre_trained_std)
    ])
    return train_transforms, val_transforms
Example #2
Source File: cufed5_dataset.py From srntt-pytorch with Apache License 2.0
def __init__(self, dataroot: Path, scale_factor: int = 4):
    super(CUFED5Dataset, self).__init__()
    self.dataroot = Path(dataroot)
    self.filenames = list(set(
        [f.stem.split('_')[0] for f in self.dataroot.glob('*.png')]
    ))
    self.transforms = transforms.Compose([
        transforms.ToTensor(),
        # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    self.warp = transforms.RandomAffine(
        degrees=(10, 30),
        translate=(0.25, 0.5),
        scale=(1.2, 2.0),
        resample=Image.BICUBIC
    )
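Note that resample is the older name of this argument: in recent torchvision releases it was replaced by interpolation (which takes transforms.InterpolationMode values), so this example as written requires an older torchvision.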
Example #3
Source File: dataset.py From pytorch-UNet with MIT License
def __call__(self, image, mask):
    # transforming to PIL image
    image, mask = F.to_pil_image(image), F.to_pil_image(mask)

    # random crop
    if self.crop:
        i, j, h, w = T.RandomCrop.get_params(image, self.crop)
        image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)

    if np.random.rand() < self.p_flip:
        image, mask = F.hflip(image), F.hflip(mask)

    # color transforms || ONLY ON IMAGE
    if self.color_jitter_params:
        image = self.color_tf(image)

    # random affine transform
    if np.random.rand() < self.p_random_affine:
        affine_params = T.RandomAffine(180).get_params((-90, 90), (1, 1), (2, 2), (-45, 45), self.crop)
        image, mask = F.affine(image, *affine_params), F.affine(mask, *affine_params)

    # transforming to tensor
    image = F.to_tensor(image)
    if not self.long_mask:
        mask = F.to_tensor(mask)
    else:
        mask = to_long_tensor(mask)

    return image, mask
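Calling get_params directly, as above, draws one set of affine parameters and then applies it to both the image and the mask through the functional API, keeping the pair spatially aligned; applying a RandomAffine instance to each separately would sample different parameters for the two.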
Example #4
Source File: preproc.py From pt.darts with MIT License
def data_transforms(dataset, cutout_length):
    dataset = dataset.lower()
    if dataset == 'cifar10':
        MEAN = [0.49139968, 0.48215827, 0.44653124]
        STD = [0.24703233, 0.24348505, 0.26158768]
        transf = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]
    elif dataset == 'mnist':
        MEAN = [0.13066051707548254]
        STD = [0.30810780244715075]
        transf = [
            transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1)
        ]
    elif dataset == 'fashionmnist':
        MEAN = [0.28604063146254594]
        STD = [0.35302426207299326]
        transf = [
            transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1),
            transforms.RandomVerticalFlip()
        ]
    else:
        raise ValueError('not expected dataset = {}'.format(dataset))

    normalize = [
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD)
    ]

    train_transform = transforms.Compose(transf + normalize)
    valid_transform = transforms.Compose(normalize)

    if cutout_length > 0:
        train_transform.transforms.append(Cutout(cutout_length))

    return train_transform, valid_transform
Example #5
Source File: train_mnist_model.py From pytorch-to-javascript-with-onnx-js with MIT License
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=True, download=True,
                       transform=transforms.Compose([
                           # Add random transformations to the image.
                           transforms.RandomAffine(
                               degrees=30, translate=(0.5, 0.5), scale=(0.25, 1),
                               shear=(-30, 30, -30, 30)),
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
        scheduler.step()

    torch.save(model.state_dict(), "pytorch_model.pt")
Example #6
Source File: DD.py From DEEPSEC with MIT License
def __init__(self, model=None, defense_name=None, dataset=None, temperature=1, training_parameters=None, device=None):
    """
    :param model:
    :param defense_name:
    :param dataset:
    :param temperature:
    :param training_parameters:
    :param device:
    """
    super(DistillationDefense, self).__init__(model=model, defense_name=defense_name)

    self.model = model
    self.defense_name = defense_name
    self.device = device

    self.Dataset = dataset.upper()
    assert self.Dataset in ['MNIST', 'CIFAR10'], "The data set must be MNIST or CIFAR10"

    # prepare the models for the defenses
    self.initial_model = copy.deepcopy(model)
    self.best_initial_model = copy.deepcopy(model)
    self.distilled_model = copy.deepcopy(model)

    # parameters for the defense
    self.temperature = temperature * 1.0

    # get the training_parameters, the same as the settings of RawModels
    self.num_epochs = training_parameters['num_epochs']
    self.batch_size = training_parameters['batch_size']

    # prepare the optimizers and transforms
    if self.Dataset == 'MNIST':
        self.initial_optimizer = optim.SGD(self.initial_model.parameters(), lr=training_parameters['learning_rate'],
                                           momentum=training_parameters['momentum'],
                                           weight_decay=training_parameters['decay'], nesterov=True)
        self.distilled_optimizer = optim.SGD(self.distilled_model.parameters(), lr=training_parameters['learning_rate'],
                                             momentum=training_parameters['momentum'],
                                             weight_decay=training_parameters['decay'], nesterov=True)
        self.transform = None
    else:
        self.initial_optimizer = optim.Adam(self.initial_model.parameters(), lr=training_parameters['lr'])
        self.distilled_optimizer = optim.Adam(self.distilled_model.parameters(), lr=training_parameters['lr'])
        self.transform = Compose([RandomAffine(degrees=0, translate=(0.1, 0.1)), RandomHorizontalFlip(), ToTensor()])
Example #7
Source File: dataset.py From DEEPSEC with MIT License
def get_cifar10_train_validate_loader(dir_name, batch_size, valid_size=0.1, augment=True, shuffle=True,
                                      random_seed=100, num_workers=1):
    """
    :param dir_name:
    :param batch_size:
    :param valid_size:
    :param augment:
    :param shuffle:
    :param random_seed:
    :param num_workers:
    :return:
    """
    # training dataset's transform
    if augment is True:
        train_transform = transforms.Compose([
            # transforms.RandomCrop(32),
            # transforms.RandomCrop(32, padding=4),
            transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
    else:
        train_transform = transforms.Compose([transforms.ToTensor()])

    # validation dataset's transform
    valid_transform = transforms.Compose([transforms.ToTensor()])

    # load the dataset
    train_cifar10_dataset = torchvision.datasets.CIFAR10(root=dir_name, train=True, download=True,
                                                         transform=train_transform)
    valid_cifar10_dataset = torchvision.datasets.CIFAR10(root=dir_name, train=True, download=True,
                                                         transform=valid_transform)

    num_train = len(train_cifar10_dataset)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))

    if shuffle is True:
        np.random.seed(random_seed)
        np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    train_loader = torch.utils.data.DataLoader(train_cifar10_dataset, batch_size=batch_size, sampler=train_sampler,
                                               num_workers=num_workers)
    valid_loader = torch.utils.data.DataLoader(valid_cifar10_dataset, batch_size=batch_size, sampler=valid_sampler,
                                               num_workers=num_workers)

    return train_loader, valid_loader
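Both loaders here wrap the CIFAR-10 training split: the validation set is carved out of it via SubsetRandomSampler over disjoint index ranges, and two separate dataset objects are needed only because the training and validation transforms differ.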
Example #8
Source File: preproc.py From NAS-Benchmark with GNU General Public License v3.0
def data_transforms(dataset, cutout_length):
    dataset = dataset.lower()
    if dataset == 'cifar10' or dataset == 'cifar100':
        MEAN = [0.49139968, 0.48215827, 0.44653124]
        STD = [0.24703233, 0.24348505, 0.26158768]
        transf_train = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]
        transf_val = []
    elif dataset == 'mnist':
        MEAN = [0.13066051707548254]
        STD = [0.30810780244715075]
        transf_train = [
            transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1)
        ]
        transf_val = []
    elif dataset == 'fashionmnist':
        MEAN = [0.28604063146254594]
        STD = [0.35302426207299326]
        transf_train = [
            transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1),
            transforms.RandomVerticalFlip()
        ]
        transf_val = []
    # Same preprocessing for ImageNet, Sport8 and MIT67
    elif dataset in utils.LARGE_DATASETS:
        MEAN = [0.485, 0.456, 0.406]
        STD = [0.229, 0.224, 0.225]
        transf_train = [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.2)
        ]
        transf_val = [
            transforms.Resize(256),
            transforms.CenterCrop(224),
        ]
    else:
        raise ValueError('not expected dataset = {}'.format(dataset))

    normalize = [
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD)
    ]

    train_transform = transforms.Compose(transf_train + normalize)
    valid_transform = transforms.Compose(transf_val + normalize)
    # FIXME validation is not set to square proportions, is this an issue?

    if cutout_length > 0:
        train_transform.transforms.append(Cutout(cutout_length))

    return train_transform, valid_transform
Example #9
Source File: load_data.py From integer_discrete_flows with MIT License
def load_cifar10(args, **kwargs):
    # set args
    args.input_size = [3, 32, 32]
    args.input_type = 'continuous'
    args.dynamic_binarization = False

    from keras.datasets import cifar10
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    x_train = x_train.transpose(0, 3, 1, 2)
    x_test = x_test.transpose(0, 3, 1, 2)

    import math
    if args.data_augmentation_level == 2:
        data_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomHorizontalFlip(),
            transforms.Pad(int(math.ceil(32 * 0.05)), padding_mode='edge'),
            transforms.RandomAffine(degrees=0, translate=(0.05, 0.05)),
            transforms.CenterCrop(32)
        ])
    elif args.data_augmentation_level == 1:
        data_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomHorizontalFlip(),
        ])
    else:
        data_transform = transforms.Compose([
            transforms.ToPILImage(),
        ])

    x_val = x_train[-10000:]
    y_val = y_train[-10000:]
    x_train = x_train[:-10000]
    y_train = y_train[:-10000]

    train = CustomTensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train), transform=data_transform)
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)

    validation = data_utils.TensorDataset(torch.from_numpy(x_val), torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)

    test = data_utils.TensorDataset(torch.from_numpy(x_test), torch.from_numpy(y_test))
    test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)

    return train_loader, val_loader, test_loader, args
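At augmentation level 2, the Pad / RandomAffine / CenterCrop(32) sequence implements small random translations (up to 5%) without introducing black borders: the image is first padded with replicated edge pixels, shifted, and then cropped back to 32x32 so any fill introduced by the shift is discarded.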
Example #10
Source File: helpers.py From diffai with MIT License
def loadDataset(dataset, batch_size, train, transform=True):
    oargs = {}
    if dataset in ["MNIST", "CIFAR10", "CIFAR100", "FashionMNIST", "PhotoTour"]:
        oargs['train'] = train
    elif dataset in ["STL10", "SVHN"]:
        oargs['split'] = 'train' if train else 'test'
    elif dataset in ["LSUN"]:
        oargs['classes'] = 'train' if train else 'test'
    elif dataset in ["Imagenet12"]:
        pass
    else:
        raise Exception(dataset + " is not yet supported")

    if dataset in ["MNIST"]:
        transformer = transforms.Compose(
            [transforms.ToTensor()]
            + ([transforms.Normalize((0.1307,), (0.3081,))] if transform else []))
    elif dataset in ["CIFAR10", "CIFAR100"]:
        transformer = transforms.Compose(
            ([
                # transforms.RandomCrop(32, padding=4),
                transforms.RandomAffine(0, (0.125, 0.125), resample=PIL.Image.BICUBIC),
                transforms.RandomHorizontalFlip(),
                # transforms.RandomRotation(15, resample=PIL.Image.BILINEAR)
            ] if train else [])
            + [transforms.ToTensor()]
            + ([transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))] if transform else []))
    elif dataset in ["SVHN"]:
        transformer = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.2, 0.2, 0.2))])
    else:
        transformer = transforms.ToTensor()

    if dataset in ["Imagenet12"]:
        # https://github.com/facebook/fb.resnet.torch/blob/master/INSTALL.md#download-the-imagenet-dataset
        train_set = datasets.ImageFolder(
            '../data/Imagenet12/train' if train else '../data/Imagenet12/val',
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                normalize,  # `normalize` (like `use_cuda` below) is not defined here; it comes from module scope in the original helpers.py
            ]))
    else:
        train_set = getattr(datasets, dataset)('../data', download=True, transform=transformer, **oargs)

    return torch.utils.data.DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=True,
        **({'num_workers': 1, 'pin_memory': True} if use_cuda else {}))