Python torchvision.transforms.Compose() Examples
The following are 30
code examples of torchvision.transforms.Compose().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module torchvision.transforms, or try the search function.
Example #1
Source File: data_loader.py From transferlearning with MIT License | 10 votes |
def load_data(root_path, dir, batch_size, phase):
    """Build a shuffling DataLoader over the ImageFolder at ``root_path + dir``.

    ``phase`` selects the preprocessing: 'src' applies train-time augmentation
    (random resized crop + horizontal flip) while 'tar' only resizes.  Both
    normalize with ImageNet channel statistics.
    """
    imagenet_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    transform_dict = {
        'src': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
        'tar': transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
    }
    data = datasets.ImageFolder(root=root_path + dir,
                                transform=transform_dict[phase])
    data_loader = torch.utils.data.DataLoader(
        data, batch_size=batch_size, shuffle=True,
        drop_last=False, num_workers=4)
    return data_loader
Example #2
Source File: model.py From iAI with MIT License | 8 votes |
def __init__(self):
    """Set hyper-parameters, build MNIST train/test loaders and the network."""
    self.batch_size = 64
    self.test_batch_size = 100
    self.learning_rate = 0.01
    self.sgd_momentum = 0.9
    self.log_interval = 100
    # MNIST mean/std used for input normalization.
    mnist_pipeline = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    # Fetch MNIST data set (downloaded on first use).
    self.train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('/tmp/mnist/data', train=True, download=True,
                       transform=mnist_pipeline),
        batch_size=self.batch_size,
        shuffle=True)
    self.test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('/tmp/mnist/data', train=False,
                       transform=mnist_pipeline),
        batch_size=self.test_batch_size,
        shuffle=True)
    self.network = Net()
    # Train the network for several epochs, validating after each epoch.
Example #3
Source File: data_loader.py From transferlearning with MIT License | 8 votes |
def load_training(root_path, dir, batch_size, kwargs):
    """Return a shuffling training DataLoader with crop/flip augmentation."""
    augmentation = transforms.Compose([
        transforms.Resize([256, 256]),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    data = datasets.ImageFolder(root=root_path + dir, transform=augmentation)
    # drop_last keeps every training batch the same size.
    return torch.utils.data.DataLoader(
        data, batch_size=batch_size, shuffle=True, drop_last=True, **kwargs)
Example #4
Source File: dataset.py From DeepLab_v3_plus with MIT License | 7 votes |
def transform_for_train(fixed_scale=512, rotate_prob=15):
    """Compose the training-time augmentation pipeline.

    Options: 1.RandomCrop 2.CenterCrop 3.RandomHorizontalFlip 4.Normalize
    5.ToTensor 6.FixedResize 7.RandomRotate
    """
    # NOTE(review): ``rotate_prob`` is forwarded to RandomRotate and looks
    # like a rotation amount rather than a probability — confirm upstream.
    return transforms.Compose([
        # FixedResize(size=(fixed_scale, fixed_scale)) was replaced by RandomSized.
        RandomSized(fixed_scale),
        RandomRotate(rotate_prob),
        RandomHorizontalFlip(),
        # Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensor(),
    ])
Example #5
Source File: data_loader.py From transferlearning with MIT License | 7 votes |
def load_train(root_path, dir, batch_size, phase):
    """Load an ImageFolder and split it 80/20 into train and val loaders."""
    imagenet_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    transform_dict = {
        'src': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
        'tar': transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
    }
    data = datasets.ImageFolder(root=root_path + dir,
                                transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(
        data, [train_size, test_size])
    loader_kwargs = dict(batch_size=batch_size, shuffle=True,
                         drop_last=False, num_workers=4)
    train_loader = torch.utils.data.DataLoader(data_train, **loader_kwargs)
    val_loader = torch.utils.data.DataLoader(data_val, **loader_kwargs)
    return train_loader, val_loader
Example #6
Source File: data_loader.py From transferlearning with MIT License | 7 votes |
def load_data(data_folder, batch_size, train, kwargs):
    """Create a DataLoader over ``data_folder`` with train/test preprocessing."""
    imagenet_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    transform = {
        'train': transforms.Compose([
            transforms.Resize([256, 256]),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
        'test': transforms.Compose([
            transforms.Resize([224, 224]),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
    }
    split = 'train' if train else 'test'
    data = datasets.ImageFolder(root=data_folder, transform=transform[split])
    # Drop the last incomplete batch only while training.
    return torch.utils.data.DataLoader(
        data, batch_size=batch_size, shuffle=True, **kwargs,
        drop_last=True if train else False)
Example #7
Source File: train.py From pytorch-multigpu with MIT License | 7 votes |
def main():
    """Train a PyramidNet on CIFAR-10, using DataParallel across GPUs."""
    best_acc = 0
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    dataset_train = CIFAR10(root='../data', train=True, download=True,
                            transform=transforms_train)
    train_loader = DataLoader(dataset_train, batch_size=args.batch_size,
                              shuffle=True, num_workers=args.num_worker)

    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')
    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr,
    #                       momentum=0.9, weight_decay=1e-4)

    train(net, criterion, optimizer, train_loader, device)
Example #8
Source File: nyu_walkable_surface_dataset.py From dogTorch with MIT License | 7 votes |
def __init__(self, args, train=True):
    """Select the split's file list and build image/segmentation transforms.

    Args:
        args: parsed options; must provide ``data``, ``use_test_for_val``,
            ``read_features``, ``features_dir``, ``image_size`` and
            ``segmentation_size``.
        train: when True use the training file list, otherwise the
            validation list (or the test list if ``args.use_test_for_val``).
    """
    self.root_dir = args.data
    if train:
        self.data_set_list = train_set_list
    elif args.use_test_for_val:
        self.data_set_list = test_set_list
    else:
        self.data_set_list = val_set_list
    # The split lists hold integer ids; map them to zero-padded png names.
    self.data_set_list = ['%06d.png' % (x) for x in self.data_set_list]
    self.args = args
    self.read_features = args.read_features
    self.features_dir = args.features_dir
    # transforms.Scale was deprecated and later removed from torchvision;
    # Resize is the drop-in replacement with identical behavior.
    self.transform = transforms.Compose([
        transforms.Resize((args.image_size, args.image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    self.transform_segmentation = transforms.Compose([
        transforms.Resize((args.segmentation_size, args.segmentation_size)),
        transforms.ToTensor(),
    ])
Example #9
Source File: multiscale_trainer.py From L3C-PyTorch with GNU General Public License v3.0 | 6 votes |
def _get_ds_val(self, images_spec, crop=False, truncate=False):
    """Build the validation dataset for ``images_spec``.

    Optionally center-crops the inputs (``crop`` is the crop size) and/or
    truncates the dataset to ``truncate`` elements.
    """
    to_tensor_steps = [
        images_loader.IndexImagesDataset.to_tensor_uint8_transform()]
    if crop:
        to_tensor_steps.insert(0, transforms.CenterCrop(crop))
    to_tensor_pipeline = transforms.Compose(to_tensor_steps)

    fixed_first = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               'fixedimg.jpg')
    if not os.path.isfile(fixed_first):
        print(f'INFO: No file found at {fixed_first}')
        fixed_first = None

    # fix a first image to have consistency in tensor board
    ds = images_loader.IndexImagesDataset(
        images=images_loader.ImagesCached(
            images_spec, self.config_dl.image_cache_pkl,
            min_size=self.config_dl.val_glob_min_size),
        to_tensor_transform=to_tensor_pipeline,
        fixed_first=fixed_first)

    if truncate:
        ds = pe.TruncatedDataset(ds, num_elemens=truncate)
    return ds
Example #10
Source File: model.py From iAI with MIT License | 6 votes |
def __init__(self):
    """Configure hyper-parameters and MNIST loaders, then build the net."""
    self.batch_size = 64
    self.test_batch_size = 100
    self.learning_rate = 0.0025
    self.sgd_momentum = 0.9
    self.log_interval = 100
    # Fetch MNIST data set; normalize with the dataset's mean/std.
    normalize = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    self.train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('/tmp/mnist/data', train=True, download=True,
                       transform=normalize),
        batch_size=self.batch_size,
        shuffle=True)
    self.test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('/tmp/mnist/data', train=False,
                       transform=normalize),
        batch_size=self.test_batch_size,
        shuffle=True)
    self.network = Net()
    # Train the network for one or more epochs, validating after each epoch.
Example #11
Source File: loaders.py From dfw with MIT License | 6 votes |
def loaders_mnist(dataset, batch_size=64, cuda=0,
                  train_size=50000, val_size=10000, test_size=10000,
                  test_batch_size=1000, **kwargs):
    """Create MNIST train/val/test loaders rooted at ``$VISION_DATA/mnist``."""
    assert dataset == 'mnist'
    root = '{}/{}'.format(os.environ['VISION_DATA'], dataset)

    # Data loading code
    mnist_norm = transforms.Normalize(mean=(0.1307,), std=(0.3081,))
    pipeline = transforms.Compose([transforms.ToTensor(), mnist_norm])

    # define two datasets in order to have different transforms
    # on training and validation
    train_set = datasets.MNIST(root=root, train=True, transform=pipeline)
    val_set = datasets.MNIST(root=root, train=True, transform=pipeline)
    test_set = datasets.MNIST(root=root, train=False, transform=pipeline)

    return create_loaders(train_set, val_set, test_set,
                          train_size, val_size, test_size,
                          batch_size=batch_size,
                          test_batch_size=test_batch_size,
                          cuda=cuda, num_workers=0)
Example #12
Source File: loaddata.py From Visualizing-CNNs-for-monocular-depth-estimation with MIT License | 6 votes |
def getTestingData(batch_size=64):
    """Return a sequential DataLoader over the NYUv2 depth test split."""
    imagenet_stats = {'mean': [0.485, 0.456, 0.406],
                      'std': [0.229, 0.224, 0.225]}

    # scale = random.uniform(1, 1.5)
    test_pipeline = transforms.Compose([
        Scale(240),
        CenterCrop([304, 228], [152, 114]),
        ToTensor(is_test=True),
        Normalize(imagenet_stats['mean'], imagenet_stats['std']),
    ])
    transformed_testing = depthDataset(csv_file='./data/nyu2_test.csv',
                                       transform=test_pipeline)

    return DataLoader(transformed_testing, batch_size,
                      shuffle=False, num_workers=4, pin_memory=False)
Example #13
Source File: serve.py From robosat with MIT License | 6 votes |
def segment(self, image):
    """Run the network on a PIL image and return a palettized mask image."""
    # don't track tensors with autograd during prediction
    with torch.no_grad():
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        preprocess = Compose([
            ConvertImageMode(mode="RGB"),
            ImageToTensor(),
            Normalize(mean=mean, std=std),
        ])
        batch = preprocess(image).unsqueeze(0).to(self.device)

        output = self.net(batch)
        scores = output.cpu().data.numpy().squeeze(0)

        # Per-pixel argmax over class scores gives the label mask.
        mask = Image.fromarray(scores.argmax(axis=0).astype(np.uint8),
                               mode="P")
        mask.putpalette(make_palette(*self.dataset["common"]["colors"]))
        return mask
Example #14
Source File: model.py From iAI with MIT License | 6 votes |
def __init__(self):
    """Initialize training settings, MNIST data loaders and the network."""
    self.batch_size = 64
    self.test_batch_size = 100
    self.learning_rate = 0.0025
    self.sgd_momentum = 0.9
    self.log_interval = 100
    # Fetch MNIST data set.  Both splits share the same normalization.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    self.train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('/tmp/mnist/data', train=True, download=True,
                       transform=preprocess),
        batch_size=self.batch_size,
        shuffle=True)
    self.test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('/tmp/mnist/data', train=False,
                       transform=preprocess),
        batch_size=self.test_batch_size,
        shuffle=True)
    self.network = Net()
    # Train the network for one or more epochs, validating after each epoch.
Example #15
Source File: data_load.py From transferlearning with MIT License | 6 votes |
def load_data(data_folder, batch_size, phase='train', train_val_split=True, train_ratio=.8):
    """Build DataLoader(s) over an ImageFolder at ``data_folder``.

    Returns ``[train_loader, val_loader]`` when ``phase == 'train'`` and
    ``train_val_split`` is set, a single training loader when the split is
    disabled, and a sequential test loader for any other phase.
    """
    imagenet_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    transform_dict = {
        'train': transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
        'test': transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
    }
    data = datasets.ImageFolder(root=data_folder,
                                transform=transform_dict[phase])
    if phase != 'train':
        return torch.utils.data.DataLoader(
            data, batch_size=batch_size, shuffle=False,
            drop_last=False, num_workers=4)
    if not train_val_split:
        return torch.utils.data.DataLoader(
            data, batch_size=batch_size, shuffle=True,
            drop_last=True, num_workers=4)
    train_size = int(train_ratio * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(
        data, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(
        data_train, batch_size=batch_size, shuffle=True,
        drop_last=True, num_workers=4)
    val_loader = torch.utils.data.DataLoader(
        data_val, batch_size=batch_size, shuffle=False,
        drop_last=False, num_workers=4)
    return [train_loader, val_loader]

## Below are for ImageCLEF datasets
Example #16
Source File: data_load.py From transferlearning with MIT License | 6 votes |
def load_imageclef_train(root_path, domain, batch_size, phase):
    """Split an ImageCLEF domain 80/20 and return train/val loaders."""
    imagenet_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    transform_dict = {
        'src': transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
        'tar': transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
    }
    data = ImageCLEF(root_dir=root_path, domain=domain,
                     transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(
        data, [train_size, test_size])
    loader_kwargs = dict(batch_size=batch_size, shuffle=True,
                         drop_last=False, num_workers=4)
    train_loader = torch.utils.data.DataLoader(data_train, **loader_kwargs)
    val_loader = torch.utils.data.DataLoader(data_val, **loader_kwargs)
    return train_loader, val_loader
Example #17
Source File: data_load.py From transferlearning with MIT License | 6 votes |
def load_imageclef_test(root_path, domain, batch_size, phase):
    """Return a single shuffling loader over one ImageCLEF domain."""
    imagenet_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    transform_dict = {
        'src': transforms.Compose([
            transforms.Resize((256,256)),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
        'tar': transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
    }
    data = ImageCLEF(root_dir=root_path, domain=domain,
                     transform=transform_dict[phase])
    return torch.utils.data.DataLoader(
        data, batch_size=batch_size, shuffle=True,
        drop_last=False, num_workers=4)
Example #18
Source File: segmentation.py From steppy-toolkit with MIT License | 6 votes |
def __init__(self, loader_params, dataset_params, augmentation_params):
    """Configure resize-based image/mask transforms for TTA inference."""
    super().__init__(loader_params, dataset_params, augmentation_params)

    target_hw = (self.dataset_params.h, self.dataset_params.w)
    self.image_transform = transforms.Compose([
        transforms.Resize(target_hw),
        transforms.Grayscale(num_output_channels=3),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.dataset_params.MEAN,
                             std=self.dataset_params.STD),
    ])
    # interpolation=0 (nearest neighbour) keeps mask labels un-blended.
    self.mask_transform = transforms.Compose([
        transforms.Resize(target_hw, interpolation=0),
        transforms.Lambda(to_array),
        transforms.Lambda(to_tensor),
    ])

    self.dataset = ImageSegmentationTTADataset
Example #19
Source File: segmentation.py From steppy-toolkit with MIT License | 6 votes |
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    """Set up resize transforms, training augmenters and the dataset class."""
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    target_hw = (self.dataset_params.h, self.dataset_params.w)
    self.image_transform = transforms.Compose([
        transforms.Resize(target_hw),
        transforms.Grayscale(num_output_channels=3),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.dataset_params.MEAN,
                             std=self.dataset_params.STD),
    ])
    # interpolation=0 (nearest neighbour) keeps mask labels un-blended.
    self.mask_transform = transforms.Compose([
        transforms.Resize(target_hw, interpolation=0),
        transforms.Lambda(to_array),
        transforms.Lambda(to_tensor),
    ])

    self.image_augment_train = ImgAug(
        self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(
        self.augmentation_params['image_augment_with_target_train'])

    # Pick the dataset implementation by the configured target format.
    if self.dataset_params.target_format == 'png':
        self.dataset = ImageSegmentationPngDataset
    elif self.dataset_params.target_format == 'json':
        self.dataset = ImageSegmentationJsonDataset
    else:
        raise Exception('files must be png or json')
Example #20
Source File: get_data_loader.py From DDPAE-video-prediction with MIT License | 6 votes |
def get_data_loader(opt):
    """Construct the DataLoader for the dataset named in ``opt.dset_name``."""
    if opt.dset_name == 'moving_mnist':
        pipeline = transforms.Compose([vtransforms.ToTensor()])
        dset = MovingMNIST(opt.dset_path, opt.is_train, opt.n_frames_input,
                           opt.n_frames_output, opt.num_objects, pipeline)
    elif opt.dset_name == 'bouncing_balls':
        pipeline = transforms.Compose([vtransforms.Scale(opt.image_size),
                                       vtransforms.ToTensor()])
        dset = BouncingBalls(opt.dset_path, opt.is_train, opt.n_frames_input,
                             opt.n_frames_output, opt.image_size[0], pipeline)
    else:
        raise NotImplementedError

    # Shuffle only while training; pin memory for faster host→GPU copies.
    dloader = data.DataLoader(dset, batch_size=opt.batch_size,
                              shuffle=opt.is_train,
                              num_workers=opt.n_workers, pin_memory=True)
    return dloader
Example #21
Source File: segmentation.py From steppy-toolkit with MIT License | 6 votes |
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    """Build grayscale image/mask transforms plus train and inference augmenters."""
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([
        transforms.Grayscale(num_output_channels=3),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.dataset_params.MEAN,
                             std=self.dataset_params.STD),
    ])
    self.mask_transform = transforms.Compose([
        transforms.Lambda(to_array),
        transforms.Lambda(to_tensor),
    ])

    params = self.augmentation_params
    self.image_augment_train = ImgAug(params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(
        params['image_augment_with_target_train'])
    self.image_augment_inference = ImgAug(params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        params['image_augment_with_target_inference'])

    # Pick the dataset implementation by the configured target format.
    if self.dataset_params.target_format == 'png':
        self.dataset = ImageSegmentationPngDataset
    elif self.dataset_params.target_format == 'json':
        self.dataset = ImageSegmentationJsonDataset
    else:
        raise Exception('files must be png or json')
Example #22
Source File: weights.py From robosat with MIT License | 5 votes |
def main(args):
    """Compute per-class balancing weights from the training label masks."""
    dataset = load_config(args.dataset)

    path = dataset["common"]["dataset"]
    num_classes = len(dataset["common"]["classes"])

    train_transform = Compose([ConvertImageMode(mode="P"), MaskToTensor()])
    train_dataset = SlippyMapTiles(os.path.join(path, "training", "labels"),
                                   transform=train_transform)

    n = 0
    counts = np.zeros(num_classes, dtype=np.int64)

    loader = DataLoader(train_dataset, batch_size=1)
    for images, tile in tqdm(loader, desc="Loading", unit="image", ascii=True):
        image = np.array(torch.squeeze(images), dtype=np.uint8)
        n += image.shape[0] * image.shape[1]
        # Accumulate how many pixels belong to each class.
        counts += np.bincount(image.ravel(), minlength=num_classes)

    assert n > 0, "dataset with masks must not be empty"

    # Class weighting scheme `w = 1 / ln(c + p)` see:
    # - https://arxiv.org/abs/1707.03718
    #   LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation
    # - https://arxiv.org/abs/1606.02147
    #   ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation
    probs = counts / n
    weights = 1 / np.log(1.02 + probs)

    weights.round(6, out=weights)
    print(weights.tolist())
Example #23
Source File: pascal.py From overhaul-distillation with MIT License | 5 votes |
def transform_tr(self, sample):
    """Apply the PASCAL training augmentation pipeline to ``sample``."""
    pipeline = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.RandomScaleCrop(base_size=self.args.base_size,
                           crop_size=self.args.crop_size),
        tr.RandomGaussianBlur(),
        tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        tr.ToTensor(),
    ])
    return pipeline(sample)
Example #24
Source File: coco.py From overhaul-distillation with MIT License | 5 votes |
def transform_val(self, sample):
    """Apply the deterministic validation pipeline to ``sample``."""
    pipeline = transforms.Compose([
        tr.FixedResize(size=self.args.crop_size),
        tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        tr.ToTensor(),
    ])
    return pipeline(sample)
Example #25
Source File: coco.py From overhaul-distillation with MIT License | 5 votes |
def transform_tr(self, sample):
    """Apply the COCO training augmentation pipeline to ``sample``."""
    pipeline = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.RandomScaleCrop(base_size=self.args.base_size,
                           crop_size=self.args.crop_size),
        tr.RandomGaussianBlur(),
        tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        tr.ToTensor(),
    ])
    return pipeline(sample)
Example #26
Source File: cityscapes.py From overhaul-distillation with MIT License | 5 votes |
def transform_ts(self, sample):
    """Apply the deterministic test-time pipeline to ``sample``."""
    pipeline = transforms.Compose([
        tr.FixedResize(size=self.args.crop_size),
        tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        tr.ToTensor(),
    ])
    return pipeline(sample)
Example #27
Source File: cityscapes.py From overhaul-distillation with MIT License | 5 votes |
def transform_val(self, sample):
    """Apply the deterministic validation pipeline to ``sample``."""
    pipeline = transforms.Compose([
        tr.FixedResize(size=self.args.crop_size),
        tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        tr.ToTensor(),
    ])
    return pipeline(sample)
Example #28
Source File: predictor.py From Res2Net-maskrcnn with MIT License | 5 votes |
def build_transform(self):
    """
    Creates a basic transformation that was used to train the models
    """
    cfg = self.cfg

    # we are loading images with OpenCV, so we don't need to convert them
    # to BGR, they are already! So all we need to do is to normalize
    # by 255 if we want to convert to BGR255 format, or flip the channels
    # if we want it to be in RGB in [0-1] range.
    if cfg.INPUT.TO_BGR255:
        to_bgr_transform = T.Lambda(lambda x: x * 255)
    else:
        to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])

    normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN,
                                      std=cfg.INPUT.PIXEL_STD)

    return T.Compose([
        T.ToPILImage(),
        T.Resize(self.min_image_size),
        T.ToTensor(),
        to_bgr_transform,
        normalize_transform,
    ])
Example #29
Source File: data.py From MobileNetV3-pytorch with MIT License | 5 votes |
def inception_preproccess(input_size, normalize=__imagenet_stats):
    """Inception-style training preprocessing: random resized crop + flip,
    followed by normalization with ``normalize`` (mean/std mapping)."""
    return transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
Example #30
Source File: segmentation.py From steppy-toolkit with MIT License | 5 votes |
def __init__(self, loader_params, dataset_params, augmentation_params):
    """Configure grayscale image/mask transforms and inference augmenters
    for test-time augmentation."""
    super().__init__(loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([
        transforms.Grayscale(num_output_channels=3),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.dataset_params.MEAN,
                             std=self.dataset_params.STD),
    ])
    self.mask_transform = transforms.Compose([
        transforms.Lambda(to_array),
        transforms.Lambda(to_tensor),
    ])

    params = self.augmentation_params
    self.image_augment_inference = ImgAug(params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        params['image_augment_with_target_inference'])

    self.dataset = ImageSegmentationTTADataset