Python torchvision.transforms.Grayscale() Examples
The following are 23 code examples of torchvision.transforms.Grayscale(). The original project and source file are noted above each example. You may also want to check out all available functions and classes of the torchvision.transforms module.
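For orientation before the project examples, here is a minimal standalone sketch (the image path is a hypothetical placeholder). Grayscale(num_output_channels=1) yields a single-channel image, while num_output_channels=3 replicates the gray values across three channels, the pattern several examples below use to feed grayscale data to models that expect RGB input.

from PIL import Image
from torchvision import transforms

img = Image.open("example.jpg")  # hypothetical path; any RGB image works

# One output channel: resulting tensor has shape (1, H, W)
to_gray = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),
    transforms.ToTensor(),
])

# Three output channels: gray values are replicated, shape (3, H, W),
# which lets grayscale data pass through models pretrained on RGB input
to_gray_rgb = transforms.Compose([
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
])

print(to_gray(img).shape)      # torch.Size([1, H, W])
print(to_gray_rgb(img).shape)  # torch.Size([3, H, W])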
Example #1
Source File: datasets.py From OCDVAEContinualLearning with MIT License

def __get_transforms(self, patch_size):
    if self.gray_scale:
        train_transforms = transforms.Compose([
            transforms.Resize(size=(patch_size, patch_size)),
            transforms.Grayscale(num_output_channels=1),
            transforms.ToTensor(),
        ])
        val_transforms = transforms.Compose([
            transforms.Resize(size=(patch_size, patch_size)),
            transforms.Grayscale(num_output_channels=1),
            transforms.ToTensor(),
        ])
    else:
        train_transforms = transforms.Compose([
            transforms.Resize(size=(patch_size, patch_size)),
            transforms.ToTensor(),
        ])
        val_transforms = transforms.Compose([
            transforms.Resize(size=(patch_size, patch_size)),
            transforms.ToTensor(),
        ])

    return train_transforms, val_transforms
Example #2
Source File: segmentation.py From steppy-toolkit with MIT License

def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])

    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])
    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])

    if self.dataset_params.target_format == 'png':
        self.dataset = ImageSegmentationPngDataset
    elif self.dataset_params.target_format == 'json':
        self.dataset = ImageSegmentationJsonDataset
    else:
        raise Exception('files must be png or json')
Example #3
Source File: segmentation.py From steppy-toolkit with MIT License

def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w)),
                                               transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w),
                                                                interpolation=0),
                                              transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])

    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])

    if self.dataset_params.target_format == 'png':
        self.dataset = ImageSegmentationPngDataset
    elif self.dataset_params.target_format == 'json':
        self.dataset = ImageSegmentationJsonDataset
    else:
        raise Exception('files must be png or json')
Example #4
Source File: segmentation.py From steppy-toolkit with MIT License

def __init__(self, loader_params, dataset_params, augmentation_params):
    super().__init__(loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w)),
                                               transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w),
                                                                interpolation=0),
                                              transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])

    self.dataset = ImageSegmentationTTADataset
Example #5
Source File: loaders.py From open-solution-salt-identification with MIT License

def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               AddDepthChannels()
                                               ])
    self.mask_transform = transforms.Lambda(preprocess_emptiness_target)

    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])
    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])

    self.dataset = EmptinessDataset
Example #6
Source File: loaders.py From open-solution-salt-identification with MIT License

def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               AddDepthChannels()
                                               ])
    self.mask_transform = transforms.Lambda(preprocess_target)

    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])
    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])

    if self.dataset_params.use_depth:
        self.dataset = ImageSegmentationDatasetWithDepth
    else:
        self.dataset = ImageSegmentationDataset
Example #7
Source File: loaders.py From open-solution-salt-identification with MIT License

def __init__(self, loader_params, dataset_params, augmentation_params):
    super().__init__(loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               AddDepthChannels()
                                               ])
    self.mask_transform = transforms.Lambda(preprocess_target)

    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])

    if self.dataset_params.use_depth:
        self.dataset = ImageSegmentationTTADatasetWithDepth
    else:
        self.dataset = ImageSegmentationTTADataset
Example #8
Source File: conv_cifar_2.py From cwcf with MIT License

def get_data(train):
    data_raw = datasets.CIFAR10('../data/dl/', train=train, download=True,
                                transform=transforms.Compose([
                                    transforms.Grayscale(),
                                    transforms.Resize((20, 20)),
                                    transforms.ToTensor(),
                                    lambda x: x.numpy().flatten()]))

    data_x, data_y = zip(*data_raw)
    data_x = np.array(data_x)
    data_y = np.array(data_y, dtype='int32').reshape(-1, 1)

    # binarize
    label_0 = data_y < 5
    label_1 = ~label_0
    data_y[label_0] = 0
    data_y[label_1] = 1

    data = pd.DataFrame(data_x)
    data[COLUMN_LABEL] = data_y

    return data, data_x.mean(), data_x.std()
Example #9
Source File: conv_cifar.py From cwcf with MIT License

def get_data(train):
    data_raw = datasets.CIFAR10('../data/dl/', train=train, download=True,
                                transform=transforms.Compose([
                                    transforms.Grayscale(),
                                    transforms.Resize((20, 20)),
                                    transforms.ToTensor(),
                                    lambda x: x.numpy().flatten()]))

    data_x, data_y = zip(*data_raw)
    data_x = np.array(data_x)
    data_y = np.array(data_y, dtype='int32').reshape(-1, 1)

    data = pd.DataFrame(data_x)
    data[COLUMN_LABEL] = data_y

    return data, data_x.mean(), data_x.std()
Example #10
Source File: get_dataloader.py From Greedy_InfoMax with MIT License

def get_transforms(eval=False, aug=None):
    trans = []

    if aug["randcrop"] and not eval:
        trans.append(transforms.RandomCrop(aug["randcrop"]))

    if aug["randcrop"] and eval:
        trans.append(transforms.CenterCrop(aug["randcrop"]))

    if aug["flip"] and not eval:
        trans.append(transforms.RandomHorizontalFlip())

    if aug["grayscale"]:
        trans.append(transforms.Grayscale())
        trans.append(transforms.ToTensor())
        trans.append(transforms.Normalize(mean=aug["bw_mean"], std=aug["bw_std"]))
    elif aug["mean"]:
        trans.append(transforms.ToTensor())
        trans.append(transforms.Normalize(mean=aug["mean"], std=aug["std"]))
    else:
        trans.append(transforms.ToTensor())

    trans = transforms.Compose(trans)
    return trans
Example #11
Source File: data_loader.py From transferlearning with MIT License

def load_test(root_dir, domain, batch_size):
    transform = transforms.Compose([
        transforms.Grayscale(),
        transforms.Resize([28, 28]),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0,), std=(1,)),
    ])

    image_folder = datasets.ImageFolder(
        root=root_dir + domain,
        transform=transform
    )

    data_loader = torch.utils.data.DataLoader(dataset=image_folder,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=2)
    return data_loader
Example #12
Source File: gan_trainer.py From pytorch_GAN_zoo with BSD 3-Clause "New" or "Revised" License

def getDataset(self, scale, size=None):
    if size is None:
        size = self.model.getSize()

    isH5 = os.path.splitext(self.path_db)[1] == ".h5"

    print("size", size)

    transformList = [NumpyResize(size),
                     NumpyToTensor(),
                     Transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]

    if self.modelConfig.dimOutput == 1:
        transformList = [Transforms.Grayscale(1)] + transformList

    transform = Transforms.Compose(transformList)

    if isH5:
        return H5Dataset(self.path_db,
                         partition_path=self.pathPartition,
                         partition_value=self.partitionValue,
                         specificAttrib=self.selectedAttributes,
                         stats_file=self.pathAttribDict,
                         transform=transform)

    return AttribDataset(self.path_db,
                         transform=transform,
                         attribDictPath=self.pathAttribDict,
                         specificAttrib=self.selectedAttributes,
                         mimicImageFolder=self.imagefolderDataset)
Example #13
Source File: attrib_dataset.py From pytorch_GAN_zoo with BSD 3-Clause "New" or "Revised" License

def __getitem__(self, idx):
    imgName = self.listImg[idx]
    imgPath = os.path.join(self.pathdb, imgName)
    img = pil_loader(imgPath)

    if self.transform is not None:
        img = self.transform(img)

    # Build the attribute tensor
    attr = [0 for i in range(self.totAttribSize)]

    if self.hasAttrib:
        attribVals = self.attribDict[imgName]
        for key, val in attribVals.items():
            baseShift = self.shiftAttrib[key]
            attr[baseShift] = self.shiftAttribVal[key][val]
    else:
        attr = [0]

    if self.pathMask is not None:
        mask_path = os.path.join(self.pathMask,
                                 os.path.splitext(imgName)[0] + "_mask.jpg")
        mask = pil_loader(mask_path)
        mask = Transforms.Grayscale(1)(mask)
        mask = self.transform(mask)

        return img, torch.tensor(attr, dtype=torch.long), mask

    return img, torch.tensor(attr, dtype=torch.long)
Example #14
Source File: base_dataset.py From CAG_UDA with MIT License

def get_transform(opt, grayscale=False, convert=True, crop=True, flip=True):
    """Create a torchvision transformation function

    The type of transformation is defined by option (e.g., [opt.preprocess], [opt.load_size], [opt.crop_size])
    and can be overwritten by arguments such as [convert], [crop], and [flip]

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        grayscale (bool)   -- if True, convert the input RGB image to a grayscale image
        convert (bool)     -- if True, convert the image to a tensor with values between [-1, 1]
        crop (bool)        -- if True, apply cropping
        flip (bool)        -- if True, apply horizontal flipping
    """
    transform_list = []
    if grayscale:
        transform_list.append(transforms.Grayscale(1))
    if opt.preprocess == 'resize_and_crop':
        osize = [opt.load_size, opt.load_size]
        transform_list.append(transforms.Resize(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.crop_size))
    elif opt.preprocess == 'crop' and crop:
        transform_list.append(transforms.RandomCrop(opt.crop_size))
    elif opt.preprocess == 'scale_width':
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.crop_size)))
    elif opt.preprocess == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size)))
        if crop:
            transform_list.append(transforms.RandomCrop(opt.crop_size))
    elif opt.preprocess == 'none':
        transform_list.append(transforms.Lambda(lambda img: __adjust(img)))
    else:
        raise ValueError('--preprocess %s is not a valid option.' % opt.preprocess)

    if not opt.no_flip and flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    if convert:
        transform_list += [transforms.ToTensor(),
                           transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Example #15
Source File: dataset_loader.py From semi-adversarial-networks with MIT License

def get_loader(image_path, proto_same_path, proto_oppo_path, metadata_path,
               crop_size=(224, 224), image_size=(224, 224), batch_size=64,
               dataset='CelebA', mode='train', num_workers=1):
    """Build and return data loader."""

    if mode == 'train':
        transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.RandomCrop(size=crop_size),
            transforms.Resize(image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])
    else:
        transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.Resize(image_size),
            transforms.ToTensor()
        ])

    # if dataset == 'CelebA':
    dataset = CelebaDataset(image_path, proto_same_path, proto_oppo_path,
                            metadata_path, transform, mode)  # , flip_rate=flip_rate)

    if mode == 'train':
        shuffle = True
    else:
        shuffle = False

    data_loader = DataLoader(dataset=dataset,
                             batch_size=batch_size,
                             shuffle=shuffle,
                             num_workers=num_workers)
    return data_loader
Example #16
Source File: utils.py From TorchFusion with MIT License

def load_image(file, grayscale=False, target_size=None, to_tensor=True,
               mean=0.5, std=0.5, interpolation=Image.BILINEAR):
    """
    Loads an image from file and applies the requested transforms.

    :param file: path to the image file
    :param grayscale: if True, convert the image to grayscale
    :param target_size: int or tuple; size of the center crop applied to the image
    :param to_tensor: if True, convert the image to a tensor
    :param mean: mean used for normalization (scalar or tuple)
    :param std: standard deviation used for normalization (scalar or tuple)
    :param interpolation: PIL interpolation mode (not used by the transforms in this snippet)
    :return: the transformed image
    """
    img = Image.open(file).convert("RGB")

    transformations = []

    if grayscale:
        transformations.append(transforms.Grayscale())

    if target_size is not None:
        target_ = target_size
        if isinstance(target_size, int):
            target_ = (target_size, target_size)
        transformations.append(transforms.CenterCrop(target_))

    if to_tensor:
        transformations.append(transforms.ToTensor())
        if mean is not None and std is not None:
            if not isinstance(mean, tuple):
                mean = (mean,)
            if not isinstance(std, tuple):
                std = (std,)
            transformations.append(transforms.Normalize(mean=mean, std=std))

    trans_ = transforms.Compose(transformations)

    return trans_(img)
Example #17
Source File: masks.py From pixel-constrained-cnn-pytorch with Apache License 2.0

def __init__(self, img_size, mask_descriptor):
    self.img_size = img_size
    self.num_pixels = img_size[1] * img_size[2]
    self.mask_type, self.mask_attribute = mask_descriptor

    if self.mask_type == 'random_blob_cache':
        dset = datasets.ImageFolder(self.mask_attribute[0],
                                    transform=transforms.Compose([transforms.Grayscale(),
                                                                  transforms.ToTensor()]))
        self.data_loader = DataLoader(dset, batch_size=self.mask_attribute[1], shuffle=True)
Example #18
Source File: base_dataset.py From EvolutionaryGAN-pytorch with MIT License

def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
    transform_list = []
    if grayscale:
        transform_list.append(transforms.Grayscale(1))
    if 'resize' in opt.preprocess:
        osize = [opt.load_size, opt.load_size]
        transform_list.append(transforms.Resize(osize, method))
    elif 'scale_width' in opt.preprocess:
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method)))

    if 'crop' in opt.preprocess:
        if params is None:
            transform_list.append(transforms.RandomCrop(opt.crop_size))
        else:
            transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))

    if opt.preprocess == 'none':
        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))

    if not opt.no_flip:
        if params is None:
            transform_list.append(transforms.RandomHorizontalFlip())
        elif params['flip']:
            transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))

    if convert:
        transform_list += [transforms.ToTensor(),
                           transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Example #19
Source File: base_dataset.py From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License

def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
    transform_list = []
    if grayscale:
        transform_list.append(transforms.Grayscale(1))
    if 'resize' in opt.preprocess:
        osize = [opt.load_size, opt.load_size]
        transform_list.append(transforms.Resize(osize, method))
    elif 'scale_width' in opt.preprocess:
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method)))

    if 'crop' in opt.preprocess:
        if params is None:
            transform_list.append(transforms.RandomCrop(opt.crop_size))
        else:
            transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))

    if opt.preprocess == 'none':
        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))

    if not opt.no_flip:
        if params is None:
            transform_list.append(transforms.RandomHorizontalFlip())
        elif params['flip']:
            transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))

    if convert:
        transform_list += [transforms.ToTensor()]
        if grayscale:
            transform_list += [transforms.Normalize((0.5,), (0.5,))]
        else:
            transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Example #20
Source File: segmentation.py From steppy-toolkit with MIT License

def __init__(self, loader_params, dataset_params, augmentation_params):
    super().__init__(loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])

    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])

    self.dataset = ImageSegmentationTTADataset
Example #21
Source File: data_loader.py From transferlearning with MIT License

def load_data(root_dir, domain, batch_size):
    transform = transforms.Compose([
        transforms.Grayscale(),
        transforms.Resize([28, 28]),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0,), std=(1,)),
    ])

    image_folder = datasets.ImageFolder(
        root=root_dir + domain,
        transform=transform
    )

    data_loader = torch.utils.data.DataLoader(dataset=image_folder,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=2,
                                              drop_last=True)
    return data_loader
Example #22
Source File: dataloaders.py From pixel-constrained-cnn-pytorch with Apache License 2.0

def celeba(batch_size=128, num_colors=256, size=178, crop=178, grayscale=False,
           shuffle=True, path_to_data='../celeba_data'):
    """CelebA dataloader with square images. Note that original CelebA images
    have shape (218, 178); this dataloader center crops them to (178, 178) by
    default.

    Parameters
    ----------
    batch_size : int

    num_colors : int
        Number of colors to quantize images into. Typically 256, but can be
        lower for e.g. binary images.

    size : int
        Size (height and width) of each image.

    crop : int
        Size of center crop. This crop happens *before* the resizing.

    grayscale : bool
        If True converts images to grayscale.

    shuffle : bool
        If True shuffles images.

    path_to_data : string
        Path to CelebA image files.
    """
    quantize = get_quantize_func(num_colors)

    if grayscale:
        transform = transforms.Compose([
            transforms.CenterCrop(crop),
            transforms.Resize(size),
            transforms.Grayscale(),
            transforms.ToTensor(),
            transforms.Lambda(lambda x: quantize(x))
        ])
    else:
        transform = transforms.Compose([
            transforms.CenterCrop(crop),
            transforms.Resize(size),
            transforms.ToTensor(),
            transforms.Lambda(lambda x: quantize(x))
        ])

    celeba_data = CelebADataset(path_to_data, transform=transform)
    celeba_loader = DataLoader(celeba_data, batch_size=batch_size, shuffle=shuffle)
    return celeba_loader
Example #23
Source File: image_augmentation.py From Auto-PyTorch with Apache License 2.0

def compute_mean_std(self, pipeline_config, hyperparameter_config, X, Y, train_indices, dataset_info):
    log = logging.getLogger('autonet')

    if dataset_info.path in self.mean_std_cache:
        mean, std = self.mean_std_cache[dataset_info.path]
        log.debug('CACHED: MEAN: ' + str(mean) + ' -- STD: ' + str(std))
        return mean, std

    from autoPyTorch.pipeline.nodes.image.create_image_dataloader import CreateImageDataLoader
    loader = CreateImageDataLoader()

    image_size = min(dataset_info.x_shape[-2], dataset_info.x_shape[-1])
    transform_list = []

    if len(X.shape) > 1:
        transform_list.append(transforms.ToPILImage())
    transform_list.append(transforms.Resize(image_size))
    transform_list.append(transforms.CenterCrop(image_size))
    if dataset_info.x_shape[1] == 1:
        transform_list.append(transforms.Grayscale(1))
    transform_list.append(transforms.ToTensor())
    train_transform = transforms.Compose(transform_list)

    cache_size = pipeline_config['dataloader_cache_size_mb']
    pipeline_config['dataloader_cache_size_mb'] = 0
    train_loader = loader.fit(pipeline_config, hyperparameter_config,
                              X, Y, train_indices, None,
                              train_transform, None, dataset_info)['train_loader']
    pipeline_config['dataloader_cache_size_mb'] = cache_size

    mean = 0.
    std = 0.
    nb_samples = 0.

    with torch.no_grad():
        for data, _ in train_loader:
            # import matplotlib.pyplot as plt
            # img = plt.imshow(data.numpy()[0,1,:])
            # plt.show()
            batch_samples = data.size(0)
            data = data.view(batch_samples, data.size(1), -1)
            mean = mean + data.mean(2).sum(0)
            std = std + data.std(2).sum(0)
            nb_samples += batch_samples

    if nb_samples > 0.:
        mean /= nb_samples
        std /= nb_samples
        mean, std = mean.numpy().tolist(), std.numpy().tolist()
    else:
        mean, std = [mean], [std]

    log.debug('MEAN: ' + str(mean) + ' -- STD: ' + str(std))

    self.mean_std_cache[dataset_info.path] = [mean, std]
    return mean, std