Python torchvision.transforms.TenCrop() Examples
The following are 11 code examples of torchvision.transforms.TenCrop(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the torchvision.transforms module.
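TenCrop takes an image, crops it at the four corners and the center, and repeats those five crops on the horizontally flipped image, returning a tuple of ten crops. Because the output is a tuple rather than a single image, the usual pattern (used throughout the examples below) is to follow it with a Lambda that stacks the crops into one tensor, then average the model's predictions over the crop dimension at test time. A minimal sketch of that pattern, with a placeholder image path and an off-the-shelf model:

    import torch
    import torchvision.transforms as transforms
    import torchvision.models as models
    from PIL import Image

    # Resize, take ten 224x224 crops, convert each crop to a normalized
    # tensor, and stack them into a single (10, C, H, W) tensor.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.TenCrop(224),
        transforms.Lambda(lambda crops: torch.stack(
            [normalize(transforms.ToTensor()(crop)) for crop in crops])),
    ])

    model = models.resnet18(pretrained=True).eval()
    img = Image.open("example.jpg").convert("RGB")   # placeholder path
    crops = transform(img)                           # (10, 3, 224, 224)

    with torch.no_grad():
        logits = model(crops)                        # (10, num_classes)
        probs = logits.softmax(dim=1).mean(dim=0)    # average over the ten crops
    print(probs.argmax().item())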
Example #1
Source File: preprocess.py From convNet.pytorch with MIT License
def scale_crop(input_size, scale_size=None, num_crops=1, normalize=_IMAGENET_STATS):
    assert num_crops in [1, 5, 10], "num crops must be in {1,5,10}"
    convert_tensor = transforms.Compose([transforms.ToTensor(),
                                         transforms.Normalize(**normalize)])
    if num_crops == 1:
        t_list = [
            transforms.CenterCrop(input_size),
            convert_tensor
        ]
    else:
        if num_crops == 5:
            t_list = [transforms.FiveCrop(input_size)]
        elif num_crops == 10:
            t_list = [transforms.TenCrop(input_size)]  # returns a 4D tensor
        t_list.append(transforms.Lambda(
            lambda crops: torch.stack([convert_tensor(crop) for crop in crops])))
    if scale_size != input_size:
        t_list = [transforms.Resize(scale_size)] + t_list
    return transforms.Compose(t_list)
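A hypothetical call showing the shapes this helper produces. _IMAGENET_STATS is not shown in the snippet; from the **normalize unpacking it is presumably a dict with mean and std keys, so the sketch below assumes that:

    _IMAGENET_STATS = {'mean': [0.485, 0.456, 0.406],   # assumed definition
                       'std': [0.229, 0.224, 0.225]}

    eval_transform = scale_crop(input_size=224, scale_size=256, num_crops=10)
    # Applied to a PIL image:
    #   num_crops == 1       -> tensor of shape (3, 224, 224)
    #   num_crops == 5 or 10 -> tensor of shape (num_crops, 3, 224, 224)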
Example #2
Source File: utils.py From inplace_abn with BSD 3-Clause "New" or "Revised" License
def create_test_transforms(config, crop, scale, ten_crops):
    normalize = transforms.Normalize(mean=config["mean"], std=config["std"])
    val_transforms = []
    if scale != -1:
        val_transforms.append(transforms.Resize(scale))
    if ten_crops:
        val_transforms += [
            transforms.TenCrop(crop),
            transforms.Lambda(lambda crops: [transforms.ToTensor()(crop) for crop in crops]),
            transforms.Lambda(lambda crops: [normalize(crop) for crop in crops]),
            transforms.Lambda(lambda crops: torch.stack(crops))
        ]
    else:
        val_transforms += [
            transforms.CenterCrop(crop),
            transforms.ToTensor(),
            normalize
        ]
    return val_transforms
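Note that this helper returns a plain list of transforms rather than a composed pipeline, so the caller is expected to wrap it. A short sketch, assuming a config dict with the usual ImageNet statistics:

    config = {"mean": [0.485, 0.456, 0.406], "std": [0.229, 0.224, 0.225]}
    val_transform = transforms.Compose(
        create_test_transforms(config, crop=224, scale=256, ten_crops=True))
    # Each sample becomes a (10, 3, 224, 224) tensor, so a DataLoader batch
    # has shape (batch_size, 10, 3, 224, 224) and is typically flattened to
    # (batch_size * 10, 3, 224, 224) before the forward pass.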
Example #3
Source File: transform_wrapper.py From BBN with MIT License
def ten_crop(cfg, **kwargs):
    size = kwargs["input_size"] if kwargs["input_size"] is not None else cfg.INPUT_SIZE
    return transforms.TenCrop(size)
Example #4
Source File: extract_features.py From DARENet with MIT License
def extract_features_CUHK03(model, scale_image_size, data, extract_features_folder,
                            logger, batch_size=128, workers=4, is_tencrop=False,
                            normalize=None):
    logger.info('Begin extract features')
    if normalize is None:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    if is_tencrop:
        logger.info('==> Using TenCrop')
        tencrop = transforms.Compose([
            transforms.Resize([int(x * 1.125) for x in scale_image_size]),
            transforms.TenCrop(scale_image_size)])
    else:
        tencrop = None
    transform = transforms.Compose([
        transforms.Resize(scale_image_size),
        transforms.ToTensor(),
        normalize,
    ])
    train_data_folder = data
    logger.info('Begin load train data from ' + train_data_folder)
    train_dataloader = torch.utils.data.DataLoader(
        Datasets.CUHK03EvaluateDataset(folder=train_data_folder,
                                       transform=transform, tencrop=tencrop),
        batch_size=batch_size, shuffle=False,
        num_workers=workers, pin_memory=True)
    train_features = extract_features(model, train_dataloader, is_tencrop)
    if not os.path.isdir(extract_features_folder):
        os.makedirs(extract_features_folder)
    sio.savemat(os.path.join(extract_features_folder, 'train_features.mat'),
                {'feature_train_new': train_features})
    return
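The resize factor of 1.125 gives the corner crops a small margin around the target size; for a typical person re-identification crop the arithmetic works out as follows:

    scale_image_size = [256, 128]                         # target crop (H, W)
    resized = [int(x * 1.125) for x in scale_image_size]  # [288, 144]
    # TenCrop(scale_image_size) then takes ten 256x128 crops
    # out of the 288x144 image.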
Example #5
Source File: Market1501.py From triplet-reid-pytorch with Apache License 2.0
def __init__(self, data_path, is_train=True, *args, **kwargs):
    super(Market1501, self).__init__(*args, **kwargs)
    self.is_train = is_train
    self.data_path = data_path
    self.imgs = os.listdir(data_path)
    self.imgs = [el for el in self.imgs if os.path.splitext(el)[1] == '.jpg']
    self.lb_ids = [int(el.split('_')[0]) for el in self.imgs]
    self.lb_cams = [int(el.split('_')[1][1]) for el in self.imgs]
    self.imgs = [os.path.join(data_path, el) for el in self.imgs]
    if is_train:
        self.trans = transforms.Compose([
            transforms.Resize((288, 144)),
            transforms.RandomCrop((256, 128)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.486, 0.459, 0.408), (0.229, 0.224, 0.225)),
            RandomErasing(0.5, mean=[0.0, 0.0, 0.0])
        ])
    else:
        self.trans_tuple = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.486, 0.459, 0.408), (0.229, 0.224, 0.225))
        ])
        self.Lambda = transforms.Lambda(
            lambda crops: [self.trans_tuple(crop) for crop in crops])
        self.trans = transforms.Compose([
            transforms.Resize((288, 144)),
            transforms.TenCrop((256, 128)),
            self.Lambda,
        ])
    # useful for sampler
    self.lb_img_dict = dict()
    self.lb_ids_uniq = set(self.lb_ids)
    lb_array = np.array(self.lb_ids)
    for lb in self.lb_ids_uniq:
        idx = np.where(lb_array == lb)[0]
        self.lb_img_dict.update({lb: idx})
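In evaluation mode the final Lambda returns a plain Python list of ten tensors (there is no torch.stack), so PyTorch's default collate function turns a batch of such samples into a list of ten batched tensors rather than a single 5D tensor. A sketch of folding such a batch back together:

    # crops: list of 10 tensors, each of shape (batch_size, 3, 256, 128),
    # as produced by the default collate function for list-valued samples.
    def fold_crops(crops):
        return torch.stack(crops, dim=1)  # (batch_size, 10, 3, 256, 128)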
Example #6
Source File: dataset.py From IIC with MIT License
def __init__(self, base_dataset, input_sz=None, include_rgb=None):
    super(TenCropAndFinish, self).__init__()
    self.base_dataset = base_dataset
    self.num_tfs = 10
    self.input_sz = input_sz
    self.include_rgb = include_rgb
    self.crops_tf = transforms.TenCrop(self.input_sz)
    self.finish_tf = custom_greyscale_to_tensor(self.include_rgb)
Example #7
Source File: transform.py From DFL-CNN with MIT License
def get_transform_for_test():
    transform_list = []
    transform_list.append(transforms.Lambda(
        lambda img: scale_keep_ar_min_fixed(img, 560)))
    transform_list.append(transforms.TenCrop(448))
    transform_list.append(transforms.Lambda(
        lambda crops: torch.stack([
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))(
                transforms.ToTensor()(crop))
            for crop in crops])))
    # transform_list.append(transforms.Normalize(mean=(0.5,0.5,0.5), std=(0.5,0.5,0.5)))
    return transforms.Compose(transform_list)
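Here scale_keep_ar_min_fixed is the repository's own helper; judging by its name and usage, it resizes the image so that its shorter side becomes 560 pixels while keeping the aspect ratio. Since each test image becomes a (10, 3, 448, 448) tensor, a batch must be flattened before the forward pass and the outputs re-folded to average over crops. A sketch, assuming model and img (a PIL image) are already defined:

    test_tf = get_transform_for_test()
    crops = test_tf(img)                             # (10, 3, 448, 448)
    inputs = crops.unsqueeze(0)                      # toy batch of size 1
    bs, ncrops, c, h, w = inputs.shape
    outputs = model(inputs.view(-1, c, h, w))        # (bs * 10, num_classes)
    avg = outputs.view(bs, ncrops, -1).mean(dim=1)   # average over the ten crops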
Example #8
Source File: main.py From smooth-topk with MIT License
def loaders_imagenet(dataset_name, batch_size, cuda, train_size, augment=True,
                     val_size=50000, test_batch_size=256, topk=None, noise=False,
                     multiple_crops=False, data_root=None):
    assert dataset_name == 'imagenet'
    data_root = data_root if data_root is not None else os.environ['VISION_DATA_SSD']
    root = '{}/ILSVRC2012-prepr-split/images'.format(data_root)
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    traindir = os.path.join(root, 'train')
    valdir = os.path.join(root, 'val')
    testdir = os.path.join(root, 'test')
    normalize = transforms.Normalize(mean=mean, std=std)
    if multiple_crops:
        print('Using multiple crops')
        transform_test = transforms.Compose([
            transforms.Resize(256),
            transforms.TenCrop(224),
            lambda x: [normalize(transforms.functional.to_tensor(img)) for img in x]])
    else:
        transform_test = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize])
    if augment:
        transform_train = transforms.Compose([
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize])
    else:
        transform_train = transform_test
    dataset_train = datasets.ImageFolder(traindir, transform_train)
    dataset_val = datasets.ImageFolder(valdir, transform_test)
    dataset_test = datasets.ImageFolder(testdir, transform_test)
    return create_loaders(dataset_name, dataset_train, dataset_val, dataset_test,
                          train_size, val_size, batch_size, test_batch_size,
                          cuda, noise=noise, num_workers=4)
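In the multiple-crops branch the final step is a bare lambda rather than transforms.Lambda; that works because Compose simply calls each transform in turn. Each sample then comes out as a list of ten normalized tensors, which the evaluation code (inside create_loaders, not shown here) has to stack itself:

    sample = transform_test(img)   # list of 10 tensors, each (3, 224, 224)
    crops = torch.stack(sample)    # (10, 3, 224, 224)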
Example #9
Source File: extract_features.py From DARENet with MIT License
def extract_features_MARS(model, scale_image_size, info_folder, data,
                          extract_features_folder, logger, batch_size=128,
                          workers=4, is_tencrop=False):
    logger.info('Begin extract features')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if is_tencrop:
        logger.info('==> Using TenCrop')
        tencrop = transforms.Compose([
            transforms.Resize([int(x * 1.125) for x in scale_image_size]),
            transforms.TenCrop(scale_image_size)])
    else:
        tencrop = None
    transform = transforms.Compose([
        transforms.Resize(scale_image_size),
        transforms.ToTensor(),
        normalize,
    ])
    train_name_path = os.path.join(info_folder, 'train_name.txt')
    test_name_path = os.path.join(info_folder, 'test_name.txt')
    train_data_folder = os.path.join(data, 'bbox_train')
    test_data_folder = os.path.join(data, 'bbox_test')
    logger.info('Train data folder: ' + train_data_folder)
    logger.info('Test data folder: ' + test_data_folder)
    logger.info('Begin load train data')
    train_dataloader = torch.utils.data.DataLoader(
        Datasets.MARSEvalDataset(folder=train_data_folder,
                                 image_name_file=train_name_path,
                                 transform=transform, tencrop=tencrop),
        batch_size=batch_size, shuffle=False,
        num_workers=workers, pin_memory=True)
    logger.info('Begin load test data')
    test_dataloader = torch.utils.data.DataLoader(
        Datasets.MARSEvalDataset(folder=test_data_folder,
                                 image_name_file=test_name_path,
                                 transform=transform, tencrop=tencrop),
        batch_size=batch_size, shuffle=False,
        num_workers=workers, pin_memory=True)
    train_features = extract_features(model, train_dataloader, is_tencrop)
    test_features = extract_features(model, test_dataloader, is_tencrop)
    if not os.path.isdir(extract_features_folder):
        os.makedirs(extract_features_folder)
    sio.savemat(os.path.join(extract_features_folder, 'train_features.mat'),
                {'feature_train_new': train_features})
    sio.savemat(os.path.join(extract_features_folder, 'test_features.mat'),
                {'feature_test_new': test_features})
    return
Example #10
Source File: extract_features.py From DARENet with MIT License
def extract_features_Market1501(model, scale_image_size, data, extract_features_folder,
                                logger, batch_size=128, workers=4, is_tencrop=False,
                                gen_stage_features=False):
    logger.info('Begin extract features')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if is_tencrop:
        logger.info('==> Using TenCrop')
        tencrop = transforms.Compose([
            transforms.Resize([int(x * 1.125) for x in scale_image_size]),
            transforms.TenCrop(scale_image_size)])
    else:
        tencrop = None
    transform = transforms.Compose([
        transforms.Resize(scale_image_size),
        transforms.ToTensor(),
        normalize,
    ])
    train_data_folder = os.path.join(data, 'bounding_box_train')
    test_data_folder = os.path.join(data, 'bounding_box_test')
    query_data_folder = os.path.join(data, 'query')
    logger.info('Begin load train data from ' + train_data_folder)
    train_dataloader = torch.utils.data.DataLoader(
        Datasets.Market1501EvaluateDataset(folder=train_data_folder,
                                           transform=transform, tencrop=tencrop),
        batch_size=batch_size, shuffle=False,
        num_workers=workers, pin_memory=True)
    logger.info('Begin load test data from ' + test_data_folder)
    test_dataloader = torch.utils.data.DataLoader(
        Datasets.Market1501EvaluateDataset(folder=test_data_folder,
                                           transform=transform, tencrop=tencrop),
        batch_size=batch_size, shuffle=False,
        num_workers=workers, pin_memory=True)
    logger.info('Begin load query data from ' + query_data_folder)
    query_dataloader = torch.utils.data.DataLoader(
        Datasets.Market1501EvaluateDataset(folder=query_data_folder,
                                           transform=transform, tencrop=tencrop),
        batch_size=batch_size, shuffle=False,
        num_workers=workers, pin_memory=True)
    if not gen_stage_features:
        train_features = extract_features(model, train_dataloader, is_tencrop)
        test_features = extract_features(model, test_dataloader, is_tencrop)
        query_features = extract_features(model, query_dataloader, is_tencrop)
        if not os.path.isdir(extract_features_folder):
            os.makedirs(extract_features_folder)
        sio.savemat(os.path.join(extract_features_folder, 'train_features.mat'),
                    {'feature_train_new': train_features})
        sio.savemat(os.path.join(extract_features_folder, 'test_features.mat'),
                    {'feature_test_new': test_features})
        sio.savemat(os.path.join(extract_features_folder, 'query_features.mat'),
                    {'feature_query_new': query_features})
    else:
        # model.gen_stage_features = True
        train_features = extract_stage_features(model, train_dataloader, is_tencrop)
        test_features = extract_stage_features(model, test_dataloader, is_tencrop)
        query_features = extract_stage_features(model, query_dataloader, is_tencrop)
        if not os.path.isdir(extract_features_folder):
            os.makedirs(extract_features_folder)
        for i in range(4):
            sio.savemat(os.path.join(extract_features_folder,
                                     'train_features_{}.mat'.format(i + 1)),
                        {'feature_train_new': train_features[i]})
            sio.savemat(os.path.join(extract_features_folder,
                                     'test_features_{}.mat'.format(i + 1)),
                        {'feature_test_new': test_features[i]})
            sio.savemat(os.path.join(extract_features_folder,
                                     'query_features_{}.mat'.format(i + 1)),
                        {'feature_query_new': query_features[i]})
        sio.savemat(os.path.join(extract_features_folder, 'train_features_fusion.mat'),
                    {'feature_train_new': train_features[4]})
        sio.savemat(os.path.join(extract_features_folder, 'test_features_fusion.mat'),
                    {'feature_test_new': test_features[4]})
        sio.savemat(os.path.join(extract_features_folder, 'query_features_fusion.mat'),
                    {'feature_query_new': query_features[4]})
Example #11
Source File: tars_data_loaders.py From pytorch_classifiers with GNU General Public License v3.0
def data_loader_predict(data_dir, input_shape, name):
    if name in ["inceptionv4", "inceptionresnetv2", "inception_v3"]:
        scale = 360
        mean = [0.5, 0.5, 0.5]
        std = [0.5, 0.5, 0.5]
    elif name == "bninception":
        scale = 256
        mean = [104, 117, 128]
        std = [1, 1, 1]
    elif name == "vggm":
        scale = 256
        mean = [123.68, 116.779, 103.939]
        std = [1, 1, 1]
    elif name == "nasnetalarge":
        scale = 354
        mean = [0.5, 0.5, 0.5]
        std = [1, 1, 1]
    else:
        scale = 256
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    print("[Scale: {} , mean: {}, std: {}]".format(scale, mean, std))
    if name == "bninception":
        val = transforms.Compose([
            transforms.Scale(scale),
            transforms.TenCrop(input_shape),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda bgr: torch.stack(
                [ToSpaceBGR(True)(bgrformat) for bgrformat in bgr])),
            transforms.Lambda(lambda range255: torch.stack(
                [ToRange255(True)(ranges) for ranges in range255])),
            transforms.Lambda(lambda normal: torch.stack(
                [transforms.Normalize(mean, std)(normalize) for normalize in normal]))])
    else:
        val = transforms.Compose([
            transforms.Scale(scale),
            transforms.TenCrop(input_shape),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda normal: torch.stack(
                [transforms.Normalize(mean, std)(normalize) for normalize in normal]))])
    image_datasets = datasets.ImageFolder(data_dir, val)
    dataloaders = torch.utils.data.DataLoader(image_datasets, batch_size=1,
                                              shuffle=False, num_workers=1)
    return dataloaders, image_datasets
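Note that transforms.Scale is the old name for what later became transforms.Resize; recent torchvision releases have removed the alias, so running this example on a current install needs a one-line substitution. (ToSpaceBGR and ToRange255 are helpers defined in the repository, in the style of the Cadene pretrainedmodels utilities; they reorder channels to BGR and rescale values to [0, 255], which bninception expects.)

    # transforms.Scale(scale)   # removed in modern torchvision
    transforms.Resize(scale)    # drop-in replacement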