Python data.base_dataset.get_params() Examples
The following are 7 code examples of data.base_dataset.get_params().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module data.base_dataset, or try the search function.
Example #1
Source File: aligned_dataset.py From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License | 5 votes |
def __getitem__(self, index):
    """Return a data point and its metadata information.

    Parameters:
        index -- a random integer for data indexing

    Returns a dictionary that contains A, B, A_paths and B_paths
        A (tensor)    -- an image in the input domain
        B (tensor)    -- its corresponding image in the target domain
        A_paths (str) -- image paths
        B_paths (str) -- image paths (same as A_paths)
    """
    # Read the combined image for this index; it stores A and B side by side.
    AB_path = self.AB_paths[index]
    AB = Image.open(AB_path).convert('RGB')

    # Split the AB image into A (left half) and B (right half).
    w, h = AB.size
    w2 = w // 2  # floor division is the idiomatic form of int(w / 2)
    if self.opt.add_contrast:
        # Increase brightness and contrast (jitter before splitting so both
        # halves receive the same photometric change).
        AB = transforms.ColorJitter(contrast=0.1, brightness=0.1)(AB)
    A = AB.crop((0, 0, w2, h))
    B = AB.crop((w2, 0, w, h))

    # Apply the same geometric transform to both A and B so they stay aligned.
    transform_params = get_params(self.opt, A.size)
    A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
    B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))

    A = A_transform(A)
    B = B_transform(B)

    return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
Example #2
Source File: aligned_dataset.py From everybody_dance_now_pytorch with GNU Affero General Public License v3.0 | 5 votes |
def __getitem__(self, index):
    """Return a dict holding the label map, real image and optional
    instance/feature tensors for the sample at ``index``."""
    ### input A (label maps)
    A_path = self.A_paths[index]
    label_img = Image.open(A_path)
    xform_params = get_params(self.opt, label_img.size)
    if self.opt.label_nc == 0:
        # RGB-encoded label map: treat it like a regular image.
        label_xform = get_transform(self.opt, xform_params)
        A_tensor = label_xform(label_img.convert('RGB'))
    else:
        # Integer label map: nearest-neighbour resize, no normalization,
        # then rescale back to the raw class-id range.
        label_xform = get_transform(self.opt, xform_params, method=Image.NEAREST, normalize=False)
        A_tensor = label_xform(label_img) * 255.0

    # Placeholder value 0 marks tensors that are not produced below.
    B_tensor = inst_tensor = feat_tensor = 0

    ### input B (real images) -- only loaded while training
    if self.opt.isTrain:
        B_path = self.B_paths[index]
        real_img = Image.open(B_path).convert('RGB')
        B_tensor = get_transform(self.opt, xform_params)(real_img)

    ### instance map, transformed exactly like the label map
    if not self.opt.no_instance:
        inst_path = self.inst_paths[index]
        inst_tensor = label_xform(Image.open(inst_path))

        if self.opt.load_features:
            feat_path = self.feat_paths[index]
            feat_img = Image.open(feat_path).convert('RGB')
            feat_tensor = normalize()(label_xform(feat_img))

    input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
                  'feat': feat_tensor, 'path': A_path}
    return input_dict
Example #3
Source File: aligned_dataset.py From EverybodyDanceNow_reproduce_pytorch with MIT License | 5 votes |
def __getitem__(self, index):
    """Build one dataset item: label tensor, (optional) real-image tensor,
    plus instance and feature tensors when the options request them."""
    ### input A (label maps)
    A_path = self.A_paths[index]
    label = Image.open(A_path)
    params = get_params(self.opt, label.size)

    use_rgb_labels = self.opt.label_nc == 0
    if use_rgb_labels:
        transform_A = get_transform(self.opt, params)
        A_tensor = transform_A(label.convert('RGB'))
    else:
        # Class-id map: nearest resize, skip normalization, restore raw ids.
        transform_A = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
        A_tensor = transform_A(label) * 255.0

    # Default 0 stands in for any tensor not computed below.
    B_tensor = inst_tensor = feat_tensor = 0

    ### input B (real images)
    if self.opt.isTrain:
        B_path = self.B_paths[index]
        photo = Image.open(B_path).convert('RGB')
        transform_B = get_transform(self.opt, params)
        B_tensor = transform_B(photo)

    ### if using instance maps
    if not self.opt.no_instance:
        inst_path = self.inst_paths[index]
        inst_tensor = transform_A(Image.open(inst_path))

        if self.opt.load_features:
            feat_path = self.feat_paths[index]
            feat = Image.open(feat_path).convert('RGB')
            feat_tensor = normalize()(transform_A(feat))

    return {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
            'feat': feat_tensor, 'path': A_path}
Example #4
Source File: aligned_dataset.py From deep-learning-for-document-dewarping with MIT License | 5 votes |
def __getitem__(self, index):
    """Return the sample dict for ``index``: label map, real image
    (during training or when encoding images), and optional
    instance/feature tensors."""
    ### input A (label maps)
    A_path = self.A_paths[index]
    label_image = Image.open(A_path)
    shared_params = get_params(self.opt, label_image.size)
    if self.opt.label_nc == 0:
        # Labels stored as RGB images.
        transform_A = get_transform(self.opt, shared_params)
        A_tensor = transform_A(label_image.convert('RGB'))
    else:
        # Labels stored as class indices: nearest resize, no normalization.
        transform_A = get_transform(self.opt, shared_params, method=Image.NEAREST, normalize=False)
        A_tensor = transform_A(label_image) * 255.0

    # 0 is the sentinel for "not provided".
    B_tensor = inst_tensor = feat_tensor = 0

    ### input B (real images) -- needed for training or encoded-image mode
    if self.opt.isTrain or self.opt.use_encoded_image:
        B_path = self.B_paths[index]
        photo = Image.open(B_path).convert('RGB')
        transform_B = get_transform(self.opt, shared_params)
        B_tensor = transform_B(photo)

    ### if using instance maps
    if not self.opt.no_instance:
        inst_path = self.inst_paths[index]
        inst_tensor = transform_A(Image.open(inst_path))

        if self.opt.load_features:
            feat_path = self.feat_paths[index]
            feat_image = Image.open(feat_path).convert('RGB')
            feat_tensor = normalize()(transform_A(feat_image))

    return {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
            'feat': feat_tensor, 'path': A_path}
Example #5
Source File: aligned_pair_dataset.py From everybody_dance_now_pytorch with GNU Affero General Public License v3.0 | 4 votes |
def __getitem__(self, index):
    """Return a clip of ``self.clip_length`` consecutive frames.

    Produces stacked label/image tensors of shape (clip_length, C, H, W)
    during training; at test time only the first real frame is returned.
    All frames in a clip share the same transform parameters so the clip
    stays spatially consistent.
    """
    ### input A (label maps)
    if index > self.dataset_size - self.clip_length:
        index = 0  # rare wrap-around; won't affect training dynamics
    A_path = self.A_paths[index: index + self.clip_length]
    A = [Image.open(path) for path in A_path]
    # Derive params from the first frame; assumes all frames share one size.
    params = get_params(self.opt, A[0].size)
    if self.opt.label_nc == 0:
        transform_A = get_transform(self.opt, params)
        A_tensor = torch.stack([transform_A(item.convert('RGB')) for item in A], dim=0)
    else:
        transform_A = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
        # BUG FIX: the original passed the whole list ``A`` to transform_A,
        # which cannot handle a list. Transform each frame and stack,
        # mirroring the label_nc == 0 branch above.
        A_tensor = torch.stack([transform_A(item) for item in A], dim=0) * 255.0

    # 0 is the placeholder for tensors not produced below.
    B_tensor = inst_tensor = feat_tensor = 0

    ### input B (real images)
    if self.opt.isTrain:
        B_path = self.B_paths[index: index + self.clip_length]
        B = [Image.open(path).convert('RGB') for path in B_path]
        transform_B = get_transform(self.opt, params)
        B_tensor = torch.stack([transform_B(item) for item in B], dim=0)
    else:
        # Only retain the first frame for testing.
        B_path = self.B_paths[index]
        B = Image.open(B_path).convert('RGB')
        transform_B = get_transform(self.opt, params)
        B_tensor = transform_B(B)

    ### if using instance maps (which is never supposed to happen here)
    if not self.opt.no_instance:
        inst_path = self.inst_paths[index: index + self.clip_length]
        inst = [Image.open(path) for path in inst_path]
        inst_tensor = torch.stack([transform_A(item) for item in inst], dim=0)

        if self.opt.load_features:
            feat_path = self.feat_paths[index: index + self.clip_length]
            feat = [Image.open(path).convert('RGB') for path in feat_path]
            norm = normalize()
            feat_tensor = torch.stack([norm(transform_A(item)) for item in feat], dim=0)

    input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
                  'feat': feat_tensor, 'path': A_path}
    return input_dict
Example #6
Source File: aligned_dataset.py From EverybodyDanceNow-Temporal-FaceGAN with MIT License | 4 votes |
def __getitem__(self, index):
    """Return one sample: a precomputed label tensor plus, during training,
    the matching real image scaled to [0, 1] in (C, H, W) layout."""
    ### input A (label maps): stored on disk as torch tensors (channels-last),
    # so load directly and permute to channels-first instead of using PIL.
    A_path = self.A_paths[index]
    A_tensor = torch.load(A_path).permute((2, 0, 1))

    # 0 is the placeholder for tensors not produced below.
    B_tensor = inst_tensor = feat_tensor = 0

    ### input B (real images)
    if self.opt.isTrain:
        B_path = self.B_paths[index]
        B = Image.open(B_path).convert('RGB')
        # Scale to [0, 1], keep only the RGB planes, move channels first.
        B = np.array(B, dtype=float) / 255.
        B_tensor = torch.tensor(B)[:, :, :3].permute((2, 0, 1)).float()

    ### if using instance maps
    if not self.opt.no_instance:
        # BUG FIX: the original referenced ``transform_A`` here, but its
        # definition was commented out above, so this branch raised a
        # NameError. Rebuild the label-style transform from the instance
        # map's own size (TODO confirm this matches the intended params).
        inst_path = self.inst_paths[index]
        inst = Image.open(inst_path)
        params = get_params(self.opt, inst.size)
        transform_A = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
        inst_tensor = transform_A(inst)

        if self.opt.load_features:
            feat_path = self.feat_paths[index]
            feat = Image.open(feat_path).convert('RGB')
            norm = normalize()
            feat_tensor = norm(transform_A(feat))

    input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
                  'feat': feat_tensor, 'path': A_path}
    return input_dict
Example #7
Source File: aligned_dataset_temporal.py From EverybodyDanceNow-Temporal-FaceGAN with MIT License | 4 votes |
def work(self, index):
    """Return one sample: a precomputed label tensor plus, during training,
    the matching real image scaled to [0, 1] in (C, H, W) layout."""
    ### input A (label maps): stored on disk as torch tensors (channels-last),
    # so load directly and permute to channels-first instead of using PIL.
    A_path = self.A_paths[index]
    A_tensor = torch.load(A_path).permute((2, 0, 1))

    # 0 is the placeholder for tensors not produced below.
    B_tensor = inst_tensor = feat_tensor = 0

    ### input B (real images)
    if self.opt.isTrain:
        B_path = self.B_paths[index]
        B = Image.open(B_path).convert('RGB')
        # Scale to [0, 1], keep only the RGB planes, move channels first.
        B = np.array(B, dtype=float) / 255.
        B_tensor = torch.tensor(B)[:, :, :3].permute((2, 0, 1)).float()

    ### if using instance maps
    if not self.opt.no_instance:
        # BUG FIX: the original referenced ``transform_A`` here, but its
        # definition was commented out above, so this branch raised a
        # NameError. Rebuild the label-style transform from the instance
        # map's own size (TODO confirm this matches the intended params).
        inst_path = self.inst_paths[index]
        inst = Image.open(inst_path)
        params = get_params(self.opt, inst.size)
        transform_A = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
        inst_tensor = transform_A(inst)

        if self.opt.load_features:
            feat_path = self.feat_paths[index]
            feat = Image.open(feat_path).convert('RGB')
            norm = normalize()
            feat_tensor = norm(transform_A(feat))

    input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
                  'feat': feat_tensor, 'path': A_path}
    return input_dict