Python util.util.tensor2im() Examples
The following are 30 code examples of util.util.tensor2im().
The original project and source file are noted above each example.
You may also want to check out all other available functions and classes of the module util.util.
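The exact definition of util.util.tensor2im varies slightly across the projects below, but most follow the pix2pix/CycleGAN convention: take the first image of a batched CHW float tensor scaled to [-1, 1] and return an HWC uint8 numpy array. A minimal sketch assuming that convention (not any single project's exact code):

import numpy as np
import torch

def tensor2im(input_image, imtype=np.uint8):
    # accept either a plain tensor or a Variable-style wrapper with .data
    image_tensor = input_image.data if hasattr(input_image, 'data') else input_image
    image_numpy = image_tensor[0].cpu().float().numpy()  # first image in the batch, CHW
    if image_numpy.shape[0] == 1:
        # grayscale: replicate the single channel to RGB
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    # map CHW in [-1, 1] to HWC in [0, 255]
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype)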
Example #1
Source File: test_model.py From non-stationary_texture_syn with MIT License
def random_crop_256x256(self, crop_patch=6):
    input_size = self.input_A.cpu().shape
    width, height = input_size[3], input_size[2]
    results = []
    self.real_A = Variable(self.input_A, volatile=True)
    real_A_src = self.real_A.clone()
    # self.fake_B = self.netG.forward(self.real_A)
    # src_fake_B = self.fake_B.clone()
    real_A = util.tensor2im(self.real_A.data)
    # fake_B = util.tensor2im(self.fake_B.data)
    results.append(('real_A', real_A))
    # results.append(('fake_{}_B'.format('src'), fake_B))
    for i in range(0, crop_patch):
        rw = random.randint(0, width - 256)
        rh = random.randint(0, height - 256)
        self.real_A = Variable(real_A_src.data[:, :, rh:rh + 256, rw:rw + 256], volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('256_real_{}_{}_{}_A'.format(i, rw, rh), real_A))
        results.append(('512_fake_{}_B'.format(i), fake_B))
    return OrderedDict(results)
Example #2
Source File: semantic_image_synthesis_model.py From DMIT with MIT License
def translation(self, data):
    with torch.no_grad():
        img, cap_ori, cap_len_ori = data
        assert img.size(0) == 1
        img = img.repeat(len(TEST_SEQ) + 1, 1, 1, 1)
        cap_tar, cap_len_tar = [cap_ori], [cap_len_ori]
        for seq in TEST_SEQ:
            cap, cap_len = self.opt.txt_dataset.cap2ix(seq)
            cap = torch.LongTensor(cap).unsqueeze(0)
            cap_len = torch.LongTensor([cap_len])
            cap_tar.append(cap)
            cap_len_tar.append(cap_len)
        cap_tar = torch.cat(cap_tar, dim=0)
        cap_len_tar = torch.cat(cap_len_tar, dim=0)
        img, sent_emb, _, _ = self.prepare_data([img, cap_tar, cap_len_tar])
        style_enc, _, _ = self.enc_style(img)
        content = self.enc_content(img)
        fakes = self.dec(content, torch.cat([sent_emb, style_enc], dim=1))
        results = [('input', tensor2im(img[0].data)),
                   ('rec', tensor2im(fakes[0].data))]
        for i in range(len(TEST_SEQ)):
            results.append(('seq_{}'.format(i + 1), tensor2im(fakes[i + 1].data)))
        return results
Example #3
Source File: season_transfer_model.py From DMIT with MIT License
def translation(self, data):
    with torch.no_grad():
        self.prepare_data(data)
        img, attr_source, index_target, _ = self.current_data
        batch_size = img.size(0)
        assert batch_size == 2
        style_enc, _, _ = self.enc_style(img)
        style_target_enc = style_enc[index_target]
        attr_target = attr_source[index_target]
        content = self.enc_content(img)
        results_s2w = [('input_summer', tensor2im(img[0].data))]
        results_w2s = [('input_winter', tensor2im(img[1].data))]
        fakes = self.dec(content, torch.cat([attr_target, style_target_enc], dim=1))
        results_s2w.append(('s2w_enc', tensor2im(fakes[0].data)))
        results_w2s.append(('w2s_enc', tensor2im(fakes[1].data)))
        for i in range(self.opt.n_samples):
            style_rand = self.sample_latent_code(style_enc.size())
            fakes = self.dec(content, torch.cat([attr_target, style_rand], dim=1))
            results_s2w.append(('s2w_rand_{}'.format(i + 1), tensor2im(fakes[0].data)))
            results_w2s.append(('w2s_rand_{}'.format(i + 1), tensor2im(fakes[1].data)))
        return results_s2w + results_w2s
Example #4
Source File: single_gan.py From SingleGAN with MIT License
def translation(self, data):
    input, sourceD, targetD = self.prepare_image(data)
    sourceDC, sourceIndex = self.get_domain_code(sourceD)
    targetDC, targetIndex = self.get_domain_code(targetD)
    images, names = [], []
    for i in range(self.opt.d_num):
        images.append([tensor2im(input.index_select(0, sourceIndex[i])[0].data)])
        names.append(['D{}'.format(i)])
    if self.opt.mode == 'multimodal':
        for i in range(self.opt.n_samples):
            c_rand = self.sample_latent_code(torch.Size([input.size(0), self.opt.c_num]))
            targetC = torch.cat([targetDC, c_rand], 1)
            output = self.G(input, targetC)
            for j in range(output.size(0)):
                images[sourceD[j]].append(tensor2im(output[j].data))
                names[sourceD[j]].append('{}to{}_{}'.format(sourceD[j], targetD[j], i))
    else:
        output = self.G(input, targetDC)
        for i in range(output.size(0)):
            images[sourceD[i]].append(tensor2im(output[i].data))
            names[sourceD[i]].append('{}to{}'.format(sourceD[i], targetD[i]))
    return images, names
Example #5
Source File: ui_model.py From EverybodyDanceNow_reproduce_pytorch with MIT License
def add_objects(self, click_src, label_tgt, mask, style_id=0):
    y, x = click_src[0], click_src[1]
    mask = np.transpose(mask, (2, 0, 1))[np.newaxis, ...]
    idx_src = torch.from_numpy(mask).cuda().nonzero()
    idx_src[:, 2] += y
    idx_src[:, 3] += x

    # backup current maps
    self.backup_current_state()

    # update label map
    self.label_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
    for k in range(self.opt.label_nc):
        self.net_input[idx_src[:, 0], idx_src[:, 1] + k, idx_src[:, 2], idx_src[:, 3]] = 0
    self.net_input[idx_src[:, 0], idx_src[:, 1] + label_tgt, idx_src[:, 2], idx_src[:, 3]] = 1

    # update instance map
    self.inst_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
    self.net_input[:, -1, :, :] = self.get_edges(self.inst_map)

    # update feature map
    self.set_features(idx_src, self.feat, style_id)

    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
Example #6
Source File: ui_model.py From everybody_dance_now_pytorch with GNU Affero General Public License v3.0
def add_objects(self, click_src, label_tgt, mask, style_id=0):
    y, x = click_src[0], click_src[1]
    mask = np.transpose(mask, (2, 0, 1))[np.newaxis, ...]
    idx_src = torch.from_numpy(mask).cuda().nonzero()
    idx_src[:, 2] += y
    idx_src[:, 3] += x

    # backup current maps
    self.backup_current_state()

    # update label map
    self.label_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
    for k in range(self.opt.label_nc):
        self.net_input[idx_src[:, 0], idx_src[:, 1] + k, idx_src[:, 2], idx_src[:, 3]] = 0
    self.net_input[idx_src[:, 0], idx_src[:, 1] + label_tgt, idx_src[:, 2], idx_src[:, 3]] = 1

    # update instance map
    self.inst_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
    self.net_input[:, -1, :, :] = self.get_edges(self.inst_map)

    # update feature map
    self.set_features(idx_src, self.feat, style_id)

    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
Example #7
Source File: ui_model.py From EverybodyDanceNow-Temporal-FaceGAN with MIT License
def add_objects(self, click_src, label_tgt, mask, style_id=0):
    y, x = click_src[0], click_src[1]
    mask = np.transpose(mask, (2, 0, 1))[np.newaxis, ...]
    idx_src = torch.from_numpy(mask).cuda().nonzero()
    idx_src[:, 2] += y
    idx_src[:, 3] += x

    # backup current maps
    self.backup_current_state()

    # update label map
    self.label_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
    for k in range(self.opt.label_nc):
        self.net_input[idx_src[:, 0], idx_src[:, 1] + k, idx_src[:, 2], idx_src[:, 3]] = 0
    self.net_input[idx_src[:, 0], idx_src[:, 1] + label_tgt, idx_src[:, 2], idx_src[:, 3]] = 1

    # update instance map
    self.inst_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
    self.net_input[:, -1, :, :] = self.get_edges(self.inst_map)

    # update feature map
    self.set_features(idx_src, self.feat, style_id)

    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
Example #8
Source File: sparse_wgangp_pix2pix_model.py From iSketchNFill with GNU General Public License v3.0
def get_latent_space_visualization(self, num_interpolate=20, label_1=-1, label_2=-1):
    rand_perm = np.random.permutation(self.opt.n_classes)
    if label_1 == -1:
        label_1 = self.label[0]  # rand_perm[0]
    if label_2 == -1:
        label_2 = self.opt.target_label  # rand_perm[1]
    alpha_blends = np.linspace(0, 1, num_interpolate)
    self.label[0] = label_1
    output_gate_1 = self.netG.forward_gate(self.label)
    self.label[0] = label_2
    output_gate_2 = self.netG.forward_gate(self.label)
    results = {}
    results['latent_real_A'] = util.tensor2im(self.real_A.data)
    results['latent_real_B'] = util.tensor2im(self.real_B.data)
    for i in range(num_interpolate):
        alpha_blend = alpha_blends[i]
        output_gate = output_gate_1 * alpha_blend + output_gate_2 * (1 - alpha_blend)
        self.fake_B = self.netG.forward_main(self.real_A, output_gate)
        results['%d_L_fake_B_inter' % (i)] = util.tensor2im(self.fake_B.data)
    return OrderedDict(results)
Example #9
Source File: label_channel_gated_pix2pix_model.py From iSketchNFill with GNU General Public License v3.0
def get_latent_space_visualization(self, num_interpolate=20, label_1=-1, label_2=-1):
    rand_perm = np.random.permutation(self.opt.n_classes)
    if label_1 == -1:
        label_1 = self.label[0]  # rand_perm[0]
    if label_2 == -1:
        label_2 = self.opt.target_label  # rand_perm[1]
    alpha_blends = np.linspace(0, 1, num_interpolate)
    self.label[0] = label_1
    output_gate_1 = self.netG.forward_gate(self.label)
    self.label[0] = label_2
    output_gate_2 = self.netG.forward_gate(self.label)
    results = {}
    results['latent_real_A'] = util.tensor2im(self.real_A.data)
    results['latent_real_B'] = util.tensor2im(self.real_B.data)
    for i in range(num_interpolate):
        alpha_blend = alpha_blends[i]
        output_gate = output_gate_1 * alpha_blend + output_gate_2 * (1 - alpha_blend)
        self.fake_B = self.netG.forward_main(self.real_A, output_gate)
        results['%d_L_fake_B_inter' % (i)] = util.tensor2im(self.fake_B.data)
    return OrderedDict(results)
Example #10
Source File: ui_model.py From deep-learning-for-document-dewarping with MIT License
def add_objects(self, click_src, label_tgt, mask, style_id=0):
    y, x = click_src[0], click_src[1]
    mask = np.transpose(mask, (2, 0, 1))[np.newaxis, ...]
    idx_src = torch.from_numpy(mask).cuda().nonzero()
    idx_src[:, 2] += y
    idx_src[:, 3] += x

    # backup current maps
    self.backup_current_state()

    # update label map
    self.label_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
    for k in range(self.opt.label_nc):
        self.net_input[idx_src[:, 0], idx_src[:, 1] + k, idx_src[:, 2], idx_src[:, 3]] = 0
    self.net_input[idx_src[:, 0], idx_src[:, 1] + label_tgt, idx_src[:, 2], idx_src[:, 3]] = 1

    # update instance map
    self.inst_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
    self.net_input[:, -1, :, :] = self.get_edges(self.inst_map)

    # update feature map
    self.set_features(idx_src, self.feat, style_id)

    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
Example #11
Source File: CycleGAN.py From Bayesian-CycleGAN with MIT License
def get_current_visuals(self):
    real_A = util.tensor2im(self.input_A)
    fake_B = util.tensor2im(self.fake_B)
    rec_A = util.tensor2im(self.rec_A)
    real_B = util.tensor2im(self.input_B)
    fake_A = util.tensor2im(self.fake_A)
    rec_B = util.tensor2im(self.rec_B)
    visuals = OrderedDict([
        ('real_A', real_A), ('fake_B', fake_B), ('rec_A', rec_A),
        ('real_B', real_B), ('fake_A', fake_A), ('rec_B', rec_B)
    ])
    return visuals
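The OrderedDict returned here maps names to uint8 numpy images, so a caller can save or display each visual directly. A hypothetical consumer (the model variable and filenames are assumptions, not part of the project):

from PIL import Image

# save every visual returned by the model as a PNG named after its key
for name, image_numpy in model.get_current_visuals().items():
    Image.fromarray(image_numpy).save('{}.png'.format(name))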
Example #12
Source File: CycleGAN_bayes_z.py From Bayesian-CycleGAN with MIT License
def get_current_visuals(self):
    real_A = util.tensor2im(self.input_A)
    fake_B = util.tensor2im(self.fake_B)
    rec_A = util.tensor2im(self.rec_A)
    real_B = util.tensor2im(self.input_B)
    fake_A = util.tensor2im(self.fake_A)
    rec_B = util.tensor2im(self.rec_B)
    visuals = OrderedDict([
        ('real_A', real_A), ('fake_B', fake_B), ('rec_A', rec_A),
        ('real_B', real_B), ('fake_A', fake_A), ('rec_B', rec_B)
    ])
    return visuals
Example #13
Source File: CycleGAN_bayes.py From Bayesian-CycleGAN with MIT License
def get_current_visuals(self):
    real_A = util.tensor2im(self.input_A)
    fake_B = util.tensor2im(self.fake_B)
    rec_A = util.tensor2im(self.rec_A)
    real_B = util.tensor2im(self.input_B)
    fake_A = util.tensor2im(self.fake_A)
    rec_B = util.tensor2im(self.rec_B)
    visuals = OrderedDict([
        ('real_A', real_A), ('fake_B', fake_B), ('rec_A', rec_A),
        ('real_B', real_B), ('fake_A', fake_A), ('rec_B', rec_B)
    ])
    return visuals
Example #14
Source File: test_model.py From non-stationary_texture_syn with MIT License
def recurrent_test(self, step=5):
    input_size = self.input_A.cpu().shape
    width, height = input_size[3], input_size[2]
    results = []
    self.real_A = Variable(self.input_A, volatile=True)
    self.fake_B = self.netG.forward(self.real_A)
    real_A = util.tensor2im(self.real_A.data)
    fake_B = util.tensor2im(self.fake_B.data)
    results.append(('real_{}_A'.format(0), real_A))
    results.append(('fake_{}_B'.format(0), fake_B))
    for i in range(1, step):
        # rw = random.randint(0, width)
        # rh = random.randint(0, height)
        rw = int(width / 2)
        rh = int(height / 2)
        self.real_A = Variable(self.fake_B.data[:, :, rh:rh + height, rw:rw + width], volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('real_{}_A'.format(i), real_A))
        results.append(('fake_{}_B'.format(i), fake_B))
    return OrderedDict(results)
Example #15
Source File: test_model.py From non-stationary_texture_syn with MIT License
def recurrent_test_l2_searching(self, step=5):
    input_size = self.input_A.cpu().shape
    width, height = input_size[3], input_size[2]
    results = []
    self.real_A = Variable(self.input_A, volatile=True)
    self.fake_B = self.netG.forward(self.real_A)
    real_A = util.tensor2im(self.real_A.data)
    fake_B = util.tensor2im(self.fake_B.data)
    results.append(('l2_search_real_{}_A'.format(0), real_A))
    results.append(('l2_search_fake_{}_B'.format(0), fake_B))
    for i in range(1, step):
        # rw = random.randint(0, width)
        # rh = random.randint(0, height)
        rw, rh = self.l2_searching(self.real_A.clone(), self.fake_B.clone())
        print("end selection: ", rw, rh)
        self.real_A = Variable(self.fake_B.data[:, :, rh:rh + height, rw:rw + width], volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('l2_search_real_{}_{}_{}_A'.format(i, rw, rh), real_A))
        results.append(('l2_search_fake_{}_B'.format(i), fake_B))
    return OrderedDict(results)
Example #16
Source File: test_model.py From non-stationary_texture_syn with MIT License
def random_crop(self, crop_patch=6):
    input_size = self.input_A.cpu().shape
    width, height = input_size[3], input_size[2]
    results = []
    self.real_A = Variable(self.input_A, volatile=True)
    self.fake_B = self.netG.forward(self.real_A)
    src_fake_B = self.fake_B.clone()
    real_A = util.tensor2im(self.real_A.data)
    fake_B = util.tensor2im(self.fake_B.data)
    results.append(('real_A', real_A))
    results.append(('fake_{}_B'.format('src'), fake_B))
    for i in range(0, crop_patch):
        rw = random.randint(0, width)
        rh = random.randint(0, height)
        self.real_A = Variable(src_fake_B.data[:, :, rh:rh + height, rw:rw + width], volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('real_{}_{}_{}_A'.format(i, rw, rh), real_A))
        results.append(('fake_{}_B'.format(i), fake_B))
    return OrderedDict(results)
Example #17
Source File: Gen_final_v1.py From Talking-Face-Generation-DAVS with MIT License
def get_current_visuals(self):
    fake_B_audio = self.audio_gen_fakes.view(-1, self.opt.sequence_length, self.opt.image_channel_size,
                                             self.opt.image_size, self.opt.image_size)
    fake_B_image = self.image_gen_fakes.view(-1, self.opt.sequence_length, self.opt.image_channel_size,
                                             self.opt.image_size, self.opt.image_size)
    real_A = util.tensor2im(self.real_A.data)
    oderdict = OrderedDict([('real_A', real_A)])
    fake_audio_B = {}
    fake_image_B = {}
    real_B = {}
    for i in range(self.opt.sequence_length):
        fake_audio_B[i] = util.tensor2im(fake_B_audio[:, i, :, :, :].data)
        fake_image_B[i] = util.tensor2im(fake_B_image[:, i, :, :, :].data)
        real_B[i] = util.tensor2im(self.real_videos[:, i, :, :, :].data)
        oderdict['real_B_' + str(i)] = real_B[i]
        oderdict['fake_audio_B_' + str(i)] = fake_audio_B[i]
        oderdict['fake_image_B_' + str(i)] = fake_image_B[i]
    return oderdict
Example #18
Source File: test_model.py From non-stationary_texture_syn with MIT License
def stress_test_up_origin(self, step=3):
    input_size = self.input_A.cpu().shape
    width, height = input_size[3], input_size[2]
    results = []
    self.real_A = Variable(self.input_A, volatile=True)
    # rw = random.randint(0, width - 64)
    # rh = random.randint(0, height - 64)
    # self.real_A = Variable(self.real_A.data, volatile=True)
    self.fake_B = self.netG.forward(self.real_A)
    real_A = util.tensor2im(self.real_A.data)
    fake_B = util.tensor2im(self.fake_B.data)
    results.append(('real_{}_A'.format(0), real_A))
    results.append(('fake_{}_B'.format(0), fake_B))
    for i in range(1, step):
        # rw = random.randint(0, width)
        # rh = random.randint(0, height)
        # rw = int(width/2)
        # rh = int(height/2)
        self.real_A = Variable(self.fake_B.data, volatile=True)
        print(self.real_A.size())
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('real_{}_A'.format(i), real_A))
        results.append(('fake_{}_B'.format(i), fake_B))
    return OrderedDict(results)
Example #19
Source File: half_gan_style.py From non-stationary_texture_syn with MIT License
def get_current_visuals(self):
    real_A = util.tensor2im(self.real_A.data)
    fake_B = util.tensor2im(self.fake_B.data)
    real_B = util.tensor2im(self.real_B.data)
    return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B)]), self.start_points
Example #20
Source File: ui_model.py From EverybodyDanceNow_reproduce_pytorch with MIT License
def change_labels(self, click_src, click_tgt):
    y_src, x_src = click_src[0], click_src[1]
    y_tgt, x_tgt = click_tgt[0], click_tgt[1]
    label_src = int(self.label_map[0, 0, y_src, x_src])
    inst_src = self.inst_map[0, 0, y_src, x_src]
    label_tgt = int(self.label_map[0, 0, y_tgt, x_tgt])
    inst_tgt = self.inst_map[0, 0, y_tgt, x_tgt]
    idx_src = (self.inst_map == inst_src).nonzero()

    # need to change 3 things: label map, instance map, and feature map
    if idx_src.shape:
        # backup current maps
        self.backup_current_state()

        # change both the label map and the network input
        self.label_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
        self.net_input[idx_src[:, 0], idx_src[:, 1] + label_src, idx_src[:, 2], idx_src[:, 3]] = 0
        self.net_input[idx_src[:, 0], idx_src[:, 1] + label_tgt, idx_src[:, 2], idx_src[:, 3]] = 1

        # update the instance map (and the network input)
        if inst_tgt > 1000:
            # if different instances have different ids, give the new object a new id
            tgt_indices = (self.inst_map > label_tgt * 1000) & (self.inst_map < (label_tgt + 1) * 1000)
            inst_tgt = self.inst_map[tgt_indices].max() + 1
        self.inst_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = inst_tgt
        self.net_input[:, -1, :, :] = self.get_edges(self.inst_map)

        # also copy the source features to the target position
        idx_tgt = (self.inst_map == inst_tgt).nonzero()
        if idx_tgt.shape:
            self.copy_features(idx_src, idx_tgt[0, :])

        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))

# add strokes of target label in the image
Example #21
Source File: ui_model.py From EverybodyDanceNow_reproduce_pytorch with MIT License
def add_strokes(self, click_src, label_tgt, bw, save):
    # get the region of the new strokes (bw is the brush width)
    size = self.net_input.size()
    h, w = size[2], size[3]
    idx_src = torch.LongTensor(bw**2, 4).fill_(0)
    for i in range(bw):
        idx_src[i*bw:(i+1)*bw, 2] = min(h-1, max(0, click_src[0]-bw//2 + i))
        for j in range(bw):
            idx_src[i*bw+j, 3] = min(w-1, max(0, click_src[1]-bw//2 + j))
    idx_src = idx_src.cuda()

    # again, need to update 3 things
    if idx_src.shape:
        # backup current maps
        if save:
            self.backup_current_state()

        # update the label map (and the network input) in the stroke region
        self.label_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
        for k in range(self.opt.label_nc):
            self.net_input[idx_src[:, 0], idx_src[:, 1] + k, idx_src[:, 2], idx_src[:, 3]] = 0
        self.net_input[idx_src[:, 0], idx_src[:, 1] + label_tgt, idx_src[:, 2], idx_src[:, 3]] = 1

        # update the instance map (and the network input)
        self.inst_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
        self.net_input[:, -1, :, :] = self.get_edges(self.inst_map)

        # also update the features if available
        if self.opt.instance_feat:
            feat = self.features_clustered[label_tgt]
            # np.random.seed(label_tgt+1)
            # cluster_idx = np.random.randint(0, feat.shape[0])
            cluster_idx = self.cluster_indices[label_tgt]
            self.set_features(idx_src, feat, cluster_idx)

        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))

# add an object to the clicked position with selected style
Example #22
Source File: ui_model.py From EverybodyDanceNow-Temporal-FaceGAN with MIT License
def add_strokes(self, click_src, label_tgt, bw, save):
    # get the region of the new strokes (bw is the brush width)
    size = self.net_input.size()
    h, w = size[2], size[3]
    idx_src = torch.LongTensor(bw**2, 4).fill_(0)
    for i in range(bw):
        idx_src[i*bw:(i+1)*bw, 2] = min(h-1, max(0, click_src[0]-bw//2 + i))
        for j in range(bw):
            idx_src[i*bw+j, 3] = min(w-1, max(0, click_src[1]-bw//2 + j))
    idx_src = idx_src.cuda()

    # again, need to update 3 things
    if idx_src.shape:
        # backup current maps
        if save:
            self.backup_current_state()

        # update the label map (and the network input) in the stroke region
        self.label_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
        for k in range(self.opt.label_nc):
            self.net_input[idx_src[:, 0], idx_src[:, 1] + k, idx_src[:, 2], idx_src[:, 3]] = 0
        self.net_input[idx_src[:, 0], idx_src[:, 1] + label_tgt, idx_src[:, 2], idx_src[:, 3]] = 1

        # update the instance map (and the network input)
        self.inst_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
        self.net_input[:, -1, :, :] = self.get_edges(self.inst_map)

        # also update the features if available
        if self.opt.instance_feat:
            feat = self.features_clustered[label_tgt]
            # np.random.seed(label_tgt+1)
            # cluster_idx = np.random.randint(0, feat.shape[0])
            cluster_idx = self.cluster_indices[label_tgt]
            self.set_features(idx_src, feat, cluster_idx)

        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))

# add an object to the clicked position with selected style
Example #23
Source File: single_gan.py From SingleGAN with MIT License
def get_current_visuals(self):
    real = make_grid(self.real.data, nrow=self.real.size(0), padding=0)
    fake = make_grid(self.fake.data, nrow=self.real.size(0), padding=0)
    cyc = make_grid(self.cyc.data, nrow=self.real.size(0), padding=0)
    img = [real, fake, cyc]
    name = 'real,fake,cyc'
    if self.opt.lambda_ide > 0:
        ide = make_grid(self.ide.data, nrow=self.real.size(0), padding=0)
        img.append(ide)
        name += ',ide'
    img = torch.cat(img, 1)
    return OrderedDict([(name, tensor2im(img))])
Example #24
Source File: ui_model.py From EverybodyDanceNow-Temporal-FaceGAN with MIT License
def change_labels(self, click_src, click_tgt):
    y_src, x_src = click_src[0], click_src[1]
    y_tgt, x_tgt = click_tgt[0], click_tgt[1]
    label_src = int(self.label_map[0, 0, y_src, x_src])
    inst_src = self.inst_map[0, 0, y_src, x_src]
    label_tgt = int(self.label_map[0, 0, y_tgt, x_tgt])
    inst_tgt = self.inst_map[0, 0, y_tgt, x_tgt]
    idx_src = (self.inst_map == inst_src).nonzero()

    # need to change 3 things: label map, instance map, and feature map
    if idx_src.shape:
        # backup current maps
        self.backup_current_state()

        # change both the label map and the network input
        self.label_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
        self.net_input[idx_src[:, 0], idx_src[:, 1] + label_src, idx_src[:, 2], idx_src[:, 3]] = 0
        self.net_input[idx_src[:, 0], idx_src[:, 1] + label_tgt, idx_src[:, 2], idx_src[:, 3]] = 1

        # update the instance map (and the network input)
        if inst_tgt > 1000:
            # if different instances have different ids, give the new object a new id
            tgt_indices = (self.inst_map > label_tgt * 1000) & (self.inst_map < (label_tgt + 1) * 1000)
            inst_tgt = self.inst_map[tgt_indices].max() + 1
        self.inst_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = inst_tgt
        self.net_input[:, -1, :, :] = self.get_edges(self.inst_map)

        # also copy the source features to the target position
        idx_tgt = (self.inst_map == inst_tgt).nonzero()
        if idx_tgt.shape:
            self.copy_features(idx_src, idx_tgt[0, :])

        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))

# add strokes of target label in the image
Example #25
Source File: pix2pixHD_condImg_model.py From neurips18_hierchical_image_manipulation with MIT License
def get_current_visuals(self):
    return OrderedDict([
        ('input_label', util.tensor2label(self.input_label, self.opt.label_nc)),
        ('input_image', util.tensor2im(self.input_image)),
        ('real_image', util.tensor2im(self.real_image)),
        ('synthesized_image', util.tensor2im(self.fake_image))
    ])
Example #26
Source File: ui_model.py From deep-learning-for-document-dewarping with MIT License
def change_labels(self, click_src, click_tgt):
    y_src, x_src = click_src[0], click_src[1]
    y_tgt, x_tgt = click_tgt[0], click_tgt[1]
    label_src = int(self.label_map[0, 0, y_src, x_src])
    inst_src = self.inst_map[0, 0, y_src, x_src]
    label_tgt = int(self.label_map[0, 0, y_tgt, x_tgt])
    inst_tgt = self.inst_map[0, 0, y_tgt, x_tgt]
    idx_src = (self.inst_map == inst_src).nonzero()

    # need to change 3 things: label map, instance map, and feature map
    if idx_src.shape:
        # backup current maps
        self.backup_current_state()

        # change both the label map and the network input
        self.label_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
        self.net_input[idx_src[:, 0], idx_src[:, 1] + label_src, idx_src[:, 2], idx_src[:, 3]] = 0
        self.net_input[idx_src[:, 0], idx_src[:, 1] + label_tgt, idx_src[:, 2], idx_src[:, 3]] = 1

        # update the instance map (and the network input)
        if inst_tgt > 1000:
            # if different instances have different ids, give the new object a new id
            tgt_indices = (self.inst_map > label_tgt * 1000) & (self.inst_map < (label_tgt + 1) * 1000)
            inst_tgt = self.inst_map[tgt_indices].max() + 1
        self.inst_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = inst_tgt
        self.net_input[:, -1, :, :] = self.get_edges(self.inst_map)

        # also copy the source features to the target position
        idx_tgt = (self.inst_map == inst_tgt).nonzero()
        if idx_tgt.shape:
            self.copy_features(idx_src, idx_tgt[0, :])

        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))

# add strokes of target label in the image
Example #27
Source File: ui_model.py From deep-learning-for-document-dewarping with MIT License
def add_strokes(self, click_src, label_tgt, bw, save):
    # get the region of the new strokes (bw is the brush width)
    size = self.net_input.size()
    h, w = size[2], size[3]
    idx_src = torch.LongTensor(bw**2, 4).fill_(0)
    for i in range(bw):
        idx_src[i*bw:(i+1)*bw, 2] = min(h-1, max(0, click_src[0]-bw//2 + i))
        for j in range(bw):
            idx_src[i*bw+j, 3] = min(w-1, max(0, click_src[1]-bw//2 + j))
    idx_src = idx_src.cuda()

    # again, need to update 3 things
    if idx_src.shape:
        # backup current maps
        if save:
            self.backup_current_state()

        # update the label map (and the network input) in the stroke region
        self.label_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
        for k in range(self.opt.label_nc):
            self.net_input[idx_src[:, 0], idx_src[:, 1] + k, idx_src[:, 2], idx_src[:, 3]] = 0
        self.net_input[idx_src[:, 0], idx_src[:, 1] + label_tgt, idx_src[:, 2], idx_src[:, 3]] = 1

        # update the instance map (and the network input)
        self.inst_map[idx_src[:, 0], idx_src[:, 1], idx_src[:, 2], idx_src[:, 3]] = label_tgt
        self.net_input[:, -1, :, :] = self.get_edges(self.inst_map)

        # also update the features if available
        if self.opt.instance_feat:
            feat = self.features_clustered[label_tgt]
            # np.random.seed(label_tgt+1)
            # cluster_idx = np.random.randint(0, feat.shape[0])
            cluster_idx = self.cluster_indices[label_tgt]
            self.set_features(idx_src, feat, cluster_idx)

        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))

# add an object to the clicked position with selected style
Example #28
Source File: combogan_model.py From ToDayGAN with BSD 2-Clause "Simplified" License
def get_current_visuals(self, testing=False):
    if not testing:
        self.visuals = [self.real_A, self.fake_B, self.rec_A, self.real_B, self.fake_A, self.rec_B]
        self.labels = ['real_A', 'fake_B', 'rec_A', 'real_B', 'fake_A', 'rec_B']
    images = [util.tensor2im(v.data) for v in self.visuals]
    return OrderedDict(zip(self.labels, images))
Example #29
Source File: combogan_model.py From ComboGAN with BSD 2-Clause "Simplified" License
def get_current_visuals(self, testing=False):
    if not testing:
        self.visuals = [self.real_A, self.fake_B, self.rec_A, self.real_B, self.fake_A, self.rec_B]
        self.labels = ['real_A', 'fake_B', 'rec_A', 'real_B', 'fake_A', 'rec_B']
    images = [util.tensor2im(v.data) for v in self.visuals]
    return OrderedDict(zip(self.labels, images))
Example #30
Source File: pix2pixHD_condImgColor_model.py From neurips18_hierchical_image_manipulation with MIT License
def get_current_visuals(self):
    return OrderedDict([
        ('input_label', util.tensor2label(self.input_label, self.opt.label_nc)),
        ('input_image', util.tensor2im(self.input_image)),
        ('real_image', util.tensor2im(self.real_image)),
        ('synthesized_image', util.tensor2im(self.fake_image))
    ])