Python torch.from_numpy() Examples
The following are 29 code examples of torch.from_numpy(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
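Before the examples, a quick refresher on the core behavior: torch.from_numpy() wraps a NumPy array as a tensor without copying, so the two share the same memory, and the resulting tensor inherits the array's dtype. A minimal sketch (standard torch/numpy API only):

import numpy as np
import torch

a = np.array([1.0, 2.0, 3.0])      # float64 ndarray
t = torch.from_numpy(a)            # zero-copy: t shares memory with a
print(t.dtype)                     # torch.float64 (dtype is inherited)

a[0] = 100.0
print(t[0])                        # tensor(100., dtype=torch.float64) -- same buffer

t32 = torch.from_numpy(a).float()  # .float() copies and converts to float32

This is why many of the examples below call .float(), .copy(), or np.ascontiguousarray() around the conversion.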
Example #1
Source File: utils.py From pruning_yolov3 with GNU General Public License v3.0 | 8 votes
def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)
Example #2
Source File: models.py From cvpr2018-hnd with MIT License | 7 votes
def __init__(self, T, opts):
    super(LOOLoss, self).__init__()

    self.gpu = opts.gpu
    self.loo = opts.loo if 'LOO' in opts.method else 0.
    self.label_smooth = opts.label_smooth
    self.kld_u_const = math.log(len(T['wnids']))
    self.relevant = [torch.from_numpy(rel) for rel in T['relevant']]
    self.labels_relevant = torch.from_numpy(T['labels_relevant'].astype(np.uint8))
    ch_slice = T['ch_slice']
    if opts.class_wise:
        num_children = T['num_children']
        num_supers = len(num_children)
        self.class_weight = torch.zeros(ch_slice[-1])
        for m, num_ch in enumerate(num_children):
            self.class_weight[ch_slice[m]:ch_slice[m+1]] = 1. / (num_ch * num_supers)
    else:
        self.class_weight = torch.ones(ch_slice[-1]) / ch_slice[-1]
Example #3
Source File: transforms.py From ACAN with MIT License | 6 votes
def __call__(self, img):
    """Convert a ``numpy.ndarray`` to tensor.

    Args:
        img (numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be ndarray. Got {}'.format(type(img)))

    if isinstance(img, np.ndarray):
        # handle numpy array
        if img.ndim == 3:
            img = torch.from_numpy(img.transpose((2, 0, 1)).copy())
        elif img.ndim == 2:
            img = torch.from_numpy(img.copy())
        else:
            raise RuntimeError('img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))

        # backward compatibility
        # return img.float().div(255)
        return img.float()
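A note on the .copy() calls above: because torch.from_numpy() shares memory with its input, copying first gives the tensor its own storage. The copy also sidesteps a separate limitation: torch.from_numpy() raises a ValueError on arrays with negative strides (as produced by np.flip, for example). A minimal sketch (array names are illustrative):

import numpy as np
import torch

hwc = np.random.rand(32, 32, 3).astype(np.float32)       # H x W x C image
chw = torch.from_numpy(hwc.transpose((2, 0, 1)).copy())  # C x H x W, own storage

flipped = np.flip(hwc, axis=1)                        # view with a negative stride
# torch.from_numpy(flipped)                           # would raise ValueError
ok = torch.from_numpy(np.ascontiguousarray(flipped))  # copying fixes the strides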
Example #4
Source File: custom_datasets.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes
def __getitem__(self, index):
    img = self.adv_flat[self.sample_num, :]

    if self.shuff == False:
        # shuff is true for non-pgd attacks
        img = torch.from_numpy(np.reshape(img, (3, 224, 224)))
    else:
        img = torch.from_numpy(img).type(torch.FloatTensor)
    target = self.adv_dict["adv_labels"][self.sample_num]

    # doing this so that it is consistent with all other datasets
    # to return a PIL Image
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    self.sample_num = self.sample_num + 1
    return img, target
Example #5
Source File: maskiou_head.py From mmdetection with Apache License 2.0 | 6 votes
def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
    """Compute area ratio of the gt mask inside the proposal and the gt
    mask of the corresponding instance."""
    num_pos = pos_proposals.size(0)
    if num_pos > 0:
        area_ratios = []
        proposals_np = pos_proposals.cpu().numpy()
        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
        # compute mask areas of gt instances (batch processing for speedup)
        gt_instance_mask_area = gt_masks.areas
        for i in range(num_pos):
            gt_mask = gt_masks[pos_assigned_gt_inds[i]]

            # crop the gt mask inside the proposal
            bbox = proposals_np[i, :].astype(np.int32)
            gt_mask_in_proposal = gt_mask.crop(bbox)

            ratio = gt_mask_in_proposal.areas[0] / (
                gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
            area_ratios.append(ratio)
        area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
            pos_proposals.device)
    else:
        area_ratios = pos_proposals.new_zeros((0, ))
    return area_ratios
Example #6
Source File: custom_datasets.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes
def __getitem__(self, index):
    img = self.adv_flat[self.sample_num, :]

    if self.shuff == False:
        # shuff is true for non-pgd attacks
        img = torch.from_numpy(np.reshape(img, (3, 32, 32)))
    else:
        img = torch.from_numpy(img).type(torch.FloatTensor)
    target = np.argmax(self.adv_dict["adv_labels"], axis=1)[self.sample_num]

    # doing this so that it is consistent with all other datasets
    # to return a PIL Image
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    self.sample_num = self.sample_num + 1
    return img, target
Example #7
Source File: custom_datasets.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes
def __getitem__(self, index):
    img = self.adv_flat[self.sample_num, :]

    if self.transp == False:
        # shuff is true for non-pgd attacks
        img = torch.from_numpy(np.reshape(img, (28, 28)))
    else:
        img = torch.from_numpy(img).type(torch.FloatTensor)
    target = np.argmax(self.adv_dict["adv_labels"], axis=1)[self.sample_num]

    # doing this so that it is consistent with all other datasets
    # to return a PIL Image
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    self.sample_num = self.sample_num + 1
    return img, target
Example #8
Source File: Embed.py From pytorch_NER_BiLSTM_CNN_CRF with Apache License 2.0 | 6 votes
def _zeros_embed(self, embed_dict, words_dict):
    """
    :param embed_dict:
    :param words_dict:
    """
    print("loading pre_train embedding by zeros for out of vocabulary.")
    embeddings = np.zeros((int(self.words_count), int(self.dim)))
    for word in words_dict:
        if word in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
            self.exact_count += 1
        elif word.lower() in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
            self.fuzzy_count += 1
        else:
            self.oov_count += 1
    final_embed = torch.from_numpy(embeddings).float()
    return final_embed
Example #9
Source File: Embed.py From pytorch_NER_BiLSTM_CNN_CRF with Apache License 2.0 | 6 votes
def _nn_embed(self, embed_dict, words_dict):
    """
    :param embed_dict:
    :param words_dict:
    """
    print("loading pre_train embedding by nn.Embedding for out of vocabulary.")
    embed = nn.Embedding(int(self.words_count), int(self.dim))
    init.xavier_uniform_(embed.weight.data)
    embeddings = np.array(embed.weight.data)
    for word in words_dict:
        if word in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
            self.exact_count += 1
        elif word.lower() in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
            self.fuzzy_count += 1
        else:
            self.oov_count += 1
    embeddings[self.padID] = 0
    final_embed = torch.from_numpy(embeddings).float()
    return final_embed
Example #10
Source File: Embed.py From pytorch_NER_BiLSTM_CNN_CRF with Apache License 2.0 | 6 votes
def _uniform_embed(self, embed_dict, words_dict):
    """
    :param embed_dict:
    :param words_dict:
    """
    print("loading pre_train embedding by uniform for out of vocabulary.")
    embeddings = np.zeros((int(self.words_count), int(self.dim)))
    inword_list = {}
    for word in words_dict:
        if word in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
            inword_list[words_dict[word]] = 1
            self.exact_count += 1
        elif word.lower() in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
            inword_list[words_dict[word]] = 1
            self.fuzzy_count += 1
        else:
            self.oov_count += 1
    uniform_col = np.random.uniform(-0.25, 0.25, int(self.dim)).round(6)  # uniform
    for i in range(len(words_dict)):
        if i not in inword_list and i != self.padID:
            embeddings[i] = uniform_col
    final_embed = torch.from_numpy(embeddings).float()
    return final_embed
Example #11
Source File: Embed.py From pytorch_NER_BiLSTM_CNN_CRF with Apache License 2.0 | 6 votes
def _avg_embed(self, embed_dict, words_dict):
    """
    :param embed_dict:
    :param words_dict:
    """
    print("loading pre_train embedding by avg for out of vocabulary.")
    embeddings = np.zeros((int(self.words_count), int(self.dim)))
    inword_list = {}
    for word in words_dict:
        if word in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
            inword_list[words_dict[word]] = 1
            self.exact_count += 1
        elif word.lower() in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
            inword_list[words_dict[word]] = 1
            self.fuzzy_count += 1
        else:
            self.oov_count += 1
    sum_col = np.sum(embeddings, axis=0) / len(inword_list)  # avg
    for i in range(len(words_dict)):
        if i not in inword_list and i != self.padID:
            embeddings[i] = sum_col
    final_embed = torch.from_numpy(embeddings).float()
    return final_embed
Example #12
Source File: Data.py From GST-Tacotron with MIT License | 6 votes
def get_eval_data(text, wav_path):
    '''
    get data for eval
    --------------
    input:
        text --- pinyin format sequence
    output:
        text --- [1, T_x]
        mel --- [1, 1, n_mels]
    '''
    text = text_normalize(text) + 'E'
    text = [hp.char2idx[c] for c in text]
    text = torch.Tensor(text).type(torch.LongTensor)  # [T_x]
    text = text.unsqueeze(0)  # [1, T_x]

    mel = torch.zeros(1, 1, hp.n_mels)  # GO frame [1, 1, n_mels]

    _, ref_mels, _ = load_spectrograms(wav_path)
    ref_mels = torch.from_numpy(ref_mels).unsqueeze(0)

    return text, mel, ref_mels
Example #13
Source File: env_utils.py From Pytorch-Project-Template with MIT License | 6 votes
def get_screen(self, env):
    screen = env.render(mode='rgb_array').transpose((2, 0, 1))  # transpose into torch order (CHW)
    # Strip off the top and bottom of the screen
    screen = screen[:, 160:320]
    view_width = 320
    cart_location = self.get_cart_location(env)
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (self.screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # Convert to float, rescale, convert to torch tensor
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    # Resize, and add a batch dimension (BCHW)
    return resize(screen).unsqueeze(0)
Example #14
Source File: prune_utils.py From pruning_yolov3 with GNU General Public License v3.0 | 5 votes
def prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask):
    pruned_model = deepcopy(model)
    for idx in prune_idx:
        mask = torch.from_numpy(CBLidx2mask[idx]).cuda()
        bn_module = pruned_model.module_list[idx][1]

        bn_module.weight.data.mul_(mask)

        activation = F.leaky_relu((1 - mask) * bn_module.bias.data, 0.1)

        # conv layers before the two upsample layers
        next_idx_list = [idx + 1]
        if idx == 79:
            next_idx_list.append(84)
        elif idx == 91:
            next_idx_list.append(96)

        for next_idx in next_idx_list:
            next_conv = pruned_model.module_list[next_idx][0]
            conv_sum = next_conv.weight.data.sum(dim=(2, 3))
            offset = conv_sum.matmul(activation.reshape(-1, 1)).reshape(-1)
            if next_idx in CBL_idx:
                next_bn = pruned_model.module_list[next_idx][1]
                next_bn.running_mean.data.sub_(offset)
            else:
                next_conv.bias.data.add_(offset)

        bn_module.bias.data.mul_(mask)

    return pruned_model
Example #15
Source File: util.py From DeepLab_v3_plus with MIT License | 5 votes
def decode_seg_map_sequence(label_masks, dataset='pascal'):
    rgb_masks = []
    for label_mask in label_masks:
        rgb_mask = decode_segmap(label_mask, dataset)
        rgb_masks.append(rgb_mask)
    rgb_masks = torch.from_numpy(np.array(rgb_masks).transpose([0, 3, 1, 2]))
    return rgb_masks
Example #16
Source File: util.py From DeepLab_v3_plus with MIT License | 5 votes
def cross_entropy2d(logit, target, ignore_index=255, weight=None, size_average=True, batch_average=True):
    """
    logit is the network output, shape (batchsize, 21, 512, 512); values are raw scores (not normalized).
    target is the ground truth, shape (batchsize, 1, 512, 512); background is 0, the other classes are 1-20, ignored pixels are 255.
    Returns the loss averaged over h * w * batchsize.
    The loss amounts to a per-pixel classification cross-entropy.
    ignore_index marks pixels to skip (neither background nor a target class, i.e. objects outside the
    dataset's categories; they contribute no loss). These pixels appear white.
    Note: CrossEntropyLoss applies softmax internally, so the network's last layer needs no extra processing.
    https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss
    """
    n, c, h, w = logit.size()
    # logit = logit.permute(0, 2, 3, 1)
    target = target.squeeze(1)  # (batchsize, 1, 512, 512) -> (batchsize, 512, 512)
    if weight is None:
        criterion = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, size_average=False)
    else:
        criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(weight)).float().cuda(),
                                        ignore_index=ignore_index, size_average=False)
    loss = criterion(logit, target.long())

    if size_average:
        loss /= (h * w)

    if batch_average:
        loss /= n

    return loss
Example #17
Source File: transform.py From DeepLab_v3_plus with MIT License | 5 votes
def __call__(self, sample):
    # swap color axis because
    # numpy image: H x W x C
    # torch image: C x H x W
    img = np.array(sample['image']).astype(np.float32).transpose((2, 0, 1))
    mask = np.expand_dims(np.array(sample['label']).astype(np.float32), -1).transpose((2, 0, 1))
    mask[mask == 255] = 0

    img = torch.from_numpy(img).float()
    mask = torch.from_numpy(mask).float()

    return {'image': img, 'label': mask}
Example #18
Source File: utils_test.py From neural-pipeline with MIT License | 5 votes
def test_dict_recursive_bypass(self):
    d = {'data': np.array([1]), 'target': {'a': np.array([1]), 'b': np.array([1])}}
    d = dict_recursive_bypass(d, lambda v: torch.from_numpy(v))

    self.assertTrue(isinstance(d['data'], Tensor))
    self.assertTrue(isinstance(d['target']['a'], Tensor))
    self.assertTrue(isinstance(d['target']['b'], Tensor))
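For context, dict_recursive_bypass comes from the neural-pipeline package; the test above pins down its contract. A hypothetical re-implementation, for illustration only (it mirrors exactly what the assertions check):

def dict_recursive_bypass(d, fn):
    # Apply fn to every leaf value, recursing into nested dicts.
    return {k: dict_recursive_bypass(v, fn) if isinstance(v, dict) else fn(v)
            for k, v in d.items()}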
Example #19
Source File: img_segmentation.py From neural-pipeline with MIT License | 5 votes
def augmentate_and_to_pytorch(item: dict):
    res = augmentate(item)
    return {'data': torch.from_numpy(np.moveaxis(res['data'].astype(np.float32) / 255., -1, 0)),
            'target': torch.from_numpy(np.expand_dims(res['target'].astype(np.float32) / 255, axis=0))}
Example #20
Source File: completion_test.py From VSE-C with MIT License | 5 votes
def load_word_embedding(vse):
    checkpoint = torch.load(vse)
    opt = checkpoint['opt']
    vocab = Vocab.from_pickle(pjoin(opt.vocab_path, '%s_vocab.pkl' % opt.data_name))
    if not args.glove_only:
        embed_weights = checkpoint['model'][1]['embed.weight'].cpu().numpy()
        _, glove_weights = io.load('data/snli/glove.pkl')
        embed_weights = np.concatenate((glove_weights, embed_weights), axis=1)
    else:
        _, embed_weights = io.load('data/snli/glove.pkl')
    embedding = nn.Embedding(embed_weights.shape[0], embed_weights.shape[1], padding_idx=0)
    embedding.weight.data.copy_(torch.from_numpy(embed_weights))
    return vocab, embedding
Example #21
Source File: completion_train.py From VSE-C with MIT License | 5 votes
def load_word_embedding(vse):
    checkpoint = torch.load(vse)
    opt = checkpoint['opt']
    vocab = Vocab.from_pickle(pjoin(opt.vocab_path, '%s_vocab.pkl' % opt.data_name))
    if not args.glove_only:
        embed_weights = checkpoint['model'][1]['embed.weight'].cpu().numpy()
        _, glove_weights = io.load('data/snli/glove.pkl')
        embed_weights = np.concatenate((glove_weights, embed_weights), axis=1)
    else:
        _, embed_weights = io.load('data/snli/glove.pkl')
    embedding = nn.Embedding(embed_weights.shape[0], embed_weights.shape[1], padding_idx=0)
    embedding.weight.data.copy_(torch.from_numpy(embed_weights))
    return vocab, embedding
Example #22
Source File: network.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 5 votes
def _anchor_component(self, height, width):
    # just to get the shape right
    # height = int(math.ceil(self._im_info.data[0, 0] / self._feat_stride[0]))
    # width = int(math.ceil(self._im_info.data[0, 1] / self._feat_stride[0]))
    anchors, anchor_length = generate_anchors_pre(
        height, width,
        self._feat_stride, self._anchor_scales, self._anchor_ratios)
    self._anchors = Variable(torch.from_numpy(anchors).cuda())
    self._anchor_length = anchor_length
Example #23
Source File: network.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 5 votes
def _generate_pseudo_gtbox(self, fuse_prob, boxes):  # Inputs are two variables
    # returns gt_boxes as Variable(torch.from_numpy(gt_boxes).cuda()), size: gt_num * (x1, y1, x2, y2, class)
    gt_boxes, proposals = generate_pseudo_gtbox(boxes, fuse_prob, self._labels)
    return gt_boxes, proposals
Example #24
Source File: network.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 5 votes
def forward(self, image, im_info, boxes, labels=None, mode='TRAIN'):  # done
    self._image_gt_summaries['image'] = image
    self._image_gt_summaries['boxes'] = boxes
    self._image_gt_summaries['im_info'] = im_info
    self._image_gt_summaries['labels'] = labels

    self._image = Variable(torch.from_numpy(image.transpose([0, 3, 1, 2])).cuda(), volatile=mode == 'TEST')
    self._im_info = im_info  # No need to change; actually it can be a list
    self._boxes = Variable(torch.from_numpy(boxes).type('torch.Tensor').cuda())
    self._labels = Variable(torch.from_numpy(labels).type('torch.Tensor').cuda()) if labels is not None else None
    self._mode = mode

    # (1) do image -> net_conv
    torch.backends.cudnn.benchmark = False
    net_conv = self._image_to_head()

    # (2) do net_conv -> faster-branch
    cls_prob_fast, bbox_pred_fast = self._predict_fast(net_conv)

    if mode == 'TEST':
        # (3) do net_conv -> wsddn-branch
        fuse_prob = self._predict(net_conv)

        stds = bbox_pred_fast.data.new(cfg.TRAIN.BBOX_NORMALIZE_STDS).repeat(self._num_classes + 1).unsqueeze(0).expand_as(bbox_pred_fast)
        means = bbox_pred_fast.data.new(cfg.TRAIN.BBOX_NORMALIZE_MEANS).repeat(self._num_classes + 1).unsqueeze(0).expand_as(bbox_pred_fast)
        self._predictions["bbox_pred_fast"] = bbox_pred_fast.mul(Variable(stds)).add(Variable(means))
    else:
        self._add_losses()  # compute losses
Example #25
Source File: anchor_target_layer.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 5 votes
def _compute_targets(ex_rois, gt_rois):
    """Compute bounding-box regression targets for an image."""
    assert ex_rois.shape[0] == gt_rois.shape[0]
    assert ex_rois.shape[1] == 4
    assert gt_rois.shape[1] == 5

    return bbox_transform(torch.from_numpy(ex_rois), torch.from_numpy(gt_rois[:, :4])).numpy()
Example #26
Source File: transforms.py From Random-Erasing with Apache License 2.0 | 5 votes
def __call__(self, img):
    if random.uniform(0, 1) > self.EPSILON:
        return img

    for attempt in range(100):
        area = img.size()[1] * img.size()[2]

        target_area = random.uniform(self.sl, self.sh) * area
        aspect_ratio = random.uniform(self.r1, 1 / self.r1)

        h = int(round(math.sqrt(target_area * aspect_ratio)))
        w = int(round(math.sqrt(target_area / aspect_ratio)))

        if w < img.size()[2] and h < img.size()[1]:
            x1 = random.randint(0, img.size()[1] - h)
            y1 = random.randint(0, img.size()[2] - w)
            if img.size()[0] == 3:
                # img[0, x1:x1+h, y1:y1+w] = random.uniform(0, 1)
                # img[1, x1:x1+h, y1:y1+w] = random.uniform(0, 1)
                # img[2, x1:x1+h, y1:y1+w] = random.uniform(0, 1)
                img[0, x1:x1+h, y1:y1+w] = self.mean[0]
                img[1, x1:x1+h, y1:y1+w] = self.mean[1]
                img[2, x1:x1+h, y1:y1+w] = self.mean[2]
                # img[:, x1:x1+h, y1:y1+w] = torch.from_numpy(np.random.rand(3, h, w))
            else:
                img[0, x1:x1+h, y1:y1+w] = self.mean[1]
                # img[0, x1:x1+h, y1:y1+w] = torch.from_numpy(np.random.rand(1, h, w))
            return img

    return img
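In a training pipeline, an eraser like this is applied after ToTensor(), since __call__ indexes a C x H x W tensor. A hedged usage sketch (the constructor arguments are assumptions inferred from the attributes the method reads; check the repository for the real signature):

import torchvision.transforms as transforms

train_transform = transforms.Compose([
    transforms.ToTensor(),
    RandomErasing(),  # hypothetical defaults for EPSILON, sl, sh, r1, mean
])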
Example #27
Source File: model_architecture.py From models with MIT License | 5 votes
def edit_tensor_in_numpy(input, trafo):
    # Kept in case tensor transformations should be done in numpy rather than pytorch
    # (might be slightly faster, but is ugly and might break code..)
    is_cuda = input.is_cuda
    if is_cuda:
        input_np = input.cpu().data.numpy()
    else:
        input_np = input.data.numpy()
    del input
    input_np = trafo(input_np)
    input = Variable(torch.from_numpy(input_np))
    if is_cuda:
        input = input.cuda()
    return input
Example #28
Source File: pth_nms.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 5 votes
def pth_nms(dets, thresh):
    """
    dets has to be a tensor
    """
    if not dets.is_cuda:
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = dets[:, 2]
        y2 = dets[:, 3]
        scores = dets[:, 4]

        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.sort(0, descending=True)[1]
        # order = torch.from_numpy(np.ascontiguousarray(scores.numpy().argsort()[::-1])).long()

        keep = torch.LongTensor(dets.size(0))
        num_out = torch.LongTensor(1)
        nms.cpu_nms(keep, num_out, dets, order, areas, thresh)

        return keep[:num_out[0]]
    else:
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = dets[:, 2]
        y2 = dets[:, 3]
        scores = dets[:, 4]

        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.sort(0, descending=True)[1]
        # order = torch.from_numpy(np.ascontiguousarray(scores.cpu().numpy().argsort()[::-1])).long().cuda()

        dets = dets[order].contiguous()

        keep = torch.LongTensor(dets.size(0))
        num_out = torch.LongTensor(1)
        # keep = torch.cuda.LongTensor(dets.size(0))
        # num_out = torch.cuda.LongTensor(1)
        nms.gpu_nms(keep, num_out, dets, thresh)

        return order[keep[:num_out[0]].cuda()].contiguous()
        # return order[keep[:num_out[0]]].contiguous()
Example #29
Source File: dataset.py From PolarSeg with BSD 3-Clause "New" or "Revised" License | 5 votes
def collate_fn_BEV(data):
    data2stack = np.stack([d[0] for d in data]).astype(np.float32)
    label2stack = np.stack([d[1] for d in data])
    grid_ind_stack = [d[2] for d in data]
    point_label = [d[3] for d in data]
    xyz = [d[4] for d in data]
    return torch.from_numpy(data2stack), torch.from_numpy(label2stack), grid_ind_stack, point_label, xyz
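A collate function like this plugs straight into torch.utils.data.DataLoader. A minimal usage sketch (polar_dataset is a hypothetical Dataset whose __getitem__ returns the five fields stacked above, in that order):

from torch.utils.data import DataLoader

loader = DataLoader(polar_dataset, batch_size=4, shuffle=True,
                    collate_fn=collate_fn_BEV)

for bev, labels, grid_ind, point_label, xyz in loader:
    # bev and labels arrive as batched tensors;
    # the remaining fields stay as per-sample lists.
    pass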