Python utils.blob.serialize() Examples
The following are 11 code examples of utils.blob.serialize().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module utils.blob, or try the search function.
Example #1
Source File: loader.py From Detectron.pytorch with MIT License | 5 votes |
def __getitem__(self, index_tuple):
    """Return the training blobs for one roidb entry.

    `index_tuple` is (index, ratio): the roidb index plus the target
    aspect ratio used when cropping is required.  The roidb payload is
    serialized so the default collate_fn can batch it.
    """
    idx, aspect_ratio = index_tuple
    blobs, valid = get_minibatch([self._roidb[idx]])
    #TODO: Check if minibatch is valid ? If not, abandon it.
    # Need to change _worker_loop in torch.utils.data.dataloader.py.

    # Squeeze batch dim
    for name in blobs:
        if name != 'roidb':
            blobs[name] = blobs[name].squeeze(axis=0)

    if self._roidb[idx]['need_crop']:
        self.crop_data(blobs, aspect_ratio)

        # Check bounding box: cropping can collapse boxes to zero width
        # or height, so drop the degenerate ones from the entry.
        entry = blobs['roidb'][0]
        boxes = entry['boxes']
        degenerate = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
        keep = np.nonzero(~degenerate)[0]
        if len(keep) < len(boxes):
            for name in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps',
                         'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']:
                if name in entry:
                    entry[name] = entry[name][keep]
            entry['segms'] = [entry['segms'][i] for i in keep]

    blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn
    return blobs
Example #2
Source File: loader.py From FPN-Pytorch with MIT License | 5 votes |
def __getitem__(self, index_tuple):
    """Build and return one sample's blobs.

    Expects `index_tuple` = (roidb index, crop aspect ratio).  Degenerate
    boxes produced by cropping are filtered out, and the roidb list is
    serialized for the collate_fn.
    """
    index, ratio = index_tuple
    single_db = [self._roidb[index]]
    blobs, valid = get_minibatch(single_db)
    #TODO: Check if minibatch is valid ? If not, abandon it.
    # Need to change _worker_loop in torch.utils.data.dataloader.py.

    # Squeeze batch dim on everything except the roidb payload
    for key in blobs:
        if key == 'roidb':
            continue
        blobs[key] = blobs[key].squeeze(axis=0)

    if self._roidb[index]['need_crop']:
        self.crop_data(blobs, ratio)

        # Check bounding box: remove boxes with zero width or height
        entry = blobs['roidb'][0]
        boxes = entry['boxes']
        bad = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
        keep_inds = np.flatnonzero(~bad)
        if len(keep_inds) < len(boxes):
            per_box_keys = ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps',
                            'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']
            for key in per_box_keys:
                if key in entry:
                    entry[key] = entry[key][keep_inds]
            entry['segms'] = [entry['segms'][ind] for ind in keep_inds]

    blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn
    return blobs
Example #3
Source File: loader.py From Detectron.pytorch with MIT License | 5 votes |
def __getitem__(self, index_tuple):
    """Fetch the minibatch blobs for a single roidb entry.

    `index_tuple` unpacks to (index, ratio); `ratio` is the target aspect
    ratio applied when the entry is flagged `need_crop`.
    """
    index, ratio = index_tuple
    blobs, valid = get_minibatch([self._roidb[index]])
    #TODO: Check if minibatch is valid ? If not, abandon it.
    # Need to change _worker_loop in torch.utils.data.dataloader.py.

    # Squeeze batch dim
    for key in (k for k in blobs if k != 'roidb'):
        blobs[key] = blobs[key].squeeze(axis=0)

    if self._roidb[index]['need_crop']:
        self.crop_data(blobs, ratio)

        # Check bounding box — cropping may leave zero-area boxes; prune them.
        entry = blobs['roidb'][0]
        boxes = entry['boxes']
        zero_w = boxes[:, 0] == boxes[:, 2]
        zero_h = boxes[:, 1] == boxes[:, 3]
        valid_inds = np.nonzero(~(zero_w | zero_h))[0]
        if len(valid_inds) < len(boxes):
            for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps',
                        'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']:
                if key in entry:
                    entry[key] = entry[key][valid_inds]
            entry['segms'] = [entry['segms'][ind] for ind in valid_inds]

    blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn
    return blobs
Example #4
Source File: loader.py From Context-aware-ZSR with MIT License | 5 votes |
def __getitem__(self, index_tuple):
    """Return one training sample's blobs for the dataloader.

    Takes (index, ratio): the roidb position and the crop aspect ratio.
    Serializes the roidb payload so the default collate_fn can handle it.
    """
    sample_idx, crop_ratio = index_tuple
    db_slice = [self._roidb[sample_idx]]
    blobs, valid = get_minibatch(db_slice)
    #TODO: Check if minibatch is valid ? If not, abandon it.
    # Need to change _worker_loop in torch.utils.data.dataloader.py.

    # Squeeze batch dim
    for blob_name in blobs:
        if blob_name != 'roidb':
            blobs[blob_name] = blobs[blob_name].squeeze(axis=0)

    if self._roidb[sample_idx]['need_crop']:
        self.crop_data(blobs, crop_ratio)

        # Check bounding box: discard boxes collapsed by the crop
        entry = blobs['roidb'][0]
        boxes = entry['boxes']
        collapsed = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
        surviving = np.nonzero(~collapsed)[0]
        if len(surviving) < len(boxes):
            for blob_name in ['boxes', 'gt_classes', 'seg_areas',
                              'gt_overlaps', 'is_crowd',
                              'box_to_gt_ind_map', 'gt_keypoints']:
                if blob_name in entry:
                    entry[blob_name] = entry[blob_name][surviving]
            entry['segms'] = [entry['segms'][ind] for ind in surviving]

    blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn
    return blobs
Example #5
Source File: loader.py From PANet with MIT License | 5 votes |
def __getitem__(self, index_tuple):
    """Return the blobs for one roidb entry, cropping if required.

    `index_tuple` is (index, ratio).  After an optional crop, boxes with
    zero width/height are removed, then the roidb list is serialized.
    """
    index, ratio = index_tuple
    blobs, valid = get_minibatch([self._roidb[index]])
    #TODO: Check if minibatch is valid ? If not, abandon it.
    # Need to change _worker_loop in torch.utils.data.dataloader.py.

    # Squeeze batch dim
    for key in blobs:
        if key != 'roidb':
            blobs[key] = blobs[key].squeeze(axis=0)

    if self._roidb[index]['need_crop']:
        self.crop_data(blobs, ratio)

        # Check bounding box
        entry = blobs['roidb'][0]
        boxes = entry['boxes']
        invalid_mask = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
        valid_inds = np.flatnonzero(~invalid_mask)
        if len(valid_inds) < len(boxes):
            # Filter every per-box field that is present in this entry
            for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps',
                        'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']:
                if key in entry:
                    entry[key] = entry[key][valid_inds]
            entry['segms'] = [entry['segms'][ind] for ind in valid_inds]

    blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn
    return blobs
Example #6
Source File: loader.py From PMFNet with MIT License | 5 votes |
def __getitem__(self, index_tuple):
    """Return one sample's blobs (PMFNet variant with HOI fields).

    `index_tuple` is (index, ratio).  Besides the usual per-box fields,
    this variant also filters 'precomp_keypoints', 'gt_actions' and
    'gt_role_id' when degenerate boxes are dropped after cropping.
    """
    index, ratio = index_tuple
    single_db = [self._roidb[index]]  # NOTE: proposals added upstream (_add_proposals)
    blobs, valid = get_minibatch(single_db)
    #TODO: Check if minibatch is valid ? If not, abandon it.
    # Need to change _worker_loop in torch.utils.data.dataloader.py.

    # Squeeze batch dim
    for key in blobs:
        if key != 'roidb':
            blobs[key] = blobs[key].squeeze(axis=0)

    if self._roidb[index]['need_crop']:
        self.crop_data(blobs, ratio)

        # Check bounding box: prune boxes collapsed by the crop
        entry = blobs['roidb'][0]
        boxes = entry['boxes']
        degenerate = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
        keep = np.nonzero(~degenerate)[0]
        if len(keep) < len(boxes):
            per_box_fields = ['boxes', 'precomp_keypoints', 'gt_classes',
                              'seg_areas', 'gt_overlaps', 'is_crowd',
                              'box_to_gt_ind_map', 'gt_keypoints',
                              'gt_actions', 'gt_role_id']
            for key in per_box_fields:
                if key in entry:
                    entry[key] = entry[key][keep]
            entry['segms'] = [entry['segms'][ind] for ind in keep]

    blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn
    return blobs
Example #7
Source File: loader.py From Large-Scale-VRD.pytorch with MIT License | 5 votes |
def __getitem__(self, index_tuple):
    """Produce the training blobs for the roidb entry at `index`.

    `index_tuple` = (index, ratio); `ratio` drives `crop_data` when the
    entry is marked `need_crop`.
    """
    index, ratio = index_tuple
    blobs, valid = get_minibatch([self._roidb[index]])
    #TODO: Check if minibatch is valid ? If not, abandon it.
    # Need to change _worker_loop in torch.utils.data.dataloader.py.

    # Squeeze batch dim
    for key in blobs:
        if key == 'roidb':
            continue
        blobs[key] = blobs[key].squeeze(axis=0)

    if self._roidb[index]['need_crop']:
        self.crop_data(blobs, ratio)

        # Check bounding box — the crop may have produced zero-area boxes
        entry = blobs['roidb'][0]
        boxes = entry['boxes']
        no_width = boxes[:, 0] == boxes[:, 2]
        no_height = boxes[:, 1] == boxes[:, 3]
        valid_inds = np.nonzero(~(no_width | no_height))[0]
        if len(valid_inds) < len(boxes):
            for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps',
                        'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']:
                if key in entry:
                    entry[key] = entry[key][valid_inds]
            entry['segms'] = [entry['segms'][ind] for ind in valid_inds]

    blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn
    return blobs
Example #8
Source File: loader.py From detectron-self-train with MIT License | 5 votes |
def __getitem__(self, index_tuple):
    """Return one sample's blobs (detectron-self-train variant).

    `index_tuple` is (index, ratio).  This variant additionally filters
    the self-training fields 'gt_scores', 'dataset_id' and 'gt_source'
    when degenerate boxes are removed after cropping.
    """
    index, ratio = index_tuple
    single_db = [self._roidb[index]]
    blobs, valid = get_minibatch(single_db)
    #TODO: Check if minibatch is valid ? If not, abandon it.
    # Need to change _worker_loop in torch.utils.data.dataloader.py.

    # Squeeze batch dim on all array blobs (roidb stays a list)
    for key in blobs:
        if key != 'roidb':
            blobs[key] = blobs[key].squeeze(axis=0)

    if self._roidb[index]['need_crop']:
        self.crop_data(blobs, ratio)

        # Check bounding box: drop boxes with zero width or height
        entry = blobs['roidb'][0]
        boxes = entry['boxes']
        bad = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
        keep_inds = np.flatnonzero(~bad)
        if len(keep_inds) < len(boxes):
            for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps',
                        'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints',
                        'gt_scores', 'dataset_id', 'gt_source']:
                if key in entry:
                    entry[key] = entry[key][keep_inds]
            entry['segms'] = [entry['segms'][ind] for ind in keep_inds]

    blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn
    return blobs
Example #9
Source File: loader.py From DIoU-pytorch-detectron with GNU General Public License v3.0 | 5 votes |
def __getitem__(self, index_tuple):
    """Load the minibatch blobs for a single roidb entry.

    `index_tuple` unpacks to (index, ratio).  The roidb payload is
    serialized at the end so the default collate_fn can batch it.
    """
    idx, ratio = index_tuple
    blobs, valid = get_minibatch([self._roidb[idx]])
    #TODO: Check if minibatch is valid ? If not, abandon it.
    # Need to change _worker_loop in torch.utils.data.dataloader.py.

    # Squeeze batch dim
    for name in list(blobs):
        if name != 'roidb':
            blobs[name] = blobs[name].squeeze(axis=0)

    if self._roidb[idx]['need_crop']:
        self.crop_data(blobs, ratio)

        # Check bounding box: filter out boxes collapsed by the crop
        entry = blobs['roidb'][0]
        boxes = entry['boxes']
        is_degenerate = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
        retained = np.nonzero(~is_degenerate)[0]
        if len(retained) < len(boxes):
            for name in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps',
                         'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']:
                if name in entry:
                    entry[name] = entry[name][retained]
            entry['segms'] = [entry['segms'][i] for i in retained]

    blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn
    return blobs
Example #10
Source File: loader_rel.py From Large-Scale-VRD.pytorch with MIT License | 4 votes |
def __getitem__(self, index_tuple):
    """Return one sample's blobs for relationship detection.

    `index_tuple` is (index, ratio): the roidb index and the crop aspect
    ratio.  After an optional crop this removes degenerate (zero-area)
    detection boxes, then additionally removes relation triplets whose
    subject or object box is degenerate, and finally serializes the
    roidb payload for the collate_fn.
    """
    index, ratio = index_tuple
    single_db = [self._roidb[index]]
    blobs, valid = get_minibatch(single_db)
    #TODO: Check if minibatch is valid ? If not, abandon it.
    # Need to change _worker_loop in torch.utils.data.dataloader.py.

    # Squeeze batch dim
    for key in blobs:
        if key != 'roidb':
            blobs[key] = blobs[key].squeeze(axis=0)

    if self._roidb[index]['need_crop']:
        self.crop_data(blobs, ratio)

        # Check bounding box: drop boxes with zero width or height
        entry = blobs['roidb'][0]
        boxes = entry['boxes']
        invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
        valid_inds = np.nonzero(~ invalid)[0]
        if len(valid_inds) < len(boxes):
            for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps',
                        'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']:
                if key in entry:
                    entry[key] = entry[key][valid_inds]
            entry['segms'] = [entry['segms'][ind] for ind in valid_inds]

        # for rel sanity check: a relation is invalid if either its
        # subject or its object box is degenerate.
        sbj_gt_boxes = entry['sbj_gt_boxes']
        obj_gt_boxes = entry['obj_gt_boxes']
        sbj_invalid = (sbj_gt_boxes[:, 0] == sbj_gt_boxes[:, 2]) | \
                      (sbj_gt_boxes[:, 1] == sbj_gt_boxes[:, 3])
        obj_invalid = (obj_gt_boxes[:, 0] == obj_gt_boxes[:, 2]) | \
                      (obj_gt_boxes[:, 1] == obj_gt_boxes[:, 3])
        # BUG FIX: the original assigned this mask to `rel_valid` (a
        # misnomer — it marks INVALID relations) and then referenced the
        # undefined name `rel_invalid` on the next line, raising a
        # NameError whenever the crop path executed.  Use one consistent
        # name for the invalid mask.
        rel_invalid = sbj_invalid | obj_invalid
        rel_valid_inds = np.nonzero(~ rel_invalid)[0]
        if len(rel_valid_inds) < len(sbj_gt_boxes):
            for key in ['sbj_gt_boxes', 'sbj_gt_classes', 'obj_gt_boxes',
                        'obj_gt_classes', 'prd_gt_classes',
                        'sbj_gt_overlaps', 'obj_gt_overlaps',
                        'prd_gt_overlaps', 'pair_to_gt_ind_map',
                        'width', 'height']:
                if key in entry:
                    entry[key] = entry[key][rel_valid_inds]

    blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn
    return blobs
Example #11
Source File: rpn.py From DetectAndTrack with Apache License 2.0 | 4 votes |
def add_rpn_blobs(blobs, im_scales, roidb):
    """Add blobs needed training RPN-only and end-to-end Faster R-CNN models."""
    # Temporal dimensions of the output
    T = roidb[0]['boxes'].shape[-1] // 4

    # Following vars are only used in FPN case, but keeping it out of the "if"
    # condition, so as to allow for _populate_rpn_blobs to work (it will pass
    # these dummy values and not use them)
    foas = []
    k_max = cfg.FPN.RPN_MAX_LEVEL
    k_min = cfg.FPN.RPN_MIN_LEVEL
    if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_RPN:
        # RPN applied to many feature levels, as in the FPN paper
        for level in range(k_min, k_max + 1):
            stride = 2. ** level
            sizes = (cfg.FPN.RPN_ANCHOR_START_SIZE * 2. ** (level - k_min), )
            foa = _get_field_of_anchors(
                stride, sizes, cfg.FPN.RPN_ASPECT_RATIOS, T)
            foas.append(foa)
        all_anchors = np.concatenate([f.field_of_anchors for f in foas])
    else:
        foa = _get_field_of_anchors(
            cfg.RPN.STRIDE, cfg.RPN.SIZES, cfg.RPN.ASPECT_RATIOS, T)
        all_anchors = foa.field_of_anchors

    # Fill per-image RPN targets (foa holds the last/only field of anchors)
    for im_i, entry in enumerate(roidb):
        _populate_rpn_blobs(entry, im_scales[im_i], blobs, all_anchors,
                            foas, foa, k_min, k_max)

    # Collapse per-image lists of arrays into single arrays
    for name, value in blobs.items():
        if isinstance(value, list) and len(value) > 0:
            blobs[name] = np.concatenate(value)

    # Keep only the roidb fields the downstream ops need, then serialize
    valid_keys = [
        'has_visible_keypoints', 'boxes', 'segms', 'seg_areas', 'gt_classes',
        'gt_overlaps', 'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']
    minimal_roidb = [
        {k: e[k] for k in valid_keys if k in e} for e in roidb]
    blobs['roidb'] = blob_utils.serialize(minimal_roidb)

    # Always return valid=True, since RPN minibatches are valid by design
    return True