Python utils.image.tensor_vstack() Examples
The following are 28 code examples of utils.image.tensor_vstack(). You can go to the original project or source file by following the links above each example, or check out the other available functions and classes of the utils.image module.
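Every example on this page uses tensor_vstack() to merge per-device mini-batches whose arrays can differ in size (images of different resolutions, varying numbers of boxes or anchors), so it behaves like a padding-aware numpy.vstack. The utils.image module itself is not reproduced here; the snippet below is only a minimal sketch inferred from the call sites in the examples (a list of arrays plus an optional pad value), not the project's actual implementation.

import numpy as np

def tensor_vstack(tensor_list, pad=0):
    """Stack arrays along axis 0, padding every other axis to the
    largest size found in the list and filling the gaps with `pad`."""
    ndim = tensor_list[0].ndim
    dtype = tensor_list[0].dtype
    # total length along axis 0, maximum extent along the remaining axes
    out_shape = [sum(t.shape[0] for t in tensor_list)]
    for dim in range(1, ndim):
        out_shape.append(max(t.shape[dim] for t in tensor_list))
    out = np.full(out_shape, pad, dtype=dtype)
    # copy each array into its own slice of the padded output
    offset = 0
    for t in tensor_list:
        index = (slice(offset, offset + t.shape[0]),) + tuple(slice(0, s) for s in t.shape[1:])
        out[index] = t
        offset += t.shape[0]
    return out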
Example #1
Source File: loader.py From kaggle-rsna18 with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slices
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get each device
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rcnn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([batch[key] for batch in label_list])

    self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
    self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
Example #2
Source File: loader.py From Decoupled-Classification-Refinement with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slices
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get each device
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rcnn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([batch[key] for batch in label_list])

    self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
    self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
Example #3
Source File: loader.py From Decoupled-Classification-Refinement with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slices
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get each device
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rcnn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([batch[key] for batch in label_list])

    self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
    self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
Example #4
Source File: loader.py From kaggle-rsna18 with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slices
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get each device
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rcnn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([batch[key] for batch in label_list])

    self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
    self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
Example #5
Source File: loader.py From RoITransformer_DOTA with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slices
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get each device
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rcnn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([batch[key] for batch in label_list])

    self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
    self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
Example #6
Source File: loader.py From Deformable-ConvNets with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slices
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get each device
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rcnn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([batch[key] for batch in label_list])

    self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
    self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
Example #7
Source File: loader.py From Faster_RCNN_for_DOTA with Apache License 2.0
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slices
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get each device
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rcnn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([batch[key] for batch in label_list])

    self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
    self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
Example #8
Source File: loader.py From Deformable-ConvNets with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slices
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get each device
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rcnn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([batch[key] for batch in label_list])

    self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
    self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
Example #9
Source File: loader.py From Faster_RCNN_for_DOTA with Apache License 2.0
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
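In the RPN-style loaders like the one above, the line pad = -1 if key == 'label' else 0 is what makes stacking work for variable-length targets: images of different sizes produce different numbers of anchors, so label arrays are padded with -1, which the training loss can then ignore (these codebases commonly configure that via use_ignore/ignore_label=-1 on the softmax), while image data is simply zero-padded. A small illustration with hypothetical label arrays, assuming the tensor_vstack sketch above:

import numpy as np

# hypothetical anchor labels from two images with different anchor counts
labels_a = np.array([[1, 0, 0, 1]])
labels_b = np.array([[0, 1]])

# padding with -1 marks the filler entries so the loss can skip them
print(tensor_vstack([labels_a, labels_b], pad=-1))
# [[ 1  0  0  1]
#  [ 0  1 -1 -1]]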
Example #10
Source File: loader.py From Accel with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_pair_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border,
                              self.normalize_target, self.bbox_mean, self.bbox_std)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #11
Source File: loader.py From Accel with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_pair_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border,
                              self.normalize_target, self.bbox_mean, self.bbox_std)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #12
Source File: loader.py From Accel with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border,
                              self.normalize_target, self.bbox_mean, self.bbox_std)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #13
Source File: loader.py From RoITransformer_DOTA with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #14
Source File: loader.py From Faster_RCNN_for_DOTA with Apache License 2.0
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch_quadrangle(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign quadrangle anchor for label
        label = assign_quadrangle_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                                         self.feat_stride, self.anchor_scales,
                                         self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #15
Source File: loader.py From Sequence-Level-Semantics-Aggregation with Apache License 2.0
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_triple_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label_f = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                                self.feat_stride, self.anchor_scales,
                                self.anchor_ratios, self.allowed_border,
                                self.normalize_target, self.bbox_mean, self.bbox_std)
        new_label_list.append(label_f)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #16
Source File: loader.py From Decoupled-Classification-Refinement with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #17
Source File: loader.py From Decoupled-Classification-Refinement with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #18
Source File: loader.py From Relation-Networks-for-Object-Detection with MIT License
def get_batch(self, cur_from=None):
    # slice roidb
    if cur_from is None:
        cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slices
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get each device
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rcnn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([batch[key] for batch in label_list])

    data = [mx.nd.array(all_data[name]) for name in self.data_name]
    label = [mx.nd.array(all_label[name]) for name in self.label_name]

    self.lock_data.acquire()
    self.data = data
    self.label = label
    self.lock_data.release()
    return data, label
Example #19
Source File: loader.py From RoITransformer_DOTA with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch_poly(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign quadrangle anchor for label
        label = assign_anchor_poly(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                                   self.feat_stride, self.anchor_scales,
                                   self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #20
Source File: loader.py From Deformable-ConvNets with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #21
Source File: loader.py From Deformable-ConvNets with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #22
Source File: loader.py From Flow-Guided-Feature-Aggregation with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_triple_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border,
                              self.normalize_target, self.bbox_mean, self.bbox_std)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #23
Source File: loader.py From Deep-Feature-Flow with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border,
                              self.normalize_target, self.bbox_mean, self.bbox_std)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #24
Source File: loader.py From Deep-Feature-Flow with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_pair_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border,
                              self.normalize_target, self.bbox_mean, self.bbox_std)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #25
Source File: loader.py From MANet_for_Video_Object_Detection with Apache License 2.0
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_triple_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # debug output left in the original source (written here as a Python 3 print call)
        print(data['gt_boxes'])
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border,
                              self.normalize_target, self.bbox_mean, self.bbox_std)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #26
Source File: loader.py From kaggle-rsna18 with MIT License
def get_batch(self, cur_from=None):
    # slice roidb
    if cur_from is None:
        cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slices
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get each device
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rcnn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([batch[key] for batch in label_list])

    data = [mx.nd.array(all_data[name]) for name in self.data_name]
    label = [mx.nd.array(all_label[name]) for name in self.label_name]

    self.lock_data.acquire()
    self.data = data
    self.label = label
    self.lock_data.release()
    return data, label
Example #27
Source File: loader.py From kaggle-rsna18 with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
Example #28
Source File: loader.py From kaggle-rsna18 with MIT License
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # get testing data for multigpu
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]