Python mxnet.nd.concatenate() Examples

The following are 13 code examples of mxnet.nd.concatenate(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module mxnet.nd, or try the search function.
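Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what nd.concatenate does: it joins a list of NDArrays along a given axis.

import mxnet as mx
from mxnet import nd

a = nd.array([[1, 2], [3, 4]])
b = nd.array([[5, 6], [7, 8]])

# axis=0 stacks rows -> shape (4, 2); axis=1 joins columns -> shape (2, 4)
rows = nd.concatenate([a, b], axis=0)
cols = nd.concatenate([a, b], axis=1)
print(rows.shape, cols.shape)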
Example #1
Source File: utils.py From ya_mxdet with MIT License | 6 votes |
def bbox_transform(anchor, bbox):
    w = anchor[:, 2] - anchor[:, 0]
    h = anchor[:, 3] - anchor[:, 1]
    cx = (anchor[:, 0] + anchor[:, 2]) / 2.0
    cy = (anchor[:, 1] + anchor[:, 3]) / 2.0
    g_w = bbox[:, 2] - bbox[:, 0]
    g_h = bbox[:, 3] - bbox[:, 1]
    g_cx = (bbox[:, 0] + bbox[:, 2]) / 2.0
    g_cy = (bbox[:, 1] + bbox[:, 3]) / 2.0
    g_w = mx.ndarray.log(g_w / w)
    g_h = mx.ndarray.log(g_h / h)
    g_cx = (g_cx - cx) / w
    g_cy = (g_cy - cy) / h
    return mx.ndarray.concatenate([
        g_w.reshape((-1, 1)),
        g_h.reshape((-1, 1)),
        g_cx.reshape((-1, 1)),
        g_cy.reshape((-1, 1))], axis=1)
Example #2
Source File: utils.py From ya_mxdet with MIT License | 6 votes |
def bbox_inverse_transform(anchor, bbox):
    w = anchor[:, 2] - anchor[:, 0]
    h = anchor[:, 3] - anchor[:, 1]
    cx = (anchor[:, 0] + anchor[:, 2]) / 2.0
    cy = (anchor[:, 1] + anchor[:, 3]) / 2.0
    g_w = mx.ndarray.exp(bbox[:, 0]) * w
    g_h = mx.ndarray.exp(bbox[:, 1]) * h
    g_cx = bbox[:, 2] * w + cx
    g_cy = bbox[:, 3] * h + cy
    g_x1 = g_cx - g_w / 2
    g_y1 = g_cy - g_h / 2
    g_x2 = g_cx + g_w / 2
    g_y2 = g_cy + g_h / 2
    return mx.ndarray.concatenate([
        g_x1.reshape((-1, 1)),
        g_y1.reshape((-1, 1)),
        g_x2.reshape((-1, 1)),
        g_y2.reshape((-1, 1))], axis=1)
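Examples #1 and #2 are inverses of each other: bbox_transform encodes ground-truth boxes as (log-width, log-height, center-x, center-y) offsets relative to anchors, and bbox_inverse_transform decodes those offsets back to corner coordinates. A small round-trip sketch (assuming the two functions above are importable from the project's utils module; the boxes below are made up for illustration):

import mxnet as mx
from mxnet import nd

anchors = nd.array([[0, 0, 10, 10], [5, 5, 25, 25]])   # (x1, y1, x2, y2)
gt      = nd.array([[1, 1, 11, 12], [4, 6, 24, 27]])

deltas  = bbox_transform(anchors, gt)            # (N, 4) regression targets
decoded = bbox_inverse_transform(anchors, deltas)
print(nd.abs(decoded - gt).max().asscalar())     # should be close to 0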
Example #3
Source File: utils.py From ya_mxdet with MIT License | 6 votes |
def bbox_overlaps(anchors: mx.nd.NDArray, gt: mx.nd.NDArray):
    """
    Get IoU of the anchors and ground-truth bounding boxes.
    The shapes of anchors and gt should be (N, 4) and (M, 4),
    so the shape of the return value is (N, M).
    """
    ret = []
    for i in range(gt.shape[0]):
        cgt = gt[i].reshape((1, 4)).broadcast_to(anchors.shape)
        # intersection
        x0 = nd.max(nd.stack(anchors[:, 0], cgt[:, 0]), axis=0)
        y0 = nd.max(nd.stack(anchors[:, 1], cgt[:, 1]), axis=0)
        x1 = nd.min(nd.stack(anchors[:, 2], cgt[:, 2]), axis=0)
        y1 = nd.min(nd.stack(anchors[:, 3], cgt[:, 3]), axis=0)
        inter = _get_area(nd.concatenate([x0.reshape((-1, 1)),
                                          y0.reshape((-1, 1)),
                                          x1.reshape((-1, 1)),
                                          y1.reshape((-1, 1))], axis=1))
        outer = _get_area(anchors) + _get_area(cgt) - inter
        iou = inter / outer
        ret.append(iou.reshape((-1, 1)))
    ret = nd.concatenate(ret, axis=1)
    return ret
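Here nd.concatenate is used twice: once to assemble the intersection corners into an (N, 4) array, and once to join the per-ground-truth IoU columns into the final (N, M) matrix. A usage sketch (assuming bbox_overlaps and its _get_area helper, which is not shown above, are in scope; the boxes are invented):

from mxnet import nd

anchors = nd.array([[0, 0, 10, 10], [0, 0, 20, 20], [10, 10, 30, 30]])  # (N, 4)
gt      = nd.array([[0, 0, 10, 10], [15, 15, 25, 25]])                  # (M, 4)

iou = bbox_overlaps(anchors, gt)   # one column of IoUs per ground-truth box
print(iou.shape)                   # (3, 2)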
Example #4
Source File: data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def sample_train_batch(self):
    """Sample a training batch (data and label)."""
    batch = []
    labels = []
    num_groups = self.batch_size // self.batch_k

    # For CUB200, we use the first 100 classes for training.
    sampled_classes = np.random.choice(100, num_groups, replace=False)
    for i in range(num_groups):
        img_fnames = np.random.choice(self.train_image_files[sampled_classes[i]],
                                      self.batch_k, replace=False)
        batch += [self.get_image(img_fname, is_train=True) for img_fname in img_fnames]
        labels += [sampled_classes[i] for _ in range(self.batch_k)]

    return nd.concatenate(batch, axis=0), labels
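The pattern here is class-balanced sampling: draw batch_k images from each of num_groups classes, then concatenate the per-image arrays along axis 0 into one batch. A self-contained sketch of the same pattern with stand-in data (random tensors instead of the CUB200 loader above; shapes are hypothetical):

import numpy as np
from mxnet import nd

batch_k, num_groups = 4, 2
batch, labels = [], []
for cls in np.random.choice(100, num_groups, replace=False):
    for _ in range(batch_k):
        batch.append(nd.random.uniform(shape=(1, 3, 32, 32)))  # stand-in image
        labels.append(int(cls))

data = nd.concatenate(batch, axis=0)
print(data.shape, len(labels))   # (8, 3, 32, 32) 8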
Example #5
Source File: data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def get_test_batch(self):
    """Sample a testing batch (data and label)."""
    batch_size = self.batch_size
    batch = [self.get_image(self.test_image_files[(self.test_count*batch_size + i)
                                                  % len(self.test_image_files)],
                            is_train=False)
             for i in range(batch_size)]
    labels = [self.test_labels[(self.test_count*batch_size + i)
                               % len(self.test_image_files)]
              for i in range(batch_size)]
    return nd.concatenate(batch, axis=0), labels
Example #6
Source File: train.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def test(ctx):
    """Test a model."""
    val_data.reset()
    outputs = []
    labels = []
    for batch in val_data:
        data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        for x in data:
            outputs.append(net(x)[-1])
        labels += label

    outputs = nd.concatenate(outputs, axis=0)[:val_data.n_test]
    labels = nd.concatenate(labels, axis=0)[:val_data.n_test]
    return evaluate_emb(outputs, labels)
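After the data has been split across devices and processed in chunks, nd.concatenate gathers the per-chunk embeddings back into one array, and the trailing slice drops any padding from the final batch. A minimal sketch of just that gather step (shapes and n_test value are assumed, not from the project):

from mxnet import nd

chunks = [nd.ones((8, 128)), nd.ones((8, 128)), nd.ones((8, 128))]
all_emb = nd.concatenate(chunks, axis=0)[:20]   # keep only the first n_test rows
print(all_emb.shape)                            # (20, 128)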
Example #7
Source File: model.py From YOLO with MIT License | 5 votes |
def predict(yolo: Yolo, x, threshold=0.5):
    """
    Return class names, confidences C, and box locations.
    :param yolo: the Yolo model
    :return: (class_names, C_list, bos_list)
    """
    assert len(x) == 1, "Only one image for now"
    ypre = yolo(x)
    label, preds, location = deal_output(ypre, yolo.s, b=yolo.b, c=yolo.class_num)
    indexs = []
    for i, c in enumerate(preds[0]):
        if c > threshold:
            indexs.append(i)
    class_names = []
    C_list = []
    bos_list = []
    for index in indexs:
        label_index = int(index / 2)
        location_offect = int(index % 2)
        class_index = nd.argmax(label[0][label_index], axis=0)
        C = preds[0][index]
        locat = location[0][label_index][location_offect]
        C_list.append(C.asscalar())

        # translate the class index into a name
        label_name = yolo.class_names
        text = label_name[int(class_index.asscalar())]
        class_names.append(text)

        # translate the location from grid-cell coordinates to corner coordinates
        x, y, w, h = locat
        w, h = nd.power(w, 2), nd.power(h, 2)
        ceil = 1 / 4
        row = int(label_index / 4)
        columns = label_index % 4
        x_center = columns * ceil + x
        y_center = row * ceil + y
        x_min, y_min, x_max, y_max = x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, y_center + 0.5 * h
        box = nd.concatenate([x_min, y_min, x_max, y_max], axis=0) * 256
        bos_list.append(box.asnumpy())
    return class_names, C_list, bos_list
Example #8
Source File: data.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def sample_train_batch(self):
    """Sample a training batch (data and label)."""
    batch = []
    labels = []
    num_groups = self.batch_size // self.batch_k

    # For CUB200, we use the first 100 classes for training.
    sampled_classes = np.random.choice(100, num_groups, replace=False)
    for i in range(num_groups):
        img_fnames = np.random.choice(self.train_image_files[sampled_classes[i]],
                                      self.batch_k, replace=False)
        batch += [self.get_image(img_fname, is_train=True) for img_fname in img_fnames]
        labels += [sampled_classes[i] for _ in range(self.batch_k)]

    return nd.concatenate(batch, axis=0), labels
Example #9
Source File: data.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def get_test_batch(self):
    """Sample a testing batch (data and label)."""
    batch_size = self.batch_size
    batch = [self.get_image(self.test_image_files[(self.test_count*batch_size + i)
                                                  % len(self.test_image_files)],
                            is_train=False)
             for i in range(batch_size)]
    labels = [self.test_labels[(self.test_count*batch_size + i)
                               % len(self.test_image_files)]
              for i in range(batch_size)]
    return nd.concatenate(batch, axis=0), labels
Example #10
Source File: train.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def test(ctx):
    """Test a model."""
    val_data.reset()
    outputs = []
    labels = []
    for batch in val_data:
        data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        for x in data:
            outputs.append(net(x)[-1])
        labels += label

    outputs = nd.concatenate(outputs, axis=0)[:val_data.n_test]
    labels = nd.concatenate(labels, axis=0)[:val_data.n_test]
    return evaluate_emb(outputs, labels)
Example #11
Source File: data.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def sample_train_batch(self):
    """Sample a training batch (data and label)."""
    batch = []
    labels = []
    num_groups = self.batch_size // self.batch_k

    # For CUB200, we use the first 100 classes for training.
    sampled_classes = np.random.choice(100, num_groups, replace=False)
    for i in range(num_groups):
        img_fnames = np.random.choice(self.train_image_files[sampled_classes[i]],
                                      self.batch_k, replace=False)
        batch += [self.get_image(img_fname, is_train=True) for img_fname in img_fnames]
        labels += [sampled_classes[i] for _ in range(self.batch_k)]

    return nd.concatenate(batch, axis=0), labels
Example #12
Source File: data.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def get_test_batch(self):
    """Sample a testing batch (data and label)."""
    batch_size = self.batch_size
    batch = [self.get_image(self.test_image_files[(self.test_count*batch_size + i)
                                                  % len(self.test_image_files)],
                            is_train=False)
             for i in range(batch_size)]
    labels = [self.test_labels[(self.test_count*batch_size + i)
                               % len(self.test_image_files)]
              for i in range(batch_size)]
    return nd.concatenate(batch, axis=0), labels
Example #13
Source File: train.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def test(ctx):
    """Test a model."""
    val_data.reset()
    outputs = []
    labels = []
    for batch in val_data:
        data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        for x in data:
            outputs.append(net(x)[-1])
        labels += label

    outputs = nd.concatenate(outputs, axis=0)[:val_data.n_test]
    labels = nd.concatenate(labels, axis=0)[:val_data.n_test]
    return evaluate_emb(outputs, labels)