Python mxnet.nd.array() Examples
The following are 30 code examples of mxnet.nd.array(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module mxnet.nd, or try the search function.
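Before diving into the project snippets, here is a minimal sketch of the call itself (illustrative only, not taken from any of the projects below): nd.array() copies a Python list or NumPy array into an mxnet NDArray, with optional dtype and ctx arguments, and asnumpy() converts back for interop with NumPy-based code.

import mxnet as mx
from mxnet import nd
import numpy as np

# From a nested Python list; dtype defaults to float32 on the default (CPU) context.
a = nd.array([[1, 2, 3], [4, 5, 6]])
print(a.shape)                                   # (2, 3)

# From a NumPy array, with explicit dtype and context.
b = nd.array(np.arange(6).reshape(2, 3), dtype='float32', ctx=mx.cpu())

# Back to NumPy for interop with cv2, scipy, etc.
c = b.asnumpy()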
Example #1
Source File: pose.py From gluon-cv with Apache License 2.0 | 6 votes |
def heatmap_to_coord(heatmaps, bbox_list):
    heatmap_height = heatmaps.shape[2]
    heatmap_width = heatmaps.shape[3]
    coords, maxvals = get_max_pred(heatmaps)
    preds = nd.zeros_like(coords)

    for i, bbox in enumerate(bbox_list):
        x0 = bbox[0]
        y0 = bbox[1]
        x1 = bbox[2]
        y1 = bbox[3]
        w = (x1 - x0) / 2
        h = (y1 - y0) / 2
        center = np.array([x0 + w, y0 + h])
        scale = np.array([w, h])

        w_ratio = coords[i][:, 0] / heatmap_width
        h_ratio = coords[i][:, 1] / heatmap_height
        preds[i][:, 0] = scale[0] * 2 * w_ratio + center[0] - scale[0]
        preds[i][:, 1] = scale[1] * 2 * h_ratio + center[1] - scale[1]
    return preds, maxvals
Example #2
Source File: E2FAR.py From mxnet-E2FAR with Apache License 2.0 | 6 votes |
def __getitem__(self, idx):
    img_path = self.data_frame.iloc[idx, 0]
    img = cv2.imread(img_path, 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    x, y, w, h = self.data_frame.iloc[idx, 1:5]
    l, t, ww, hh = enlarge_bbox(x, y, w, h, self.enlarge_factor)
    r, b = l + ww, t + hh

    img = img[t: b, l: r, :]
    img = cv2.resize(img, (self.img_size, self.img_size))
    img = img.astype(np.float32) - 127.5
    img = nd.transpose(nd.array(img), (2, 0, 1))

    label_path = img_path.replace('.jpg', '.mat')
    label = sio.loadmat(label_path)
    params_shape = label['Shape_Para'].astype(np.float32).ravel()
    params_exp = label['Exp_Para'].astype(np.float32).ravel()

    return img, params_shape, params_exp
Example #3
Source File: dataloader.py From panoptic-fpn-gluon with Apache License 2.0 | 6 votes |
def default_mp_pad_batchify_fn(data):
    """Use shared memory for collating data into batch, labels are padded to same shape"""
    if isinstance(data[0], nd.NDArray):
        out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
                       ctx=context.Context('cpu_shared', 0))
        return nd.stack(*data, out=out)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_mp_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        batch_size = len(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((batch_size, pad, data[0].shape[-1]), -1,
                      dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype,
                        ctx=context.Context('cpu_shared', 0))
Example #4
Source File: TextEXAM_multi-class.py From AAAI_2019_EXAM with GNU General Public License v2.0 | 6 votes |
def hybrid_forward(self, F, seq):
    region_radius = region_size // 2  # integer radius; plain '/' would give a float under Python 3
    aligned_seq = list(map(
        lambda i: F.slice(seq, begin=[None, i - region_radius],
                          end=[None, i - region_radius + region_size]).asnumpy(),
        range(region_radius, seq.shape[1] - region_radius)))
    aligned_seq = nd.array(aligned_seq)
    region_aligned_seq = aligned_seq.transpose((1, 0, 2))
    region_aligned_emb = self.embedding_region(region_aligned_seq).reshape(
        (batch_size, -1, region_size, emb_size))

    trimed_seq = seq[:, region_radius: seq.shape[1] - region_radius]
    context_unit = self.embedding(trimed_seq).reshape(
        (batch_size, -1, region_size, emb_size))

    projected_emb = region_aligned_emb * context_unit
    feature = self.max_pool(
        projected_emb.transpose((0, 1, 3, 2)).reshape((batch_size, -1, region_size))
    ).reshape((batch_size, -1, emb_size))

    trimed_seq = seq[:, region_radius: seq.shape[1] - region_radius]
    mask = F.greater(trimed_seq, 0).reshape((batch_size, -1, 1))
    feature = mask * feature
    feature = feature.reshape((-1, emb_size))
    feature = self.dense(feature).reshape(
        (batch_size, -1, n_classes)).transpose((0, 2, 1)).reshape((batch_size * n_classes, -1))

    # accumulation
    feature = F.expand_dims(feature, axis=1)
    residual = F.sum(feature, axis=2).reshape((batch_size, n_classes))
    res = self.dense2(self.dense1(feature)).reshape(
        batch_size * n_classes, 1, -1).reshape((batch_size, n_classes))
    return res + residual
Example #5
Source File: pose.py From panoptic-fpn-gluon with Apache License 2.0 | 6 votes |
def crop_resize_normalize(img, bbox_list, output_size):
    output_list = []
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])
    ])
    for bbox in bbox_list:
        x0 = max(int(bbox[0]), 0)
        y0 = max(int(bbox[1]), 0)
        x1 = min(int(bbox[2]), int(img.shape[1]))
        y1 = min(int(bbox[3]), int(img.shape[0]))
        w = x1 - x0
        h = y1 - y0
        res_img = image.fixed_crop(nd.array(img), x0, y0, w, h,
                                   (output_size[1], output_size[0]))
        res_img = transform_test(res_img)
        output_list.append(res_img)
    output_array = nd.stack(*output_list)
    return output_array
Example #6
Source File: pose.py From gluon-cv with Apache License 2.0 | 6 votes |
def cv_rotate(img, rot, resW, resH):
    cv2 = try_import_cv2()
    center = np.array((resW - 1, resH - 1)) / 2
    rot_rad = np.pi * rot / 180

    src_dir = get_dir([0, (resH - 1) * -0.5], rot_rad)
    dst_dir = np.array([0, (resH - 1) * -0.5], np.float32)

    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)

    src[0, :] = center
    src[1, :] = center + src_dir
    dst[0, :] = [(resW - 1) * 0.5, (resH - 1) * 0.5]
    dst[1, :] = np.array([(resW - 1) * 0.5, (resH - 1) * 0.5]) + dst_dir

    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])

    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    dst_img = cv2.warpAffine(img, trans, (resW, resH), flags=cv2.INTER_LINEAR)
    return dst_img
Example #7
Source File: pose.py From gluon-cv with Apache License 2.0 | 6 votes |
def crop_resize_normalize(img, bbox_list, output_size,
                          mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    output_list = []
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    for bbox in bbox_list:
        x0 = max(int(bbox[0]), 0)
        y0 = max(int(bbox[1]), 0)
        x1 = min(int(bbox[2]), int(img.shape[1]))
        y1 = min(int(bbox[3]), int(img.shape[0]))
        w = x1 - x0
        h = y1 - y0
        res_img = image.fixed_crop(nd.array(img), x0, y0, w, h,
                                   (output_size[1], output_size[0]))
        res_img = transform_test(res_img)
        output_list.append(res_img)
    output_array = nd.stack(*output_list)
    return output_array
Example #8
Source File: base_layers.py From STGCN with GNU General Public License v3.0 | 6 votes |
def forward(self, x):
    '''
    Parameters
    ----------
    x: nd.array, shape is (batch_size, c_in, time_step, num_of_vertices)

    Returns
    ----------
    shape is (batch_size, c_out, time_step, num_of_vertices)
    '''
    batch_size, c_in, T, num_of_vertices = x.shape

    x_input = self.align(x)

    x_tmp = x.transpose((0, 2, 3, 1)) \
             .reshape((-1, num_of_vertices, c_in))

    x_gconv = self.gconv(x_tmp, self.cheb_polys.data())

    x_gc = x_gconv.reshape((-1, T, num_of_vertices, self.c_out)) \
                  .transpose((0, 3, 1, 2))

    x_gc = x_gc[:, : self.c_out, :, :]
    return nd.relu(x_gc + x_input)
Example #9
Source File: base_layers.py From STGCN with GNU General Public License v3.0 | 6 votes |
def forward(self, x):
    '''
    Parameters
    ----------
    x: nd.array, shape is (batch_size, c_in, time_step, num_of_vertices)

    Returns
    ----------
    shape is (batch_size, c_out, time_step - Kt + 1, num_of_vertices)
    '''
    x_input = self.align(x)[:, :, self.Kt - 1:, :]

    x_conv = self.conv(x)

    if self.activation == 'GLU':
        x_conv = self.conv(x)
        x_conv1, x_conv2 = nd.split(x_conv, axis=1, num_outputs=2)
        return (x_conv1 + x_input) * nd.sigmoid(x_conv2)
    if self.activation == 'relu':
        return nd.relu(x_conv + x_input)
    return x_conv
Example #10
Source File: dataloader.py From gluon-cv with Apache License 2.0 | 6 votes |
def default_mp_pad_batchify_fn(data):
    """Use shared memory for collating data into batch, labels are padded to same shape"""
    if isinstance(data[0], nd.NDArray):
        out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
                       ctx=context.Context('cpu_shared', 0))
        return nd.stack(*data, out=out)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_mp_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        batch_size = len(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((batch_size, pad, data[0].shape[-1]), -1,
                      dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype,
                        ctx=context.Context('cpu_shared', 0))
Example #11
Source File: classification.py From gluon-cv with Apache License 2.0 | 6 votes |
def _sample_val_indices(self, num_frames):
    if num_frames > self.num_segments + self.skip_length - 1:
        tick = (num_frames - self.skip_length + 1) / \
            float(self.num_segments)
        offsets = np.array([int(tick / 2.0 + tick * x)
                            for x in range(self.num_segments)])
    else:
        offsets = np.zeros((self.num_segments,))

    if self.temporal_jitter:
        skip_offsets = np.random.randint(
            self.new_step, size=self.skip_length // self.new_step)
    else:
        skip_offsets = np.zeros(
            self.skip_length // self.new_step, dtype=int)
    return offsets + 1, skip_offsets
Example #12
Source File: classification.py From gluon-cv with Apache License 2.0 | 6 votes |
def _sample_test_indices(self, num_frames):
    if num_frames > self.skip_length - 1:
        tick = (num_frames - self.skip_length + 1) / \
            float(self.num_segments)
        offsets = np.array([int(tick / 2.0 + tick * x)
                            for x in range(self.num_segments)])
    else:
        offsets = np.zeros((self.num_segments,))

    if self.temporal_jitter:
        skip_offsets = np.random.randint(
            self.new_step, size=self.skip_length // self.new_step)
    else:
        skip_offsets = np.zeros(
            self.skip_length // self.new_step, dtype=int)
    return offsets + 1, skip_offsets
Example #13
Source File: utils_final.py From InsightFace_TF with MIT License | 6 votes |
def __iter__(self):
    data = self.dataset[:]
    X = data[0]
    y = nd.array(data[1])
    n = X.shape[0]
    if self.shuffle:
        idx = np.arange(n)
        np.random.shuffle(idx)
        X = nd.array(X.asnumpy()[idx])
        y = nd.array(y.asnumpy()[idx])

    for i in range(n // self.batch_size):
        if self.transform is not None:
            yield self.transform(X[i * self.batch_size:(i + 1) * self.batch_size],
                                 y[i * self.batch_size:(i + 1) * self.batch_size])
        else:
            yield (X[i * self.batch_size:(i + 1) * self.batch_size],
                   y[i * self.batch_size:(i + 1) * self.batch_size])
Example #14
Source File: utils_final.py From InsightFace_TF with MIT License | 6 votes |
def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None):
    """Sample mini-batches in a random order from sequential data."""
    # Subtract 1 because label indices are corresponding input indices + 1.
    num_examples = (len(corpus_indices) - 1) // num_steps
    epoch_size = num_examples // batch_size
    # Randomize samples.
    example_indices = list(range(num_examples))
    random.shuffle(example_indices)

    def _data(pos):
        return corpus_indices[pos: pos + num_steps]

    for i in range(epoch_size):
        # Read batch_size random samples each time.
        i = i * batch_size
        batch_indices = example_indices[i: i + batch_size]
        data = nd.array(
            [_data(j * num_steps) for j in batch_indices], ctx=ctx)
        label = nd.array(
            [_data(j * num_steps + 1) for j in batch_indices], ctx=ctx)
        yield data, label
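A quick usage sketch for the iterator above (the toy corpus and batch sizes are illustrative, not from the InsightFace_TF sources): each yielded batch is a pair of (batch_size, num_steps) NDArrays, with the labels shifted one step ahead of the inputs.

corpus_indices = list(range(30))                  # toy sequence 0, 1, ..., 29
for data, label in data_iter_random(corpus_indices, batch_size=2, num_steps=5):
    print(data.shape, label.shape)                # (2, 5) (2, 5)
    break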
Example #15
Source File: datasets_su.py From d-SNE with Apache License 2.0 | 6 votes |
def __init__(self, arr1=None, arr2=None, tform1=None, tform2=None):
    """
    Initialization of dataset
    :param arr1: source array
    :param arr2: target array
    :param tform1: transformers for source array
    :param tform2: transformers for target array
    """
    assert arr1 is not None or arr2 is not None, "One of src array or tgt array should not be None"

    self.arr1 = arr1
    self.use1 = False if arr1 is None else True
    self.arr2 = arr2
    self.use2 = False if arr2 is None else True

    self.tform1 = tform1
    self.tform2 = tform2

    self._gen_cls_idx_dicts()
Example #16
Source File: utils_final.py From InsightFace_TF with MIT License | 6 votes |
def predict_rnn(rnn, prefix, num_chars, params, hidden_dim, ctx, idx_to_char,
                char_to_idx, get_inputs, is_lstm=False):
    """Predict the next chars given the prefix."""
    prefix = prefix.lower()
    state_h = nd.zeros(shape=(1, hidden_dim), ctx=ctx)
    if is_lstm:
        state_c = nd.zeros(shape=(1, hidden_dim), ctx=ctx)
    output = [char_to_idx[prefix[0]]]
    for i in range(num_chars + len(prefix)):
        X = nd.array([output[-1]], ctx=ctx)
        if is_lstm:
            Y, state_h, state_c = rnn(get_inputs(X), state_h, state_c, *params)
        else:
            Y, state_h = rnn(get_inputs(X), state_h, *params)
        if i < len(prefix) - 1:
            next_input = char_to_idx[prefix[i + 1]]
        else:
            next_input = int(Y[0].argmax(axis=1).asscalar())
        output.append(next_input)
    return ''.join([idx_to_char[i] for i in output])
Example #17
Source File: datasets_su.py From d-SNE with Apache License 2.0 | 6 votes |
def _create_pairs(self):
    """
    Create pairs for array
    :return:
    """
    pos_pairs, neg_pairs = [], []
    for ids, ys in enumerate(self.arrs[1]):
        for idt, yt in enumerate(self.arrt[1]):
            if ys == yt:
                pos_pairs.append([ids, ys, idt, yt, 1])
            else:
                neg_pairs.append([ids, ys, idt, yt, 0])

    if self.ratio > 0:
        random.shuffle(neg_pairs)
        pairs = pos_pairs + neg_pairs[: self.ratio * len(pos_pairs)]
    else:
        pairs = pos_pairs + neg_pairs

    random.shuffle(pairs)

    return pairs
Example #18
Source File: image.py From MobileFace with MIT License | 5 votes |
def random_pca_lighting(src, alphastd, eigval=None, eigvec=None):
    """Apply random pca lighting noise to input image.

    Parameters
    ----------
    img : mxnet.nd.NDArray
        Input image with HWC format.
    alphastd : float
        Noise level [0, 1) for image with range [0, 255].
    eigval : list of floats.
        Eigen values, defaults to [55.46, 4.794, 1.148].
    eigvec : nested lists of floats
        Eigen vectors with shape (3, 3), defaults to
        [[-0.5675, 0.7192, 0.4009],
         [-0.5808, -0.0045, -0.8140],
         [-0.5836, -0.6948, 0.4203]].

    Returns
    -------
    mxnet.nd.NDArray
        Augmented image.
    """
    if alphastd <= 0:
        return src
    if eigval is None:
        eigval = np.array([55.46, 4.794, 1.148])
    if eigvec is None:
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
    alpha = np.random.normal(0, alphastd, size=(3,))
    rgb = np.dot(eigvec * alpha, eigval)
    src += nd.array(rgb, ctx=src.context)
    return src
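A usage sketch (illustrative values; assumes the function above is in scope): the PCA noise is a single RGB offset added to every pixel, so the output keeps the input's HWC shape.

import numpy as np
from mxnet import nd

# A stand-in HWC image; real code would pass a decoded photo as a float NDArray.
img = nd.array(np.random.uniform(0, 255, size=(64, 64, 3)))
aug = random_pca_lighting(img, alphastd=0.1)
print(aug.shape)                                  # (64, 64, 3)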
Example #19
Source File: inception_score.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def resize(images, w, h):
    nums = images.shape[0]
    res = nd.random.uniform(0, 255, (nums, 3, w, h))
    for i in range(nums):
        img = images[i, :, :, :]
        img = mx.nd.transpose(img, (1, 2, 0))
        # Replace 'mx.image.imresize()' with 'cv2.resize()' because:
        # Operator _cvimresize is not implemented for GPU.
        # img = mx.image.imresize(img, w, h)
        img = cv2.resize(img.asnumpy(), (299, 299))
        img = nd.array(img)
        img = mx.nd.transpose(img, (2, 0, 1))
        res[i, :, :, :] = img
    return res
Example #20
Source File: block.py From panoptic-fpn-gluon with Apache License 2.0 | 5 votes |
def forward(self, x):
    if self.pad:
        x_pad = np.pad(x.asnumpy(), self.pad,
                       mode='constant', constant_values=0)

    return image.random_crop(nd.array(x_pad), *self._args)[0]
Example #21
Source File: base_layers.py From STGCN with GNU General Public License v3.0 | 5 votes |
def forward(self, x, cheb_polys):
    '''
    Parameters
    ----------
    x: nd.array, shape is (batch_size * time_step, num_of_vertices, c_in)

    cheb_polys: nd.array, shape is (num_of_vertices, order_of_cheb * num_of_vertices)

    Returns
    ----------
    shape is (batch_size * time_step, num_of_vertices, c_out)
    '''
    _, num_of_vertices, c_in = x.shape

    # (batch_size * c_in, num_of_vertices)
    x_tmp = x.transpose((0, 2, 1)).reshape((-1, num_of_vertices))

    # (batch_size, c_in, order_of_cheb, num_of_vertices)
    x_mul = nd.dot(x_tmp, cheb_polys).reshape((-1, c_in, self.order_of_cheb, num_of_vertices))

    # (batch_size, num_of_vertices, c_in * order_of_cheb)
    x_ker = x_mul.transpose((0, 3, 1, 2)) \
                 .reshape((-1, num_of_vertices, c_in * self.order_of_cheb))

    return self.theta(x_ker)
Example #22
Source File: base_dataset.py From crnn.gluon with Apache License 2.0 | 5 votes |
def __getitem__(self, index):
    img, label = self.get_sample(index)
    img = self.apply_pre_processes(img)
    img = nd.array(img, dtype=img.dtype)
    label = self.label_enocder(label)
    return img, label
Example #23
Source File: datasets_ss.py From d-SNE with Apache License 2.0 | 5 votes |
def __getitem__(self, id_s):
    im_s, l_s = self.arr_s[0][id_s], self.arr_s[1][id_s]
    im_s = nd.array(im_s, dtype='float32')

    id_l, id_u = self.ostg(l_s)

    im_l, l_l = self.arr_l[0][id_l], self.arr_l[1][id_l]
    im_l = nd.array(im_l, dtype='float32')

    im_u, l_u = self.arr_u[0][id_u], self.arr_u[1][id_u]
    im_u = nd.array(im_u, dtype='float32')

    if self.tform_s is not None:
        im_s = self.tform_s(im_s)

    if self.tform_l is not None:
        im_l = self.tform_l(im_l)

    if self.tform_u is not None:
        im_u1 = self.tform_l(im_u)
    else:
        im_u1 = im_u

    if self.tform_u is not None:
        im_u2 = self.tform_u(im_u)
    else:
        im_u2 = im_u

    return im_s, l_s, im_l, l_l, im_u1, im_u2
Example #24
Source File: datasets_ss.py From d-SNE with Apache License 2.0 | 5 votes |
def __getitem__(self, idx):
    """
    Override the function getitem
    :param idx: index
    :return:
    """
    if self.use1 and not self.use2:
        im, l = self.arr1[0][idx], self.arr1[1][idx]
        im = nd.array(im, dtype='float32')
        if self.tform1 is not None:
            im = self.tform1(im)
        return im, l
    elif self.use2 and not self.use1:
        im, l = self.arr2[0][idx], self.arr2[1][idx]
        im = nd.array(im, dtype='float32')
        if self.tform2 is not None:
            im = self.tform2(im)
        return im, l
    else:
        idx1, idx2 = idx
        im1, l1 = self.arr1[0][idx1], self.arr1[1][idx1]
        im2, l2 = self.arr2[0][idx2], self.arr2[1][idx2]
        im1 = nd.array(im1, dtype='float32')
        im2 = nd.array(im2, dtype='float32')

        if self.tform1 is not None:
            im1 = self.tform1(im1)

        if self.tform2 is not None:
            im2 = self.tform2(im2)

        lc = 1 if l1 == l2 else 0

        return im1, l1, im2, l2, lc
Example #25
Source File: datasets_su.py From d-SNE with Apache License 2.0 | 5 votes |
def __getitem__(self, idx):
    """
    Override the function getitem
    :param idx: index
    :return:
    """
    if self.use1 and not self.use2:
        im, l = self.arr1[0][idx], self.arr1[1][idx]
        im = nd.array(im, dtype='float32')
        if self.tform1 is not None:
            im = self.tform1(im)
        return im, l
    elif self.use2 and not self.use1:
        im, l = self.arr2[0][idx], self.arr2[1][idx]
        im = nd.array(im, dtype='float32')
        if self.tform2 is not None:
            im = self.tform2(im)
        return im, l
    else:
        idx1, idx2 = idx
        im1, l1 = self.arr1[0][idx1], self.arr1[1][idx1]
        im2, l2 = self.arr2[0][idx2], self.arr2[1][idx2]
        im1 = nd.array(im1, dtype='float32')
        im2 = nd.array(im2, dtype='float32')

        if self.tform1 is not None:
            im1 = self.tform1(im1)

        if self.tform2 is not None:
            im2 = self.tform2(im2)

        lc = 1 if l1 == l2 else 0

        return im1, l1, im2, l2, lc
Example #26
Source File: util.py From crnn.gluon with Apache License 2.0 | 5 votes |
def get_ctx(gpus):
    """Return a list of the requested GPU contexts if available; otherwise return [mx.cpu()]."""
    import mxnet as mx
    from mxnet import nd
    try:
        ctx = []
        for gpu in gpus:
            ctx_i = mx.gpu(gpu)
            # Allocating a tiny array on the device verifies the GPU is usable.
            _ = nd.array([0], ctx=ctx_i)
            ctx.append(ctx_i)
    except:
        ctx = [mx.cpu()]
    return ctx
Example #27
Source File: resize.py From crnn.gluon with Apache License 2.0 | 5 votes |
def __call__(self, img: np.ndarray):
    """
    Resize the image by height first; if the resulting width is smaller than the target
    width and padding is enabled, pad with black pixels, otherwise force-resize to the
    target width.
    :param img: input image as a numpy array
    :return:
    """
    data_augment = False
    if self.phase == 'train' and np.random.rand() > 0.5:
        data_augment = True
    if data_augment:
        img_h = 40
        img_w = 340
    else:
        img_h = self.img_h
        img_w = self.img_w
    h, w = img.shape[:2]
    ratio_h = float(img_h) / h
    new_w = int(w * ratio_h)
    if new_w < img_w and self.pad:
        img = cv2.resize(img, (new_w, img_h))
        if len(img.shape) == 2:
            img = np.expand_dims(img, 2)  # add a trailing channel axis for grayscale images
        step = np.zeros((img_h, img_w - new_w, img.shape[-1]), dtype=img.dtype)
        img = np.column_stack((img, step))
    else:
        img = cv2.resize(img, (img_w, img_h))
    if data_augment:
        img = nd.array(img)
        img, _ = image.random_crop(img, (self.img_w, self.img_h))
        img = img.asnumpy()
    return img
Example #28
Source File: datasets_su.py From d-SNE with Apache License 2.0 | 5 votes |
def __getitem__(self, idx):
    """
    Override the function getitem
    :param idx: index
    :return:
    """
    if self.use_src and not self.use_tgt:
        im, l = self.arrs[0][idx], self.arrs[1][idx]
        im = nd.array(im, dtype='float32')
        if self.tforms is not None:
            im = self.tforms(im)
        return im, l
    elif self.use_tgt and not self.use_src:
        im, l = self.arrt[0][idx], self.arrt[1][idx]
        im = nd.array(im, dtype='float32')
        if self.tformt is not None:
            im = self.tformt(im)
        return im, l
    else:
        [ids, ys, idt, yt, lc] = self.pairs[idx]

        ims, ls = self.arrs[0][ids], self.arrs[1][ids]
        imt, lt = self.arrt[0][idt], self.arrt[1][idt]

        ims = nd.array(ims, dtype='float32')
        imt = nd.array(imt, dtype='float32')

        assert ys == ls
        assert yt == lt

        if self.tforms is not None:
            ims = self.tforms(ims)

        if self.tformt is not None:
            imt = self.tformt(imt)

        return ims, ls, imt, lt, lc
Example #29
Source File: pose.py From panoptic-fpn-gluon with Apache License 2.0 | 5 votes |
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=np.array([0, 0], dtype=np.float32),
                         inv=0):
    cv2 = try_import_cv2()
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        scale = np.array([scale, scale])

    scale_tmp = scale * 200.0
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]

    rot_rad = np.pi * rot / 180
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)

    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir

    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])

    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    return trans
Example #30
Source File: predict.py From crnn.gluon with Apache License 2.0 | 5 votes |
def try_gpu(gpu):
    """If the requested GPU is available, return mx.gpu(gpu); else return mx.cpu()."""
    try:
        ctx = mx.gpu(gpu)
        # Allocating a tiny array on the device verifies the GPU is usable.
        _ = nd.array([0], ctx=ctx)
    except:
        ctx = mx.cpu()
    return ctx
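A short usage sketch (illustrative, assuming the try_gpu() helper above is in scope): probe for a device once, then place new NDArrays on whatever context was returned.

import mxnet as mx
from mxnet import nd

ctx = try_gpu(0)                                  # falls back to mx.cpu() if GPU 0 is unavailable
x = nd.array([[1.0, 2.0], [3.0, 4.0]], ctx=ctx)
print(x.context, x.shape)                         # e.g. cpu(0) (2, 2)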