Python mxnet.ndarray.sum() Examples
The following are 30 code examples of mxnet.ndarray.sum(), drawn from open-source projects; each example is annotated with its original project, source file, and license. You may also want to check out all available functions and classes of the mxnet.ndarray module.
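Before diving into the examples, a minimal sketch of what mxnet.ndarray.sum() computes (the values in the comments follow directly from the 2x3 input):

import mxnet.ndarray as nd

x = nd.array([[1, 2, 3],
              [4, 5, 6]])
nd.sum(x)                          # sum over all elements -> [21.]
nd.sum(x, axis=0)                  # column sums -> [5. 7. 9.]
nd.sum(x, axis=1, keepdims=True)   # row sums, shape (2, 1) -> [[6.] [15.]]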
Example #1
Source File: tensor.py From dgl with Apache License 2.0
def unsorted_1d_segment_sum(input, seg_id, n_segs, dim):
    # TODO: support other dimensions
    assert dim == 0, 'MXNet only supports segment sum on first dimension'

    # Use SPMV to simulate segment sum
    ctx = input.context
    n_inputs = input.shape[0]
    input_shape_suffix = input.shape[1:]
    input = input.reshape(n_inputs, -1)
    n_range = nd.arange(n_inputs, dtype='int64').as_in_context(input.context)
    w_nnz = nd.ones(n_inputs).as_in_context(input.context)
    w_nid = nd.stack(seg_id, n_range, axis=0)
    w = nd.sparse.csr_matrix((w_nnz, (seg_id, n_range)), (n_segs, n_inputs))
    w = w.as_in_context(input.context)
    y = nd.dot(w, input)
    y = nd.reshape(y, (n_segs,) + input_shape_suffix)
    return y
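A hypothetical call, for illustration only (the tensors below are made up; summing rows 0 and 1 into segment 0 and row 2 into segment 1 gives the commented result):

x = nd.array([[1., 1.], [2., 2.], [3., 3.]])
seg_id = nd.array([0, 0, 1], dtype='int64')
unsorted_1d_segment_sum(x, seg_id, n_segs=2, dim=0)
# -> [[3. 3.]
#     [3. 3.]]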
Example #2
Source File: score_fun.py From dgl with Apache License 2.0
def edge_func(self, edges):
    head = edges.src['emb']
    tail = edges.dst['emb'].expand_dims(2)
    rel = edges.data['emb']
    rel = rel.reshape(-1, self.relation_dim, self.entity_dim)
    score = head * mx.nd.batch_dot(rel, tail).squeeze()
    # TODO: check if use self.gamma
    return {'score': mx.nd.sum(score, -1)}
    # return {'score': self.gamma - th.norm(score, p=1, dim=-1)}
Example #3
Source File: score_fun.py From dgl with Apache License 2.0
def edge_func(self, edges):
    real_head, img_head = nd.split(edges.src['emb'], num_outputs=2, axis=-1)
    real_tail, img_tail = nd.split(edges.dst['emb'], num_outputs=2, axis=-1)

    phase_rel = edges.data['emb'] / (self.emb_init / np.pi)
    re_rel, im_rel = nd.cos(phase_rel), nd.sin(phase_rel)
    real_score = real_head * re_rel - img_head * im_rel
    img_score = real_head * im_rel + img_head * re_rel
    real_score = real_score - real_tail
    img_score = img_score - img_tail
    # sqrt((x*x).sum() + eps)
    score = mx.nd.sqrt(real_score * real_score + img_score * img_score + self.eps).sum(-1)
    return {'score': self.gamma - score}
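For reference, this appears to be the RotatE scoring function: each relation embedding is read as a phase theta, the head embedding is rotated by e^(i*theta) in the complex plane, and the score is gamma minus the summed distance to the tail, with sqrt(x^2 + eps) standing in for |x| to keep the gradient stable near zero.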
Example #4
Source File: custom_layers.py From d-SNE with Apache License 2.0
def hybrid_forward(self, F, preds, label):
    label = label.astype('float32')
    dist = F.sqrt(F.sum(F.square(preds), axis=1))
    # element-wise hinge max(m - dist, 0): F.maximum clamps per element,
    # whereas F.max(x, 0) would reduce along axis 0
    return label * F.square(dist) + (1 - label) * F.square(F.maximum(self._m - dist, 0))
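For reference, this is the classic contrastive loss, L = y * d^2 + (1 - y) * max(m - d, 0)^2, where d is the L2 norm of preds along axis 1 and m is the margin self._m: same-class pairs (y = 1) are pulled together, different-class pairs (y = 0) are pushed at least m apart.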
Example #5
Source File: custom_layers.py From d-SNE with Apache License 2.0
def hybrid_forward(self, F, fts, ys, ftt, yt):
    """
    Semantic Alignment Loss
    :param F: Function
    :param yt: label for the target domain [N]
    :param ftt: features for the target domain [N, K]
    :param ys: label for the source domain [M]
    :param fts: features for the source domain [M, K]
    :return:
    """
    if self._fn:
        # Normalize ft
        fts = F.L2Normalization(fts, mode='instance')
        ftt = F.L2Normalization(ftt, mode='instance')

    fts_rpt = F.broadcast_to(fts.expand_dims(axis=0), shape=(self._bs_tgt, self._bs_src, self._embed_size))
    ftt_rpt = F.broadcast_to(ftt.expand_dims(axis=1), shape=(self._bs_tgt, self._bs_src, self._embed_size))

    dists = F.sum(F.square(ftt_rpt - fts_rpt), axis=2)

    yt_rpt = F.broadcast_to(yt.expand_dims(axis=1), shape=(self._bs_tgt, self._bs_src)).astype('int32')
    ys_rpt = F.broadcast_to(ys.expand_dims(axis=0), shape=(self._bs_tgt, self._bs_src)).astype('int32')

    y_same = F.equal(yt_rpt, ys_rpt).astype('float32')
    y_diff = F.not_equal(yt_rpt, ys_rpt).astype('float32')

    intra_cls_dists = dists * y_same
    inter_cls_dists = dists * y_diff

    max_dists = F.max(dists, axis=1, keepdims=True)
    max_dists = F.broadcast_to(max_dists, shape=(self._bs_tgt, self._bs_src))
    revised_inter_cls_dists = F.where(y_same, max_dists, inter_cls_dists)

    max_intra_cls_dist = F.max(intra_cls_dists, axis=1)
    min_inter_cls_dist = F.min(revised_inter_cls_dists, axis=1)

    loss = F.relu(max_intra_cls_dist - min_inter_cls_dist + self._margin)

    return loss
Example #6
Source File: kaggle_k_fold_cross_validation.py From training_results_v0.6 with Apache License 2.0
def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
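Assuming square_loss is the usual 0.5 * (pred - label)^2, the leading factor of 2 cancels the 1/2, so this returns the root-mean-squared error of the logarithms (RMSLE); predictions are clipped to at least 1 so the logarithm stays well defined. An equivalent NumPy sketch under that assumption (clipped_preds_np and y_train_np stand for NumPy copies of the arrays above):

rmse_log = np.sqrt(np.mean((np.log(clipped_preds_np) - np.log(y_train_np)) ** 2))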
Example #7
Source File: image_iter.py From 1.FaceRecognition with MIT License
def contrast_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
    src *= alpha
    src += gray
    return src
Example #8
Source File: image_iter.py From 1.FaceRecognition with MIT License
def saturation_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = nd.sum(gray, axis=2, keepdims=True)
    gray *= (1.0 - alpha)
    src *= alpha
    src += gray
    return src
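Both augmenters compute luminance with the BT.601 weights (0.299, 0.587, 0.114) and blend out = alpha * src + (1 - alpha) * gray: contrast_aug blends against the image's mean gray level (the 3.0 / gray.size factor turns the sum of per-channel products into a per-pixel mean), while saturation_aug blends each pixel against its own gray value. So alpha > 1 boosts contrast or saturation, and alpha < 1 mutes it.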
Example #9
Source File: data.py From 1.FaceRecognition with MIT License
def contrast_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
    src *= alpha
    src += gray
    return src
Example #10
Source File: data.py From 1.FaceRecognition with MIT License
def saturation_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = nd.sum(gray, axis=2, keepdims=True)
    gray *= (1.0 - alpha)
    src *= alpha
    src += gray
    return src
Example #11
Source File: image_iter.py From 1.FaceRecognition with MIT License
def contrast_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
    src *= alpha
    src += gray
    return src
Example #12
Source File: obs.py From xfer with Apache License 2.0
def log_pdf(self, y):
    return nd.sum(nd.nansum(y * nd.log_softmax(self.unnormalized_mean), axis=0, exclude=True))
Example #13
Source File: prior.py From xfer with Apache License 2.0
def log_pdf(self, obs):
    self.check_observation_shapes(obs)
    raw_params_ext = self._replicate_shared_parameters()
    return sum([nd.sum(log_gaussian(obs[ii], raw_params_ext["mean"][ii], raw_params_ext["sigma"][ii]))
                for ii in range(len(self.shapes))])
Example #14
Source File: var.py From xfer with Apache License 2.0
def log_pdf(self, obs):
    self.check_observation_shapes(obs)
    raw_params_ext = self._replicate_shared_parameters()
    sigmas = transform_rhos(raw_params_ext[RHO])
    return sum([nd.sum(log_gaussian(obs[ii], raw_params_ext[MEAN][ii], sigmas[ii]))
                for ii in range(len(self.shapes))])
Example #15
Source File: var.py From xfer with Apache License 2.0
def KL(self, other_prob):
    if not self.is_conjugate(other_prob):
        raise ValueError("KL cannot be computed in closed form.")
    if (not len(self.shapes) == len(other_prob.shapes)) or \
            (not np.all(np.array([s == o for s, o in zip(self.shapes, other_prob.shapes)]))):
        raise ValueError("KL cannot be computed: The 2 distributions have different support")

    raw_params_ext_var_posterior = self._replicate_shared_parameters()
    sigmas_var_posterior = transform_rhos(raw_params_ext_var_posterior[RHO])
    raw_params_ext_prior = other_prob._replicate_shared_parameters()

    out = 0.0
    for ii in range(len(self.shapes)):
        means_p = raw_params_ext_prior[MEAN][ii]
        var_p = raw_params_ext_prior["sigma"][ii] ** 2
        means_q = raw_params_ext_var_posterior[MEAN][ii]
        var_q = sigmas_var_posterior[ii] ** 2
        inc_means = (means_q - means_p)
        prec_p = 1.0 / var_p
        temp = 0.5 * (var_q * prec_p + ((inc_means ** 2) * prec_p) - 1.0 + nd.log(var_p) - nd.log(var_q))
        if temp.shape == (1, 1):
            # If parameters are shared, multiply by the number of variables
            temp = temp * (self.shapes[ii][0] * self.shapes[ii][1])
        out = out + nd.sum(temp)
    return out
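For reference, the per-parameter term temp is the closed-form KL divergence between univariate Gaussians, KL(N(mu_q, sigma_q^2) || N(mu_p, sigma_p^2)) = 0.5 * (sigma_q^2/sigma_p^2 + (mu_q - mu_p)^2/sigma_p^2 - 1 + log sigma_p^2 - log sigma_q^2), summed over all weights of the variational posterior q against the prior p.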
Example #16
Source File: bnn_repurposer.py From xfer with Apache License 2.0
def _evaluate_accuracy(self, data_iterator, net, layer_params):
    numerator = 0.
    denominator = 0.
    for i, (data, label) in enumerate(data_iterator):
        data = data.as_in_context(self._context_bnn).reshape((-1, data.shape[1]))
        label = label.as_in_context(self._context_bnn)
        replace_params_net(layer_params, net, self._context_bnn)
        output = net(data)
        predictions = nd.argmax(output, axis=1)
        numerator += nd.sum(predictions == label)
        denominator += data.shape[0]
    return (numerator / denominator).asscalar()
Example #17
Source File: image_iter.py From MaskInsightface with Apache License 2.0
def contrast_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
    src *= alpha
    src += gray
    return src
Example #18
Source File: image_iter.py From MaskInsightface with Apache License 2.0
def saturation_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = nd.sum(gray, axis=2, keepdims=True)
    gray *= (1.0 - alpha)
    src *= alpha
    src += gray
    return src
Example #19
Source File: kaggle_k_fold_cross_validation.py From SNIPER-mxnet with Apache License 2.0
def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
Example #20
Source File: tensor.py From dgl with Apache License 2.0
def backward(self, grad_out):
    lhs_data_nd, rhs_data_nd, out_data_nd, feat_shape, degs = self.saved_tensors
    if self.reducer == 'mean':
        grad_out = grad_out / degs
    grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)
    grad_lhs = nd.empty((lhs_data_nd.shape[0],) + feat_shape,
                        ctx=grad_out.context, dtype=grad_out.dtype)
    K.backward_lhs_binary_op_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.binary_op, self.graph, self.lhs, self.rhs,
        lhs_data_nd, rhs_data_nd, out_data_nd, grad_out_nd,
        zerocopy_to_dgl_ndarray_for_write(grad_lhs), self.lhs_map[1],
        self.rhs_map[1], self.out_map[1])
    grad_lhs = _reduce_grad(grad_lhs, lhs_data_nd.shape)
    grad_rhs = nd.empty((rhs_data_nd.shape[0],) + feat_shape,
                        ctx=grad_out.context, dtype=grad_out.dtype)
    K.backward_rhs_binary_op_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.binary_op, self.graph, self.lhs, self.rhs,
        lhs_data_nd, rhs_data_nd, out_data_nd, grad_out_nd,
        zerocopy_to_dgl_ndarray_for_write(grad_rhs), self.lhs_map[1],
        self.rhs_map[1], self.out_map[1])
    grad_rhs = _reduce_grad(grad_rhs, rhs_data_nd.shape)
    # clear saved tensors explicitly
    self.saved_tensors = None
    return grad_lhs, grad_rhs
Example #21
Source File: kaggle_k_fold_cross_validation.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
Example #22
Source File: image_iter.py From insightface with MIT License
def contrast_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
    src *= alpha
    src += gray
    return src
Example #23
Source File: image_iter.py From insightface with MIT License
def saturation_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = nd.sum(gray, axis=2, keepdims=True)
    gray *= (1.0 - alpha)
    src *= alpha
    src += gray
    return src
Example #24
Source File: data.py From insightface with MIT License
def contrast_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
    src *= alpha
    src += gray
    return src
Example #25
Source File: data.py From insightface with MIT License
def saturation_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = nd.sum(gray, axis=2, keepdims=True)
    gray *= (1.0 - alpha)
    src *= alpha
    src += gray
    return src
Example #26
Source File: image_iter.py From insightface with MIT License
def contrast_aug(self, src, x):
    alpha = 1.0 + random.uniform(-x, x)
    coef = nd.array([[[0.299, 0.587, 0.114]]])
    gray = src * coef
    gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
    src *= alpha
    src += gray
    return src
Example #27
Source File: tensor.py From dgl with Apache License 2.0
def sum(input, dim, keepdims=False):
    return nd.sum(input, axis=dim, keepdims=keepdims)
Example #28
Source File: tensor.py From dgl with Apache License 2.0
def reduce_sum(input):
    return input.sum()
Example #29
Source File: score_fun.py From dgl with Apache License 2.0
def edge_func(self, edges):
    real_head, img_head = nd.split(edges.src['emb'], num_outputs=2, axis=-1)
    real_tail, img_tail = nd.split(edges.dst['emb'], num_outputs=2, axis=-1)
    real_rel, img_rel = nd.split(edges.data['emb'], num_outputs=2, axis=-1)

    score = real_head * real_tail * real_rel \
            + img_head * img_tail * real_rel \
            + real_head * img_tail * img_rel \
            - img_head * real_tail * img_rel
    # TODO: check if there exists minus sign and if gamma should be used here(jin)
    return {'score': nd.sum(score, -1)}
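For reference, this is the ComplEx scoring function, score(h, r, t) = Re(sum_k h_k * r_k * conj(t_k)): splitting each embedding into real and imaginary halves and expanding the complex product yields exactly the four terms above.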
Example #30
Source File: tensor.py From dgl with Apache License 2.0
def backward(self, grad_out):
    in_data_nd, out_data_nd, degs = self.saved_tensors
    grad_in = nd.empty(in_data_nd.shape, ctx=grad_out.context,
                       dtype=grad_out.dtype)
    if self.reducer == 'mean':
        grad_out = grad_out / degs
    grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)
    K.backward_copy_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.graph, self.target, in_data_nd, out_data_nd,
        grad_out_nd, zerocopy_to_dgl_ndarray_for_write(grad_in),
        self.in_map[1], self.out_map[1])
    # clear saved tensors explicitly
    self.saved_tensors = None
    return grad_in