Python numpy.spacing() Examples
The following are 30 code examples of numpy.spacing(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
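As a quick refresher before the examples: numpy.spacing(x) returns the distance between x and the nearest adjacent representable floating-point number (the ULP at x), so np.spacing(1.0) equals the machine epsilon of the dtype. The snippet below is a minimal illustrative sketch, not taken from any of the projects listed here; it also shows the np.spacing(1) idiom that many of the examples use as a tiny positive epsilon to keep denominators away from exact zero.

import numpy as np

# The gap between 1.0 and the next representable float64 is the machine epsilon.
assert np.spacing(1.0) == np.finfo(np.float64).eps

# The spacing grows with magnitude: the ULP near 1e10 is about 1.9e-06.
print(np.spacing(1.0))   # ~2.220446049250313e-16
print(np.spacing(1e10))  # ~1.9073486328125e-06

# Idiom used by several examples below: add np.spacing(1) so that a
# zero denominator (e.g. an empty union or label count) cannot raise
# a division-by-zero warning.
area_intersection = 0.0
area_union = 0.0
iou = area_intersection / (np.spacing(1) + area_union)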
Example #1
Source File: test_half.py From vnpy_crypto with MIT License | 6 votes |
def test_spacing_nextafter(self):
    """Test np.spacing and np.nextafter"""
    # All non-negative finite #'s
    a = np.arange(0x7c00, dtype=uint16)
    hinf = np.array((np.inf,), dtype=float16)
    a_f16 = a.view(dtype=float16)

    assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])

    assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
    assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])

    # switch to negatives
    a |= 0x8000

    assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
    assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])

    assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
    assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
Example #2
Source File: coarsening.py From dgl with Apache License 2.0 | 6 votes |
def laplacian(W, normalized=True):
    """Return graph Laplacian"""

    # Degree matrix.
    d = W.sum(axis=0)

    # Laplacian matrix.
    if not normalized:
        D = scipy.sparse.diags(d.A.squeeze(), 0)
        L = D - W
    else:
        d += np.spacing(np.array(0, W.dtype))
        d = 1 / np.sqrt(d)
        D = scipy.sparse.diags(d.A.squeeze(), 0)
        I = scipy.sparse.identity(d.size, dtype=W.dtype)
        L = I - D * W * D

    assert np.abs(L - L.T).mean() < 1e-9
    assert type(L) is scipy.sparse.csr.csr_matrix
    return L
Example #3
Source File: test_half.py From Computable with MIT License | 6 votes |
def test_spacing_nextafter(self):
    """Test np.spacing and np.nextafter"""
    # All non-negative finite #'s
    a = np.arange(0x7c00, dtype=uint16)
    hinf = np.array((np.inf,), dtype=float16)
    a_f16 = a.view(dtype=float16)

    assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])

    assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
    assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])

    # switch to negatives
    a |= 0x8000

    assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
    assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])

    assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
    assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
Example #4
Source File: score.py From SegmenTron with Apache License 2.0 | 6 votes |
def intersectionAndUnion(imPred, imLab, numClass):
    """
    This function takes the prediction and label of a single image,
    returns intersection and union areas for each class
    To compute over many images do:
    for i in range(Nimages):
        (area_intersection[:,i], area_union[:,i]) = intersectionAndUnion(imPred[i], imLab[i])
    IoU = 1.0 * np.sum(area_intersection, axis=1) / np.sum(np.spacing(1)+area_union, axis=1)
    """
    # Remove classes from unlabeled pixels in gt image.
    # We should not penalize detections in unlabeled portions of the image.
    imPred = imPred * (imLab >= 0)

    # Compute area intersection:
    intersection = imPred * (imPred == imLab)
    (area_intersection, _) = np.histogram(intersection, bins=numClass, range=(1, numClass))

    # Compute area union:
    (area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass))
    (area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass))
    area_union = area_pred + area_lab - area_intersection

    return (area_intersection, area_union)
Example #5
Source File: test_half.py From Mastering-Elasticsearch-7.0 with MIT License | 6 votes |
def test_spacing_nextafter(self):
    """Test np.spacing and np.nextafter"""
    # All non-negative finite #'s
    a = np.arange(0x7c00, dtype=uint16)
    hinf = np.array((np.inf,), dtype=float16)
    a_f16 = a.view(dtype=float16)

    assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])

    assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
    assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])

    # switch to negatives
    a |= 0x8000

    assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
    assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])

    assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
    assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
Example #6
Source File: segmentation.py From gluon-cv with Apache License 2.0 | 6 votes |
def intersectionAndUnion(imPred, imLab, numClass):
    """
    This function takes the prediction and label of a single image,
    returns intersection and union areas for each class
    To compute over many images do:
    for i in range(Nimages):
        (area_intersection[:,i], area_union[:,i]) = intersectionAndUnion(imPred[i], imLab[i])
    IoU = 1.0 * np.sum(area_intersection, axis=1) / np.sum(np.spacing(1)+area_union, axis=1)
    """
    # Remove classes from unlabeled pixels in gt image.
    # We should not penalize detections in unlabeled portions of the image.
    imPred = imPred * (imLab > 0)

    # Compute area intersection:
    intersection = imPred * (imPred == imLab)
    (area_intersection, _) = np.histogram(intersection, bins=numClass, range=(1, numClass))

    # Compute area union:
    (area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass))
    (area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass))
    area_union = area_pred + area_lab - area_intersection

    return (area_intersection, area_union)
Example #7
Source File: data.py From Mastering-Elasticsearch-7.0 with MIT License | 6 votes |
def _yeo_johnson_inverse_transform(self, x, lmbda):
    """Return inverse-transformed input x following Yeo-Johnson inverse
    transform with parameter lambda.
    """
    x_inv = np.zeros_like(x)
    pos = x >= 0

    # when x >= 0
    if abs(lmbda) < np.spacing(1.):
        x_inv[pos] = np.exp(x[pos]) - 1
    else:  # lmbda != 0
        x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1

    # when x < 0
    if abs(lmbda - 2) > np.spacing(1.):
        x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,
                                   1 / (2 - lmbda))
    else:  # lmbda == 2
        x_inv[~pos] = 1 - np.exp(-x[~pos])

    return x_inv
Example #8
Source File: data.py From Mastering-Elasticsearch-7.0 with MIT License | 6 votes |
def _yeo_johnson_transform(self, x, lmbda):
    """Return transformed input x following Yeo-Johnson transform with
    parameter lambda.
    """
    out = np.zeros_like(x)
    pos = x >= 0  # binary mask

    # when x >= 0
    if abs(lmbda) < np.spacing(1.):
        out[pos] = np.log1p(x[pos])
    else:  # lmbda != 0
        out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda

    # when x < 0
    if abs(lmbda - 2) > np.spacing(1.):
        out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)
    else:  # lmbda == 2
        out[~pos] = -np.log1p(-x[~pos])

    return out
Example #9
Source File: test_half.py From recruit with Apache License 2.0 | 6 votes |
def test_spacing_nextafter(self):
    """Test np.spacing and np.nextafter"""
    # All non-negative finite #'s
    a = np.arange(0x7c00, dtype=uint16)
    hinf = np.array((np.inf,), dtype=float16)
    a_f16 = a.view(dtype=float16)

    assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])

    assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
    assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])

    # switch to negatives
    a |= 0x8000

    assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
    assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])

    assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
    assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
Example #10
Source File: test_half.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def test_spacing_nextafter(self):
    """Test np.spacing and np.nextafter"""
    # All non-negative finite #'s
    a = np.arange(0x7c00, dtype=uint16)
    hinf = np.array((np.inf,), dtype=float16)
    a_f16 = a.view(dtype=float16)

    assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])

    assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
    assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])

    # switch to negatives
    a |= 0x8000

    assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
    assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])

    assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
    assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
Example #11
Source File: score.py From awesome-semantic-segmentation-pytorch with Apache License 2.0 | 6 votes |
def intersectionAndUnion(imPred, imLab, numClass):
    """
    This function takes the prediction and label of a single image,
    returns intersection and union areas for each class
    To compute over many images do:
    for i in range(Nimages):
        (area_intersection[:,i], area_union[:,i]) = intersectionAndUnion(imPred[i], imLab[i])
    IoU = 1.0 * np.sum(area_intersection, axis=1) / np.sum(np.spacing(1)+area_union, axis=1)
    """
    # Remove classes from unlabeled pixels in gt image.
    # We should not penalize detections in unlabeled portions of the image.
    imPred = imPred * (imLab >= 0)

    # Compute area intersection:
    intersection = imPred * (imPred == imLab)
    (area_intersection, _) = np.histogram(intersection, bins=numClass, range=(1, numClass))

    # Compute area union:
    (area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass))
    (area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass))
    area_union = area_pred + area_lab - area_intersection

    return (area_intersection, area_union)
Example #12
Source File: keypoints.py From KL-Loss with Apache License 2.0 | 6 votes |
def compute_oks(src_keypoints, src_roi, dst_keypoints, dst_roi):
    """Compute OKS for predicted keypoints wrt gt_keypoints.

    src_keypoints: 4xK
    src_roi: 4x1
    dst_keypoints: Nx4xK
    dst_roi: Nx4
    """
    sigmas = np.array([
        .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07,
        .87, .87, .89, .89]) / 10.0
    vars = (sigmas * 2)**2

    # area
    src_area = (src_roi[2] - src_roi[0] + 1) * (src_roi[3] - src_roi[1] + 1)

    # measure the per-keypoint distance if keypoints visible
    dx = dst_keypoints[:, 0, :] - src_keypoints[0, :]
    dy = dst_keypoints[:, 1, :] - src_keypoints[1, :]

    e = (dx**2 + dy**2) / vars / (src_area + np.spacing(1)) / 2
    e = np.sum(np.exp(-e), axis=1) / e.shape[1]

    return e
Example #13
Source File: nms.py From PoseWarper with Apache License 2.0 | 6 votes |
def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):
    if not isinstance(sigmas, np.ndarray):
        sigmas = np.array(
            [.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07,
             1.07, .87, .87, .89, .89]) / 10.0
    vars = (sigmas * 2) ** 2
    xg = g[0::3]
    yg = g[1::3]
    vg = g[2::3]
    ious = np.zeros((d.shape[0]))
    for n_d in range(0, d.shape[0]):
        xd = d[n_d, 0::3]
        yd = d[n_d, 1::3]
        vd = d[n_d, 2::3]
        dx = xd - xg
        dy = yd - yg
        e = (dx ** 2 + dy ** 2) / vars / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2
        if in_vis_thre is not None:
            ind = list(vg > in_vis_thre) and list(vd > in_vis_thre)
            e = e[ind]
        ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0
    return ious
Example #14
Source File: keypoints.py From Detectron.pytorch with MIT License | 6 votes |
def compute_oks(src_keypoints, src_roi, dst_keypoints, dst_roi):
    """Compute OKS for predicted keypoints wrt gt_keypoints.

    src_keypoints: 4xK
    src_roi: 4x1
    dst_keypoints: Nx4xK
    dst_roi: Nx4
    """
    sigmas = np.array([
        .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07,
        .87, .87, .89, .89]) / 10.0
    vars = (sigmas * 2)**2

    # area
    src_area = (src_roi[2] - src_roi[0] + 1) * (src_roi[3] - src_roi[1] + 1)

    # measure the per-keypoint distance if keypoints visible
    dx = dst_keypoints[:, 0, :] - src_keypoints[0, :]
    dy = dst_keypoints[:, 1, :] - src_keypoints[1, :]

    e = (dx**2 + dy**2) / vars / (src_area + np.spacing(1)) / 2
    e = np.sum(np.exp(-e), axis=1) / e.shape[1]

    return e
Example #15
Source File: utils.py From Semantic-Aware-Scene-Recognition with MIT License | 6 votes |
def semanticIoU(pred, label):
    """
    Computes the mean Intersection over Union for all the classes between two mini-batch tensors
    of semantic segmentation
    :param pred: Tensor of predictions
    :param label: Tensor of ground-truth
    :return: Mean semantic intersection over Union for all the classes
    """
    imPred = np.asarray(torch.squeeze(pred))
    imLab = np.asarray(torch.squeeze(label))

    area_intersection = []
    area_union = []

    for i in range(imLab.shape[0]):
        intersection, union = intersectionAndUnion(imPred[i], imLab[i])
        area_intersection.append(intersection)
        area_union.append(union)

    IoU = 1.0 * np.sum(area_intersection, axis=0) / np.sum(np.spacing(1)+area_union, axis=0)

    return np.mean(IoU)
Example #16
Source File: utils.py From Semantic-Aware-Scene-Recognition with MIT License | 6 votes |
def MeanPixelAccuracy(pred, label):
    """
    Function to compute the mean pixel accuracy for semantic segmentation between mini-batch tensors
    :param pred: Tensor of predictions
    :param label: Tensor of ground-truth
    :return: Mean pixel accuracy for all the mini-batch
    """
    # Convert tensors to numpy arrays
    imPred = np.asarray(torch.squeeze(pred))
    imLab = np.asarray(torch.squeeze(label))

    # Create empty numpy arrays
    pixel_accuracy = np.empty(imLab.shape[0])
    pixel_correct = np.empty(imLab.shape[0])
    pixel_labeled = np.empty(imLab.shape[0])

    # Compute pixel accuracy for each pair of images in the batch
    for i in range(imLab.shape[0]):
        pixel_accuracy[i], pixel_correct[i], pixel_labeled[i] = pixelAccuracy(imPred[i], imLab[i])

    # Compute the final accuracy for the batch
    acc = 100.0 * np.sum(pixel_correct) / (np.spacing(1) + np.sum(pixel_labeled))

    return acc
Example #17
Source File: kissme.py From Dispersion-based-Clustering with MIT License | 6 votes |
def validate_cov_matrix(M):
    M = (M + M.T) * 0.5
    k = 0
    I = np.eye(M.shape[0])
    while True:
        try:
            _ = np.linalg.cholesky(M)
            break
        except np.linalg.LinAlgError:
            # Find the nearest positive definite matrix for M. Modified from
            # http://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
            # Might take several minutes
            k += 1
            w, v = np.linalg.eig(M)
            min_eig = v.min()
            M += (-min_eig * k * k + np.spacing(min_eig)) * I
    return M
Example #18
Source File: graph.py From gconvRNN with MIT License | 6 votes |
def laplacian(W, normalized=True):
    """Return the Laplacian of the weight matrix."""

    # Degree matrix.
    d = W.sum(axis=0)

    # Laplacian matrix.
    if not normalized:
        D = scipy.sparse.diags(d.A.squeeze(), 0)
        L = D - W
    else:
        d += np.spacing(np.array(0, W.dtype))
        d = 1 / np.sqrt(d)
        D = scipy.sparse.diags(d.A.squeeze(), 0)
        I = scipy.sparse.identity(d.size, dtype=W.dtype)
        L = I - D * W * D

    # assert np.abs(L - L.T).mean() < 1e-9
    assert type(L) is scipy.sparse.csr.csr_matrix
    return L
Example #19
Source File: _classes.py From harold with MIT License | 6 votes |
def __rtruediv__(self, other):
    """ Support for division .../G """
    if not np.equal(*self._shape):
        raise ValueError('Nonsquare systems cannot be inverted')

    a, b, c, d = self._a, self._b, self._c, self._d
    if np.any(svdvals(d) < np.spacing(1.)):
        raise LinAlgError('The feedthrough term of the system is not'
                          ' invertible.')
    else:
        # A-BD^{-1}C | BD^{-1}
        # -----------|--------
        #  -D^{-1}C  | D^{-1}
        if self._isgain:
            ai, bi, ci = None, None, None
        else:
            ai = a - b @ solve(d, c)
            bi = (solve(d.T, b.T)).T
            ci = -solve(d, c)
        di = inv(d)

    return other @ State(ai, bi, ci, di, dt=self._dt)
Example #20
Source File: metrics.py From PyTorch-Encoding with MIT License | 5 votes |
def get_pixacc_miou(total_correct, total_label, total_inter, total_union):
    pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
    IoU = 1.0 * total_inter / (np.spacing(1) + total_union)
    mIoU = IoU.mean()
    return pixAcc, mIoU
Example #21
Source File: metric.py From TreeFilter-Torch with MIT License | 5 votes |
def mean_pixel_accuracy(pixel_correct, pixel_labeled):
    mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (
        np.spacing(1) + np.sum(pixel_labeled))
    return mean_pixel_accuracy
Example #22
Source File: coKriging.py From pyKriging with MIT License | 5 votes |
def updatePsi(self):
    self.PsicXc = np.zeros((self.nc, self.nc), dtype=np.float)
    self.PsicXe = np.zeros((self.ne, self.ne), dtype=np.float)
    self.PsicXcXe = np.zeros((self.nc, self.ne), dtype=np.float)
    #
    # print self.thetac
    # print self.pc
    # print self.distanceXc
    newPsicXc = np.exp(-np.sum(self.thetac*np.power(self.distanceXc, self.pc), axis=2))
    print(newPsicXc[0])
    self.PsicXc = np.triu(newPsicXc, 1)
    self.PsicXc = self.PsicXc + self.PsicXc.T + np.mat(eye(self.nc)) + np.multiply(np.mat(eye(self.nc)), np.spacing(1))
    self.UPsicXc = np.linalg.cholesky(self.PsicXc)
    self.UPsicXc = self.UPsicXc.T
    print(self.PsicXc[0])
    print(self.UPsicXc)
    exit()
    newPsicXe = np.exp(-np.sum(self.thetac*np.power(self.distanceXe, self.pc), axis=2))
    self.PsicXe = np.triu(newPsicXe, 1)
    self.PsiXe = self.PsicXe + self.PsicXe.T + np.mat(eye(self.ne)) + np.multiply(np.mat(eye(self.ne)), np.spacing(1))
    self.UPsicXe = np.linalg.cholesky(self.PsicXe)
    self.UPsicXe = self.UPsicXe.T
    newPsiXeXc = np.exp(-np.sum(self.thetad*np.power(self.distanceXcXe, self.pd), axis=2))
    self.PsicXcXe = np.triu(newPsiXeXc, 1)
Example #23
Source File: test_umath.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def _test_spacing(t):
    one = t(1)
    eps = np.finfo(t).eps
    nan = t(np.nan)
    inf = t(np.inf)
    with np.errstate(invalid='ignore'):
        assert_(np.spacing(one) == eps)
        assert_(np.isnan(np.spacing(nan)))
        assert_(np.isnan(np.spacing(inf)))
        assert_(np.isnan(np.spacing(-inf)))
        assert_(np.spacing(t(1e30)) != 0)
Example #24
Source File: test_half.py From Computable with MIT License | 5 votes |
def test_nans_infs(self):
    with np.errstate(all='ignore'):
        # Check some of the ufuncs
        assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
        assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
        assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
        assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
        assert_equal(np.spacing(float16(65504)), np.inf)

        # Check comparisons of all values with NaN
        nan = float16(np.nan)

        assert_(not (self.all_f16 == nan).any())
        assert_(not (nan == self.all_f16).any())

        assert_((self.all_f16 != nan).all())
        assert_((nan != self.all_f16).all())

        assert_(not (self.all_f16 < nan).any())
        assert_(not (nan < self.all_f16).any())

        assert_(not (self.all_f16 <= nan).any())
        assert_(not (nan <= self.all_f16).any())

        assert_(not (self.all_f16 > nan).any())
        assert_(not (nan > self.all_f16).any())

        assert_(not (self.all_f16 >= nan).any())
        assert_(not (nan >= self.all_f16).any())
Example #25
Source File: test_utils.py From PyTorch-Encoding with MIT License | 5 votes |
def test_segmentation_metrics():
    # check torch evaluation metrics
    rows, cols = 640, 480
    nclass = 30
    # numpy data
    im_lab = np.matrix(np.random.randint(0, nclass, size=(rows, cols)))
    mask = np.random.random((nclass, rows, cols))
    im_pred = mask.argmax(axis=0)
    # torch data
    tim_lab = torch.from_numpy(im_lab).unsqueeze(0).long()
    tim_pred = torch.from_numpy(mask).unsqueeze(0)
    # numpy prediction
    pixel_correct, pixel_labeled = pixel_accuracy(im_pred, im_lab)
    area_inter, area_union = intersection_and_union(im_pred, im_lab, nclass)
    pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)
    IoU = 1.0 * area_inter / (np.spacing(1) + area_union)
    mIoU = IoU.mean()
    print('numpy predictionis :', pixAcc, mIoU)
    # torch metric prediction
    pixel_correct, pixel_labeled = batch_pix_accuracy(tim_pred, tim_lab)
    area_inter, area_union = batch_intersection_union(tim_pred, tim_lab, nclass)
    batch_pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)
    IoU = 1.0 * area_inter / (np.spacing(1) + area_union)
    batch_mIoU = IoU.mean()
    print('torch predictionis :', batch_pixAcc, batch_mIoU)
    assert (batch_pixAcc - pixAcc) < 1e-3
    assert (batch_mIoU - mIoU) < 1e-3
Example #26
Source File: test_umath.py From vnpy_crypto with MIT License | 5 votes |
def test_nextafter_vs_spacing():
    # XXX: spacing does not handle long double yet
    for t in [np.float32, np.float64]:
        for _f in [1, 1e-5, 1000]:
            f = t(_f)
            f1 = t(_f + 1)
            assert_(np.nextafter(f, f1) - f == np.spacing(f))
Example #27
Source File: train.py From PyTorch-Encoding with MIT License | 5 votes |
def validation(self, epoch):
    # Fast test during the training
    def eval_batch(model, image, target):
        outputs = model(image)
        outputs = gather(outputs, 0, dim=0)
        pred = outputs[0]
        target = target.cuda()
        correct, labeled = utils.batch_pix_accuracy(pred.data, target)
        inter, union = utils.batch_intersection_union(pred.data, target, self.nclass)
        return correct, labeled, inter, union

    is_best = False
    self.model.eval()
    total_inter, total_union, total_correct, total_label = 0, 0, 0, 0
    tbar = tqdm(self.valloader, desc='\r')
    for i, (image, target) in enumerate(tbar):
        with torch.no_grad():
            correct, labeled, inter, union = eval_batch(self.model, image, target)

        total_correct += correct
        total_label += labeled
        total_inter += inter
        total_union += union
        pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
        IoU = 1.0 * total_inter / (np.spacing(1) + total_union)
        mIoU = IoU.mean()
        tbar.set_description(
            'pixAcc: %.3f, mIoU: %.3f' % (pixAcc, mIoU))

    new_pred = (pixAcc + mIoU) / 2
    if new_pred > self.best_pred:
        is_best = True
        self.best_pred = new_pred
    utils.save_checkpoint({
        'epoch': epoch + 1,
        'state_dict': self.model.module.state_dict(),
        'optimizer': self.optimizer.state_dict(),
        'best_pred': self.best_pred,
    }, self.args, is_best)
Example #28
Source File: rpn_target.py From gluon-cv with Apache License 2.0 | 5 votes |
def __init__(self, num_sample, pos_iou_thresh, neg_iou_thresh, pos_ratio):
    super(RPNTargetSampler, self).__init__()
    self._num_sample = num_sample
    self._max_pos = int(round(num_sample * pos_ratio))
    self._pos_iou_thresh = pos_iou_thresh
    self._neg_iou_thresh = neg_iou_thresh
    self._eps = np.spacing(np.float32(1.0))
Example #29
Source File: segmentation.py From gluon-cv with Apache License 2.0 | 5 votes |
def pixelAccuracy(imPred, imLab):
    """
    This function takes the prediction and label of a single image, returns pixel-wise accuracy
    To compute over many images do:
    for i = range(Nimages):
        (pixel_accuracy[i], pixel_correct[i], pixel_labeled[i]) = \
            pixelAccuracy(imPred[i], imLab[i])
    mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (np.spacing(1) + np.sum(pixel_labeled))
    """
    # Remove classes from unlabeled pixels in gt image.
    # We should not penalize detections in unlabeled portions of the image.
    pixel_labeled = np.sum(imLab > 0)
    pixel_correct = np.sum((imPred == imLab)*(imLab > 0))
    pixel_accuracy = 1.0 * pixel_correct / pixel_labeled
    return (pixel_accuracy, pixel_correct, pixel_labeled)
Example #30
Source File: segmentation.py From gluon-cv with Apache License 2.0 | 5 votes |
def get(self):
    """Gets the current evaluation result.

    Returns
    -------
    metrics : tuple of float
        pixAcc and mIoU
    """
    pixAcc = 1.0 * self.total_correct / (np.spacing(1) + self.total_label)
    IoU = 1.0 * self.total_inter / (np.spacing(1) + self.total_union)
    mIoU = IoU.mean()
    return pixAcc, mIoU