Python cv2.findHomography() Examples
The following are 30 code examples of cv2.findHomography(), drawn from open-source projects. Each example is preceded by its source file, originating project, and license.
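Before the project examples, here is a minimal, self-contained sketch of the call itself. The point coordinates are made up for illustration: cv2.findHomography() takes two arrays of corresponding 2D points and returns a 3x3 perspective matrix H plus a per-point inlier mask.

import cv2
import numpy as np

# Four (or more) corresponding points; these coordinates are illustrative.
src = np.float32([[0, 0], [100, 0], [100, 100], [0, 100]]).reshape(-1, 1, 2)
dst = np.float32([[10, 5], [115, 8], [110, 120], [5, 112]]).reshape(-1, 1, 2)

# H is the 3x3 perspective matrix; mask flags RANSAC inliers (1) vs outliers (0).
H, mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)

# Apply H to the source points to verify the mapping.
projected = cv2.perspectiveTransform(src, H)
print(H)
print(np.round(projected.reshape(-1, 2), 1))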
Example #1
Source File: fisheye2equirec.py From dual-fisheye-video-stitching with MIT License

def buildPano(defished):
    # Build the panorama from the defisheye images
    offsets = []
    finalWidth = defished[0].width
    # Get the offsets and calculate the final size
    for i in range(0, len(defished) - 1):
        H, M, offset = findHomography(defished[i], defished[i + 1])
        dfw = defished[i + 1].width
        offsets.append(offset)
        finalWidth += int(dfw - offset[0])
    final = Image((finalWidth, defished[0].height))
    final = final.blit(defished[0], pos=(0, 0))
    xs = 0
    # blit subsequent images into the final image
    for i in range(0, len(defished) - 1):
        w = defished[i + 1].width
        h = defished[i + 1].height
        mask = constructMask(w, h, offsets[i][0])
        xs += int(w - offsets[i][0])
        final = final.blit(defished[i + 1], pos=(xs, 0), alphaMask=mask)
    return final
Example #2
Source File: dm_tracker.py From videoseg with MIT License

def frame_homography(totalPts, homTh):
    """
    Filter foreground points, i.e. the outlier points found by fitting a
    homography using RANSAC.
    Input:
        totalPts: (numAllPoints, 4): x0, y0, x1, y1
    Output:
        fgPts: (numForegroundPoints, 4): x0, y0, x1, y1
    """
    if totalPts.ndim != 2 or totalPts.shape[0] < 8 or homTh < 0:
        return totalPts

    import cv2
    p1 = totalPts[:, :2].astype('float')
    p2 = totalPts[:, 2:4].astype('float')
    _, status = cv2.findHomography(
        p1, p2, cv2.RANSAC, ransacReprojThreshold=homTh)
    # RANSAC outliers (status == 0) are the foreground points
    fgPts = totalPts[status[:, 0] == 0, :]
    return fgPts
Example #3
Source File: Placer.py From ImageAnalysis with MIT License

def findHomography(self, i1, i2, pairs):
    src = []
    dst = []
    for pair in pairs:
        c1 = i1.coord_list[pair[0]]
        c2 = i2.coord_list[pair[1]]
        src.append(c1)
        dst.append(c2)
    #H, status = cv2.findHomography(np.array([src]).astype(np.float32),
    #                               np.array([dst]).astype(np.float32),
    #                               cv2.RANSAC, 5.0)
    H, status = cv2.findHomography(np.array([src]).astype(np.float32),
                                   np.array([dst]).astype(np.float32))
    return H

# compare against best 'placed' image (averaging transform
# matrices together directly doesn't do what we want)
Example #4
Source File: FocusStack.py From focusstack with Apache License 2.0

def findHomography(image_1_kp, image_2_kp, matches):
    image_1_points = np.zeros((len(matches), 1, 2), dtype=np.float32)
    image_2_points = np.zeros((len(matches), 1, 2), dtype=np.float32)
    for i in range(0, len(matches)):
        image_1_points[i] = image_1_kp[matches[i].queryIdx].pt
        image_2_points[i] = image_2_kp[matches[i].trainIdx].pt
    homography, mask = cv2.findHomography(image_1_points, image_2_points,
                                          cv2.RANSAC,
                                          ransacReprojThreshold=2.0)
    return homography

#
# Align the images so they overlap properly...
#
Example #5
Source File: ImageStitching.py From ImageProcessingProjects with MIT License

def getHomography(self, rightKps, rightDescriptor):
    rawMatches = self.matcher.knnMatch(self.leftDescriptor, rightDescriptor, 2)
    matches = []
    for m in rawMatches:
        # Lowe's ratio test
        if len(m) == 2 and m[0].distance < m[1].distance * self.ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))
    if len(matches) >= 4:
        ptsB = np.float32([self.leftKps[i] for (_, i) in matches])
        ptsA = np.float32([rightKps[i] for (i, _) in matches])
        # ptsB = H * ptsA
        H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                       self.reprojThresh)
        return H
    return None
Example #6
Source File: CopyTexture.py From PlaneNet with MIT License

def copyTextureTest(options):
    testdir = 'texture_test/'
    for index in xrange(1):
        planes = np.load(testdir + '/planes_' + str(index) + '.npy')
        image = cv2.imread(testdir + '/image_' + str(index) + '.png')
        segmentations = np.load(testdir + '/segmentations_' + str(index) + '.npy')
        segmentation = np.argmax(segmentations, axis=2)
        plane_depths = calcPlaneDepths(planes, WIDTH, HEIGHT)
        textureImage = cv2.imread('../textures/texture_0.jpg')
        textureImage = cv2.resize(textureImage, (WIDTH, HEIGHT),
                                  interpolation=cv2.INTER_LINEAR)
        floorPlaneIndex = findFloorPlane(planes, segmentation)
        if floorPlaneIndex == -1:
            continue
        mask = segmentation == floorPlaneIndex
        uv = findCornerPoints(planes[floorPlaneIndex],
                              plane_depths[:, :, floorPlaneIndex], mask)
        source_uv = np.array([[0, 0], [0, HEIGHT], [WIDTH, 0], [WIDTH, HEIGHT]])
        h, status = cv2.findHomography(source_uv, uv)
        textureImageWarped = cv2.warpPerspective(textureImage, h, (WIDTH, HEIGHT))
        image[mask] = textureImageWarped[mask]
        cv2.imwrite(testdir + '/' + str(index) + '_texture.png', textureImageWarped)
        cv2.imwrite(testdir + '/' + str(index) + '_result.png', image)
    return
Example #7
Source File: StitchingFromVideo.py From ImageProcessingProjects with MIT License

def getHomography(self, rightKps, rightDescriptor):
    rawMatches = self.matcher.knnMatch(self.leftDescriptor, rightDescriptor, 2)
    matches = []
    for m in rawMatches:
        # Lowe's ratio test
        if len(m) == 2 and m[0].distance < m[1].distance * self.ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))
    if len(matches) >= 4:
        ptsB = np.float32([self.leftKps[i] for (_, i) in matches])
        ptsA = np.float32([rightKps[i] for (i, _) in matches])
        # ptsB = H * ptsA
        H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                       self.reprojThresh)
        return H
    return None
Example #8
Source File: local_descriptors.py From hfnet with MIT License

def compute_homography_error(kpts1, kpts2, matches, shape2, H_gt):
    if matches.shape[0] == 0:
        return False, None
    kpts1 = kpts1[matches[:, 0]]
    kpts2 = kpts2[matches[:, 1]]
    H, _ = cv2.findHomography(kpts2, kpts1, cv2.RANSAC, 3.0)
    if H is None:
        return None
    w, h = shape2
    corners2 = to_homogeneous(
        np.array([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]))
    corners1_gt = np.dot(corners2, np.transpose(H_gt))
    corners1_gt = corners1_gt[:, :2] / corners1_gt[:, 2:]
    corners1 = np.dot(corners2, np.transpose(H))
    corners1 = corners1[:, :2] / corners1[:, 2:]
    # mean corner-transfer distance between estimated and ground-truth H
    mean_dist = np.mean(np.linalg.norm(corners1 - corners1_gt, axis=1))
    return mean_dist
Example #9
Source File: utils.py From VerifAI with BSD 3-Clause "New" or "Revised" License

def unit2bbH(boundingBox, ldBox=unitBox):
    '''Transform 2d box-sample into trapezoid (car displacement area)'''
    bbox = bb2array(boundingBox)
    ubox = bb2array(ldBox)
    h, _ = cv2.findHomography(np.float_(ubox), np.float_(np.array(bbox)))
    return h
Example #10
Source File: Placer.py From ImageAnalysis with MIT License

def findImageHomography2(self, i1):
    # find the homography matrix for the best (most connected) already
    # placed neighbor
    best_index = 0
    best_pairs = 0
    for i, pairs in enumerate(i1.match_list):
        if len(pairs) < 4:
            # can't compute a homography on < 4 points
            continue
        i2 = self.image_list[i]
        if not i2.placed:
            continue
        if len(pairs) > best_pairs:
            best_pairs = len(pairs)
            best_index = i
    if best_pairs == 0:
        return np.identity(3)
    i2 = self.image_list[best_index]
    H = self.findHomography(i1, i2, i1.match_list[best_index])
    if H is None:
        # given a degenerate point set, the homography estimator
        # may return None
        return np.identity(3)
    return H
Example #11
Source File: find_obj.py From ImageAnalysis with MIT License

def match_and_draw(win):
    print('matching...')
    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))
    vis = explore_match(win, img1, img2, kp_pairs, status, H)
Example #12
Source File: 1a-est-gyro-rates.py From ImageAnalysis with MIT License

def filterFeatures(p1, p2, K, method):
    inliers = 0
    total = len(p1)
    space = ""
    status = []
    M = None
    if len(p1) < 7:
        # not enough points
        return None, np.zeros(total), [], []
    if method == 'homography':
        # note: with cv2.LMEDS the threshold argument is ignored
        # (it is only used by the RANSAC and RHO methods)
        M, status = cv2.findHomography(p1, p2, cv2.LMEDS, tol)
    elif method == 'fundamental':
        M, status = cv2.findFundamentalMat(p1, p2, cv2.LMEDS, tol)
    elif method == 'essential':
        M, status = cv2.findEssentialMat(p1, p2, K, cv2.LMEDS, threshold=tol)
    elif method == 'none':
        M = None
        status = np.ones(total)
    newp1 = []
    newp2 = []
    for i, flag in enumerate(status):
        if flag:
            newp1.append(p1[i])
            newp2.append(p2[i])
    p1 = np.float32(newp1)
    p2 = np.float32(newp2)
    inliers = np.sum(status)
    total = len(status)
    return M, status, np.float32(newp1), np.float32(newp2)
Example #13
Source File: Camera.py From SimpleCV2 with BSD 3-Clause "New" or "Revised" License

def projectPoint(self, point, H, whichImage):
    """
    **SUMMARY**

    This method returns the corresponding point (x, y) in the other image.

    **PARAMETERS**

    * *point* - Input point (x, y)
    * *whichImage* - Index of the image (1 or 2) that contains the point
    * *H* - Homography that can be estimated using
            StereoCamera.findHomography()

    **RETURNS**

    Corresponding point (x, y) as a tuple

    **EXAMPLE**

    >>> img1 = Image("sampleimages/stereo_view1.png")
    >>> img2 = Image("sampleimages/stereo_view2.png")
    >>> stereoImg = StereoImage(img1, img2)
    >>> F, pts1, pts2 = stereoImg.findFundamentalMat()
    >>> point = pts2[0]
    >>> projectPoint = stereoImg.projectPoint(point, H, 1)  # finds the corresponding point in the left image
    """
    H = np.matrix(H)
    point = np.matrix((point[1], point[0], 1.00))
    if whichImage == 1.00:
        corres_pt = H * point.T
    else:
        corres_pt = np.linalg.inv(H) * point.T
    corres_pt = corres_pt / corres_pt[2]
    return (float(corres_pt[1]), float(corres_pt[0]))
Example #14
Source File: distance_ransac_orb.py From douglas-quaid with GNU General Public License v3.0

def find_homography(keypoints_pic1, keypoints_pic2, matches) -> (List, np.float32, np.float32):
    # Find a homography matrix between two pictures.
    # From two lists of keypoints and a list of matches, extract a list of
    # good matches found by RANSAC and two transformation matrices
    # (a homography and a rigid homography/affine).

    # Instantiate outputs
    good = []

    # Transform keypoints into lists of points
    src_pts = np.float32([keypoints_pic1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([keypoints_pic2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

    # Find the transformation between points
    transformation_matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    # Compute a rigid transformation (no perspective: only scale + rotation + translation)
    transformation_rigid_matrix, rigid_mask = cv2.estimateAffinePartial2D(src_pts, dst_pts)

    # Get a mask list for matches: it says whether each match is an in/outlier
    matchesMask = mask.ravel().tolist()

    # Filter the matches list thanks to the mask
    for i, element in enumerate(matchesMask):
        if element == 1:
            good.append(matches[i])

    return good, transformation_matrix, transformation_rigid_matrix
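A note on the design choice above: cv2.findHomography() fits a full 8-degree-of-freedom perspective transform, while cv2.estimateAffinePartial2D() fits only 4 degrees of freedom (uniform scale, rotation, translation). The partial-affine model is far more constrained, so it is harder to overfit to bad matches; returning both lets the caller pick whichever model suits its distance metric.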
Example #15
Source File: keypoint_base.py From Airtest with Apache License 2.0

def _find_homography(self, sch_pts, src_pts):
    """Compute the homography matrix from multiple feature-point pairs."""
    try:
        M, mask = cv2.findHomography(sch_pts, src_pts, cv2.RANSAC, 5.0)
    except Exception:
        import traceback
        traceback.print_exc()
        raise HomographyError("OpenCV error in _find_homography()...")
    else:
        if mask is None:
            raise HomographyError("In _find_homography(), found no transformation matrix...")
        else:
            return M, mask
Example #16
Source File: sift.py From Airtest with Apache License 2.0

def _find_homography(sch_pts, src_pts):
    """Compute the homography matrix from multiple feature-point pairs."""
    try:
        M, mask = cv2.findHomography(sch_pts, src_pts, cv2.RANSAC, 5.0)
    except Exception:
        import traceback
        traceback.print_exc()
        raise HomographyError("OpenCV error in _find_homography()...")
    else:
        if mask is None:
            raise HomographyError("In _find_homography(), found no mask...")
        else:
            return M, mask
Example #17
Source File: find_obj.py From OpenCV-Python-Tutorial with MIT License

def match_and_draw(win):
    print('matching...')
    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))
    vis = explore_match(win, img1, img2, kp_pairs, status, H)
Example #18
Source File: find_obj.py From PyCV-time with MIT License

# note: this project uses Python 2 print statements
def match_and_draw(win):
    print 'matching...'
    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print '%d / %d  inliers/matched' % (np.sum(status), len(status))
    else:
        H, status = None, None
        print '%d matches found, not enough for homography estimation' % len(p1)
    vis = explore_match(win, img1, img2, kp_pairs, status, H)
Example #19
Source File: Placer.py From ImageAnalysis with MIT License

def findGroupHomography(self, i1):
    # find the homography matrix representing the best fit against
    # all the placed neighbors.  Builds a cumulative src/dst list
    # with our src points listed once for each image pair.
    src = []
    dst = []
    for i, pairs in enumerate(i1.match_list):
        if len(pairs) < 4:
            # can't compute a homography on < 4 points
            continue
        i2 = self.image_list[i]
        if not i2.placed:
            # don't consider not-yet-placed neighbors
            continue
        # add coordinate matches for this image pair
        for pair in pairs:
            c1 = i1.coord_list[pair[0]]
            c2 = i2.coord_list[pair[1]]
            src.append(c1)
            dst.append(c2)
    if len(src) < 4:
        # no placed neighbors, just return the identity matrix
        return np.identity(3)
    # find the homography matrix on the cumulative set of all matching
    # coordinates for all matching image pairs simultaneously
    H, status = cv2.findHomography(np.array([src]).astype(np.float32),
                                   np.array([dst]).astype(np.float32),
                                   cv2.RANSAC, 5.0)
    if H is None:
        # given a degenerate point set, the homography estimator
        # may return None
        return np.identity(3)
    return H

# compare against best 'placed' image (averaging transform
# matrices together directly doesn't do what we want)
Example #20
Source File: dataset.py From deep-image-retrieval with BSD 3-Clause "New" or "Revised" License

def crop_image(self, img, polygons):
    import cv2
    import pdb
    crop_signs = []
    if len(polygons) == 0:
        pdb.set_trace()
    for Polycc in polygons:
        rgbimg = img.copy()
        rgbimg = np.array(rgbimg)  # PIL to cv2
        Poly_s = np.array(Polycc)
        # rearrange the corner order
        if Poly_s[0, 1] < Poly_s[1, 1]:
            temp = Poly_s[1, :].copy()
            Poly_s[1, :] = Poly_s[0, :]
            Poly_s[0, :] = temp
        if Poly_s[2, 1] > Poly_s[3, 1]:
            temp = Poly_s[3, :].copy()
            Poly_s[3, :] = Poly_s[2, :]
            Poly_s[2, :] = temp
        cy_s = np.mean(Poly_s[:, 0])
        cx_s = np.mean(Poly_s[:, 1])
        w_s = np.abs(Poly_s[0][1] - Poly_s[1][1])
        h_s = np.abs(Poly_s[0][0] - Poly_s[2][0])
        # destination polygon: an axis-aligned rectangle of the same size
        Poly_d = np.array([(cy_s - h_s / 2, cx_s + w_s / 2),
                           (cy_s - h_s / 2, cx_s - w_s / 2),
                           (cy_s + h_s / 2, cx_s - w_s / 2),
                           (cy_s + h_s / 2, cx_s + w_s / 2)]).astype(int)
        M, mask = cv2.findHomography(Poly_s, Poly_d)
        # warp with cv2, then convert back to PIL
        warpimg = Image.fromarray(cv2.warpPerspective(rgbimg, M, (645, 800)))
        crop_sign = warpimg.crop([np.min(Poly_d[:, 0]), np.min(Poly_d[:, 1]),
                                  np.max(Poly_d[:, 0]), np.max(Poly_d[:, 1])])
        crop_signs.append(crop_sign)
    return crop_signs
Example #21
Source File: 1b-est-gyro-rates.py From ImageAnalysis with MIT License

def filterFeatures(p1, p2, K, method):
    inliers = 0
    total = len(p1)
    space = ""
    status = []
    # initialize in case the loop body never runs
    M = None
    newp1 = []
    newp2 = []
    # iteratively refit until every remaining point is an inlier
    while inliers < total and total >= 7:
        if method == 'homography':
            M, status = cv2.findHomography(p1, p2, cv2.LMEDS, tol)
        elif method == 'fundamental':
            M, status = cv2.findFundamentalMat(p1, p2, cv2.LMEDS, tol)
        elif method == 'essential':
            M, status = cv2.findEssentialMat(p1, p2, K, cv2.LMEDS, threshold=tol)
        elif method == 'none':
            M = None
            status = np.ones(total)
        newp1 = []
        newp2 = []
        for i, flag in enumerate(status):
            if flag:
                newp1.append(p1[i])
                newp2.append(p2[i])
        p1 = np.float32(newp1)
        p2 = np.float32(newp2)
        inliers = np.sum(status)
        total = len(status)
        space += " "
    return M, status, np.float32(newp1), np.float32(newp2)
Example #22
Source File: face_mask.py From portrait_matting with GNU General Public License v3.0

def _align_face_projection(img, src_lmk, dst_lmk, size=None):
    H, _ = cv2.findHomography(src_lmk, dst_lmk)
    if size is None:
        size = (img.shape[1], img.shape[0])
    # TODO: Border method
    aligned_img = cv2.warpPerspective(img, H, size,
                                      borderMode=cv2.BORDER_REPLICATE)
    return aligned_img
Example #23
Source File: tagDetection.py From Apriltag_python with MIT License

def _recomputeHomography(self):
    """
    Find the homography mapping the canonical tag corners to the
    detected corner points.
    :return: homography
    """
    src = np.array([
        [-1, -1],
        [1, -1],
        [1, 1],
        [-1, 1],
    ]).reshape(-1, 1, 2)
    dst = np.array(self.points)
    retval, mask = cv2.findHomography(np.array(src), np.array(dst))
    self.homography = retval
Example #24
Source File: dm_tracker.py From videoseg with MIT License

def shot_homography(shotTracks, homTh):
    """
    Filter foreground points, i.e. the outlier points found by fitting a
    homography using RANSAC.
    Input:
        shotTracks: (numFrames, numAllPoints, 2 or 4)
    Output:
        fgTracks: (numFrames, numForegroundPoints, 2 or 4)
    """
    if shotTracks.ndim < 3 or shotTracks.shape[0] < 2 or homTh < 0:
        return shotTracks

    import cv2
    status = 1
    for i in range(1, shotTracks.shape[0]):
        if shotTracks[i - 1, 0, 2] > -1000:
            p1 = shotTracks[i - 1, :, 2:].astype('float')
        else:
            p1 = shotTracks[i - 1, :, :2].astype('float')
        p2 = shotTracks[i, :, :2].astype('float')
        _, new_status = cv2.findHomography(
            p1, p2, cv2.RANSAC, ransacReprojThreshold=homTh)
        # a point counts as foreground if RANSAC marks it as an
        # outlier in any frame pair
        status = new_status * status
    fgTracks = shotTracks[:, status[:, 0] == 0, :]
    print(shotTracks.shape[0], shotTracks.shape[1], fgTracks.shape[1])
    return fgTracks
Example #25
Source File: ImageStitching.py From ImageProcessingProjects with MIT License

def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh):
    # compute the raw matches and initialize the list of actual matches
    matcher = cv2.DescriptorMatcher_create("BruteForce")
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []

    # loop over the raw matches
    for m in rawMatches:
        # ensure the distance is within a certain ratio of each
        # other (i.e. Lowe's ratio test)
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))

    # computing a homography requires at least 4 matches
    if len(matches) > 4:
        # construct the two sets of points
        ptsA = np.float32([kpsA[i] for (_, i) in matches])
        ptsB = np.float32([kpsB[i] for (i, _) in matches])

        # compute the homography between the two sets of points
        (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, reprojThresh)

        # return the matches along with the homography matrix
        # and status of each matched point
        return (matches, H, status)

    # otherwise, no homography could be computed
    return None
Example #26
Source File: morpher.py From face_merge_master with Apache License 2.0

def tran_matrix(src_img, src_points, dst_img, dst_points):
    h = cv2.findHomography(dst_points, src_points)
    # only the top two rows of the 3x3 homography are used, as an
    # affine approximation (the perspective row is discarded)
    output = cv2.warpAffine(dst_img, h[0][:2],
                            (src_img.shape[1], src_img.shape[0]),
                            borderMode=cv2.BORDER_TRANSPARENT,
                            flags=cv2.WARP_INVERSE_MAP)
    return output
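Because cv2.warpAffine() only consumes a 2x3 matrix, the snippet above silently drops the perspective row of the estimated homography, which is only safe when the transform is nearly affine, as with facial landmarks from similar poses. A minimal sketch of the full-perspective variant, keeping the original's flags (the function name here is illustrative, not part of the project):

import cv2

def tran_matrix_perspective(src_img, src_points, dst_img, dst_points):
    # Same estimation as above, but the full 3x3 matrix is applied with
    # warpPerspective, so the perspective row is not discarded.
    H, _ = cv2.findHomography(dst_points, src_points)
    return cv2.warpPerspective(dst_img, H,
                               (src_img.shape[1], src_img.shape[0]),
                               borderMode=cv2.BORDER_TRANSPARENT,
                               flags=cv2.WARP_INVERSE_MAP)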
Example #27
Source File: ImageUtility.py From ImageStitch with MIT License

def getOffsetByRansac(self, kpsA, kpsB, matches, offsetEvaluate=100):
    """
    Estimate the offset between two images via RANSAC (incomplete).
    :param kpsA: features of the first image
    :param kpsB: features of the second image
    :param matches: list of registered matches
    :param offsetEvaluate: minimum number of RANSAC inliers required
        for the result to count as correct
    :return: (totalStatus, [dx, dy], adjustH); totalStatus indicates
        success, and [dx, dy] defaults to [0, 0]
    """
    totalStatus = False
    ptsA = np.float32([kpsA[i] for (_, i) in matches])
    ptsB = np.float32([kpsB[i] for (i, _) in matches])
    if len(matches) == 0:
        return (totalStatus, [0, 0], 0)
    # estimate the perspective transformation matrix
    (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, 3, 0.9)
    trueCount = 0
    for i in range(0, len(status)):
        if status[i]:
            trueCount = trueCount + 1
    if trueCount >= offsetEvaluate:
        totalStatus = True
        # zero the translation and perspective terms, keeping only
        # the rotation/scale part
        adjustH = H.copy()
        adjustH[0, 2] = 0
        adjustH[1, 2] = 0
        adjustH[2, 0] = 0
        adjustH[2, 1] = 0
        return (totalStatus,
                [np.round(np.array(H).astype(int)[1, 2]) * (-1),
                 np.round(np.array(H).astype(int)[0, 2]) * (-1)],
                adjustH)
    else:
        return (totalStatus, [0, 0], 0)
Example #28
Source File: tools.py From BlindWatermark with GNU General Public License v3.0

def run(self):
    img = cv2.imread(self.ori_img)
    img2 = cv2.imread(self.attacked_img)
    height = img.shape[0]
    width = img.shape[1]
    # Initiate ORB detector
    orb = cv2.ORB_create(128)
    MIN_MATCH_COUNT = 10
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    # the FLANN KD-tree index needs float descriptors
    des1 = np.float32(des1)
    des2 = np.float32(des2)
    matches = flann.knnMatch(des1, des2, k=2)
    # store all the good matches as per Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < self.rate * n.distance:
            good.append(m)
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
        # dsize is (width, height): columns first, then rows
        out = cv2.warpPerspective(img2, M, (width, height))
        cv2.imwrite(self.outfile_name, out)
        self.num_of_good.emit(len(good), self.outfile_name)
    else:
        self.num_of_good.emit(0, '')
Example #29
Source File: tools.py From BlindWatermark with GNU General Public License v3.0

def recovery(ori_img, attacked_img, outfile_name='./recoveried.png', rate=0.7):
    img = cv2.imread(ori_img)
    img2 = cv2.imread(attacked_img)
    height = img.shape[0]
    width = img.shape[1]
    # Initiate ORB detector
    orb = cv2.ORB_create(128)
    MIN_MATCH_COUNT = 10
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    # the FLANN KD-tree index needs float descriptors
    des1 = np.float32(des1)
    des2 = np.float32(des2)
    matches = flann.knnMatch(des1, des2, k=2)
    # store all the good matches as per Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < rate * n.distance:
            good.append(m)
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
        # dsize is (width, height): columns first, then rows
        out = cv2.warpPerspective(img2, M, (width, height))
        cv2.imwrite(outfile_name, out)
Example #30
Source File: sift.py From SoTu with MIT License

def filter(self, pt_qt):
    if len(pt_qt) > 0:
        pt_q, pt_t = zip(*pt_qt)
        # estimate the transformation matrix between the matched
        # coordinates and get the inlier mask
        M, mask = cv2.findHomography(np.float32(pt_q).reshape(-1, 1, 2),
                                     np.float32(pt_t).reshape(-1, 1, 2),
                                     cv2.RANSAC, 3)
        if mask is None:
            # estimation failed (e.g. a degenerate point set)
            return []
        return mask.ravel().tolist()
    else:
        return []