Python cv2.FlannBasedMatcher() Examples

The following are 30 code examples of cv2.FlannBasedMatcher(), collected from open-source projects. The header above each example names its source file, project, and license.
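Before the individual examples, here is a minimal sketch of the typical usage pattern (illustrative only, not taken from any project below; img1 and img2 are assumed grayscale inputs): cv2.FlannBasedMatcher takes an index-parameter dict and a search-parameter dict, and the knnMatch results are usually filtered with Lowe's ratio test.

import cv2

FLANN_INDEX_KDTREE = 1  # FLANN's KD-tree index (0 is the linear index)
sift = cv2.SIFT_create()  # OpenCV >= 4.4; older builds use cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
flann = cv2.FlannBasedMatcher(dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
                              dict(checks=50))
matches = flann.knnMatch(des1, des2, k=2)
good = [m for m, n in matches if m.distance < 0.7 * n.distance]  # Lowe's ratio test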
Example #1
Source File: find_obj.py    From OpenCV-Python-Tutorial with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
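A hedged usage sketch for init_feature (the FLANN_INDEX_* constants live at module level in the original find_obj.py; the values below are the standard FLANN ones and are assumed here, as are the input images img1 and img2):

FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6

detector, matcher = init_feature('orb-flann')  # ORB keypoints + LSH-indexed FLANN matcher
kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)
raw_matches = matcher.knnMatch(desc1, desc2, k=2)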
Example #2
Source File: keypoint_matching_contrib.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        # BRIEF is a feature descriptor; CenSurE is recommended as a fast detector:
        if check_cv_version_is_new():
            # OpenCV 3/4: SURF lives in the contrib module and must be built separately.
            try:
                self.detector = cv2.xfeatures2d.SURF_create(self.HESSIAN_THRESHOLD, upright=self.UPRIGHT)
            except Exception:
                import traceback
                traceback.print_exc()
                raise NoModuleError("There is no %s module in your OpenCV environment, the contrib module is required!" % self.METHOD_NAME)
        else:
            # OpenCV2.x
            self.detector = cv2.SURF(self.HESSIAN_THRESHOLD, upright=self.UPRIGHT)

        # create FlannBasedMatcher object:
        self.matcher = cv2.FlannBasedMatcher({'algorithm': self.FLANN_INDEX_KDTREE, 'trees': 5}, dict(checks=50)) 
Example #3
Source File: keypoint_matching_contrib.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        # BRIEF is a feature descriptor; CenSurE is recommended as a fast detector:
        if check_cv_version_is_new():
            # OpenCV 3/4: SIFT lives in the contrib module and must be built separately.
            try:
                self.detector = cv2.xfeatures2d.SIFT_create(edgeThreshold=10)
            except Exception:
                import traceback
                traceback.print_exc()
                raise NoModuleError("There is no %s module in your OpenCV environment, the contrib module is required!" % self.METHOD_NAME)
        else:
            # OpenCV2.x
            self.detector = cv2.SIFT(edgeThreshold=10)

        # create FlannBasedMatcher object:
        self.matcher = cv2.FlannBasedMatcher({'algorithm': self.FLANN_INDEX_KDTREE, 'trees': 5}, dict(checks=50)) 
Example #4
Source File: image-matching.py    From image-matching with MIT License
def knn_match(des1, des2, nn_ratio=0.7):
  
  # FLANN parameters
  index_params = dict(algorithm = 0, trees = 5)  # note: algorithm 0 is FLANN's linear index; the KD-tree index is 1
  search_params = dict(checks = 50)

  flann = cv2.FlannBasedMatcher(index_params, search_params)
  
  # Match features from each image
  matches = flann.knnMatch(des1, des2, k=2)

  # store only the good matches as per Lowe's ratio test.
  good = []
  for m, n in matches:
    if m.distance < nn_ratio * n.distance:
      good.append(m)

  return good

Example #5
Source File: find_obj.py    From PyCV-time with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #6
Source File: find_obj.py    From ImageAnalysis with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(400)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #7
Source File: feature_matcher.py    From pyslam with GNU General Public License v3.0
def __init__(self, norm_type=cv2.NORM_HAMMING, cross_check = False, ratio_test=kRatioTest, type = FeatureMatcherTypes.FLANN):
        super().__init__(norm_type=norm_type, cross_check=cross_check, ratio_test=ratio_test, type=type)
        if norm_type == cv2.NORM_HAMMING:
            # FLANN parameters for binary descriptors 
            FLANN_INDEX_LSH = 6
            self.index_params= dict(algorithm = FLANN_INDEX_LSH,   # Multi-Probe LSH: Efficient Indexing for High-Dimensional Similarity Search
                        table_number = 6,      # 12
                        key_size = 12,         # 20
                        multi_probe_level = 1) # 2            
        if norm_type == cv2.NORM_L2: 
            # FLANN parameters for float descriptors 
            FLANN_INDEX_KDTREE = 1
            self.index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 4)  
        self.search_params = dict(checks=32)   # or pass empty dictionary                 
        self.matcher = cv2.FlannBasedMatcher(self.index_params, self.search_params)  
        self.matcher_name = 'FlannFeatureMatcher' 
Example #8
Source File: pose_flow.py    From detectron2-pipeline with MIT License
def flann_matching(orb_match1, orb_match2):
    kp1, des1 = orb_match1
    kp2, des2 = orb_match2

    # FLANN parameters
    index_params = dict(algorithm=6,  # FLANN_INDEX_LSH
                        table_number=12,
                        key_size=12,
                        multi_probe_level=2)
    search_params = dict(checks=100)  # or pass empty dictionary
    flann_matcher = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann_matcher.knnMatch(des1, des2, k=2)

    cor = []
    # ratio test as per Lowe's paper
    for m_n in matches:
        if len(m_n) != 2:
            continue
        elif m_n[0].distance < 0.80 * m_n[1].distance:
            cor.append([kp1[m_n[0].queryIdx].pt[0], kp1[m_n[0].queryIdx].pt[1],
                        kp2[m_n[0].trainIdx].pt[0], kp2[m_n[0].trainIdx].pt[1],
                        m_n[0].distance])

    return np.array(cor) 
Example #9
Source File: image_SIFT.py    From airtest with BSD 3-Clause "New" or "Revised" License
def _searchAndmatch(image_1_descriptors, image_2_descriptors, threshold=0.7,
                    image_2_keypoint=None):
    """KNN Match"""
    Good_match_keypoints, kp2_xy = [], []
    FLANN_INDEX_KDTREE = 0  # note: FLANN's KD-tree index is actually 1; 0 selects the linear index
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(image_1_descriptors, image_2_descriptors, k=2)
    """Lower's threshold"""
    for m,n in matches:
        if image_2_keypoint: kp2_xy.append(image_2_keypoint[m.trainIdx].pt)
        if m.distance < threshold*n.distance: Good_match_keypoints.append(m)
    return Good_match_keypoints, kp2_xy

Example #10
Source File: find_obj.py    From airtest with BSD 3-Clause "New" or "Revised" License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #11
Source File: findobj.py    From airtest with BSD 3-Clause "New" or "Revised" License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #12
Source File: statistics.py    From dual-fisheye-video-stitching with MIT License
def FlannMatch_SIFT(img1, img2):
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0  # note: FLANN's KD-tree index is actually 1; 0 selects the linear index
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)   # or pass empty dictionary

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]

    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:
            matchesMask[i] = [1, 0]

    return (kp1, kp2, matches, matchesMask) 
Example #13
Source File: plane_tracker.py    From OpenCV-Python-Tutorial with MIT License
def __init__(self):
        self.detector = cv2.ORB_create( nfeatures = 1000 )
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
        self.targets = []
        self.frame_points = [] 
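flann_params is not shown in this excerpt; in the original plane_tracker.py sample it is defined at module level. A sketch of the assumed definition (LSH index parameters, matching ORB's binary descriptors):

FLANN_INDEX_LSH = 6
flann_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6,
                    key_size=12,
                    multi_probe_level=1)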
Example #14
Source File: find_obj.py    From PyCV-time with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #15
Source File: plane_tracker.py    From PyCV-time with MIT License
def __init__(self):
        self.detector = cv2.ORB( nfeatures = 1000 )
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
        self.targets = [] 
Example #16
Source File: plane_tracker.py    From PyCV-time with MIT License
def __init__(self):
        self.detector = cv2.ORB( nfeatures = 1000 )
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
        self.targets = [] 
Example #17
Source File: stitch.py    From Image-stitcher with MIT License
def __init__(self, image1: np.ndarray, image2: np.ndarray, method: Enum=Method.SIFT, threshold=800) -> None:
        """输入两幅图像,计算其特征值
        此类用于输入两幅图像,计算其特征值,输入两幅图像分别为numpy数组格式的图像,其中的method参数要求输入SURF、SIFT或者ORB,threshold参数为特征值检测所需的阈值。

        Args:
            image1 (np.ndarray): 图像一
            image2 (np.ndarray): 图像二
            method (Enum, optional): Defaults to Method.SIFT. 特征值检测方法
            threshold (int, optional): Defaults to 800. 特征值阈值

        """

        self.image1 = image1
        self.image2 = image2
        self.method = method
        self.threshold = threshold

        self._keypoints1: List[cv2.KeyPoint] = None
        self._descriptors1: np.ndarray = None
        self._keypoints2: List[cv2.KeyPoint] = None
        self._descriptors2: np.ndarray = None

        if self.method == Method.ORB:
            # ORB's binary descriptors need a Hamming-norm matcher; matching errors out without this
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        else:
            # self.matcher = cv2.BFMatcher(crossCheck=True)
            self.matcher = cv2.FlannBasedMatcher()

        self.match_points = []

        self.image_points1 = np.array([])
        self.image_points2 = np.array([]) 
Example #18
Source File: sift.py    From SoTu with MIT License
def match(self, des_q, des_t):
        ratio = 0.7  # per Lowe's ratio test
        flann = cv2.FlannBasedMatcher()
        # for each descriptor in des_q, find the two best matches in des_t
        two_nn = flann.knnMatch(des_q, des_t, k=2)
        # keep only matches clearly better than their second-best alternative,
        # and collect the corresponding index pairs
        matches = [(first.queryIdx, first.trainIdx) for first, second in two_nn
                   if first.distance < ratio * second.distance]
        return matches 
Example #19
Source File: tools.py    From BlindWatermark with GNU General Public License v3.0
def recovery(ori_img,attacked_img,outfile_name = './recoveried.png',rate=0.7):
    img = cv2.imread(ori_img)
    img2 = cv2.imread(attacked_img)

    height = img.shape[0]
    width  = img.shape[1]
    # Initiate ORB detector
    orb = cv2.ORB_create(128)
    MIN_MATCH_COUNT=10
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img,None)
    kp2, des2 = orb.detectAndCompute(img2,None)

    FLANN_INDEX_KDTREE = 0  # note: FLANN's KD-tree index is actually 1; 0 selects the linear index
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # the KD-tree index works on float descriptors, so cast ORB's binary descriptors
    des1 = np.float32(des1)
    des2 = np.float32(des2)

    matches = flann.knnMatch(des1,des2,k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m,n in matches:
        if m.distance < rate*n.distance:
            good.append(m)

    if len(good)>MIN_MATCH_COUNT:
        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
        M, mask = cv2.findHomography( dst_pts,src_pts, cv2.RANSAC,5.0)
        out = cv2.warpPerspective(img2, M, (width,height))  # dsize is (width, height): columns first, then rows
        cv2.imwrite(outfile_name,out) 
Example #20
Source File: tools.py    From BlindWatermark with GNU General Public License v3.0
def run(self):
        img = cv2.imread(self.ori_img)
        img2 = cv2.imread(self.attacked_img)

        height = img.shape[0]
        width  = img.shape[1]
        # Initiate ORB detector
        orb = cv2.ORB_create(128)
        MIN_MATCH_COUNT=10
        # find the keypoints and descriptors with ORB
        kp1, des1 = orb.detectAndCompute(img,None)
        kp2, des2 = orb.detectAndCompute(img2,None)

        FLANN_INDEX_KDTREE = 0  # note: FLANN's KD-tree index is actually 1; 0 selects the linear index
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks = 50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        # the KD-tree index works on float descriptors, so cast ORB's binary descriptors
        des1 = np.float32(des1)
        des2 = np.float32(des2)

        matches = flann.knnMatch(des1,des2,k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m,n in matches:
            if m.distance < self.rate*n.distance:
                good.append(m)

        if len(good)>MIN_MATCH_COUNT:
            src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
            M, mask = cv2.findHomography( dst_pts,src_pts, cv2.RANSAC,5.0)
            out = cv2.warpPerspective(img2, M, (width,height))  # dsize is (width, height): columns first, then rows
            cv2.imwrite(self.outfile_name,out)
            self.num_of_good.emit(len(good),self.outfile_name)
        else :
            self.num_of_good.emit(0,'') 
Example #21
Source File: image_proc.py    From onmyoji_bot with GNU General Public License v3.0
def match_img_knn(queryImage, trainingImage, thread=0):
    sift = cv2.xfeatures2d.SIFT_create()  # create the SIFT detector
    kp1, des1 = sift.detectAndCompute(queryImage, None)
    kp2, des2 = sift.detectAndCompute(trainingImage, None)
    #print(len(kp1))
    # set the FLANN parameters
    FLANN_INDEX_KDTREE = 1
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    searchParams = dict(checks=50)
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []

    # initialize the match mask
    matchesMask = [[0, 0] for i in range(len(matches))]
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7*n.distance:  # discard matches that fail the 0.7 ratio test
            matchesMask[i] = [1, 0]
            good.append(m)

    s = sorted(good, key=lambda x: x.distance)
    '''
    drawParams = dict(matchColor=(0, 0, 255), singlePointColor=(255, 0, 0), matchesMask=matchesMask, flags=0)  # colors for keypoints and match lines
    resultimage = cv2.drawMatchesKnn(queryImage, kp1, trainingImage, kp2, matches, None, **drawParams)  # draw the match result
    cv2.imshow('res', resultimage)
    cv2.waitKey(0)
    '''
    #print(len(good))
    if len(good) > thread:
        maxLoc = kp2[s[0].trainIdx].pt
        #print(maxLoc)
        return (int(maxLoc[0]), int(maxLoc[1]))
    else:
        return (0, 0) 
Example #22
Source File: TemplateMatcher.py    From DoNotSnap with GNU General Public License v3.0
def __init__(self, templates, ratio=0.75):
        self.templates = templates
        self.ratio = ratio

        flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})
        self.pool = ThreadPool(processes=cv2.getNumberOfCPUs()) 
Example #23
Source File: pose_estimation.py    From OpenCV-3-x-with-Python-By-Example with MIT License
def __init__(self): 
        # Use locality sensitive hashing algorithm 
        flann_params = dict(algorithm = 6, table_number = 6, key_size = 12, multi_probe_level = 1) 
 
        self.min_matches = 10 
        self.cur_target = namedtuple('Current', 'image, rect, keypoints, descriptors, data')
        self.tracked_target = namedtuple('Tracked', 'target, points_prev, points_cur, H, quad') 
 
        self.feature_detector = cv2.ORB_create()
        self.feature_detector.setMaxFeatures(1000)
        self.feature_matcher = cv2.FlannBasedMatcher(flann_params, {}) 
        self.tracking_targets = [] 
 
Example #24
Source File: stitcher.py    From dual-fisheye-video-stitching with MIT License
def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
                       ratio, reprojThresh):
        # FLANN parameters
        FLANN_INDEX_KDTREE = 0  # note: FLANN's KD-tree index is actually 1; 0 selects the linear index
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)   # or pass empty dictionary

        # compute the raw matches
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        rawMatches = flann.knnMatch(featuresA, featuresB, k=2)

        # perform Lowe's ratio test to get actual matches
        matches = []
        for m, n in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if m.distance < ratio * n.distance:
                # here queryIdx corresponds to kpsA
                # trainIdx corresponds to kpsB
                matches.append((m.trainIdx, m.queryIdx))

        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # compute the homography between the two sets of points
            (H, status) = cv2.findHomography(
                ptsB, ptsA, cv2.RANSAC, reprojThresh)

            # return the matches along with the homography matrix
            # and the status of each matched point
            return (matches, H, status)
        else:
            # otherwise, no homography could be computed
            return None 
Example #25
Source File: feature_match.py    From dual-fisheye-video-stitching with MIT License
def FlannMatch_SIFT(img1, img2):
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0  # note: FLANN's KD-tree index is actually 1; 0 selects the linear index
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)   # or pass empty dictionary

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]

    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:
            matchesMask[i] = [1, 0]

    return (kp1, kp2, matches, matchesMask) 
Example #26
Source File: sift.py    From airtest with BSD 3-Clause "New" or "Revised" License
def find(search_file, image_file, threshold=None):
    '''
    The threshold parameter is ignored in SIFT matching.
    '''
    sch = _cv2open(search_file, 0)
    img = _cv2open(image_file, 0)

    kp_sch, des_sch = sift.detectAndCompute(sch, None)
    kp_img, des_img = sift.detectAndCompute(img, None)

    if len(kp_sch) < MIN_MATCH_COUNT or len(kp_img) < MIN_MATCH_COUNT:
        return None

    FLANN_INDEX_KDTREE = 0  # note: FLANN's KD-tree index is actually 1; 0 selects the linear index
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des_sch, des_img, k=2)

    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)

    if len(good) > MIN_MATCH_COUNT:
        sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        img_pts = np.float32([kp_img[m.trainIdx].pt for m in good]).reshape(-1, 1, 2) 

        M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
        # matchesMask = mask.ravel().tolist()

        h, w = sch.shape
        pts = np.float32([ [0, 0], [0, h-1], [w-1, h-1], [w-1, 0] ]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        lt, br = dst[0][0], dst[2][0]
        return [int(v) for v in (lt[0] + w/2, lt[1] + h/2)]  # center of the matched region
    else:
        return None 
Example #27
Source File: TemplateMatcher.py    From DoNotSnap with GNU General Public License v3.0
def __setstate__(self, state):
        self.templates = state['templates']
        self.ratio = state['ratio']

        flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})
        self.pool = ThreadPool(processes=1)  # cv2.getNumberOfCPUs()) 
Example #28
Source File: pose_estimator.py    From self-supervised-depth-completion with MIT License
def feature_match(img1, img2):
    r''' Find features on both images and match them pairwise
   '''
    max_n_features = 1000
    # max_n_features = 500
    use_flann = False  # better not use flann

    detector = cv2.xfeatures2d.SIFT_create(max_n_features)

    # find the keypoints and descriptors with SIFT
    kp1, des1 = detector.detectAndCompute(img1, None)
    kp2, des2 = detector.detectAndCompute(img2, None)
    if (des1 is None) or (des2 is None):
        return [], []
    des1 = des1.astype(np.float32)
    des2 = des2.astype(np.float32)

    if use_flann:
        # FLANN parameters
        FLANN_INDEX_KDTREE = 0  # note: FLANN's KD-tree index is actually 1; 0 selects the linear index
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
    else:
        matcher = cv2.DescriptorMatcher_create('BruteForce')
        matches = matcher.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []
    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    return pts1, pts2 
Example #29
Source File: sift.py    From airtest with BSD 3-Clause "New" or "Revised" License
def findall(search_file, image_file, threshold=None, maxcnt=0):
    sch = _cv2open(search_file, 0)
    img = _cv2open(image_file, 0)

    kp_sch, des_sch = sift.detectAndCompute(sch, None)
    kp_img, des_img = sift.detectAndCompute(img, None)

    if len(kp_sch) < MIN_MATCH_COUNT or len(kp_img) < MIN_MATCH_COUNT:
        return None

    FLANN_INDEX_KDTREE = 0  # note: FLANN's KD-tree index is actually 1; 0 selects the linear index
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    points = []
    while True:
        matches = flann.knnMatch(des_sch, des_img, k=2)
        good = []
        for m,n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)
        if len(good) < MIN_MATCH_COUNT:
            break

        if maxcnt and len(points) > maxcnt:
            break

        # print good
        sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        img_pts = np.float32([kp_img[m.trainIdx].pt for m in good]).reshape(-1, 1, 2) 

        M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)

        h, w = sch.shape
        pts = np.float32([ [0, 0], [0, h-1], [w-1, h-1], [w-1, 0] ]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        lt, br = dst[0][0], dst[2][0]
        pt = [int(v) for v in (lt[0] + w/2, lt[1] + h/2)]  # center of the matched region

        qindexes = []
        tindexes = []
        for m in good:
            qindexes.append(m.queryIdx)
            tindexes.append(m.trainIdx)
        def filter_index(indexes, arr):
            # keep only the entries whose index is not in `indexes`
            r = np.ndarray(0, np.float32)
            for i, item in enumerate(arr):
                if i not in indexes:
                    r = np.append(r, item)
            return r
        # print type(des_sch[0][0])
        kp_sch = filter_index(qindexes, kp_sch)
        des_sch = filter_index(qindexes, des_sch)
        kp_img = filter_index(tindexes, kp_img)
        des_img = filter_index(tindexes, des_img)
        points.append(pt)

    return points 
Example #30
Source File: feature.py    From findit with MIT License
def get_feature_point_list(
        self, template_pic_object: np.ndarray, target_pic_object: np.ndarray
    ) -> typing.Sequence[Point]:
        """
        compare via feature matching

        :param template_pic_object:
        :param target_pic_object:
        :return:
        """
        # IMPORTANT
        # SIFT and SURF (xfeatures2d) could not be used with Python >= 3.8 builds,
        # so this was switched to the ORB detector,
        # which may be less precise

        # Initiate ORB detector
        orb = cv2.ORB_create()

        # find the keypoints and descriptors with ORB
        template_kp, template_desc = orb.detectAndCompute(template_pic_object, None)
        target_kp, target_desc = orb.detectAndCompute(target_pic_object, None)

        # key points count
        logger.debug(f"template key point count: {len(template_kp)}")
        logger.debug(f"target key point count: {len(target_kp)}")

        # find the 2 closest points for each descriptor
        # matching across frames means finding, for each descriptor in the query
        # set, its nearest neighbors in the train set; here we take the nearest
        # and second-nearest neighbor of every descriptor.
        # a correct match is much closer to its first neighbor; for an incorrect
        # match the two distances are similar, so the distance ratio scores match quality.
        # more details: https://blog.csdn.net/liangjiubujiu/article/details/80418079
        # flann = cv2.FlannBasedMatcher()
        # matches = flann.knnMatch(template_desc, target_desc, k=2)

        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        # match the feature descriptors
        matches = bf.knnMatch(template_desc, target_desc, k=1)

        # matches are something like:
        # [[<DMatch 0x12400a350>, <DMatch 0x12400a430>], [<DMatch 0x124d6a170>, <DMatch 0x124d6a450>]]

        logger.debug(f"matches num: {len(matches)}")

        # TODO: sample code to visualize the feature points
        # temp = cv2.drawMatchesKnn(template_pic_object, template_kp, target_pic_object, target_kp, matches, None, flags=2)
        # cv2.imshow('feature_points', temp)
        # cv2.waitKey(0)

        good = list()
        if matches:
            good = matches[0]

        # get positions
        point_list = list()
        for each in good:
            target_idx = each.trainIdx
            each_point = Point(*target_kp[target_idx].pt)
            point_list.append(each_point)

        return point_list
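For reference, the FLANN path commented out above would need LSH index parameters, because ORB produces binary descriptors and FlannBasedMatcher's default KD-tree index expects float vectors. A hedged sketch of that alternative (template_desc and target_desc as computed in the method above):

FLANN_INDEX_LSH = 6
index_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6, key_size=12, multi_probe_level=1)
flann = cv2.FlannBasedMatcher(index_params, dict(checks=50))
matches = flann.knnMatch(template_desc, target_desc, k=2)
# with LSH some result lists may hold fewer than 2 entries, so guard the ratio test:
good = [pair[0] for pair in matches
        if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance]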