Python skimage.measure.ransac() Examples

The following are 12 code examples of skimage.measure.ransac(), drawn from open-source projects. To see each snippet in context, follow the source-file reference above the example to the original project. You may also want to check out all available functions and classes of the module skimage.measure.
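As a quick orientation before the examples, here is a minimal, self-contained sketch of the ransac() API: it takes the data, a model class, a minimum sample size, and an inlier threshold, and returns the fitted model plus a boolean inlier mask. The synthetic line data below is illustrative only.

import numpy as np
from skimage.measure import LineModelND, ransac

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 100)
y = 2 * x + 1 + rng.normal(scale=0.3, size=x.size)
y[::10] += 20  # inject gross outliers
points = np.column_stack([x, y])

# fit a 2D line robustly; `inliers` is a boolean mask over `points`
model, inliers = ransac(points, LineModelND, min_samples=2,
                        residual_threshold=1, max_trials=1000)
print(model.params, inliers.sum())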
Example #1
Source File: matching_localfeatures.py    From Landmark2019-1st-and-3rd-Place-Solution with Apache License 2.0
import numpy as np
from scipy.spatial import cKDTree
from skimage.measure import ransac as _ransac
from skimage.transform import AffineTransform


def get_inliers(loc1, desc1, loc2, desc2):
    n_feat1, n_feat2 = loc1.shape[0], loc2.shape[0]

    # match each descriptor of image 2 to its nearest neighbour in image 1
    KD_THRESH = 0.8
    d1_tree = cKDTree(desc1)
    _, indices = d1_tree.query(desc2, distance_upper_bound=KD_THRESH)

    # keep only putative matches: cKDTree.query returns index n_feat1
    # when no neighbour lies within KD_THRESH
    loc2_to_use = np.array([
        loc2[i, ] for i in range(n_feat2) if indices[i] != n_feat1])
    loc1_to_use = np.array([
        loc1[indices[i], ] for i in range(n_feat2) if indices[i] != n_feat1])

    np.random.seed(114514)

    # robustly estimate an affine transform between the putative matches
    model_robust, inliers = _ransac(
        (loc1_to_use, loc2_to_use),
        AffineTransform,
        min_samples=3,
        residual_threshold=20,
        max_trials=1000)

    return sum(inliers) 
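A hypothetical call, for illustration only: the descriptor dimensionality (40), the unit-norm step, and the synthetic translation below are all assumptions, chosen so the KD-tree query finds matches under KD_THRESH.

rng = np.random.default_rng(0)
desc1 = rng.normal(size=(200, 40))
desc1 /= np.linalg.norm(desc1, axis=1, keepdims=True)  # unit-norm descriptors (assumption)
loc1 = rng.uniform(0, 500, size=(200, 2))
# image 2 shares the first 120 features, translated by (30, -12)
desc2 = desc1[:120] + rng.normal(scale=0.01, size=(120, 40))
loc2 = loc1[:120] + np.array([30.0, -12.0])
print(get_inliers(loc1, desc1, loc2, desc2))  # should print 120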
Example #2
Source File: bm_comp_perform.py    From BIRL with BSD 3-Clause "New" or "Revised" License
import os
import time

from skimage import io
from skimage.color import rgb2gray
from skimage.feature import ORB, match_descriptors
from skimage.measure import ransac
from skimage.restoration import denoise_bilateral, denoise_wavelet
from skimage.transform import AffineTransform, warp

# NAME_IMAGE_WARPED is a module-level filename template defined elsewhere
# in this benchmark script


def register_image_pair(idx, path_img_target, path_img_source, path_out):
    """ register two images together

    :param int idx: image-pair index, used to name the exported image
        (and to run this function in parallel)
    :param str path_img_target: path to the target image
    :param str path_img_source: path to the source image
    :param str path_out: path for exporting the output
    :return tuple(str,float): path to the warped image and execution time [s]
    """
    start = time.time()
    # load and denoise reference image
    img_target = io.imread(path_img_target)
    img_target = denoise_wavelet(img_target, wavelet_levels=7, multichannel=True)
    img_target_gray = rgb2gray(img_target)

    # load and denoise moving image
    img_source = io.imread(path_img_source)
    img_source = denoise_bilateral(img_source, sigma_color=0.05,
                                   sigma_spatial=2, multichannel=True)
    img_source_gray = rgb2gray(img_source)

    # detect ORB features on both images
    detector_target = ORB(n_keypoints=150)
    detector_source = ORB(n_keypoints=150)
    detector_target.detect_and_extract(img_target_gray)
    detector_source.detect_and_extract(img_source_gray)
    matches = match_descriptors(detector_target.descriptors,
                                detector_source.descriptors)
    # robustly estimate affine transform model with RANSAC
    model, _ = ransac((detector_target.keypoints[matches[:, 0]],
                       detector_source.keypoints[matches[:, 1]]),
                      AffineTransform, min_samples=25, max_trials=500,
                      residual_threshold=0.95)

    # warp the target image with the estimated transformation
    img_warped = warp(img_target, model.inverse, output_shape=img_target.shape[:2])
    path_img_warped = os.path.join(path_out, NAME_IMAGE_WARPED % idx)
    io.imsave(path_img_warped, img_warped)
    # summarise experiment
    execution_time = time.time() - start
    return path_img_warped, execution_time 
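One design note: AffineTransform needs only three correspondences, so min_samples=25 makes each RANSAC hypothesis a least-squares fit over many keypoints, which damps keypoint noise but lowers the odds that a random sample is all-inlier. A back-of-envelope sketch of that trade-off, using the standard trials formula (w is a hypothetical inlier ratio):

import numpy as np

w = 0.7  # hypothetical inlier ratio
for s in (3, 25):
    # trials needed for 99% confidence of drawing one all-inlier sample
    n = np.log(1 - 0.99) / np.log(1 - w ** s)
    print(s, int(np.ceil(n)))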
Example #3
Source File: ransac_fitline.py    From DVCNN_Lane_Detection with Apache License 2.0
from skimage.measure import LineModelND, ransac


def ransac_linefit_sklearn(points):
    """
    Use the skimage ransac function to fit a line
    :param points: (N, 2) array of candidate points
    :return: fitted LineModelND (params: 'origin', 'direction') and inlier mask
    """
    # robustly fit the line using only inlier data with the RANSAC algorithm
    model_robust, inliers = ransac(points, LineModelND, min_samples=2,
                                   residual_threshold=1, max_trials=1000)
    return model_robust, inliers
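A usage sketch on assumed inputs: synthetic 2D points on the line y = 3x - 2 plus a few injected outliers. LineModelND exposes its fit as the ('origin', 'direction') pair mentioned above.

import numpy as np

rng = np.random.default_rng(0)
t = np.linspace(0, 5, 60)
pts = np.column_stack([t, 3 * t - 2]) + rng.normal(scale=0.1, size=(60, 2))
pts[::15] += 8  # gross outliers
model, inliers = ransac_linefit_sklearn(pts)
origin, direction = model.params
print(origin, direction, inliers.sum())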
Example #4
Source File: run_ovary_egg-segmentation.py    From pyImSegm with BSD 3-Clause "New" or "Revised" License
import logging

import numpy as np
import pandas as pd
from skimage import measure
from skimage.measure import EllipseModel

# ell_fit, SEGM_OVERLAP and COLUMNS_ELLIPSE are project helpers defined
# elsewhere in pyImSegm


def segment_fit_ellipse_ransac(seg, centers, fn_preproc_points, nb_inliers=0.6,
                               thr_overlap=SEGM_OVERLAP):
    """ segment eggs using ellipse fitting and a RANSAC strategy

    :param ndarray seg: input image / segmentation
    :param [[int, int]] centers: position of centres / seeds
    :param fn_preproc_points: function for detecting boundary points
    :param float nb_inliers: ratio of inliers for RANSAC
    :param float thr_overlap: threshold for removing overlapping segmentations
    :return (ndarray, [[int, int]], dict): resulting segmentation,
        updated centres and a dictionary of exports
    """
    points_centers = fn_preproc_points(seg, centers)

    centres_new, ell_params = [], []
    segm = np.zeros_like(seg)
    for i, points in enumerate(points_centers):
        lb = i + 1
        nb_min = int(len(points) * nb_inliers)
        ransac_model, _ = measure.ransac(points, EllipseModel,
                                         min_samples=nb_min,
                                         residual_threshold=15,
                                         max_trials=250)
        if not ransac_model:
            continue
        logging.debug('ellipse params: %r', ransac_model.params)
        segm = ell_fit.add_overlap_ellipse(segm, ransac_model.params, lb,
                                           thr_overlap)

        if np.any(segm == lb):
            centres_new.append(centers[i])
            ell_params.append(ransac_model.params)

    dict_export = {'ellipses.csv': pd.DataFrame(ell_params, columns=COLUMNS_ELLIPSE)}
    return segm, np.array(centres_new), dict_export 
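To see the ellipse-fitting core in isolation (without pyImSegm's segmentation machinery), here is a minimal sketch on synthetic ellipse points; all values are illustrative:

import numpy as np
from skimage.measure import EllipseModel, ransac

rng = np.random.default_rng(0)
theta = np.linspace(0, 2 * np.pi, 100)
xc, yc, a, b = 50.0, 40.0, 20.0, 10.0
pts = np.column_stack([xc + a * np.cos(theta), yc + b * np.sin(theta)])
pts += rng.normal(scale=0.5, size=pts.shape)

# EllipseModel needs at least 5 points per sample
model, inliers = ransac(pts, EllipseModel, min_samples=5,
                        residual_threshold=2, max_trials=100)
print(model.params)  # (xc, yc, a, b, theta)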
Example #5
Source File: track_lib.py    From TNT with GNU General Public License v3.0
import numpy as np
from skimage.color import rgb2gray
from skimage.feature import ORB, match_descriptors
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform


def estimateF(img1, img2):
    np.random.seed(0)

    img1_gray, img2_gray = map(rgb2gray, (img1, img2))

    descriptor_extractor = ORB()

    descriptor_extractor.detect_and_extract(img1_gray)
    keypoints_left = descriptor_extractor.keypoints
    descriptors_left = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(img2_gray)
    keypoints_right = descriptor_extractor.keypoints
    descriptors_right = descriptor_extractor.descriptors

    matches = match_descriptors(descriptors_left, descriptors_right,
                                cross_check=True)

    # Estimate the epipolar geometry between the left and right image.

    model, inliers = ransac((keypoints_left[matches[:, 0]],
                             keypoints_right[matches[:, 1]]),
                            FundamentalMatrixTransform, min_samples=8,
                            residual_threshold=1, max_trials=5000)

    inlier_keypoints_left = keypoints_left[matches[inliers, 0]]
    inlier_keypoints_right = keypoints_right[matches[inliers, 1]]

    print("Number of matches:", matches.shape[0])
    print("Number of inliers:", inliers.sum())

    # Visualize the results.
    '''
    fig, ax = plt.subplots(nrows=2, ncols=1)

    plt.gray()

    plot_matches(ax[0], img1, img2, keypoints_left, keypoints_right,
                 matches[inliers], only_matches=True)
    ax[0].axis("off")
    ax[0].set_title("Inlier correspondences")
    
    plt.show()
    '''
    
    return model, matches.shape[0], inliers.sum(), inlier_keypoints_left, inlier_keypoints_right 
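A hypothetical follow-up, assuming img1 and img2 are RGB frames loaded elsewhere: FundamentalMatrixTransform exposes residuals(), which returns one Sampson-style epipolar error per correspondence, so the quality of the inlier set can be inspected directly.

# hypothetical frames loaded elsewhere, e.g. via skimage.io.imread
F_model, n_matches, n_inliers, kp_left, kp_right = estimateF(img1, img2)
errors = F_model.residuals(kp_left, kp_right)  # Sampson distance per inlier pair
print(errors.mean(), errors.max())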
Example #6
Source File: frame.py    From twitchslam with MIT License
import cv2
import numpy as np
from skimage.measure import ransac
from skimage.transform import EssentialMatrixTransform

# fundamentalToRt, RANSAC_RESIDUAL_THRES and RANSAC_MAX_TRIALS are project
# helpers defined elsewhere in twitchslam


def match_frames(f1, f2):
  bf = cv2.BFMatcher(cv2.NORM_HAMMING)
  matches = bf.knnMatch(f1.des, f2.des, k=2)

  # Lowe's ratio test
  ret = []
  idx1, idx2 = [], []
  idx1s, idx2s = set(), set()

  for m,n in matches:
    if m.distance < 0.75*n.distance:
      p1 = f1.kps[m.queryIdx]
      p2 = f2.kps[m.trainIdx]

      # be within orb distance 32
      if m.distance < 32:
        # keep around indices
        # TODO: refactor this to not be O(N^2)
        if m.queryIdx not in idx1s and m.trainIdx not in idx2s:
          idx1.append(m.queryIdx)
          idx2.append(m.trainIdx)
          idx1s.add(m.queryIdx)
          idx2s.add(m.trainIdx)
          ret.append((p1, p2))

  # no duplicates
  assert(len(set(idx1)) == len(idx1))
  assert(len(set(idx2)) == len(idx2))

  assert len(ret) >= 8
  ret = np.array(ret)
  idx1 = np.array(idx1)
  idx2 = np.array(idx2)

  # fit matrix
  model, inliers = ransac((ret[:, 0], ret[:, 1]),
                          EssentialMatrixTransform,
                          min_samples=8,
                          residual_threshold=RANSAC_RESIDUAL_THRES,
                          max_trials=RANSAC_MAX_TRIALS)
  print("Matches:  %d -> %d -> %d -> %d" % (len(f1.des), len(matches), len(inliers), sum(inliers)))
  return idx1[inliers], idx2[inliers], fundamentalToRt(model.params) 
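One caveat worth noting: EssentialMatrixTransform assumes calibrated (normalized) image coordinates, and in twitchslam the f.kps arriving here have already been normalized by the camera intrinsics. Starting from raw pixel keypoints, a pre-normalization step along these lines would be needed; K here is a hypothetical 3x3 intrinsics matrix.

import numpy as np

def normalize_kps(kps_px, K):
  # map pixel coordinates to normalized camera coordinates via inv(K)
  Kinv = np.linalg.inv(K)
  pts_h = np.column_stack([kps_px, np.ones(len(kps_px))])
  return (Kinv @ pts_h.T).T[:, :2]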
Example #7
Source File: match_images.py    From yolo_v2 with Apache License 2.0
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from scipy.spatial import cKDTree
from skimage.feature import plot_matches
from skimage.measure import ransac
from skimage.transform import AffineTransform

from delf import feature_io

# cmd_args and _DISTANCE_THRESHOLD are module-level settings defined
# elsewhere in this script


def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  tf.logging.info("Loaded image 1's %d features" % num_features_1)
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  tf.logging.info("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = cKDTree(descriptors_1)
  distances, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,] for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],] for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  model_robust, inliers = ransac(
      (locations_1_to_use, locations_2_to_use),
      AffineTransform,
      min_samples=3,
      residual_threshold=20,
      max_trials=1000)

  tf.logging.info('Found %d inliers' % sum(inliers))

  # Visualize correspondences, and save to file.
  fig, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image) 
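A side note on the selection idiom above: cKDTree.query returns the index n (the number of tree points, here num_features_1) whenever no neighbor lies within distance_upper_bound, which is exactly what the != num_features_1 test filters on. The list comprehensions could equally be written vectorized; a sketch with the same semantics:

mask = indices != num_features_1
locations_2_to_use = locations_2[mask]
locations_1_to_use = locations_1[indices[mask]]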
Example #8
Source File: match_images.py    From Gun-Detector with Apache License 2.0
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  tf.logging.info("Loaded image 1's %d features" % num_features_1)
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  tf.logging.info("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = cKDTree(descriptors_1)
  _, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  _, inliers = ransac(
      (locations_1_to_use, locations_2_to_use),
      AffineTransform,
      min_samples=3,
      residual_threshold=20,
      max_trials=1000)

  tf.logging.info('Found %d inliers' % sum(inliers))

  # Visualize correspondences, and save to file.
  _, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image) 
Example #9
Source File: match_images.py    From object_detection_with_tensorflow with MIT License
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  tf.logging.info("Loaded image 1's %d features" % num_features_1)
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  tf.logging.info("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = cKDTree(descriptors_1)
  distances, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,] for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],] for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  model_robust, inliers = ransac(
      (locations_1_to_use, locations_2_to_use),
      AffineTransform,
      min_samples=3,
      residual_threshold=20,
      max_trials=1000)

  tf.logging.info('Found %d inliers' % sum(inliers))

  # Visualize correspondences, and save to file.
  fig, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image) 
Example #10
Source File: match_images.py    From g-tensorflow-models with Apache License 2.0
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  tf.logging.info("Loaded image 1's %d features" % num_features_1)
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  tf.logging.info("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = cKDTree(descriptors_1)
  _, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  _, inliers = ransac(
      (locations_1_to_use, locations_2_to_use),
      AffineTransform,
      min_samples=3,
      residual_threshold=20,
      max_trials=1000)

  tf.logging.info('Found %d inliers' % sum(inliers))

  # Visualize correspondences, and save to file.
  _, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image) 
Example #11
Source File: match_images.py    From models with Apache License 2.0
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from scipy import spatial
from skimage import feature, measure, transform

from delf import feature_io

# cmd_args and _DISTANCE_THRESHOLD are module-level settings defined
# elsewhere in this script


def main(unused_argv):
  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  print(f"Loaded image 1's {num_features_1} features")
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  print(f"Loaded image 2's {num_features_2} features")

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = spatial.cKDTree(descriptors_1)
  _, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  _, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
                              transform.AffineTransform,
                              min_samples=3,
                              residual_threshold=20,
                              max_trials=1000)

  print(f'Found {sum(inliers)} inliers')

  # Visualize correspondences, and save to file.
  _, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  feature.plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image) 
Example #12
Source File: match_images.py    From multilabel-image-classification-tensorflow with MIT License
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  tf.logging.info("Loaded image 1's %d features" % num_features_1)
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  tf.logging.info("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = cKDTree(descriptors_1)
  _, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  _, inliers = ransac(
      (locations_1_to_use, locations_2_to_use),
      AffineTransform,
      min_samples=3,
      residual_threshold=20,
      max_trials=1000)

  tf.logging.info('Found %d inliers' % sum(inliers))

  # Visualize correspondences, and save to file.
  _, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image)