Python cv2.KeyPoint() Examples
The following are 30 code examples of cv2.KeyPoint(). You can go to the original project or source file by following the links above each example, or browse all available functions and classes of the cv2 module.
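As a quick orientation before the examples, here is a minimal sketch of constructing a cv2.KeyPoint by hand and reading its fields. One caveat worth knowing: several examples below pass legacy keyword arguments such as _size and _angle, which recent OpenCV builds (roughly 4.5.3 and later) no longer accept; passing the values positionally, as below, works across versions.

import cv2
import numpy as np

# x, y, size (diameter of the keypoint's meaningful neighborhood)
kp = cv2.KeyPoint(10.0, 20.0, 5.0)
print(kp.pt, kp.size, kp.angle, kp.response, kp.octave, kp.class_id)

# The recurring pattern in the examples below: turn an (N, 2) array of
# (x, y) coordinates into a list of cv2.KeyPoint for a descriptor extractor.
pts = np.array([[12.5, 40.0], [64.0, 64.0]], dtype=np.float32)
kps = [cv2.KeyPoint(float(x), float(y), 1.0) for x, y in pts]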
Example #1
Source File: mainPanoCompletion2view.py From RelativePose with BSD 3-Clause "New" or "Revised" License
def evalSiftDescriptor(self, rgb, denseCorres):
    ratios = []
    n = rgb.shape[0]
    Kn = denseCorres['idxSrc'].shape[1]
    for jj in range(n):
        if denseCorres['valid'][jj].item() == 0:
            continue
        idx = np.random.choice(range(Kn), 100)
        rs = (torch_op.npy(rgb[jj, 0, :, :, :]).transpose(1, 2, 0) * 255).astype('uint8')
        grays = cv2.cvtColor(rs, cv2.COLOR_BGR2GRAY)
        rt = (torch_op.npy(rgb[jj, 1, :, :, :]).transpose(1, 2, 0) * 255).astype('uint8')
        grayt = cv2.cvtColor(rt, cv2.COLOR_BGR2GRAY)
        step_size = 5
        tp = torch_op.npy(denseCorres['idxSrc'][jj, idx, :])
        kp = [cv2.KeyPoint(coord[0], coord[1], step_size) for coord in tp]
        _, sifts = self.sift.compute(grays, kp)
        tp = torch_op.npy(denseCorres['idxTgt'][jj, idx, :])
        kp = [cv2.KeyPoint(coord[0], coord[1], step_size) for coord in tp]
        _, siftt = self.sift.compute(grayt, kp)
        dist = np.power(sifts - siftt, 2).sum(1)
        kp = [cv2.KeyPoint(x, y, step_size)
              for y in range(0, rgb.shape[3], step_size)
              for x in range(0, rgb.shape[4], step_size)]
        _, dense_feat = self.sift.compute(grayt, kp)
        distRest = np.power(np.expand_dims(sifts, 1) - np.expand_dims(dense_feat, 0), 2).sum(2)
        ratio = (distRest < dist[:, np.newaxis]).sum(1) / distRest.shape[1]
        ratios.append(ratio.mean())
    return ratios
Example #2
Source File: pickle_import_export.py From douglas-quaid with GNU General Public License v3.0
def patch_Keypoint_pickiling():
    # Create the bundling between class and arguments to save for the KeyPoint class
    # See: https://stackoverflow.com/questions/50337569/pickle-exception-for-cv2-boost-when-using-multiprocessing/50394788#50394788
    def _pickle_keypoint(keypoint):  # : cv2.KeyPoint
        return cv2.KeyPoint, (
            keypoint.pt[0],
            keypoint.pt[1],
            keypoint.size,
            keypoint.angle,
            keypoint.response,
            keypoint.octave,
            keypoint.class_id,
        )
    # C++   : KeyPoint(float x, float y, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1)
    # Python: cv2.KeyPoint([x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]]) → <KeyPoint object>
    # Apply the bundling to pickle
    copyreg.pickle(cv2.KeyPoint().__class__, _pickle_keypoint)  # non-static, to be sure we patched it before use, only once
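A minimal round-trip using the patch above (only the function defined in this example is assumed):

import pickle
import cv2

patch_Keypoint_pickiling()  # register the reducer once, before any pickling

kp = cv2.KeyPoint(10.0, 20.0, 5.0)
restored = pickle.loads(pickle.dumps(kp))  # would raise TypeError without the patch
assert restored.pt == kp.pt and restored.size == kp.size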
Example #3
Source File: feature.py From stereo_ptam with GNU General Public License v3.0
def __init__(self, image, params):
    # TODO: pyramid representation
    self.image = image
    self.height, self.width = image.shape[:2]

    self.keypoints = []    # list of cv2.KeyPoint
    self.descriptors = []  # numpy.ndarray

    self.detector = params.feature_detector
    self.extractor = params.descriptor_extractor
    self.matcher = params.descriptor_matcher

    self.cell_size = params.matching_cell_size
    self.distance = params.matching_distance
    self.neighborhood = (
        params.matching_cell_size * params.matching_neighborhood)

    self._lock = Lock()
Example #4
Source File: opencvhelper.py From GIFT with Apache License 2.0
def detect(self, gray_img):
    """Detect keypoints in the gray-scale image.
    Args:
        gray_img: The input gray-scale image.
    Returns:
        npy_kpts: (n_kpts, 6) Keypoints represented as NumPy array.
        cv_kpts: A list of keypoints represented as cv2.KeyPoint.
    """
    cv_kpts = self.sift.detect(gray_img, None)
    if len(cv_kpts) == 0:
        return np.zeros([0, 6]), []
    all_octaves = [np.int8(i.octave & 0xFF) for i in cv_kpts]
    self.first_octave = int(np.min(all_octaves))
    self.max_octave = int(np.max(all_octaves))
    npy_kpts, cv_kpts = sample_by_octave(cv_kpts, self.n_sample, self.down_octave)
    return npy_kpts, cv_kpts
Example #5
Source File: auto.py From airtest with BSD 3-Clause "New" or "Revised" License
def _unpickle_keypoints(array, region_center, region_width, region_height,
                        image_width, image_height):
    keypoints, descriptors = [], []
    [center_x, center_y] = region_center
    top_left_x = int(center_x - region_width)
    top_left_y = int(center_y - region_height)
    bottom_right_x = int(center_x + region_width)
    bottom_right_y = int(center_y + region_height)
    if top_left_x < 0:
        top_left_x = 0
    if top_left_y < 0:
        top_left_y = 0
    if image_width < bottom_right_x:
        bottom_right_x = image_width - 1
    if image_height < bottom_right_y:
        bottom_right_y = image_height - 1
    for point in array:
        [x, y] = [int(point[0][0]), int(point[0][1])]
        if (x < top_left_x or y < top_left_y or
                bottom_right_x < x or bottom_right_y < y):
            temp_keypoint = cv2.KeyPoint(x=point[0][0], y=point[0][1], _size=point[1],
                                         _angle=point[2], _response=point[3],
                                         _octave=point[4], _class_id=point[5])
            temp_descriptor = point[6]
            keypoints.append(temp_keypoint)
            descriptors.append(temp_descriptor)
    return keypoints, np.array(descriptors)

# zero the pixel in the image's given region
Example #6
Source File: utils.py From posenet-python with Apache License 2.0
def draw_skel_and_kp(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_score=0.5, min_part_score=0.5):
    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_score:
            continue
        new_keypoints = get_adjacent_keypoints(
            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
        adjacent_keypoints.extend(new_keypoints)
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_score:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    out_img = cv2.drawKeypoints(
        out_img, cv_keypoints, outImage=np.array([]),
        color=(255, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False,
                            color=(255, 255, 0))
    return out_img
Example #7
Source File: utils_features.py From pyslam with GNU General Public License v3.0
def unpackSiftOctave(kpt):
    """unpackSIFTOctave(kpt) -> (octave, layer, scale)
    @brief Unpack Sift Keypoint
    @param kpt: cv2.KeyPoint (of SIFT)
    """
    _octave = kpt.octave
    octave = int(_octave) & 0xFF
    layer = (_octave >> 8) & 0xFF
    if octave >= 128:
        octave |= -128
    if octave >= 0:
        scale = float(1.0 / (1 << octave))
    else:
        scale = float(1 << (-octave))
    #print('sift octave: ', octave, ' layer: ', layer, ' scale: ', scale, 'size: ', kpt.size)
    return (octave, layer, scale)
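A short demo of the helper above on real SIFT keypoints — a sketch assuming an OpenCV build that ships SIFT (cv2.SIFT_create in 4.4+, cv2.xfeatures2d.SIFT_create in older contrib builds), with 'image.png' as a placeholder path:

import cv2

img = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)  # any grayscale image
sift = cv2.SIFT_create()
for kp in sift.detect(img, None):
    octave, layer, scale = unpackSiftOctave(kp)
    # OpenCV's SIFT uses a negative first octave (the input is doubled),
    # so octave -1 gives scale 2.0, octave 0 gives 1.0, octave 1 gives 0.5, ...
    print(octave, layer, scale, kp.size)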
Example #8
Source File: opencvhelper.py From geodesc with MIT License
def detect(self, gray_img):
    """Detect keypoints in the gray-scale image.
    Args:
        gray_img: The input gray-scale image.
    Returns:
        npy_kpts: (n_kpts, 6) Keypoints represented as NumPy array.
        cv_kpts: A list of keypoints represented as cv2.KeyPoint.
    """
    cv_kpts = self.sift.detect(gray_img, None)
    if self.ori_off:
        tmp_npy_kpts = [np.array([tmp_cv_kpt.pt[0], tmp_cv_kpt.pt[1], tmp_cv_kpt.size])
                        for i, tmp_cv_kpt in enumerate(cv_kpts)]
        tmp_npy_kpts = np.stack(tmp_npy_kpts, axis=0)
        _, unique = np.unique(tmp_npy_kpts, axis=0, return_index=True)
        cv_kpts = [cv_kpts[i] for i in unique]
    all_octaves = [np.int8(i.octave & 0xFF) for i in cv_kpts]
    self.first_octave = int(np.min(all_octaves))
    self.max_octave = int(np.max(all_octaves))
    npy_kpts, cv_kpts = sample_by_octave(cv_kpts, self.n_sample, self.down_octave)
    return npy_kpts, cv_kpts
Example #9
Source File: utils_features.py From pyslam with GNU General Public License v3.0
def convert_pts_to_keypoints(pts, size=1):
    kps = []
    if pts is not None:
        if pts.ndim > 2:
            # convert matrix [Nx1x2] of pts into list of keypoints
            kps = [cv2.KeyPoint(p[0][0], p[0][1], _size=size) for p in pts]
        else:
            # convert matrix [Nx2] of pts into list of keypoints
            kps = [cv2.KeyPoint(p[0], p[1], _size=size) for p in pts]
    return kps

# from https://stackoverflow.com/questions/48385672/opencv-python-unpack-sift-octave
# from https://gist.github.com/lxc-xx/7088609 (SIFT implementation)
# from https://stackoverflow.com/questions/17015995/opencv-sift-descriptor-keypoint-radius
# from https://github.com/vlfeat/vlfeat/blob/38a03e12daf50ee98633de06120834d0d1d87e23/vl/sift.c#L1948 (vlfeat SIFT implementation)
# see also https://www.vlfeat.org/api/sift.html (documentation of vlfeat SIFT implementation)
# N.B.: the OpenCV SIFT implementation uses a negative first octave (int firstOctave = -1) to work with a higher-resolution image (scale=2.0, double size)
Example #10
Source File: opencvhelper.py From pyslam with GNU General Public License v3.0
def detect(self, gray_img):
    """Detect keypoints in the gray-scale image.
    Args:
        gray_img: The input gray-scale image.
    Returns:
        npy_kpts: (n_kpts, 6) Keypoints represented as NumPy array.
        cv_kpts: A list of keypoints represented as cv2.KeyPoint.
    """
    cv_kpts = self.sift.detect(gray_img, None)
    all_octaves = [np.int8(i.octave & 0xFF) for i in cv_kpts]
    self.first_octave = int(np.min(all_octaves))
    self.max_octave = int(np.max(all_octaves))
    npy_kpts, cv_kpts = sample_by_octave(cv_kpts, self.n_sample, self.down_octave)
    return npy_kpts, cv_kpts
Example #11
Source File: image.py From ImageAnalysis with MIT License
def load_features(self):
    if os.path.exists(self.features_file):
        #print "Loading " + self.features_file
        try:
            fp = gzip.open(self.features_file, "rb")
            feature_list = pickle.load(fp)
            fp.close()
            self.kp_list = []
            for point in feature_list:
                kp = cv2.KeyPoint(x=point[0][0], y=point[0][1],
                                  _size=point[1], _angle=point[2],
                                  _response=point[3], _octave=point[4],
                                  _class_id=point[5])
                self.kp_list.append(kp)
            return True
        except:
            print(self.features_file + ":\n" +
                  "  feature load error: " +
                  str(sys.exc_info()[0]) + ": " + str(sys.exc_info()[1]))
    return False
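cv2.KeyPoint itself is not picklable, so a save-side counterpart has to flatten each keypoint into plain Python values first. A plausible sketch of such a helper (hypothetical, named save_features here), mirroring the (pt, size, angle, response, octave, class_id) layout the loader above indexes into:

import gzip
import pickle

def save_features(kp_list, features_file):
    # flatten each cv2.KeyPoint into a picklable tuple
    feature_list = [(kp.pt, kp.size, kp.angle, kp.response, kp.octave, kp.class_id)
                    for kp in kp_list]
    with gzip.open(features_file, "wb") as fp:
        pickle.dump(feature_list, fp)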
Example #12
Source File: feature.py From rgbd_ptam with GNU General Public License v3.0
def __init__(self, image, params):
    # TODO: pyramid representation
    self.image = image
    self.height, self.width = image.shape[:2]

    self.keypoints = []    # list of cv2.KeyPoint
    self.descriptors = []  # numpy.ndarray

    self.detector = params.feature_detector
    self.extractor = params.descriptor_extractor
    self.matcher = params.descriptor_matcher

    self.cell_size = params.matching_cell_size
    self.matching_distance = params.matching_distance
    self.neighborhood = (
        params.matching_cell_size * params.matching_neighborhood)

    self._lock = Lock()
Example #13
Source File: opencvhelper.py From pyslam with GNU General Public License v3.0
def draw_matches(self, img1, cv_kpts1, img2, cv_kpts2, good_matches, mask,
                 match_color=(0, 255, 0), pt_color=(0, 0, 255)):
    """Draw matches."""
    if type(cv_kpts1) is np.ndarray and type(cv_kpts2) is np.ndarray:
        cv_kpts1 = [cv2.KeyPoint(cv_kpts1[i][0], cv_kpts1[i][1], 1)
                    for i in range(cv_kpts1.shape[0])]
        cv_kpts2 = [cv2.KeyPoint(cv_kpts2[i][0], cv_kpts2[i][1], 1)
                    for i in range(cv_kpts2.shape[0])]
    display = cv2.drawMatches(img1, cv_kpts1, img2, cv_kpts2, good_matches,
                              None,
                              matchColor=match_color,
                              singlePointColor=pt_color,
                              matchesMask=mask.ravel().tolist(),
                              flags=4)
    return display
Example #14
Source File: opencvhelper.py From pyslam with GNU General Public License v3.0
def detect(self, gray_img):
    """Detect keypoints in the gray-scale image.
    Args:
        gray_img: The input gray-scale image.
    Returns:
        npy_kpts: (n_kpts, 6) Keypoints represented as NumPy array.
        cv_kpts: A list of keypoints represented as cv2.KeyPoint.
    """
    cv_kpts = self.sift.detect(gray_img, None)
    response = np.array([kp.response for kp in cv_kpts])
    resp_sort = np.argsort(response)[::-1][0:self.n_feature].tolist()
    cv_kpts = [cv_kpts[i] for i in resp_sort]
    if self.n_feature > 0 and len(cv_kpts) > self.n_feature:
        cv_kpts = cv_kpts[0:self.n_feature]
    if len(cv_kpts) > 0:
        all_octaves = [np.int8(i.octave & 0xFF) for i in cv_kpts]
        self.first_octave = int(np.min(all_octaves))
        self.max_octave = int(np.max(all_octaves))
        npy_kpts, cv_kpts = self.sample_by_octave(cv_kpts, self.n_sample, self.down_octave)
    else:
        npy_kpts = np.zeros((0, 0))
    return npy_kpts, cv_kpts
Example #15
Source File: feature_superpoint.py From pyslam with GNU General Public License v3.0
def __init__(self, do_cuda=True):
    self.lock = RLock()
    self.opts = SuperPointOptions(do_cuda)
    print(self.opts)
    print('SuperPointFeature2D')
    print('==> Loading pre-trained network.')
    # This class runs the SuperPoint network and processes its outputs.
    self.fe = SuperPointFrontend(weights_path=self.opts.weights_path,
                                 nms_dist=self.opts.nms_dist,
                                 conf_thresh=self.opts.conf_thresh,
                                 nn_thresh=self.opts.nn_thresh,
                                 cuda=self.opts.cuda)
    print('==> Successfully loaded pre-trained network.')
    self.pts = []
    self.kps = []
    self.des = []
    self.heatmap = []
    self.frame = None
    self.frameFloat = None
    self.keypoint_size = 20  # just a representative size for visualization and in order to convert extracted points to cv2.KeyPoint

# compute both keypoints and descriptors
Example #16
Source File: stitch.py From Image-stitcher with MIT License
def __init__(self, image1: np.ndarray, image2: np.ndarray,
             method: Enum = Method.SIFT, threshold=800) -> None:
    """Take two images and compute their feature points.

    This class takes two input images, each a numpy array, and computes their
    feature points. The method parameter must be SURF, SIFT or ORB, and
    threshold is the threshold used for feature detection.

    Args:
        image1 (np.ndarray): the first image
        image2 (np.ndarray): the second image
        method (Enum, optional): Defaults to Method.SIFT. Feature detection method
        threshold (int, optional): Defaults to 800. Feature detection threshold
    """
    self.image1 = image1
    self.image2 = image2
    self.method = method
    self.threshold = threshold

    self._keypoints1: List[cv2.KeyPoint] = None
    self._descriptors1: np.ndarray = None
    self._keypoints2: List[cv2.KeyPoint] = None
    self._descriptors2: np.ndarray = None

    if self.method == Method.ORB:
        # error if not set this
        self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    else:
        # self.matcher = cv2.BFMatcher(crossCheck=True)
        self.matcher = cv2.FlannBasedMatcher()

    self.match_points = []
    self.image_points1 = np.array([])
    self.image_points2 = np.array([])
Example #17
Source File: feature_d2net.py From pyslam with GNU General Public License v3.0
def __init__(self,
             use_relu=True,          # remove ReLU after the dense feature extraction module
             multiscale=False,       # extract multiscale features (read the note above)
             max_edge=1600,          # maximum image size at network input
             max_sum_edges=2800,     # maximum sum of image sizes at network input
             preprocessing='torch',  # image preprocessing (caffe or torch)
             do_cuda=True):
    print('Using D2NetFeature2D')
    self.lock = RLock()
    self.model_base_path = config.cfg.root_folder + '/thirdparty/d2net/'
    self.models_path = self.model_base_path + 'models/d2_ots.pth'  # best performances obtained with 'd2_ots.pth'
    self.use_relu = use_relu
    self.multiscale = multiscale
    self.max_edge = max_edge
    self.max_sum_edges = max_sum_edges
    self.preprocessing = preprocessing
    self.pts = []
    self.kps = []
    self.des = []
    self.frame = None
    self.keypoint_size = 20  # just a representative size for visualization and in order to convert extracted points to cv2.KeyPoint
    self.do_cuda = do_cuda & torch.cuda.is_available()
    print('cuda:', self.do_cuda)
    self.device = torch.device("cuda:0" if self.do_cuda else "cpu")
    torch.set_grad_enabled(False)
    print('==> Loading pre-trained network.')
    # Creating CNN model
    self.model = D2Net(
        model_file=self.models_path,
        use_relu=use_relu,
        use_cuda=do_cuda)
    if self.do_cuda:
        print('Extracting on GPU')
    else:
        print('Extracting on CPU')
    print('==> Successfully loaded pre-trained network.')
Example #18
Source File: feature_orbslam2.py From pyslam with GNU General Public License v3.0
def detect(self, img, mask=None):  # mask is fake: it is not considered by the C++ implementation
    # detect and compute
    kps_tuples = self.orb_extractor.detect(img)
    # convert keypoints
    kps = [cv2.KeyPoint(*kp) for kp in kps_tuples]
    return kps
Example #19
Source File: sift.py From SoTu with MIT License
def load(self, filename):
    with open(os.path.join(self.path, filename), 'rb') as sift_pkl:
        tmp, des = pickle.load(sift_pkl)
        kp = [cv2.KeyPoint(x=t[0][0], y=t[0][1], _size=t[1],
                           _angle=t[2], _response=t[3],
                           _octave=t[4], _class_id=t[5])
              for t in tmp]
    return kp, des
Example #20
Source File: frame_drawer.py From DF-VO with MIT License
def draw_match_2_side(img1, kp1, img2, kp2, N):
    """Draw matches on 2 sides
    Args:
        img1 (HxW(xC) array): image 1
        kp1 (Nx2 array): keypoints for image 1
        img2 (HxW(xC) array): image 2
        kp2 (Nx2 array): keypoints for image 2
        N (int): number of matches to draw
    Returns:
        out_img (Hx2W(xC) array): output image with drawn matches
    """
    kp_list = np.linspace(0, min(kp1.shape[0], kp2.shape[0]) - 1, N,
                          dtype=np.int)

    # Convert keypoints to cv2.KeyPoint objects
    cv_kp1 = [cv2.KeyPoint(x=pt[0], y=pt[1], _size=1) for pt in kp1[kp_list]]
    cv_kp2 = [cv2.KeyPoint(x=pt[0], y=pt[1], _size=1) for pt in kp2[kp_list]]

    out_img = np.array([])
    good_matches = [cv2.DMatch(_imgIdx=0, _queryIdx=idx, _trainIdx=idx, _distance=0)
                    for idx in range(N)]
    out_img = cv2.drawMatches(img1, cv_kp1, img2, cv_kp2,
                              matches1to2=good_matches, outImg=out_img)
    return out_img
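A sketch exercising the function above with synthetic data. Note that the function relies on the legacy _size/_imgIdx keyword arguments and on np.int (removed in NumPy 1.24), so it runs as written only on older OpenCV/NumPy; on current versions you would pass the values positionally and use plain int. The output path is illustrative.

import cv2
import numpy as np

img1 = np.zeros((240, 320, 3), dtype=np.uint8)
img2 = np.zeros((240, 320, 3), dtype=np.uint8)
kp1 = np.random.rand(50, 2) * [320, 240]  # 50 (x, y) points in image 1
kp2 = np.random.rand(50, 2) * [320, 240]  # the corresponding points in image 2
vis = draw_match_2_side(img1, kp1, img2, kp2, N=20)
cv2.imwrite('matches.png', vis)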
Example #21
Source File: frame_matching.py From hfnet with MIT License
def get_ocv_kpts_from_np(keypoints_np):
    return [cv2.KeyPoint(x=x, y=y, _size=1) for x, y in keypoints_np]
Example #22
Source File: opencvhelper.py From pyslam with GNU General Public License v3.0
def unpack_octave(self, kpt):
    """Get scale coefficients of a keypoint.
    Args:
        kpt: A keypoint object represented as cv2.KeyPoint.
    Returns:
        octave: The octave index.
        layer: The level index.
        scale: The sampling step.
    """
    octave = kpt.octave & 255
    layer = (kpt.octave >> 8) & 255
    octave = octave if octave < 128 else (-128 | octave)
    scale = 1. / (1 << octave) if octave >= 0 else float(1 << -octave)
    return octave, layer, scale
Example #23
Source File: feature_d2net.py From pyslam with GNU General Public License v3.0
def convert_pts_to_keypoints(pts, scores, size=1):
    assert(len(pts) == len(scores))
    kps = []
    if pts is not None:
        # convert matrix [Nx2] of pts into list of keypoints
        kps = [cv2.KeyPoint(p[0], p[1], _size=size, _response=scores[i])
               for i, p in enumerate(pts)]
    return kps

# interface for pySLAM
# from https://github.com/mihaidusmanu/d2-net
# N.B.: The singlescale features require less than 6GB of VRAM for 1200x1600 images.
# The multiscale flag can be used to extract multiscale features - for this, we recommend at least 12GB of VRAM.
Example #24
Source File: feature_r2d2.py From pyslam with GNU General Public License v3.0
def convert_pts_to_keypoints(pts, scores, sizes, levels):
    assert(len(pts) == len(scores))
    kps = []
    if pts is not None:
        # convert matrix [Nx2] of pts into list of keypoints
        kps = [cv2.KeyPoint(p[0], p[1], _size=sizes[i], _response=scores[i], _octave=levels[i])
               for i, p in enumerate(pts)]
    return kps

# TODO: fix the octave field of the output keypoints
# interface for pySLAM
Example #25
Source File: feature_contextdesc.py From pyslam with GNU General Public License v3.0
def convert_pts_to_keypoints(pts, scores, sizes):
    assert(len(pts) == len(scores))
    kps = []
    if pts is not None:
        # convert matrix [Nx2] of pts into list of keypoints
        kps = [cv2.KeyPoint(p[0], p[1], _size=sizes[i], _response=scores[i])
               for i, p in enumerate(pts)]
    return kps

# interface for pySLAM
Example #26
Source File: feature_shitomasi.py From pyslam with GNU General Public License v3.0
def detect(self, frame, mask=None):
    pts = cv2.goodFeaturesToTrack(frame, self.num_features, self.quality_level,
                                  self.min_coner_distance,
                                  blockSize=self.blockSize, mask=mask)
    # convert matrix of pts into list of keypoints
    if pts is not None:
        kps = [cv2.KeyPoint(p[0][0], p[0][1], self.blockSize) for p in pts]
    else:
        kps = []
    #if kVerbose:
    #    print('detector: Shi-Tomasi, #features: ', len(kps), ', #ref: ', self.num_features, ', frame res: ', frame.shape[0:2])
    return kps
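For reference, the same conversion outside the class: goodFeaturesToTrack returns an (N, 1, 2) float32 array, which is why the code above indexes p[0][0] and p[0][1]. Parameter values and the image path here are illustrative.

import cv2

img = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)  # any grayscale image
pts = cv2.goodFeaturesToTrack(img, maxCorners=500, qualityLevel=0.01,
                              minDistance=3, blockSize=5)
kps = [cv2.KeyPoint(float(p[0][0]), float(p[0][1]), 5) for p in pts] if pts is not None else []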
Example #27
Source File: opencvhelper.py From pyslam with GNU General Public License v3.0
def draw_matches(self, img1, cv_kpts1, img2, cv_kpts2, good_matches, mask,
                 match_color=(0, 255, 0), pt_color=(0, 0, 255)):
    """Draw matches."""
    if type(cv_kpts1) is np.ndarray and type(cv_kpts2) is np.ndarray:
        cv_kpts1 = [cv2.KeyPoint(cv_kpts1[i][0], cv_kpts1[i][1], 1)
                    for i in range(cv_kpts1.shape[0])]
        cv_kpts2 = [cv2.KeyPoint(cv_kpts2[i][0], cv_kpts2[i][1], 1)
                    for i in range(cv_kpts2.shape[0])]
    display = cv2.drawMatches(img1, cv_kpts1, img2, cv_kpts2, good_matches,
                              None,
                              matchColor=match_color,
                              singlePointColor=pt_color,
                              matchesMask=mask.ravel().tolist(),
                              flags=4)
    return display
Example #28
Source File: opencvhelper.py From pyslam with GNU General Public License v3.0
def get_patches(self, cv_kpts):
    """Get all patches around given keypoints.
    Args:
        cv_kpts: A list of keypoints represented as cv2.KeyPoint.
    Return:
        all_patches: (n_kpts, 32, 32) Cropped patches.
    """
    # generate sampling grids.
    n_pixel = np.square(self.patch_size)
    self.output_grid = np.zeros((n_pixel, 3), dtype=np.float32)
    for i in range(n_pixel):
        self.output_grid[i, 0] = (i % self.patch_size) * 1. / self.patch_size * 2 - 1
        self.output_grid[i, 1] = (i // self.patch_size) * 1. / self.patch_size * 2 - 1
        self.output_grid[i, 2] = 1

    if self.pyr_off:
        if not self.down_octave:
            cv_kpts = cv_kpts[::-1]
        all_patches = self.get_interest_region(self.pyr, cv_kpts)
    else:
        scale_index = [[] for i in range(len(self.pyr))]
        for idx, val in enumerate(cv_kpts):
            octave, layer, _ = self.unpack_octave(val)
            scale_val = (int(octave) - self.first_octave) * (self.n_octave_layers + 3) + int(layer)
            scale_index[scale_val].append(idx)
        all_patches = []
        for idx, val in enumerate(scale_index):
            tmp_cv_kpts = [cv_kpts[i] for i in val]
            scale_img = self.pyr[idx]
            patches = self.get_interest_region(scale_img, tmp_cv_kpts)
            if patches is not None:
                all_patches.append(patches)
        if self.down_octave:
            all_patches = np.concatenate(all_patches[::-1], axis=0)
        else:
            all_patches = np.concatenate(all_patches, axis=0)
    assert len(cv_kpts) == all_patches.shape[0]
    return all_patches
Example #29
Source File: opencvhelper.py From pyslam with GNU General Public License v3.0
def compute(self, img, cv_kpts):
    """Compute SIFT descriptions on given keypoints.
    Args:
        img: The input image, can be either color or gray-scale.
        cv_kpts: A list of cv2.KeyPoint.
    Returns:
        sift_desc: (n_kpts, 128) SIFT descriptions.
    """
    _, sift_desc = self.sift.compute(img, cv_kpts)
    return sift_desc
Example #30
Source File: example.py From rfnet with MIT License
def to_cv2_kp(kp):
    # kp is like [batch_idx, y, x, channel]
    return cv2.KeyPoint(kp[2], kp[1], 0)