Python cv2.drawKeypoints() Examples
The following are 30 code examples of cv2.drawKeypoints(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
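
Before the examples, here is a minimal sketch of a typical call, assuming only that an image of your own sits at the placeholder path 'input.jpg' (the path, the choice of ORB as detector, and the output filename are illustrative, not taken from any example below):

import cv2

# Detect keypoints with ORB; any detector returning cv2.KeyPoint objects works.
img = cv2.imread('input.jpg')
orb = cv2.ORB_create()
kp = orb.detect(img, None)

# drawKeypoints(image, keypoints, outImage[, color[, flags]]) -> outImage
# Passing None as outImage lets OpenCV allocate the result image.
out = cv2.drawKeypoints(img, kp, None, color=(0, 255, 0),
                        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('keypoints.jpg', out)

With cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, each keypoint is drawn as a circle whose radius and orientation line reflect the keypoint's size and angle; with the default flags, only the locations are marked.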
Example #1
Source File: 04_fast_feature.py From Practical-Computer-Vision with MIT License | 8 votes

def compute_fast_det(filename, is_nms=True, thresh=10):
    img = cv2.imread(filename)

    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector_create()  #FastFeatureDetector()

    # find and draw the keypoints
    if not is_nms:
        fast.setNonmaxSuppression(0)
    fast.setThreshold(thresh)

    kp = fast.detect(img, None)
    cv2.drawKeypoints(img, kp, img, color=(255, 0, 0))
    return img
Example #2
Source File: BlobDetector.py From openag_cv with GNU General Public License v3.0 | 7 votes

def postprocessing_image(self, frame):
    # Detect blobs.
    keypoints = self.detector.detect(frame)

    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the
    # circle corresponds to the size of the blob.
    im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),
                                          cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    leaves_data = self.k_means(keypoints)
    frame = self.print_number_of_leaves(im_with_keypoints, leaves_data)
    return frame
Example #3
Source File: utils_nms.py From cvToolkit with MIT License | 6 votes

def show_blobs_in_heatmap(heatmap, blobs):
    heatmap_with_blobs = cv2.drawKeypoints(heatmap, blobs, np.array([]), (0, 0, 255),
                                           cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    [i, j] = np.unravel_index(heatmap.argmin(), heatmap.shape)
    cv2.circle(heatmap_with_blobs, (j, i), 3, (0, 255, 0))
    cv2.imshow("Heatmap Blobs", heatmap_with_blobs)
    cv2.waitKey(0)
Example #4
Source File: webcam_track_blobs.py From pc-drone with MIT License | 6 votes

def add_blobs(crop_frame):
    frame = cv2.GaussianBlur(crop_frame, (3, 3), 0)

    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # define range of green color in HSV
    lower_green = np.array([70, 50, 50])
    upper_green = np.array([85, 255, 255])

    # Threshold the HSV image to get only green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)

    detector = cv2.SimpleBlobDetector_create(params)

    # Detect blobs on the inverted mask.
    reversemask = 255 - mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print("found blobs")
        if len(keypoints) > 4:
            # keep only the three smallest blobs
            keypoints.sort(key=(lambda s: s.size))
            keypoints = keypoints[0:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the
        # circle corresponds to the size of the blob.
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print("no blobs")
        im_with_keypoints = crop_frame
    return im_with_keypoints  # , max_blob_dist, blob_center, keypoint_in_orders
Example #5
Source File: 04_sift_features.py From Practical-Computer-Vision with MIT License | 6 votes

def compute_sift_features(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    kp = sift.detect(gray, None)
    # outImage=None lets OpenCV allocate the output; the argument is
    # required in OpenCV 3+ (the original two-argument call would fail there)
    img = cv2.drawKeypoints(gray, kp, None)
    plt.figure(figsize=(12, 8))
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show()
Example #6
Source File: utils.py From posenet-python with Apache License 2.0 | 6 votes

def draw_skel_and_kp(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_score=0.5, min_part_score=0.5):
    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_score:
            continue
        new_keypoints = get_adjacent_keypoints(
            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
        adjacent_keypoints.extend(new_keypoints)
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_score:
                continue
            # keypoint_coords are (row, col); cv2.KeyPoint wants (x, y)
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    out_img = cv2.drawKeypoints(
        out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False,
                            color=(255, 255, 0))
    return out_img
Example #7
Source File: utils_nms.py From lighttrack with MIT License | 6 votes

def show_blobs_in_heatmap(heatmap, blobs):
    heatmap_with_blobs = cv2.drawKeypoints(heatmap, blobs, np.array([]), (0, 0, 255),
                                           cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    [i, j] = np.unravel_index(heatmap.argmin(), heatmap.shape)
    cv2.circle(heatmap_with_blobs, (j, i), 3, (0, 255, 0))
    cv2.imshow("Heatmap Blobs", heatmap_with_blobs)
    cv2.waitKey(0)
Example #8
Source File: surf_image_processing.py From Indian-Sign-Language-Recognition with MIT License | 5 votes

def func2(path):
    frame = cv2.imread(path)
    frame = cv2.resize(frame, (128, 128))
    converted2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # Convert from BGR to HSV
    #cv2.imshow("original",converted2)
    lowerBoundary = np.array([0, 40, 30], dtype="uint8")
    upperBoundary = np.array([43, 255, 254], dtype="uint8")
    skinMask = cv2.inRange(converted, lowerBoundary, upperBoundary)
    skinMask = cv2.addWeighted(skinMask, 0.5, skinMask, 0.5, 0.0)
    #cv2.imshow("masked",skinMask)
    skinMask = cv2.medianBlur(skinMask, 5)
    skin = cv2.bitwise_and(converted2, converted2, mask=skinMask)
    #cv2.imshow("masked2",skin)
    img2 = cv2.Canny(skin, 60, 60)
    #cv2.imshow("edge detection",img2)
    img2 = cv2.resize(img2, (256, 256))
    orb = cv2.ORB_create()  # ORB lives in the main cv2 namespace, not cv2.xfeatures2d
    kp, des = orb.detectAndCompute(img2, None)
    #print(len(des))
    img2 = cv2.drawKeypoints(img2, kp, None, color=(0, 255, 0), flags=0)
    #plt.imshow(img2),plt.show()
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return des  # the original returned the undefined name des2

#func("001.jpg")
Example #9
Source File: utils.py From posenet-pytorch with Apache License 2.0 | 5 votes

def draw_skel_and_kp(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_score=0.5, min_part_score=0.5):
    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_score:
            continue
        new_keypoints = get_adjacent_keypoints(
            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
        adjacent_keypoints.extend(new_keypoints)
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_score:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    if cv_keypoints:
        out_img = cv2.drawKeypoints(
            out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
            flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False,
                            color=(255, 255, 0))
    return out_img
Example #10
Source File: vo.py From Monocular-Visual-Inertial-Odometry with MIT License | 5 votes

def feature_detection(img1, points1):
    # The original used the OpenCV 2.x API: cv2.FastFeatureDetector(20) and
    # fast.setBool('nonmaxSuppression', 1); the calls below are the OpenCV 3+ equivalents.
    fast = cv2.FastFeatureDetector_create(20)  # sets the threshold
    fast.setNonmaxSuppression(True)            # makes non-max suppression true
    kp = fast.detect(img1, None)
    cd_x = np.array([k.pt[0] for k in kp])
    cd_y = np.array([k.pt[1] for k in kp])
    for i in range(len(cd_x)):
        points1.append([[cd_x[i], cd_y[i]]])
    #img1 = cv2.drawKeypoints(img1, kp, img1)  # for testing keypoint generation
    #cv2.imwrite('kp_test.png', img1)

#test feature detection
#points1 = []
#feature_detection(img, points1)
#print(points1)
Example #11
Source File: utils_nms.py From cvToolkit with MIT License | 5 votes

def show_blobs_in_heatmap(heatmap, blobs):
    heatmap_with_blobs = cv2.drawKeypoints(heatmap, blobs, np.array([]), (0, 0, 255),
                                           cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    [i, j] = np.unravel_index(heatmap.argmin(), heatmap.shape)
    cv2.circle(heatmap_with_blobs, (j, i), 3, (0, 255, 0))
    cv2.imshow("Heatmap Blobs", heatmap_with_blobs)
    cv2.waitKey(0)
Example #12
Source File: 04_orb_detections.py From Practical-Computer-Vision with MIT License | 5 votes

def draw_keyp(img, kp):
    """
    Takes image and keypoints and plots them on the same image.
    Does not display it.
    """
    cv2.drawKeypoints(img, kp, img, color=(255, 0, 0), flags=2)
    return img
Example #13
Source File: 04_flann_feature_match.py From Practical-Computer-Vision with MIT License | 5 votes

def draw_keyp(img, kp):
    """Draws color around keypoint pixels."""
    cv2.drawKeypoints(img, kp, img, color=(255, 0, 0), flags=2)
    return img
Example #14
Source File: utils_nms.py From video-to-pose3D with MIT License | 5 votes

def show_blobs_in_heatmap(heatmap, blobs):
    heatmap_with_blobs = cv2.drawKeypoints(heatmap, blobs, np.array([]), (0, 0, 255),
                                           cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    [i, j] = np.unravel_index(heatmap.argmin(), heatmap.shape)
    cv2.circle(heatmap_with_blobs, (j, i), 3, (0, 255, 0))
    cv2.imshow("Heatmap Blobs", heatmap_with_blobs)
    cv2.waitKey(0)
Example #15
Source File: opencv_py.py From python-urbanPlanning with MIT License | 5 votes

def starDetection(inputImg_edge):
    imgStar = cv2.imread(inputImg_edge)
    # imgGray = cv2.cvtColor(imgStar, cv2.COLOR_BGR2GRAY)
    star = cv2.xfeatures2d.StarDetector_create()
    keypoints = star.detect(imgStar)
    # print(len(keypoints), keypoints)
    cv2.drawKeypoints(imgStar, keypoints, imgStar,
                      flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imshow('star features', imgStar)
    cv2.imwrite(os.path.join(rootDirectory, 'star features.jpg'), imgStar)
    cv2.waitKey()

# SIFT image matching
Example #16
Source File: image.py From ImageAnalysis with MIT License | 5 votes

def show_features(self, flags=0):
    # flags=0: draw only keypoint locations
    # flags=4: draw rich keypoints
    rgb = self.load_rgb(equalize=True)
    w, h = self.get_size()
    scale = 1000.0 / float(h)
    kp_list = []
    for kp in self.kp_list:
        angle = kp.angle
        class_id = kp.class_id
        octave = kp.octave
        pt = kp.pt
        response = kp.response
        size = kp.size
        x = pt[0] * scale
        y = pt[1] * scale
        kp_list.append(cv2.KeyPoint(x, y, size, angle, response,
                                    octave, class_id))
    scaled_image = cv2.resize(rgb, (0, 0), fx=scale, fy=scale)
    #res = cv2.drawKeypoints(scaled_image, kp_list, None,
    #                        color=(0,255,0), flags=flags)
    for kp in kp_list:
        cv2.circle(scaled_image, (int(kp.pt[0]), int(kp.pt[1])), 3,
                   (0, 255, 0), 1, cv2.LINE_AA)
    cv2.imshow(self.name, scaled_image)
    print('waiting for keyboard input...')
    key = cv2.waitKey() & 0xff
    cv2.destroyWindow(self.name)
    return key
Example #17
Source File: find_balloon.py From ardupilot-balloon-finder with GNU General Public License v3.0 | 5 votes

def analyse_frame(self, frame):
    balloon_found = False
    balloon_x = 0
    balloon_y = 0
    balloon_radius = 0

    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Threshold the HSV image
    mask = cv2.inRange(hsv, self.filter_low, self.filter_high)

    # Erode
    erode_kernel = numpy.ones((3, 3), numpy.uint8)
    eroded_img = cv2.erode(mask, erode_kernel, iterations=1)

    # Dilate
    dilate_kernel = numpy.ones((10, 10), numpy.uint8)
    dilate_img = cv2.dilate(eroded_img, dilate_kernel, iterations=1)

    # blob detector
    blob_params = cv2.SimpleBlobDetector_Params()
    blob_params.minDistBetweenBlobs = 50
    blob_params.filterByInertia = False
    blob_params.filterByConvexity = False
    blob_params.filterByColor = True
    blob_params.blobColor = 255
    blob_params.filterByCircularity = False
    blob_params.filterByArea = False
    #blob_params.minArea = 20
    #blob_params.maxArea = 500
    blob_detector = cv2.SimpleBlobDetector_create(blob_params)
    keypts = blob_detector.detect(dilate_img)

    # draw centers of all keypoints in new image
    #blob_img = cv2.drawKeypoints(frame, keypts, color=(0,255,0), flags=0)

    # find largest blob
    if len(keypts) > 0:
        kp_max = keypts[0]
        for kp in keypts:
            if kp.size > kp_max.size:
                kp_max = kp

        # draw circle around the largest blob
        cv2.circle(frame, (int(kp_max.pt[0]), int(kp_max.pt[1])),
                   int(kp_max.size), (0, 255, 0), 2)

        # set the balloon location
        balloon_found = True
        balloon_x = kp_max.pt[0]
        balloon_y = kp_max.pt[1]
        balloon_radius = kp_max.size

    # return results
    return balloon_found, balloon_x, balloon_y, balloon_radius

# add_artificial_horizon - adds artificial horizon to an image using the vehicle's attitude
Example #18
Source File: feature_manager.py From pyslam with GNU General Public License v3.0 | 5 votes

def detect(self, frame, mask=None, filter=True):
    if not self.need_color_image and frame.ndim > 2:  # check if we have to convert to gray image
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    if self.use_pyramid_adaptor:
        # detection with pyramid adaptor (it can optionally include a block adaptor per level)
        kps = self.pyramid_adaptor.detect(frame, mask)
    elif self.use_bock_adaptor:
        # detection with block adaptor
        kps = self.block_adaptor.detect(frame, mask)
    else:
        # standard detection
        kps = self._feature_detector.detect(frame, mask)
    # filter keypoints
    filter_name = 'NONE'
    if filter:
        kps, _, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps)
    # if keypoints are FAST, etc., give them a decent size in order to properly compute the descriptors
    if self.do_keypoints_size_rescaling:
        self.rescale_keypoint_size(kps)
    if kDrawOriginalExtractedFeatures:  # draw the original features
        imgDraw = cv2.drawKeypoints(frame, kps, None, color=(0, 255, 0), flags=0)
        cv2.imshow('detected keypoints', imgDraw)
    if kVerbose:
        print('detector:', self.detector_type.name, ', #features:', len(kps),
              ', [kp-filter:', filter_name, ']')
    return kps

# compute the descriptors once given the keypoints
Example #19
Source File: featureDetection.py From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License | 5 votes

def main():
    org_image = cv2.imread("../data/house.tiff", 1)

    '''
    SURF is better than SIFT and computes and detects features fast,
    but unfortunately both are patented (non-free). As a free,
    open-source alternative, OpenCV provides ORB.

    PARAM: nfeatures : Number of features to be detected. Default value is around 100.
    '''

    sift = cv2.xfeatures2d.SIFT_create()
    surf = cv2.xfeatures2d.SURF_create()
    orb = cv2.ORB_create(nfeatures=1000)

    # the original assigned all three descriptor sets to the same name decep_sift
    kp_sift, decep_sift = sift.detectAndCompute(org_image, None)
    kp_surf, decep_surf = surf.detectAndCompute(org_image, None)
    kp_orb, decep_orb = orb.detectAndCompute(org_image, None)

    org_image_sift = cv2.drawKeypoints(org_image, kp_sift, None)
    org_image_surf = cv2.drawKeypoints(org_image, kp_surf, None)
    org_image_orb = cv2.drawKeypoints(org_image, kp_orb, None)

    cv2.imshow("SIFT Features Detected", org_image_sift)
    cv2.imshow("SURF Features Detected", org_image_surf)
    cv2.imshow("ORB Features Detected", org_image_orb)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #20
Source File: utils.py From posenet-python with Apache License 2.0 | 5 votes

def draw_keypoints(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_confidence=0.5, min_part_confidence=0.5):
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_confidence:
            continue
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_confidence:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    out_img = cv2.drawKeypoints(img, cv_keypoints, outImage=np.array([]))
    return out_img
Example #21
Source File: feature.py From stereo_ptam with GNU General Public License v3.0 | 5 votes

def draw_keypoints(self, name='keypoints', delay=1):
    if self.image.ndim == 2:
        image = np.repeat(self.image[..., np.newaxis], 3, axis=2)
    else:
        image = self.image
    img = cv2.drawKeypoints(image, self.keypoints, None, flags=0)
    cv2.imshow(name, img)
    cv2.waitKey(delay)
Example #22
Source File: feature.py From rgbd_ptam with GNU General Public License v3.0 | 5 votes

def draw_keypoints(self, name='keypoints', delay=1):
    if self.image.ndim == 2:
        image = np.repeat(self.image[..., np.newaxis], 3, axis=2)
    else:
        image = self.image
    img = cv2.drawKeypoints(image, self.keypoints, None, flags=0)
    cv2.imshow(name, img)
    cv2.waitKey(delay)
Example #23
Source File: utils.py From posenet-pytorch with Apache License 2.0 | 5 votes

def draw_keypoints(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_confidence=0.5, min_part_confidence=0.5):
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_confidence:
            continue
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_confidence:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    out_img = cv2.drawKeypoints(img, cv_keypoints, outImage=np.array([]))
    return out_img
Example #24
Source File: surf_image_processing.py From Indian-Sign-Language-Recognition with MIT License | 4 votes

def func(path):
    frame = cv2.imread(path)
    frame = cv2.resize(frame, (128, 128))
    converted2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # Convert from BGR to HSV
    #cv2.imshow("original",converted2)
    lowerBoundary = np.array([0, 40, 30], dtype="uint8")
    upperBoundary = np.array([43, 255, 254], dtype="uint8")
    skinMask = cv2.inRange(converted, lowerBoundary, upperBoundary)
    skinMask = cv2.addWeighted(skinMask, 0.5, skinMask, 0.5, 0.0)
    #cv2.imshow("masked",skinMask)
    skinMask = cv2.medianBlur(skinMask, 5)
    skin = cv2.bitwise_and(converted2, converted2, mask=skinMask)
    #frame = cv2.addWeighted(frame,1.5,skin,-0.5,0)
    #skin = cv2.bitwise_and(frame, frame, mask=skinMask)
    #skinGray = cv2.cvtColor(skin, cv2.COLOR_BGR2GRAY)
    #cv2.imshow("masked2",skin)
    img2 = cv2.Canny(skin, 60, 60)
    #cv2.imshow("edge detection",img2)
    '''
    hog = cv2.HOGDescriptor()
    h = hog.compute(img2)
    print(len(h))
    '''
    surf = cv2.xfeatures2d.SURF_create()
    #surf.extended = True
    img2 = cv2.resize(img2, (256, 256))
    kp, des = surf.detectAndCompute(img2, None)
    #print(len(des))
    img2 = cv2.drawKeypoints(img2, kp, None, (0, 0, 255), 4)
    #plt.imshow(img2),plt.show()
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    print(len(des))
    return des
Example #25
Source File: spfunctions.py From spfeas with MIT License | 4 votes

def get_orb_keypoints(bd, image_min, image_max):
    """
    Computes the ORB key points

    Args:
        bd (2d array)
        image_min (int or float)
        image_max (int or float)
    """
    # We want odd patch sizes.
    # if parameter_object.scales[-1] % 2 == 0:
    #     patch_size = parameter_object.scales[-1] - 1
    if bd.dtype != 'uint8':
        bd = np.uint8(rescale_intensity(bd,
                                        in_range=(image_min, image_max),
                                        out_range=(0, 255)))

    patch_size = 31
    patch_size_d = patch_size * 3

    # Initiate ORB detector
    orb = cv2.ORB_create(nfeatures=int(.25 * (bd.shape[0] * bd.shape[1])),
                         edgeThreshold=patch_size,
                         scaleFactor=1.2,
                         nlevels=8,
                         patchSize=patch_size,
                         WTA_K=4,
                         scoreType=cv2.ORB_FAST_SCORE)

    # Add padding because ORB ignores edges.
    bd = cv2.copyMakeBorder(bd, patch_size_d, patch_size_d,
                            patch_size_d, patch_size_d, cv2.BORDER_REFLECT)

    # Compute ORB keypoints
    key_points = orb.detectAndCompute(bd, None)[0]

    # img = cv2.drawKeypoints(np.uint8(ch_bd), key_points, np.uint8(ch_bd).copy())

    return fill_key_points(np.float32(bd),
                           key_points)[patch_size_d:-patch_size_d,
                                       patch_size_d:-patch_size_d]
Example #26
Source File: image.py From ATX with Apache License 2.0 | 4 votes

def find_image_position(origin='origin.png', query='query.png', outfile=None):
    '''
    find all image positions
    @return None if not found
    else a tuple: (origin.shape, query.shape, positions)
    might raise Exception
    '''
    img1 = cv2.imread(query, 0)   # query image (small)
    img2 = cv2.imread(origin, 0)  # train image (big)

    # Initiate SIFT detector (the original used the OpenCV 2.x cv2.SIFT())
    sift = cv2.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    print(len(kp1), len(kp2))

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    # flann
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    print(len(kp1), len(kp2), 'good cnt:', len(good))

    if len(good) * 1.0 / len(kp1) < 0.5:
        # if len(good) < MIN_MATCH_COUNT:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        return img2.shape, img1.shape, []

    queryPts = []
    trainPts = []
    for dm in good:
        queryPts.append(kp1[dm.queryIdx])
        trainPts.append(kp2[dm.trainIdx])

    # the original omitted outImage, which OpenCV 2.x allowed
    img3 = cv2.drawKeypoints(img1, queryPts, None)
    cv2.imwrite('image/query.png', img3)

    img3 = cv2.drawKeypoints(img2, trainPts, None)

    point = _middlePoint(trainPts)
    print('position in', point)

    if outfile:
        edge = 10
        top_left = (point[0] - edge, point[1] - edge)
        bottom_right = (point[0] + edge, point[1] + edge)
        cv2.rectangle(img3, top_left, bottom_right, 255, 2)
        cv2.imwrite(outfile, img3)
    return img2.shape, img1.shape, [point]
Example #27
Source File: test_keypoints.py From transforms with MIT License | 4 votes

def test_keypoints():
    feature_detector = cv2.ORB_create(
        nfeatures=500, scaleFactor=1.2, nlevels=1, edgeThreshold=31)
    image = misc.face()  # RGB
    image = cv2.resize(image, None, fx=0.5, fy=0.5)
    print('image shape', image.shape)
    keypoints = feature_detector.detect(image[..., ::-1])
    points = [kp.pt for kp in keypoints]
    print('num of keypoints', len(keypoints))

    PRNG = RandomState()
    transform = Compose([
        [ColorJitter(prob=0.75), None],
        Expand((0.8, 1.5)),
        RandomCompose([
            RandomRotate(360),
            RandomShift(0.2)]),
        Scale(512),
        # ElasticTransform(300),
        RandomCrop(512),
        HorizontalFlip(),
    ], PRNG, border='constant', fillval=0, outside_points='inf')

    results = []
    for _ in range(100):
        img, pts = transform(image, points)
        filtered = []
        for pt in pts:
            x = [abs(pt[0]), abs(pt[1])]
            if np.inf not in x and np.nan not in x:
                filtered.append(pt)
        kps = [cv2.KeyPoint(*pt, 1) for pt in filtered]
        print('num of keypoints', len(kps))
        img = cv2.drawKeypoints(img[..., ::-1], kps, None, flags=0)
        results.append(img[..., ::-1])

        cv2.imshow('keypoints', img)
        c = cv2.waitKey(600)
        if c == 27 or c == ord('q'):  # ESC / 'q'
            break

    # imageio.mimsave('keypoints.gif', results, duration=0.5)
Example #28
Source File: 04_sift_features.py From Practical-Computer-Vision with MIT License | 4 votes

def compute_fast_det(img, is_nms=True, thresh=10):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector_create()

    # find and draw the keypoints
    if not is_nms:
        fast.setNonmaxSuppression(0)
    fast.setThreshold(thresh)

    kp = fast.detect(img, None)
    cv2.drawKeypoints(img, kp, img, color=(255, 0, 0))

    # the original used the OpenCV 2.x cv2.SIFT(); this is the OpenCV 3.x contrib API
    sift = cv2.xfeatures2d.SIFT_create()
    kp = sift.detect(gray, None)
    img = cv2.drawKeypoints(gray, kp, None)  # outImage is required in OpenCV 3+

    plt.figure(figsize=(12, 8))
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show()
Example #29
Source File: image.py From airtest with BSD 3-Clause "New" or "Revised" License | 4 votes

def find_image_position(origin='origin.png', query='query.png', outfile=None):
    '''
    find all image positions
    @return None if not found
    else a tuple: (origin.shape, query.shape, positions)
    might raise Exception
    '''
    img1 = cv2.imread(query, 0)   # query image (small)
    img2 = cv2.imread(origin, 0)  # train image (big)

    # Initiate SIFT detector (the original used the OpenCV 2.x cv2.SIFT())
    sift = cv2.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    print(len(kp1), len(kp2))

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    # flann
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    print(len(kp1), len(kp2), 'good cnt:', len(good))

    if len(good) * 1.0 / len(kp1) < 0.5:
        # if len(good) < MIN_MATCH_COUNT:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        return img2.shape, img1.shape, []

    queryPts = []
    trainPts = []
    for dm in good:
        queryPts.append(kp1[dm.queryIdx])
        trainPts.append(kp2[dm.trainIdx])

    # the original omitted outImage, which OpenCV 2.x allowed
    img3 = cv2.drawKeypoints(img1, queryPts, None)
    cv2.imwrite('image/query.png', img3)

    img3 = cv2.drawKeypoints(img2, trainPts, None)

    point = _middlePoint(trainPts)
    print('position in', point)

    if outfile:
        edge = 10
        top_left = (point[0] - edge, point[1] - edge)
        bottom_right = (point[0] + edge, point[1] + edge)
        cv2.rectangle(img3, top_left, bottom_right, 255, 2)
        cv2.imwrite(outfile, img3)
    return img2.shape, img1.shape, [point]
Example #30
Source File: opencv_py.py From python-urbanPlanning with MIT License | 4 votes

def siftDetection(inputImg_edge):
    imgSift = cv2.imread(inputImg_edge)
    imgGray = cv2.cvtColor(imgSift, cv2.COLOR_BGR2GRAY)
    print(imgGray.shape)
    sift = cv2.xfeatures2d.SIFT_create()     # instantiate the SIFT feature detector
    keypoints = sift.detect(imgGray, None)   # extract SIFT keypoints (detector)
    print(keypoints[:3], len(keypoints))
    for k in keypoints[:3]:
        print(k.pt, k.size, k.octave, k.response, k.class_id, k.angle)
    """
    Keypoint attributes:
    k.pt        coordinates of the keypoint (pixel position in the image)
    k.size      diameter of the keypoint's neighborhood
    k.octave    the level of the Gaussian pyramid the keypoint was extracted from
    k.response  response strength: how strong the point is, i.e. how corner-like it is.
                A corner is an extremum, a point where some property stands out (maximum or minimum).
    k.class_id  when classifying images, class_id can be used to distinguish feature points; -1 if unset
    k.angle     orientation of the keypoint; SIFT derives it from gradients over the neighborhood; -1 initially
    """
    des = sift.compute(imgGray, keypoints)   # compute the SIFT descriptors
    print(type(keypoints), type(des))
    print(des[0][:2])   # keypoints
    print(des[1][:2])   # descriptors (the surrounding pixels that contribute to each keypoint)
    print(des[1].shape)
    imgSift = np.copy(imgSift)
    cv2.drawKeypoints(imgSift, keypoints, imgSift,
                      flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    """
    help(cv2.drawKeypoints):
    drawKeypoints(image, keypoints, outImage[, color[, flags]]) -> outImage
    @brief Draws keypoints.
    @param image     Source image (3-channel or single-channel).
    @param keypoints Keypoints from the source image: a vector of KeyPoint objects,
                     each carrying the attributes listed above.
    @param outImage  Output image onto which the keypoints are drawn (may be the
                     source image itself); its content depends on the flags value.
    @param color     Color of keypoints; random colors by default.
    @param flags     Drawing mode, i.e. which keypoint information to draw. For the
                     Python API the possible values are cv2.DRAW_MATCHES_FLAGS_DEFAULT,
                     cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
                     cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
                     cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
    """
    cv2.imshow('sift features', imgSift)
    cv2.imwrite(os.path.join(rootDirectory, 'sift features.jpg'), imgSift)
    cv2.waitKey()

# star feature detector