Python cv2.perspectiveTransform() Examples
The following are 30 code examples of cv2.perspectiveTransform(). You can go to the original project or source file by following the links above each example, or browse all other available functions and classes of the cv2 module.
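Before the project examples, here is a minimal sketch of the call itself; the 3x3 homography below is an arbitrary translation chosen purely for illustration. cv2.perspectiveTransform() maps point coordinates, not images (use cv2.warpPerspective() to warp an image): it expects a float32 array of points shaped (N, 1, 2) together with a 3x3 matrix, for example one obtained from cv2.findHomography() or cv2.getPerspectiveTransform(), and returns the transformed points in the same shape.

import cv2
import numpy as np

# Arbitrary 3x3 homography used only for illustration: translate points by (10, 20).
H = np.array([[1, 0, 10],
              [0, 1, 20],
              [0, 0, 1]], dtype=np.float32)

# perspectiveTransform expects float32 points of shape (N, 1, 2).
pts = np.float32([[0, 0], [100, 0], [100, 50], [0, 50]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, H)
print(dst.reshape(-1, 2))  # each point shifted by (10, 20)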
Example #1
Source File: distance_ransac_orb.py From douglas-quaid with GNU General Public License v3.0 | 6 votes |
def filter_matrix_corners_homography(pts, max, matrix) -> (float, List):
    '''
    Compute the images of the image corners and of its center (i.e. the points you get when you
    apply the homography to those corners and center), and verify that they make sense, i.e. are
    they inside the image canvas (if you expect them to be)? Are they well separated from each
    other? Return a distance and a list of the transformed points
    '''
    # Transform the 4 corners thanks to the transformation matrix calculated
    transformed_pts = cv2.perspectiveTransform(pts, matrix)

    # Compute the difference between original and modified position of points
    dist = round(cv2.norm(pts - transformed_pts, cv2.NORM_L2) / max, 10)  # sqrt((X1-X2)²+(Y1-Y2)²+...)

    # Totally a heuristic (geometry based):
    if dist < 0.20:
        return dist, transformed_pts
    else:
        return 1, transformed_pts
Example #2
Source File: distance_ransac_orb.py From douglas-quaid with GNU General Public License v3.0 | 6 votes |
def filter_matrix_corners_affine(pts, max, matrix) -> (float, List):
    '''
    Compute the images of the image corners and of its center (i.e. the points you get when you
    apply the homography to those corners and center), and verify that they make sense, i.e. are
    they inside the image canvas (if you expect them to be)? Are they well separated from each
    other? Return a distance and a list of the transformed points
    '''
    # Make affine transformation
    add_row = np.array([[0, 0, 1]])
    affine_matrix = np.concatenate((matrix, add_row), axis=0)
    transformed_pts_affine = cv2.perspectiveTransform(pts, affine_matrix)

    # Affine distance
    tmp_dist_affine = round(cv2.norm(pts - transformed_pts_affine, cv2.NORM_L2) / max, 10)  # sqrt((X1-X2)²+(Y1-Y2)²+...)

    # Totally a heuristic (geometry based):
    if tmp_dist_affine < 0.20:
        return tmp_dist_affine, transformed_pts_affine
    else:
        return 1, transformed_pts_affine
Example #3
Source File: DartsMapping.py From opencv-steel-darts with GNU General Public License v3.0 | 6 votes |
def getTransformedLocation(x_coord, y_coord, calData):
    try:
        # transform only the hit point with the saved transformation matrix
        # ToDo: idea for second camera -> transform complete image and overlap both images to find dart location?
        dart_loc_temp = np.array([[x_coord, y_coord]], dtype="float32")
        dart_loc_temp = np.array([dart_loc_temp])
        dart_loc = cv2.perspectiveTransform(dart_loc_temp, calData.transformation_matrix)
        new_dart_loc = tuple(dart_loc.reshape(1, -1)[0])
        return new_dart_loc

    # system not calibrated
    except AttributeError as err1:
        print(err1)
        return (-1, -1)

    except NameError as err2:
        # not calibrated error
        print(err2)
        return (-2, -2)

# Returns dartThrow (score, multiplier, angle, magnitude) based on x,y location
Example #4
Source File: geometric.py From ViolenceDetection with Apache License 2.0 | 6 votes |
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
    result = keypoints_on_images
    matrices, max_heights, max_widths = self._create_matrices(
        [kps.shape for kps in keypoints_on_images],
        random_state
    )
    for i, (M, max_height, max_width) in enumerate(zip(matrices, max_heights, max_widths)):
        keypoints_on_image = keypoints_on_images[i]
        kps_arr = keypoints_on_image.get_coords_array()
        #nb_channels = keypoints_on_image.shape[2] if len(keypoints_on_image.shape) >= 3 else None
        warped = cv2.perspectiveTransform(np.array([kps_arr], dtype=np.float32), M)
        warped = warped[0]
        warped_kps = ia.KeypointsOnImage.from_coords_array(
            warped,
            shape=(max_height, max_width) + keypoints_on_image.shape[2:]
        )
        if self.keep_size:
            warped_kps = warped_kps.on(keypoints_on_image.shape)
        result[i] = warped_kps
    return result
Example #5
Source File: ar_main.py From augmented-reality with MIT License | 6 votes |
def render(img, obj, projection, model, color=False):
    """
    Render a loaded obj model into the current video frame
    """
    vertices = obj.vertices
    scale_matrix = np.eye(3) * 3
    h, w = model.shape

    for face in obj.faces:
        face_vertices = face[0]
        points = np.array([vertices[vertex - 1] for vertex in face_vertices])
        points = np.dot(points, scale_matrix)
        # render model in the middle of the reference surface. To do so,
        # model points must be displaced
        points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])
        dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)
        imgpts = np.int32(dst)
        if color is False:
            cv2.fillConvexPoly(img, imgpts, (137, 27, 211))
        else:
            color = hex_to_rgb(face[-1])
            color = color[::-1]  # reverse
            cv2.fillConvexPoly(img, imgpts, color)

    return img
Example #6
Source File: auto.py From airtest with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _homography(src_pts, dst_pts, template_width, template_height, match_point=None):
    row, col, dim = dst_pts.shape
    if match_point:
        for i in range(row):
            match_point.append([int(dst_pts[i][0][0]), int(dst_pts[i][0][1])])
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    pts = np.float32([[0, 0], [0, template_height - 1],
                      [template_width - 1, template_height - 1],
                      [template_width - 1, 0]]).reshape(-1, 1, 2)
    # Find a transformation matrix mapping the query image onto the detected image
    dst = cv2.perspectiveTransform(pts, M)
    return dst

# SIFT + Homography
Example #7
Source File: screen_finder.py From PyCV-time with MIT License | 5 votes |
def reverse_transform(self, cam_pts):
    pts = np.float32(cam_pts).reshape(-1, 1, 2)
    return cv2.perspectiveTransform(pts, self.cam2screen_matrix)
Example #8
Source File: screen_finder.py From PyCV-time with MIT License | 5 votes |
def reverse_transform(self, cam_pts):
    pts = np.float32(cam_pts).reshape(-1, 1, 2)
    return cv2.perspectiveTransform(pts, self.cam2screen_matrix)
Example #9
Source File: plane_tracker.py From PyCV-time with MIT License | 5 votes |
def track(self, frame):
    '''Returns a list of detected TrackedTarget objects'''
    self.frame_points, self.frame_descrs = self.detect_features(frame)
    if len(self.frame_points) < MIN_MATCH_COUNT:
        return []
    matches = self.matcher.knnMatch(self.frame_descrs, k=2)
    matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
    if len(matches) < MIN_MATCH_COUNT:
        return []
    matches_by_id = [[] for _ in range(len(self.targets))]
    for m in matches:
        matches_by_id[m.imgIdx].append(m)
    tracked = []
    for imgIdx, matches in enumerate(matches_by_id):
        if len(matches) < MIN_MATCH_COUNT:
            continue
        target = self.targets[imgIdx]
        p0 = [target.keypoints[m.trainIdx].pt for m in matches]
        p1 = [self.frame_points[m.queryIdx].pt for m in matches]
        p0, p1 = np.float32((p0, p1))
        H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
        status = status.ravel() != 0
        if status.sum() < MIN_MATCH_COUNT:
            continue
        p0, p1 = p0[status], p1[status]

        x0, y0, x1, y1 = target.rect
        quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
        quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)

        track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
        tracked.append(track)
    tracked.sort(key=lambda t: len(t.p0), reverse=True)
    return tracked
Example #10
Source File: plane_tracker.py From PyCV-time with MIT License | 5 votes |
def track(self, frame):
    '''Returns a list of detected TrackedTarget objects'''
    self.frame_points, self.frame_descrs = self.detect_features(frame)
    if len(self.frame_points) < MIN_MATCH_COUNT:
        return []
    matches = self.matcher.knnMatch(self.frame_descrs, k=2)
    matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
    if len(matches) < MIN_MATCH_COUNT:
        return []
    matches_by_id = [[] for _ in range(len(self.targets))]
    for m in matches:
        matches_by_id[m.imgIdx].append(m)
    tracked = []
    for imgIdx, matches in enumerate(matches_by_id):
        if len(matches) < MIN_MATCH_COUNT:
            continue
        target = self.targets[imgIdx]
        p0 = [target.keypoints[m.trainIdx].pt for m in matches]
        p1 = [self.frame_points[m.queryIdx].pt for m in matches]
        p0, p1 = np.float32((p0, p1))
        H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
        status = status.ravel() != 0
        if status.sum() < MIN_MATCH_COUNT:
            continue
        p0, p1 = p0[status], p1[status]

        x0, y0, x1, y1 = target.rect
        quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
        quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)

        track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
        tracked.append(track)
    tracked.sort(key=lambda t: len(t.p0), reverse=True)
    return tracked
Example #11
Source File: inverse_perspective_map.py From DVCNN_Lane_Detection with Apache License 2.0 | 5 votes |
def perspective_point(self, pt1):
    """
    map point in top view image into front view image
    :param pt1:
    :return: pt2 [x, y]
    """
    pt1 = np.array([[pt1]], dtype=np.float32)
    top_ctrl_point = np.array(self.__top_view_ctrl_point).astype(dtype=np.float32)
    fv_ctrl_point = np.array(self.__front_view_ctrl_point).astype(dtype=np.float32)
    warp_transform = cv2.getPerspectiveTransform(src=top_ctrl_point, dst=fv_ctrl_point)
    pt_warp = cv2.perspectiveTransform(src=pt1, m=warp_transform)
    return pt_warp[0, 0, :]
Example #12
Source File: inverse_perspective_map.py From DVCNN_Lane_Detection with Apache License 2.0 | 5 votes |
def inverse_perspective_point(self, pt1):
    """
    map point in front view image into top view image
    :param pt1:
    :return: pt2 [x, y]
    """
    fv_ctrl_point = np.array(self.__front_view_ctrl_point).astype(dtype=np.float32)
    top_ctrl_point = np.array(self.__top_view_ctrl_point).astype(dtype=np.float32)
    warp_transform = cv2.getPerspectiveTransform(src=fv_ctrl_point, dst=top_ctrl_point)
    pt_warp = cv2.perspectiveTransform(src=pt1, m=warp_transform)
    return pt_warp[0, 0, :]
Example #13
Source File: math_utils.py From text_renderer with MIT License | 5 votes |
def transform_pnts(self, pnts, M33):
    """
    :param pnts: 2D pnts, left-top, right-top, right-bottom, left-bottom
    :param M33: output from transform_image()
    :return: 2D pnts after applying the perspective transform
    """
    pnts = np.asarray(pnts, dtype=np.float32)
    pnts = np.array([pnts])
    dst_pnts = cv2.perspectiveTransform(pnts, M33)[0]
    return dst_pnts
Example #14
Source File: utils.py From VerifAI with BSD 3-Clause "New" or "Revised" License | 5 votes |
def ld2bbSample(sample, h):
    sample = np.float32([sample]).reshape(-1, 1, 2)
    con = cv2.perspectiveTransform(sample, h)
    return np.array(list(con[0][0]))
Example #15
Source File: data_generation.py From keras-ocr with MIT License | 5 votes |
def compute_transformed_contour(width, height, fontsize, M, contour, minarea=0.5):
    """Compute the permitted drawing contour on a padded canvas for an image of a given size.
    We assume the canvas is padded with one full image width and height on left and right,
    top and bottom respectively.

    Args:
        width: Width of image
        height: Height of image
        fontsize: Size of characters
        M: The transformation matrix
        contour: The contour to which we are limited inside the rectangle of size width / height
        minarea: The minimum area required for a character slot to qualify as being visible,
            expressed as a fraction of the untransformed fontsize x fontsize slot.
    """
    spacing = math.ceil(fontsize / 2)
    xslots = int(np.floor(width / spacing))
    yslots = int(np.floor(height / spacing))
    ys, xs = np.mgrid[:yslots, :xslots]
    basis = np.concatenate([xs[..., np.newaxis], ys[..., np.newaxis]], axis=-1).reshape((-1, 2))
    basis *= spacing
    slots_pretransform = np.concatenate(
        [(basis + offset)[:, np.newaxis, :]
         for offset in [[0, 0], [spacing, 0], [spacing, spacing], [0, spacing]]],
        axis=1)
    slots = cv2.perspectiveTransform(src=slots_pretransform.reshape((1, -1, 2)).astype('float32'), m=M)[0]
    inside = np.array([
        cv2.pointPolygonTest(contour=contour, pt=(x, y), measureDist=False) >= 0
        for x, y in slots
    ]).reshape(-1, 4).all(axis=1)
    slots = slots.reshape(-1, 4, 2)
    areas = np.abs((slots[:, 0, 0] * slots[:, 1, 1] - slots[:, 0, 1] * slots[:, 1, 0]) +
                   (slots[:, 1, 0] * slots[:, 2, 1] - slots[:, 1, 1] * slots[:, 2, 0]) +
                   (slots[:, 2, 0] * slots[:, 3, 1] - slots[:, 2, 1] * slots[:, 3, 0]) +
                   (slots[:, 3, 0] * slots[:, 0, 1] - slots[:, 3, 1] * slots[:, 0, 0])) / 2
    slots_filtered = slots_pretransform[(areas > minarea * spacing * spacing) & inside]
    temporary_image = cv2.drawContours(image=np.zeros((height, width), dtype='uint8'),
                                       contours=slots_filtered,
                                       contourIdx=-1,
                                       color=255)
    temporary_image = cv2.dilate(src=temporary_image, kernel=np.ones((spacing, spacing)))
    newContours, _ = cv2.findContours(temporary_image,
                                      mode=cv2.RETR_TREE,
                                      method=cv2.CHAIN_APPROX_SIMPLE)
    x, y = slots_filtered[0][0]
    contour = newContours[next(
        index for index, contour in enumerate(newContours)
        if cv2.pointPolygonTest(contour=contour, pt=(x, y), measureDist=False) >= 0)][:, 0, :]
    return contour
Example #16
Source File: sift.py From airtest with BSD 3-Clause "New" or "Revised" License | 5 votes |
def find(search_file, image_file, threshold=None):
    '''
    The threshold param is disabled in sift match.
    '''
    sch = _cv2open(search_file, 0)
    img = _cv2open(image_file, 0)
    kp_sch, des_sch = sift.detectAndCompute(sch, None)
    kp_img, des_img = sift.detectAndCompute(img, None)
    if len(kp_sch) < MIN_MATCH_COUNT or len(kp_img) < MIN_MATCH_COUNT:
        return None

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des_sch, des_img, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    if len(good) > MIN_MATCH_COUNT:
        sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        img_pts = np.float32([kp_img[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
        # matchesMask = mask.ravel().tolist()
        h, w = sch.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        lt, br = dst[0][0], dst[2][0]
        return map(int, (lt[0] + w / 2, lt[1] + h / 2))
    else:
        return None
Example #17
Source File: image_SIFT.py From airtest with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _homography(src_pts, dst_pts, template_width, template_height, match_point=None):
    row, col, dim = dst_pts.shape
    if match_point:
        for i in range(row):
            match_point.append([int(dst_pts[i][0][0]), int(dst_pts[i][0][1])])
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    pts = np.float32([[0, 0], [0, template_height - 1],
                      [template_width - 1, template_height - 1],
                      [template_width - 1, 0]]).reshape(-1, 1, 2)
    # Find a transformation matrix mapping the query image onto the detected image
    dst = cv2.perspectiveTransform(pts, M)
    return dst
Example #18
Source File: transforms.py From One_Shot_Face_Reenactment with MIT License | 5 votes |
def dealcurve(curve):
    cmean = curve.mean(0)
    angle = (random.random() * 10) - 5
    scale = ((random.random() - 0.5) * 0.1) + 1.0
    m = cv2.getRotationMatrix2D((0, 0), angle, scale)
    m = np.vstack([m, [0, 0, 1]])
    dmean = (np.random.rand(1, 2) - 0.5) * 10
    curve = curve - cmean
    curve = cv2.perspectiveTransform(np.array([curve]), m)
    curve += cmean
    curve += dmean
    return curve[0]
Example #19
Source File: pose_estimation.py From OpenCV-3-x-with-Python-By-Example with MIT License | 5 votes |
def track_target(self, frame):
    self.cur_keypoints, self.cur_descriptors = self.detect_features(frame)
    if len(self.cur_keypoints) < self.min_matches:
        return []
    try:
        matches = self.feature_matcher.knnMatch(self.cur_descriptors, k=2)
    except Exception as e:
        print('Invalid target, please select another with features to extract')
        return []
    matches = [match[0] for match in matches
               if len(match) == 2 and match[0].distance < match[1].distance * 0.75]
    if len(matches) < self.min_matches:
        return []

    matches_using_index = [[] for _ in range(len(self.tracking_targets))]
    for match in matches:
        matches_using_index[match.imgIdx].append(match)

    tracked = []
    for image_index, matches in enumerate(matches_using_index):
        if len(matches) < self.min_matches:
            continue
        target = self.tracking_targets[image_index]
        points_prev = [target.keypoints[m.trainIdx].pt for m in matches]
        points_cur = [self.cur_keypoints[m.queryIdx].pt for m in matches]
        points_prev, points_cur = np.float32((points_prev, points_cur))
        H, status = cv2.findHomography(points_prev, points_cur, cv2.RANSAC, 3.0)
        status = status.ravel() != 0
        if status.sum() < self.min_matches:
            continue
        points_prev, points_cur = points_prev[status], points_cur[status]

        x_start, y_start, x_end, y_end = target.rect
        quad = np.float32([[x_start, y_start], [x_end, y_start],
                           [x_end, y_end], [x_start, y_end]])
        quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)

        track = self.tracked_target(target=target, points_prev=points_prev,
                                    points_cur=points_cur, H=H, quad=quad)
        tracked.append(track)

    tracked.sort(key=lambda x: len(x.points_prev), reverse=True)
    return tracked

# Detect features in the selected ROIs and return the keypoints and descriptors
Example #20
Source File: plane_tracker.py From OpenCV-Python-Tutorial with MIT License | 5 votes |
def track(self, frame):
    '''Returns a list of detected TrackedTarget objects'''
    self.frame_points, frame_descrs = self.detect_features(frame)
    if len(self.frame_points) < MIN_MATCH_COUNT:
        return []
    matches = self.matcher.knnMatch(frame_descrs, k=2)
    matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
    if len(matches) < MIN_MATCH_COUNT:
        return []
    matches_by_id = [[] for _ in range(len(self.targets))]
    for m in matches:
        matches_by_id[m.imgIdx].append(m)
    tracked = []
    for imgIdx, matches in enumerate(matches_by_id):
        if len(matches) < MIN_MATCH_COUNT:
            continue
        target = self.targets[imgIdx]
        p0 = [target.keypoints[m.trainIdx].pt for m in matches]
        p1 = [self.frame_points[m.queryIdx].pt for m in matches]
        p0, p1 = np.float32((p0, p1))
        H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
        status = status.ravel() != 0
        if status.sum() < MIN_MATCH_COUNT:
            continue
        p0, p1 = p0[status], p1[status]

        x0, y0, x1, y1 = target.rect
        quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
        quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)

        track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
        tracked.append(track)
    tracked.sort(key=lambda t: len(t.p0), reverse=True)
    return tracked
Example #21
Source File: homography.py From specularity-removal with GNU General Public License v3.0 | 5 votes |
def visualize_homo(img1, img2, kp1, kp2, matches, homo, mask):
    h, w, d = img1.shape
    pts = [[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]
    pts = np.array(pts, dtype=np.float32).reshape((-1, 1, 2))
    dst = cv.perspectiveTransform(pts, homo)

    img2 = cv.polylines(img2, [np.int32(dst)], True, [255, 0, 0], 3, 8)
    matches_mask = mask.ravel().tolist()
    draw_params = dict(matchesMask=matches_mask,
                       singlePointColor=None,
                       matchColor=(0, 255, 0),
                       flags=2)
    res = cv.drawMatches(img1, kp1, img2, kp2, matches, None, **draw_params)
    return res
Example #22
Source File: rectangle_tracker.py From python-opencv-rectangle-tracker with Apache License 2.0 | 4 votes |
def persTransform(pts, H):
    """Transforms a list of points, `pts`, using the perspective transform `H`."""
    src = np.zeros((len(pts), 1, 2))
    src[:, 0] = pts
    dst = cv2.perspectiveTransform(src, H)
    return np.array(dst[:, 0, :], dtype='float32')
Example #23
Source File: sift.py From airtest with BSD 3-Clause "New" or "Revised" License | 4 votes |
def findall(search_file, image_file, threshold=None, maxcnt=0):
    sch = _cv2open(search_file, 0)
    img = _cv2open(image_file, 0)
    kp_sch, des_sch = sift.detectAndCompute(sch, None)
    kp_img, des_img = sift.detectAndCompute(img, None)
    if len(kp_sch) < MIN_MATCH_COUNT or len(kp_img) < MIN_MATCH_COUNT:
        return None

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    points = []
    while True:
        matches = flann.knnMatch(des_sch, des_img, k=2)
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
        if len(good) < MIN_MATCH_COUNT:
            break
        if maxcnt and len(points) > maxcnt:
            break

        sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        img_pts = np.float32([kp_img[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
        h, w = sch.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        lt, br = dst[0][0], dst[2][0]
        pt = map(int, (lt[0] + w / 2, lt[1] + h / 2))

        qindexes = []
        tindexes = []
        for m in good:
            qindexes.append(m.queryIdx)
            tindexes.append(m.trainIdx)

        def filter_index(indexes, arr):
            # keep only entries whose index was not matched in this round
            r = np.ndarray(0, np.float32)
            for i, item in enumerate(arr):
                if i not in indexes:
                    r = np.append(r, item)
            return r

        kp_sch = filter_index(qindexes, kp_sch)
        des_sch = filter_index(qindexes, des_sch)
        kp_img = filter_index(tindexes, kp_img)
        des_img = filter_index(tindexes, des_img)

        points.append(pt)
    return points
Example #24
Source File: keypoint_base.py From Airtest with Apache License 2.0 | 4 votes |
def _many_good_pts(self, kp_sch, kp_src, good):
    """With >= 4 matched keypoint pairs a homography can be used to locate the target region."""
    sch_pts, img_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(
        -1, 1, 2), np.float32([kp_src[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # M is the transformation matrix
    M, mask = self._find_homography(sch_pts, img_pts)
    matches_mask = mask.ravel().tolist()
    # Filter more accurate points out of good (assuming most of good is correct, guaranteed by ratio=0.7)
    selected = [v for k, v in enumerate(good) if matches_mask[k]]
    # Recompute a more accurate transformation matrix M from all the selected points
    sch_pts, img_pts = np.float32([kp_sch[m.queryIdx].pt for m in selected]).reshape(
        -1, 1, 2), np.float32([kp_src[m.trainIdx].pt for m in selected]).reshape(-1, 1, 2)
    M, mask = self._find_homography(sch_pts, img_pts)
    # Compute the transformed coordinates of the four corners,
    # i.e. the vertices of the target region inside the large image:
    h, w = self.im_search.shape[:2]
    h_s, w_s = self.im_source.shape[:2]
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)

    # trans numpy array to python list: [(a, b), (a1, b1), ...]
    def cal_rect_pts(dst):
        return [tuple(npt[0]) for npt in dst.astype(int).tolist()]

    pypts = cal_rect_pts(dst)
    # Note: although the 4 corners may fall outside the source image, the center point will not
    # (because of the linearity of the refined homography M)
    lt, br = pypts[0], pypts[2]
    middle_point = int((lt[0] + br[0]) / 2), int((lt[1] + br[1]) / 2)
    # The computed target rectangle may be flipped, so normalize it to make sure the mapped
    # "top-left" corner really is the top-left point in the image:
    x_min, x_max = min(lt[0], br[0]), max(lt[0], br[0])
    y_min, y_max = min(lt[1], br[1]), max(lt[1], br[1])
    # The selected target rectangle may run outside the image; clamp it to the boundary:
    x_min, x_max = int(max(x_min, 0)), int(max(x_max, 0))            # clamp to >= 0
    x_min, x_max = int(min(x_min, w_s - 1)), int(min(x_max, w_s - 1))  # clamp to <= w_s - 1
    y_min, y_max = int(max(y_min, 0)), int(max(y_max, 0))            # clamp to >= 0
    y_min, y_max = int(min(y_min, h_s - 1)), int(min(y_max, h_s - 1))  # clamp to <= h_s - 1
    # Corner points of the target region, ordered top-left, bottom-left, bottom-right, top-right:
    # (x_min,y_min) (x_min,y_max) (x_max,y_max) (x_max,y_min)
    pts = np.float32([[x_min, y_min], [x_min, y_max],
                      [x_max, y_max], [x_max, y_min]]).reshape(-1, 1, 2)
    pypts = cal_rect_pts(pts)
    return middle_point, pypts, [x_min, x_max, y_min, y_max, w, h]
Example #25
Source File: sift.py From Airtest with Apache License 2.0 | 4 votes |
def _many_good_pts(im_source, im_search, kp_sch, kp_src, good):
    """With >= 4 matched keypoint pairs a homography can be used to locate the target region."""
    sch_pts, img_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(
        -1, 1, 2), np.float32([kp_src[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # M is the transformation matrix
    M, mask = _find_homography(sch_pts, img_pts)
    matches_mask = mask.ravel().tolist()
    # Filter more accurate points out of good (assuming most of good is correct, guaranteed by ratio=0.7)
    selected = [v for k, v in enumerate(good) if matches_mask[k]]
    # Recompute a more accurate transformation matrix M from all the selected points
    sch_pts, img_pts = np.float32([kp_sch[m.queryIdx].pt for m in selected]).reshape(
        -1, 1, 2), np.float32([kp_src[m.trainIdx].pt for m in selected]).reshape(-1, 1, 2)
    M, mask = _find_homography(sch_pts, img_pts)
    # Compute the transformed coordinates of the four corners,
    # i.e. the vertices of the target region inside the large image:
    h, w = im_search.shape[:2]
    h_s, w_s = im_source.shape[:2]
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)

    # trans numpy array to python list: [(a, b), (a1, b1), ...]
    def cal_rect_pts(dst):
        return [tuple(npt[0]) for npt in dst.astype(int).tolist()]

    pypts = cal_rect_pts(dst)
    # Note: although the 4 corners may fall outside the source image, the center point will not
    # (because of the linearity of the refined homography M)
    lt, br = pypts[0], pypts[2]
    middle_point = int((lt[0] + br[0]) / 2), int((lt[1] + br[1]) / 2)
    # The computed target rectangle may be flipped, so normalize it to make sure the mapped
    # "top-left" corner really is the top-left point in the image:
    x_min, x_max = min(lt[0], br[0]), max(lt[0], br[0])
    y_min, y_max = min(lt[1], br[1]), max(lt[1], br[1])
    # The selected target rectangle may run outside the image; clamp it to the boundary:
    x_min, x_max = int(max(x_min, 0)), int(max(x_max, 0))            # clamp to >= 0
    x_min, x_max = int(min(x_min, w_s - 1)), int(min(x_max, w_s - 1))  # clamp to <= w_s - 1
    y_min, y_max = int(max(y_min, 0)), int(max(y_max, 0))            # clamp to >= 0
    y_min, y_max = int(min(y_min, h_s - 1)), int(min(y_max, h_s - 1))  # clamp to <= h_s - 1
    # Corner points of the target region, ordered top-left, bottom-left, bottom-right, top-right:
    # (x_min,y_min) (x_min,y_max) (x_max,y_max) (x_max,y_min)
    pts = np.float32([[x_min, y_min], [x_min, y_max],
                      [x_max, y_max], [x_max, y_min]]).reshape(-1, 1, 2)
    pypts = cal_rect_pts(pts)
    return middle_point, pypts, [x_min, x_max, y_min, y_max, w, h]
Example #26
Source File: GetDart.py From opencv-steel-darts with GNU General Public License v3.0 | 4 votes |
def DartLocation(x_coord, y_coord):
    try:
        # start a fresh set of points
        points = []
        calFile = open('calibrationData.pkl', 'rb')
        calData = CalibrationData()
        calData = pickle.load(calFile)
        # load the data into the global variables
        global transformation_matrix
        transformation_matrix = calData.transformationMatrix
        global ring_radius
        ring_radius.append(calData.ring_radius[0])
        ring_radius.append(calData.ring_radius[1])
        ring_radius.append(calData.ring_radius[2])
        ring_radius.append(calData.ring_radius[3])
        ring_radius.append(calData.ring_radius[4])
        ring_radius.append(calData.ring_radius[5])  # append the 6 ring radii
        global center_dartboard
        center_dartboard = calData.center_dartboard
        # close the file once we are done reading the data
        calFile.close()

        # print "Raw dart location:"
        # print x_coord, y_coord

        # transform only the hit point with the saved transformation matrix
        dart_loc_temp = np.array([[x_coord, y_coord]], dtype="float32")
        dart_loc_temp = np.array([dart_loc_temp])
        dart_loc = cv2.perspectiveTransform(dart_loc_temp, transformation_matrix)
        new_dart_loc = tuple(dart_loc.reshape(1, -1)[0])
        return new_dart_loc

    # system not calibrated
    except AttributeError as err1:
        print(err1)
        return (-1, -1)

    except NameError as err2:
        # not calibrated error
        print(err2)
        return (-2, -2)

# Returns dartThrow (score, multiplier, angle, magnitude) based on x,y location
Example #27
Source File: result_detail.py From IkaLog with Apache License 2.0 | 4 votes |
def auto_warp(self, context):
    # Automatically detect the screen offset and return the warped image (uses AKAZE)
    frame = context['engine'].get('frame', None)
    if frame is None:
        return None

    keypoints, descs = self.get_keypoints(self.result_detail_normalizer(frame))
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    raw_matches = matcher.knnMatch(descs, trainDescriptors=self.ref_descriptors, k=2)
    p2, p1, kp_pairs = self.filter_matches(keypoints, self.ref_keypoints, raw_matches)

    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d / %d inliers/matched' % (np.sum(status), len(status)))
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))
        raise

    w = 1280
    h = 720

    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    pts2 = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    pts1 = np.float32(cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (0, 0))
    M = cv2.getPerspectiveTransform(pts1, pts2)

    # out = cv2.drawKeypoints(img2, keypoints1, None)

    new_frame = cv2.warpPerspective(frame, M, (w, h))

    # Does the warped image match the mask?
    matched = ImageUtils.match_with_mask(new_frame, self.winlose_gray, 0.997, 0.22)

    if matched:
        return new_frame

    IkaUtils.dprint('%s: auto_warp() function broke the image.' % self)
    return None
Example #28
Source File: getPMatrix.py From AR-BXT-AR4Python with GNU Lesser General Public License v3.0 | 4 votes |
def getMatches(self, sceneImage):
    """
    sceneImage: the scene image as an array
    return dst: the projected corner points of the detected marker
    """
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(self.MarkImage[:, :, 0], None)
    kp2, des2 = sift.detectAndCompute(sceneImage[:, :, 0], None)

    # create FLANN matcher object
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # Match descriptors.
    matches = flann.knnMatch(des1, des2, k=2)

    # Keep only matches that pass the ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    if len(good) < self.MIN_MATCH_COUNT:
        return None

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()

    h, w = self.MarkImage.shape[:2]
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)

    draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                       singlePointColor=None,
                       matchesMask=matchesMask,  # draw only inliers
                       flags=2)

    self.SceneImage = sceneImage
    self.DrawParams = draw_params
    self.KP1 = kp1
    self.KP2 = kp2
    self.GoodMatches = good

    return dst
Example #29
Source File: getPMatrix.py From AR-BXT-AR4Python with GNU Lesser General Public License v3.0 | 4 votes |
def getMatches(self, sceneImage):
    """
    sceneImage: the scene image as an array
    return dst: the projected corner points of the detected marker
    """
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(self.MarkImage[:, :, 0], None)
    kp2, des2 = sift.detectAndCompute(sceneImage[:, :, 0], None)

    # create FLANN matcher object
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # Match descriptors.
    matches = flann.knnMatch(des1, des2, k=2)

    # Keep only matches that pass the ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    if len(good) < self.MIN_MATCH_COUNT:
        return None

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()

    h, w = self.MarkImage.shape[:2]
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)

    draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                       singlePointColor=None,
                       matchesMask=matchesMask,  # draw only inliers
                       flags=2)

    self.SceneImage = sceneImage
    self.DrawParams = draw_params
    self.KP1 = kp1
    self.KP2 = kp2
    self.GoodMatches = good

    return dst
Example #30
Source File: YuHunModule.py From yysScript with Apache License 2.0 | 4 votes |
def GetLocation(target, kp2, des2):
    """
    Get the position of the target image inside the screenshot.
    :param target:
    :param screenShot:
    :return: coordinates (x, y) in the OpenCV coordinate system
    """
    MIN_MATCH_COUNT = 10
    img1 = target  # cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)  # query image
    # img2 = screenShot
    # img2 = cv2.cvtColor(screenShot, cv2.COLOR_BGR2GRAY)  # train image
    # img2 = cv2.resize(img2, dsize=None, fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST)

    # Find the keypoints and descriptors with SIFT
    kp1, des1 = SIFT.detectAndCompute(img1, None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        if M is not None:
            dst = cv2.perspectiveTransform(pts, M)
            arr = np.int32(dst)
            midPosArr = arr[0] + (arr[2] - arr[0]) // 2
            midPos = (midPosArr[0][0], midPosArr[0][1])
            # show = cv2.circle(img2, midPos, 30, (255, 255, 255), thickness=5)
            # cv2.imshow('s', show)
            # cv2.waitKey()
            # cv2.destroyAllWindows()
            return midPos
        else:
            return None
    else:
        return None