Python cv2.recoverPose() Examples
The following are 6 code examples of cv2.recoverPose().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module cv2, or try the search function.
Example #1
Source File: evaluation.py From GIFT with Apache License 2.0 | 10 votes |
def estimate_relative_pose_from_correspondence(pts1, pts2, K1, K2):
    """Estimate the relative pose between two views from 2D-2D correspondences.

    Args:
        pts1: (N, 2) pixel coordinates in the first (left) image.
        pts2: (N, 2) pixel coordinates in the second (right) image.
        K1, K2: 3x3 camera intrinsic matrices for the two views.

    Returns:
        Tuple of (inlier_mask, R_est, t_est): a boolean RANSAC inlier mask of
        length N, and the estimated rotation matrix / translation direction
        (t is up to scale) taking points from view 1 to view 2.
    """
    # Average focal length, used to express the pixel-level RANSAC threshold
    # in normalized image coordinates.
    f_avg = (K1[0, 0] + K2[0, 0]) / 2
    pts1 = np.ascontiguousarray(pts1, np.float32)
    pts2 = np.ascontiguousarray(pts2, np.float32)

    # Undistort/normalize so the essential matrix can be estimated with an
    # identity camera (focal=1, principal point at the origin).
    pts_l_norm = cv2.undistortPoints(np.expand_dims(pts1, axis=1),
                                     cameraMatrix=K1, distCoeffs=None)
    pts_r_norm = cv2.undistortPoints(np.expand_dims(pts2, axis=1),
                                     cameraMatrix=K2, distCoeffs=None)

    E, mask = cv2.findEssentialMat(pts_l_norm, pts_r_norm, focal=1.0, pp=(0., 0.),
                                   method=cv2.RANSAC, prob=0.999,
                                   threshold=3.0 / f_avg)
    points, R_est, t_est, mask_pose = cv2.recoverPose(E, pts_l_norm, pts_r_norm)

    # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the correct dtype alias and is backward-compatible.
    return mask[:, 0].astype(bool), R_est, t_est
Example #2
Source File: utils_geom.py From pyslam with GNU General Public License v3.0 | 5 votes |
def estimate_pose_ess_mat(kpn_ref, kpn_cur, method=cv2.RANSAC, prob=0.999, threshold=0.0003):
    """Estimate the relative pose Trc between two frames from normalized keypoints.

    Uses the five-point essential-matrix solver by D. Nister; the keypoints are
    assumed to already be in normalized image coordinates, hence focal=1 and a
    principal point at the origin.

    Args:
        kpn_ref: normalized keypoints in the reference frame.
        kpn_cur: normalized keypoints in the current frame.
        method: robust estimator passed to cv2.findEssentialMat.
        prob: RANSAC confidence.
        threshold: RANSAC threshold in normalized coordinates.

    Returns:
        Tuple of (Trc, mask_mat): the homogeneous transform with respect to the
        'ref' frame, and the essential-matrix inlier mask.
    """
    essential, inlier_mask = cv2.findEssentialMat(
        kpn_cur, kpn_ref,
        focal=1, pp=(0., 0.),
        method=method, prob=prob, threshold=threshold)
    _, rot, trans, _ = cv2.recoverPose(essential, kpn_cur, kpn_ref,
                                       focal=1, pp=(0., 0.))
    return poseRt(rot, trans.T), inlier_mask  # Trc, mask_mat
Example #3
Source File: initializer.py From pyslam with GNU General Public License v3.0 | 5 votes |
def estimatePose(self, kpn_ref, kpn_cur):
    """Estimate the relative pose Trc from normalized keypoint correspondences.

    Uses the five-point essential-matrix solver by D. Nister. Stores the
    RANSAC inlier mask in self.mask_match as a side effect.

    Args:
        kpn_ref: normalized keypoints in the reference frame.
        kpn_cur: normalized keypoints in the current frame.

    Returns:
        Homogeneous transformation matrix Trc with respect to the 'ref'
        frame, so that pr_ = Trc * pc_.
    """
    # Keypoints are already normalized, so use an identity camera model.
    essential, self.mask_match = cv2.findEssentialMat(
        kpn_cur, kpn_ref,
        focal=1, pp=(0., 0.),
        method=cv2.RANSAC, prob=kRansacProb,
        threshold=kRansacThresholdNormalized)
    _, rot, trans, _ = cv2.recoverPose(essential, kpn_cur, kpn_ref,
                                       focal=1, pp=(0., 0.))
    return poseRt(rot, trans.T)  # Trc
Example #4
Source File: visual_odometry.py From pyslam with GNU General Public License v3.0 | 5 votes |
def estimatePose(self, kps_ref, kps_cur):
    """Estimate the relative rotation and translation between two frames.

    Undistorts and unprojects the pixel keypoints to normalized coordinates
    (cached in self.kpn_ref / self.kpn_cur), then recovers the pose either
    from an essential matrix (five-point solver by D. Nister — more robust)
    or, for testing, from a fundamental matrix lifted via E = K^T F K.
    Stores the estimator's inlier mask in self.mask_match as a side effect.

    Args:
        kps_ref: pixel keypoints in the reference frame.
        kps_cur: pixel keypoints in the current frame.

    Returns:
        Tuple (R, t): rotation Rrc and translation trc with respect to the
        'ref' frame (t is up to scale).
    """
    undist_ref = self.cam.undistort_points(kps_ref)
    undist_cur = self.cam.undistort_points(kps_cur)
    self.kpn_ref = self.cam.unproject_points(undist_ref)
    self.kpn_cur = self.cam.unproject_points(undist_cur)

    if kUseEssentialMatrixEstimation:
        # Five-point essential-matrix solver on normalized coordinates.
        essential, self.mask_match = cv2.findEssentialMat(
            self.kpn_cur, self.kpn_ref,
            focal=1, pp=(0., 0.),
            method=cv2.RANSAC, prob=kRansacProb,
            threshold=kRansacThresholdNormalized)
    else:
        # Just for the hell of testing fundamental matrix fitting ;-)
        fundamental, self.mask_match = self.computeFundamentalMatrix(undist_cur, undist_ref)
        essential = self.cam.K.T @ fundamental @ self.cam.K  # E = K.T * F * K

    # Do not remove outliers here: features flagged as outliers now may be
    # matched and recognized as inliers in subsequent frames.
    _, rot, trans, _ = cv2.recoverPose(essential, self.kpn_cur, self.kpn_ref,
                                       focal=1, pp=(0., 0.))
    return rot, trans  # Rrc, trc (with respect to 'ref' frame)
Example #5
Source File: visual_odometry.py From Monocular-Visual-Inertial-Odometry with MIT License | 5 votes |
def processSecondFrame(self):
    """Bootstrap the trajectory from the first image pair.

    Tracks features from the previous frame into the new one, recovers the
    initial rotation/translation from the essential matrix, and switches the
    pipeline into the default per-frame processing stage.
    """
    self.px_ref, self.px_cur = featureTracking(
        self.last_frame, self.new_frame, self.px_ref)
    essential, _ = cv2.findEssentialMat(
        self.px_cur, self.px_ref,
        focal=self.focal, pp=self.pp,
        method=cv2.RANSAC, prob=0.999, threshold=1.0)
    _, self.cur_R, self.cur_t, _ = cv2.recoverPose(
        essential, self.px_cur, self.px_ref,
        focal=self.focal, pp=self.pp)
    self.frame_stage = STAGE_DEFAULT_FRAME
    # The current keypoints become the reference set for the next frame.
    self.px_ref = self.px_cur
Example #6
Source File: visual_odometry.py From Monocular-Visual-Inertial-Odometry with MIT License | 5 votes |
def processFrame(self, frame_id):
    """Process one monocular frame: track, recover pose, accumulate motion.

    Tracks features from the previous frame, estimates the frame-to-frame
    rotation/translation via the essential matrix, and composes it into the
    running pose (cur_R, cur_t) scaled by the ground-truth absolute scale.
    Re-detects features when too few survive tracking.

    Args:
        frame_id: index of the frame, used to look up the absolute scale.
    """
    self.px_ref, self.px_cur = featureTracking(
        self.last_frame, self.new_frame, self.px_ref)
    essential, _ = cv2.findEssentialMat(
        self.px_cur, self.px_ref,
        focal=self.focal, pp=self.pp,
        method=cv2.RANSAC, prob=0.999, threshold=1.0)
    _, rel_R, rel_t, _ = cv2.recoverPose(
        essential, self.px_cur, self.px_ref,
        focal=self.focal, pp=self.pp)

    # Monocular VO recovers translation only up to scale; fold in the
    # externally provided absolute scale, skipping near-degenerate motions.
    absolute_scale = self.getAbsoluteScale(frame_id)
    if absolute_scale > 0.1:
        self.cur_t = self.cur_t + absolute_scale * self.cur_R.dot(rel_t)
        self.cur_R = rel_R.dot(self.cur_R)

    # Replenish the feature set when tracking has lost too many points.
    if self.px_ref.shape[0] < kMinNumFeature:
        self.px_cur = self.detector.detect(self.new_frame)
        self.px_cur = np.array([kp.pt for kp in self.px_cur], dtype=np.float32)

    self.px_ref = self.px_cur