Python cv2.calibrateCamera() Examples
The following are 12 code examples of cv2.calibrateCamera(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
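Before the project-specific examples, here is a minimal, self-contained sketch of the typical cv2.calibrateCamera() workflow with a chessboard target. The 9x6 pattern size, the chessboards/*.jpg glob, and the variable names are illustrative assumptions, not taken from any of the projects below.

# Minimal calibrateCamera() sketch (assumes a 9x6 inner-corner chessboard and
# calibration shots under chessboards/*.jpg -- adjust both for your setup).
import glob
import cv2
import numpy as np

pattern = (9, 6)
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

objpoints, imgpoints = [], []
for fname in glob.glob('chessboards/*.jpg'):
    gray = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, pattern, None)
    if found:
        objpoints.append(objp)
        imgpoints.append(corners)

# rms: re-projection error, mtx: 3x3 camera matrix, dist: distortion coefficients
rms, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)
print("RMS re-projection error:", rms)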
Example #1
Source File: utils.py From cvcalib with Apache License 2.0 | 6 votes |
def __calibrate_intrinsics(camera, image_points, object_points, flags, criteria):
    """
    Calibrate intrinsics of the provided camera using provided image & object points & calibration flags & criteria.
    @param camera: camera to calibrate
    @param image_points: points in images taken with the camera that correspond to the 3d object_points.
    @param object_points: 3d points on the object that appears in *each* of the images. Usually, inner corners of a
    calibration board. Note: assumes *the same* object appears in all of the images.
    @param flags: OpenCV camera calibration flags. For details, see OpenCV calib3d documentation, calibrate function.
    @param criteria: OpenCV criteria.
    @return: estimated object-space rotation & translation vectors of the camera (assuming object is static)
    """
    # OpenCV prefers [width x height] as "Size" to [height x width]
    frame_dims = (camera.intrinsics.resolution[1], camera.intrinsics.resolution[0])
    start = time.time()
    camera.intrinsics.error, camera.intrinsics.intrinsic_mat, camera.intrinsics.distortion_coeffs, \
        rotation_vectors, translation_vectors = \
        cv2.calibrateCamera(objectPoints=np.array([object_points]*len(image_points)),
                            imagePoints=image_points,
                            imageSize=frame_dims,
                            cameraMatrix=camera.intrinsics.intrinsic_mat,
                            distCoeffs=camera.intrinsics.distortion_coeffs,
                            flags=flags,
                            criteria=criteria)
    end = time.time()
    camera.intrinsics.time = end - start
    camera.intrinsics.timestamp = end
    camera.intrinsics.calibration_image_count = len(image_points)
    return rotation_vectors, translation_vectors
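Example #1 leaves the flags and criteria arguments to the caller. As an illustration only (these values are assumptions, not taken from cvcalib), the two arguments are typically built from OpenCV's calibration flag constants and a termination-criteria tuple:

# Possible values for the flags/criteria parameters of __calibrate_intrinsics()
# (assumed for illustration; cvcalib may use different settings).
flags = cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_FIX_ASPECT_RATIO
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-6)

# camera, image_points and object_points are assumed to be prepared elsewhere
# in the same module.
rvecs, tvecs = __calibrate_intrinsics(camera, image_points, object_points, flags, criteria)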
Example #2
Source File: calibrate_camera.py From derplearning with MIT License | 5 votes |
def live_calibrate(camera, pattern_shape, n_matches_needed):
    """ Find calibration parameters as the user moves a checkerboard in front of the camera """
    print("Looking for %s checkerboard" % (pattern_shape,))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    example_3d = np.zeros((pattern_shape[0] * pattern_shape[1], 3), np.float32)
    example_3d[:, :2] = np.mgrid[0 : pattern_shape[1], 0 : pattern_shape[0]].T.reshape(-1, 2)
    points_3d = []
    points_2d = []
    while len(points_3d) < n_matches_needed:
        ret, frame = camera.cap.read()
        assert ret
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findCirclesGrid(
            gray_frame, pattern_shape, flags=cv2.CALIB_CB_ASYMMETRIC_GRID
        )
        cv2.imshow("camera", frame)
        if ret:
            points_3d.append(example_3d.copy())
            points_2d.append(corners)
            print("Found calibration %i of %i" % (len(points_3d), n_matches_needed))
            drawn_frame = cv2.drawChessboardCorners(frame, pattern_shape, corners, ret)
            cv2.imshow("calib", drawn_frame)
        cv2.waitKey(10)
    ret, camera_matrix, distortion_coefficients, _, _ = cv2.calibrateCamera(
        points_3d, points_2d, gray_frame.shape[::-1], None, None
    )
    assert ret
    return camera_matrix, distortion_coefficients
Example #3
Source File: camera_calibration.py From lane_tracker with GNU General Public License v3.0 | 5 votes |
def calibrate_camera(filepaths, nx, ny):
    # Compute camera matrix and distortion coefficients

    # Get the calibration points
    object_points, image_points, image_size = get_calibration_points(filepaths, nx, ny)

    # Compute camera calibration given object points and image points
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(object_points, image_points, image_size, None, None)

    # Save the camera calibration result to disk (we won't worry about rvecs / tvecs)
    cam_calib = {"cam_matrix": mtx, "dist_coeffs": dist}
    with open("cam_calib.p", "wb") as f:
        pickle.dump(cam_calib, f)

    return mtx, dist

# Run the calibration process
# Specify the filepaths to the calibration images
# The images are expected to contain only chessboard patterns and a bright background
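Example #3 (like several of the later examples) persists only the camera matrix and distortion coefficients. A typical next step, sketched here under the assumption that cam_calib.p and a test image test.jpg exist, is to load them back and undistort frames with cv2.undistort():

# Load the saved calibration and undistort an image (file names are assumptions).
import pickle
import cv2

with open("cam_calib.p", "rb") as f:
    cam_calib = pickle.load(f)
mtx, dist = cam_calib["cam_matrix"], cam_calib["dist_coeffs"]

img = cv2.imread("test.jpg")
h, w = img.shape[:2]
# Optionally refine the matrix so the undistorted image keeps all source pixels.
new_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), alpha=1)
undistorted = cv2.undistort(img, mtx, dist, None, new_mtx)
cv2.imwrite("test_undistorted.jpg", undistorted)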
Example #4
Source File: camera_calibration.py From Advanced_Lane_Lines with MIT License | 4 votes |
def calibrate(drawconer=False):
    '''
    Read the calibration images, do the camera calibration, and output the result to a pickle file.
    If drawconer is True, draw the corners on the chessboard images and save them to image files.
    '''
    # !!! IMPORTANT: set nx, ny according to the calibration chessboard pictures.
    nx = 9
    ny = 6

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0), ... (8,5,0)
    objp = np.zeros((nx*ny,3), np.float32)
    objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane

    # Make a list of calibration images
    images = glob.glob('chessboard_img/calibration*.jpg')
    print("Reading the calibration files...")

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        print("Searching corners on ", fname, "...")
        ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)
            if drawconer:
                cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
                write_name = 'corners_found'+str(idx)+'.jpg'
                cv2.imwrite(write_name, img)
                cv2.imshow('img', img)
                cv2.waitKey(500)

    cv2.destroyAllWindows()

    # Get image size
    img_size = (img.shape[1], img.shape[0])

    # Do camera calibration given object points and image points
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)

    # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
    print("Saving the parameters to file... >> camera_cal.p")
    dist_pickle = {}
    dist_pickle["mtx"] = mtx
    dist_pickle["dist"] = dist
    pickle_file = open("camera_cal.p", "wb")
    pickle.dump(dist_pickle, pickle_file)
    pickle_file.close()
Example #5
Source File: calibration.py From vehicle-detection with GNU General Public License v3.0 | 4 votes |
def calibrate_camera(nx, ny, basepath):
    """
    :param nx: number of grids in x axis
    :param ny: number of grids in y axis
    :param basepath: path containing the calibration images
    :return: (mtx, dist); also writes the calibration into basepath as calibration_pickle.p
    """
    objp = np.zeros((nx*ny,3), np.float32)
    objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob(path.join(basepath, 'calibration*.jpg'))

    # Step through the list and search for chessboard corners
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            img = cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
            cv2.imshow('input image', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()

    # calibrate the camera
    img_size = (img.shape[1], img.shape[0])
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)

    # Save the camera calibration result for later use (we don't use rvecs / tvecs)
    dist_pickle = {}
    dist_pickle["mtx"] = mtx
    dist_pickle["dist"] = dist
    destnation = path.join(basepath, 'calibration_pickle.p')
    pickle.dump(dist_pickle, open(destnation, "wb"))
    print("calibration data is written into: {}".format(destnation))

    return mtx, dist
Example #6
Source File: calibration_utils.py From VerifAI with BSD 3-Clause "New" or "Revised" License | 4 votes |
def calibrate_camera(calib_images_dir, verbose=False):
    """
    Calibrate the camera given a directory containing calibration chessboards.

    :param calib_images_dir: directory containing chessboard frames
    :param verbose: if True, draw and show chessboard corners
    :return: calibration parameters
    """
    assert path.exists(calib_images_dir), '"{}" must exist and contain calibration images.'.format(calib_images_dir)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (8,5,0)
    objp = np.zeros((6 * 9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob(path.join(calib_images_dir, 'calibration*.jpg'))

    # Step through the list and search for chessboard corners
    for filename in images:
        img = cv2.imread(filename)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        pattern_found, corners = cv2.findChessboardCorners(gray, (9, 6), None)

        if pattern_found is True:
            objpoints.append(objp)
            imgpoints.append(corners)

            if verbose:
                # Draw and display the corners
                img = cv2.drawChessboardCorners(img, (9, 6), corners, pattern_found)
                cv2.imshow('img', img)
                cv2.waitKey(500)

    if verbose:
        cv2.destroyAllWindows()

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)

    return ret, mtx, dist, rvecs, tvecs
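Example #6 returns the rotation and translation vectors alongside the intrinsics, which makes it easy to sanity-check the calibration by re-projecting the object points. A minimal sketch, assuming the objpoints/imgpoints lists and the calibrateCamera outputs from the function above:

# Mean re-projection error: project the 3d points back with the estimated
# poses/intrinsics and compare against the detected corners.
total_error = 0
for objp_i, imgp_i, rvec, tvec in zip(objpoints, imgpoints, rvecs, tvecs):
    projected, _ = cv2.projectPoints(objp_i, rvec, tvec, mtx, dist)
    total_error += cv2.norm(imgp_i, projected, cv2.NORM_L2) / len(projected)
print("mean re-projection error:", total_error / len(objpoints))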
Example #7
Source File: camera_calibration.py From lane_tracker with GNU General Public License v3.0 | 4 votes |
def get_calibration_points(images, nx, ny):
    '''
    Generate two lists of calibration points from a set of calibration images
    of chess boards, to be used with `cv2.calibrateCamera()`.

    It is recommended that `images` contain at least 20 images. All images are
    expected to be of identical size and to contain the same, complete chess
    board pattern.

    Args:
        images (array-like): A list of file names of the images to be used for calibration.
        nx (int): The number of horizontal inner corners (i.e. corners where two white
            and two black tiles meet) of the chess board.
        ny (int): The number of vertical inner corners (i.e. corners where two white
            and two black tiles meet) of the chess board.

    Returns:
        object_points (list): The list of 3-D object points for calibration.
        image_points (list): The list of 2-D image points for calibration.
        image_size (tuple): The (width, height) of the calibration images.
    '''

    image_size = []

    # Arrays to store object points and image points
    # of all calibration images for `cv2.calibrateCamera()`.
    object_points = []  # 3-D points in real world space
    image_points = []  # 2-D points in image plane.

    # All calibration images are expected to contain the same calibration pattern,
    # so the object points are the same for all images.
    # Format: (0,0,0), (1,0,0), (2,0,0), ..., (8,5,0)
    # The third coordinate is always zero as the points lie in a plane.
    objp = np.zeros((nx*ny,3), np.float32)
    objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)

    # Step through the list and search for chess board corners
    for i, fname in enumerate(images):
        img = cv2.imread(fname)

        size = (img.shape[1], img.shape[0])
        if i == 0:
            image_size = size
        if size != image_size:
            raise ValueError("Expected all images to have identical size, but found varying sizes.")
        image_size = size

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

        # If found, add object points, image points
        if ret == True:
            object_points.append(objp)
            image_points.append(corners)

    return object_points, image_points, image_size
Example #8
Source File: getPMatrix.py From AR-BXT-AR4Python with GNU Lesser General Public License v3.0 | 4 votes |
def getP(self, dst):
    """
    dst: key points of the marker.
    return self.MTX, self.DIST, self.RVEC, self.TVEC:
        camera intrinsics, distortion coefficients, rotation vector, translation vector
    """
    if self.SceneImage is None:
        return None

    corners = np.float32([dst[1], dst[0], dst[2], dst[3]])
    gray = cv2.cvtColor(self.SceneImage, cv2.COLOR_BGR2GRAY)

    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (0,1,0), (1,1,0)
    objp = np.zeros((2*2,3), np.float32)
    objp[:,:2] = np.mgrid[0:2,0:2].T.reshape(-1,2)

    corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)

    if self.PTimes < self.PCount or self.PCount == 0:
        # Arrays to store object points and image points from all the images.
        objpoints = self.OBJPoints  # 3d points in real world space
        imgpoints = self.IMGPoints  # 2d points in image plane.

        if len(imgpoints) == 0 or np.sum(np.abs(imgpoints[-1] - corners2)) != 0:
            objpoints.append(objp)
            imgpoints.append(corners2)

        # Find mtx, dist, rvecs, tvecs
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
        if not ret:
            self.PTimes += 1
            return None

        self.OBJPoints = objpoints
        self.IMGPoints = imgpoints
        self.MTX = mtx
        self.DIST = dist
        self.RVEC = rvecs[0]
        self.TVEC = tvecs[0]
    else:
        # Find the rotation and translation vectors.
        _, rvec, tvec, _ = cv2.solvePnPRansac(objp, corners2, self.MTX, self.DIST)
        self.RVEC = rvec
        self.TVEC = tvec

    self.PTimes += 1

    return self.MTX, self.DIST, self.RVEC, self.TVEC
Example #9
Source File: getPMatrix.py From AR-BXT-AR4Python with GNU Lesser General Public License v3.0 | 4 votes |
def getP(self, dst):
    """
    dst: key points of the marker.
    return self.MTX, self.DIST, self.RVEC, self.TVEC:
        camera intrinsics, distortion coefficients, rotation vector, translation vector
    """
    if self.SceneImage is None:
        return None

    corners = np.float32([dst[1], dst[0], dst[2], dst[3]])
    gray = cv2.cvtColor(self.SceneImage, cv2.COLOR_BGR2GRAY)

    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (0,1,0), (1,1,0)
    objp = np.zeros((2*2,3), np.float32)
    objp[:,:2] = np.mgrid[0:2,0:2].T.reshape(-1,2)

    corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)

    if self.PTimes < self.PCount or self.PCount == 0:
        # Arrays to store object points and image points from all the images.
        objpoints = self.OBJPoints  # 3d points in real world space
        imgpoints = self.IMGPoints  # 2d points in image plane.

        if len(imgpoints) == 0 or np.sum(np.abs(imgpoints[-1] - corners2)) != 0:
            objpoints.append(objp)
            imgpoints.append(corners2)

        # Find mtx, dist, rvecs, tvecs
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
        if not ret:
            self.PTimes += 1
            return None

        self.OBJPoints = objpoints
        self.IMGPoints = imgpoints
        self.MTX = mtx
        self.DIST = dist
        self.RVEC = rvecs[0]
        self.TVEC = tvecs[0]
    else:
        # Find the rotation and translation vectors.
        _, rvec, tvec, _ = cv2.solvePnPRansac(objp, corners2, self.MTX, self.DIST)
        self.RVEC = rvec
        self.TVEC = tvec

    self.PTimes += 1

    return self.MTX, self.DIST, self.RVEC, self.TVEC
Example #10
Source File: single_camera_calibration.py From stereoDepth with Apache License 2.0 | 4 votes |
def calibrate(dirpath, prefix, image_format, square_size, width=9, height=6):
    """ Apply camera calibration operation for images in the given directory path. """

    # termination criteria for the sub-pixel corner refinement
    # (not shown in the original listing; standard values assumed)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (8,5,0)
    objp = np.zeros((height*width, 3), np.float32)
    objp[:, :2] = np.mgrid[0:width, 0:height].T.reshape(-1, 2)

    objp = objp * square_size  # Create real world coords. Use your metric.

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    # Directory path correction. Remove the last character if it is '/'
    if dirpath[-1:] == '/':
        dirpath = dirpath[:-1]

    # Get the images
    images = glob.glob(dirpath + '/' + prefix + '*.' + image_format)

    # Iterate through the images and find chessboard corners. Add them to the arrays.
    # If OpenCV can't find the corners in an image, we discard the image.
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (width, height), None)

        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)

            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners2)

            # Draw the corners (show the image with cv2.imshow to check that the pattern was found)
            img = cv2.drawChessboardCorners(img, (width, height), corners2, ret)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)

    return [ret, mtx, dist, rvecs, tvecs]
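Example #10 multiplies the object points by square_size so that the estimated translation vectors come out in that physical unit; the focal lengths in the camera matrix remain in pixels either way. A rough, assumed sanity check is to convert those pixel focal lengths into a field of view (the directory, prefix, square size and resolution below are placeholders):

# Rough field-of-view check from the estimated camera matrix (a sketch, not from stereoDepth).
import math

ret, mtx, dist, rvecs, tvecs = calibrate("calib_images", "left_", "jpg", square_size=0.025)
w, h = 1280, 720  # assumed resolution of the calibration images
fov_x = 2 * math.degrees(math.atan(w / (2 * mtx[0, 0])))
fov_y = 2 * math.degrees(math.atan(h / (2 * mtx[1, 1])))
print("FOV: %.1f x %.1f degrees" % (fov_x, fov_y))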
Example #11
Source File: calibration.py From driving-lane-departure-warning with GNU General Public License v3.0 | 4 votes |
def calibrate_camera(nx, ny, basepath):
    """
    :param nx: number of grids in x axis
    :param ny: number of grids in y axis
    :param basepath: path containing the calibration images
    :return: (mtx, dist); also writes the calibration into basepath as calibration_pickle.p
    """
    objp = np.zeros((nx*ny,3), np.float32)
    objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob(path.join(basepath, 'calibration*.jpg'))

    # Step through the list and search for chessboard corners
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            img = cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
            cv2.imshow('input image', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()

    # calibrate the camera
    img_size = (img.shape[1], img.shape[0])
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)

    # Save the camera calibration result for later use (we don't use rvecs / tvecs)
    dist_pickle = {}
    dist_pickle["mtx"] = mtx
    dist_pickle["dist"] = dist
    destnation = path.join(basepath, 'calibration_pickle.p')
    pickle.dump(dist_pickle, open(destnation, "wb"))
    print("calibration data is written into: {}".format(destnation))

    return mtx, dist
Example #12
Source File: calibration_utils.py From depthai with MIT License | 3 votes |
def stereo_calibrate(self):
    """Calibrate camera and construct Homography."""
    # init camera calibrations
    rt, self.M1, self.d1, self.r1, self.t1 = cv2.calibrateCamera(
        self.objpoints, self.imgpoints_l, self.img_shape, None, None)
    rt, self.M2, self.d2, self.r2, self.t2 = cv2.calibrateCamera(
        self.objpoints, self.imgpoints_r, self.img_shape, None, None)

    # config
    flags = 0
    # flags |= cv2.CALIB_FIX_ASPECT_RATIO
    flags |= cv2.CALIB_USE_INTRINSIC_GUESS
    # flags |= cv2.CALIB_SAME_FOCAL_LENGTH
    # flags |= cv2.CALIB_ZERO_TANGENT_DIST
    flags |= cv2.CALIB_RATIONAL_MODEL
    # flags |= cv2.CALIB_FIX_K1
    # flags |= cv2.CALIB_FIX_K2
    # flags |= cv2.CALIB_FIX_K3
    # flags |= cv2.CALIB_FIX_K4
    # flags |= cv2.CALIB_FIX_K5
    # flags |= cv2.CALIB_FIX_K6
    stereocalib_criteria = (cv2.TERM_CRITERIA_COUNT + cv2.TERM_CRITERIA_EPS, 100, 1e-5)

    # stereo calibration procedure
    ret, self.M1, self.d1, self.M2, self.d2, R, T, E, F = cv2.stereoCalibrate(
        self.objpoints, self.imgpoints_l, self.imgpoints_r,
        self.M1, self.d1, self.M2, self.d2, self.img_shape,
        criteria=stereocalib_criteria, flags=flags)

    assert ret < 1.0, "[ERROR] Calibration RMS error should be < 1.0 (got %f). Re-try image capture." % (ret)
    print("[OK] Calibration successful w/ RMS error=" + str(ret))

    # construct Homography
    plane_depth = 40000000.0  # arbitrary plane depth
    # TODO: Need to understand effect of plane_depth. Why does this improve some boards' cals?
    n = np.array([[0.0], [0.0], [-1.0]])
    d_inv = 1.0 / plane_depth
    H = (R - d_inv * np.dot(T, n.transpose()))
    self.H = np.dot(self.M2, np.dot(H, np.linalg.inv(self.M1)))
    self.H /= self.H[2, 2]

    # rectify Homography for right camera
    disparity = (self.M1[0, 0] * T[0] / plane_depth)
    self.H[0, 2] -= disparity
    self.H = self.H.astype(np.float32)
    print("Rectifying Homography...")
    print(self.H)
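After a stereoCalibrate() run like Example #12, the more common OpenCV path (as opposed to the hand-built homography above) is cv2.stereoRectify() plus cv2.initUndistortRectifyMap() and cv2.remap(). A rough sketch, assuming M1/d1/M2/d2, R, T and img_shape are the values computed above and left_img/right_img are a captured stereo pair:

# Standard rectification pipeline as an alternative to the manual homography
# (M1, d1, M2, d2, R, T, img_shape, left_img, right_img are assumed inputs).
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
    M1, d1, M2, d2, img_shape, R, T, alpha=0)
map1x, map1y = cv2.initUndistortRectifyMap(M1, d1, R1, P1, img_shape, cv2.CV_32FC1)
map2x, map2y = cv2.initUndistortRectifyMap(M2, d2, R2, P2, img_shape, cv2.CV_32FC1)
left_rect = cv2.remap(left_img, map1x, map1y, cv2.INTER_LINEAR)
right_rect = cv2.remap(right_img, map2x, map2y, cv2.INTER_LINEAR)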