Python cv2.undistort() Examples
The following are 30 code examples of cv2.undistort(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
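Before the project examples, here is a minimal sketch of the typical cv2.undistort() workflow: compute a refined camera matrix with cv2.getOptimalNewCameraMatrix(), undistort, and crop to the returned region of interest. The intrinsics and distortion coefficients below are placeholder values; in practice they come from cv2.calibrateCamera().

import cv2
import numpy as np

# Placeholder intrinsics and distortion coefficients; in a real pipeline
# these come from cv2.calibrateCamera() on calibration-pattern images.
camera_matrix = np.array([[1000.0, 0.0, 640.0],
                          [0.0, 1000.0, 360.0],
                          [0.0, 0.0, 1.0]])
dist_coeffs = np.array([-0.2, 0.05, 0.0, 0.0, 0.0])

img = cv2.imread("frame.jpg")
h, w = img.shape[:2]

# Refine the camera matrix for the undistorted view and get the valid ROI.
new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
    camera_matrix, dist_coeffs, (w, h), 1, (w, h))

# Undistort, then crop to the valid region returned above.
undistorted = cv2.undistort(img, camera_matrix, dist_coeffs, None, new_camera_matrix)
x, y, rw, rh = roi
undistorted = undistorted[y:y + rh, x:x + rw]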
Example #1
Source File: calibrate_camera.py From derplearning with MIT License | 9 votes |
def live_undistort(camera, camera_matrix, distortion_coefficients):
    """ Using a given calibration matrix, display the distorted, undistorted, and cropped frame"""
    scaled_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, distortion_coefficients, camera.size, 1, camera.size
    )
    while True:
        ret, frame = camera.cap.read()
        assert ret
        distorted_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        undistorted_frame = cv2.undistort(
            distorted_frame, camera_matrix, distortion_coefficients, None, scaled_camera_matrix,
        )
        roi_x, roi_y, roi_w, roi_h = roi
        cropped_frame = undistorted_frame[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w]
        cv2.imshow("distorted %s" % (distorted_frame.shape,), distorted_frame)
        cv2.imshow("undistorted %s" % (undistorted_frame.shape,), undistorted_frame)
        cv2.imshow("cropped %s" % (cropped_frame.shape,), cropped_frame)
        cv2.waitKey(10)
Example #2
Source File: camera_calibration.py From Advanced_Lane_Lines with MIT License | 7 votes |
def test():
    """ Read the pickle file on disk, undistort a sample image,
    and show the original/undistorted images. """
    print("Reading the pickle file...")
    pickle_file = open("./camera_cal.p", "rb")
    dist_pickle = pickle.load(pickle_file)
    mtx = dist_pickle["mtx"]
    dist = dist_pickle["dist"]
    pickle_file.close()

    print("Reading the sample image...")
    img = cv2.imread('corners_founded/corners_found13.jpg')
    img_size = (img.shape[1], img.shape[0])
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    # dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)

    # Visualize undistortion
    print("Visualizing the result...")
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    ax1.imshow(img), ax1.set_title('Original Image', fontsize=15)
    ax2.imshow(dst), ax2.set_title('Undistorted Image', fontsize=15)
    plt.show()
Example #3
Source File: PrecisionLand.py From Precland with GNU General Public License v3.0 | 6 votes |
def preprocess(self, image, scale, altitude):
    # remove distortion from video
    if self.undistort:
        image = cv2.undistort(image, self.matrix, self.distortion)

    # crop image
    if altitude > self.resize_alt_thres:
        img_size = Point(image.shape[1], image.shape[0])
        image, origin = roi(image, img_size * scale, img_size / 2)
    # subsample image
    else:
        image = cv2.resize(image, (0, 0), fx=scale, fy=scale)
        scale = 1.0
    return image, scale

# if starting from mavproxy
Example #4
Source File: camera.py From camera.py with MIT License | 6 votes |
def image_to_world(self, image_px, z):
    """
    Project image points with defined world z to world coordinates.

    :param image_px: image points
    :type image_px: numpy.ndarray, shape=(2 or 3, n)
    :param z: world z coordinate of the projected image points
    :type z: float
    :return: n projective world coordinates
    :rtype: numpy.ndarray, shape=(3, n)
    """
    if image_px.shape[0] == 3:
        image_px = p2e(image_px)
    image_undistorted = self.undistort(image_px)
    tmpP = np.hstack((self.P[:, [0, 1]],
                      self.P[:, 2, np.newaxis] * z + self.P[:, 3, np.newaxis]))
    world_xy = p2e(np.linalg.inv(tmpP).dot(e2p(image_undistorted)))
    return np.vstack((world_xy, z * np.ones(image_px.shape[1])))
Example #5
Source File: omnirobot_server.py From robotics-rl-srl with MIT License | 6 votes |
def imageCallback(self, msg):
    try:
        # Convert your ROS Image message to OpenCV
        cv2_img = bridge.imgmsg_to_cv2(msg, "rgb8")
        if self.first_msg:
            shape = cv2_img.shape
            min_length = min(shape[0], shape[1])
            up_margin = int((shape[0] - min_length) / 2)  # row
            left_margin = int((shape[1] - min_length) / 2)  # col
            self.valid_box = [up_margin, up_margin + min_length,
                              left_margin, left_margin + min_length]
            print("origin size: {}x{}".format(shape[0], shape[1]))
            print("crop each image to a square image, cropped size: {}x{}".format(min_length, min_length))
            self.first_msg = False

        undistort_image = cv2.undistort(cv2_img, self.camera_matrix, self.distortion_coefficients)
        self.valid_img = undistort_image[self.valid_box[0]:self.valid_box[1],
                                         self.valid_box[2]:self.valid_box[3]]
    except CvBridgeError as e:
        print("CvBridgeError:", e)
Example #6
Source File: calibration_utils.py From VerifAI with BSD 3-Clause "New" or "Revised" License | 6 votes |
def undistort(frame, mtx, dist, verbose=False):
    """
    Undistort a frame given camera matrix and distortion coefficients.

    :param frame: input frame
    :param mtx: camera matrix
    :param dist: distortion coefficients
    :param verbose: if True, show frame before/after distortion correction
    :return: undistorted frame
    """
    frame_undistorted = cv2.undistort(frame, mtx, dist, newCameraMatrix=mtx)

    if verbose:
        fig, ax = plt.subplots(nrows=1, ncols=2)
        ax[0].imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        ax[1].imshow(cv2.cvtColor(frame_undistorted, cv2.COLOR_BGR2RGB))
        plt.show()

    return frame_undistorted
Example #7
Source File: helpers.py From Advanced_Lane_Lines with MIT License | 6 votes |
def undistort_images(src, dst):
    """ undistort the images in src folder to dst folder """
    # load dist, mtx
    pickle_file = open("../camera_cal/camera_cal.p", "rb")
    dist_pickle = pickle.load(pickle_file)
    mtx = dist_pickle["mtx"]
    dist = dist_pickle["dist"]
    pickle_file.close()

    # loop the image folder
    image_files = glob.glob(src + "*.jpg")
    for idx, file in enumerate(image_files):
        print(file)
        img = mpimg.imread(file)
        image_dist = cv2.undistort(img, mtx, dist, None, mtx)
        file_name = file.split("\\")[-1]
        print(file_name)
        out_image = dst + file_name
        print(out_image)
        image_dist = cv2.cvtColor(image_dist, cv2.COLOR_RGB2BGR)
        cv2.imwrite(out_image, image_dist)
Example #8
Source File: camera_realworldxyz.py From hta0-horizontal-robot-arm with GNU General Public License v2.0 | 5 votes |
def undistort_image(self, image):
    image_undst = cv2.undistort(image, self.cam_mtx, self.dist, None, self.newcam_mtx)
    return image_undst
Example #9
Source File: lane.py From driving-lane-departure-warning with GNU General Public License v3.0 | 5 votes |
def process_frame(img, visualization=False):

    start = timer()

    # resize the input image according to scale
    img_undist_ = cv2.undistort(img, mtx, dist, None, mtx)
    img_undist = cv2.resize(img_undist_, (0, 0), fx=1/input_scale, fy=1/input_scale)

    # find the binary image of lane/edges
    img_binary = find_edges(img_undist)

    # warp the image to bird view
    binary_warped = warper(img_binary, M)  # get binary image contains edges

    # crop the binary image
    binary_sub = np.zeros_like(binary_warped)
    binary_sub[:, int(150/input_scale):int(-80/input_scale)] = binary_warped[:, int(150/input_scale):int(-80/input_scale)]

    # start detector or tracker to find the lanes
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    if left_lane.detected:  # start tracker
        tracker(binary_sub, ploty, visualization)
    else:  # start detector
        detector(binary_sub, ploty, visualization)

    # average among the previous N frames to get the averaged lanes
    left_lane.process(ploty)
    right_lane.process(ploty)

    # measure the lane curvature
    curvature, curve_direction = measure_lane_curvature(ploty, left_lane.mean_fitx, right_lane.mean_fitx)

    # compute the car's off-center in meters
    offcenter, pts = compute_car_offcenter(ploty, left_lane.mean_fitx, right_lane.mean_fitx, img_undist)

    # compute the processing frame rate
    end = timer()
    fps = 1.0 / (end - start)

    # combine all images into final video output (only for visualization purpose)
    output = create_output_frame(offcenter, pts, img_undist_, fps, curvature, curve_direction, binary_sub)

    return output
Example #10
Source File: calibration.py From driving-lane-departure-warning with GNU General Public License v3.0 | 5 votes |
def undistort_image(imagepath, calib_file, visulization_flag):
    """ undistort the image and visualize it

    :param imagepath: image path
    :param calib_file: includes calibration matrix and distortion coefficients
    :param visulization_flag: flag to plot the image
    :return: none
    """
    mtx, dist = load_calibration(calib_file)

    img = cv2.imread(imagepath)

    # undistort the image
    img_undist = cv2.undistort(img, mtx, dist, None, mtx)
    img_undistRGB = cv2.cvtColor(img_undist, cv2.COLOR_BGR2RGB)

    if visulization_flag:
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        f, (ax1, ax2) = plt.subplots(1, 2)
        ax1.imshow(imgRGB)
        ax1.set_title('Original Image', fontsize=30)
        ax1.axis('off')
        ax2.imshow(img_undistRGB)
        ax2.set_title('Undistorted Image', fontsize=30)
        ax2.axis('off')
        plt.show()

    return img_undistRGB
Example #11
Source File: undistort.py From PyCV-time with MIT License | 5 votes |
def apply(self, img, crop=True):

    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.mtx, self.dist, (w, h), 1, (w, h))

    # undistort
    dst = cv2.undistort(img, self.mtx, self.dist, None, newcameramtx)

    # crop the image
    if crop is not True:
        return dst

    x, y, w, h = roi
    dst = dst[y:y+h, x:x+w]
    return dst
Example #12
Source File: undistort.py From PyCV-time with MIT License | 5 votes |
def apply(self, img, crop=True):

    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.mtx, self.dist, (w, h), 1, (w, h))

    # undistort
    dst = cv2.undistort(img, self.mtx, self.dist, None, newcameramtx)

    # crop the image
    if crop is not True:
        return dst

    x, y, w, h = roi
    dst = dst[y:y+h, x:x+w]
    return dst
Example #13
Source File: lane.py From Vehicle-and-Speed-Identification with MIT License | 5 votes |
def lane_process(img, visualization=False):

    start = timer()

    # resize the input image according to scale
    img_undist_ = cv2.undistort(img, mtx, dist, None, mtx)
    img_undist = cv2.resize(img_undist_, (0, 0), fx=1/input_scale, fy=1/input_scale)

    # find the binary image of lane/edges
    img_binary = find_edges(img_undist)

    # warp the image to bird view
    binary_warped = warper(img_binary, M)  # get binary image contains edges

    # crop the binary image
    binary_sub = np.zeros_like(binary_warped)
    binary_sub[:, int(150/input_scale):int(-80/input_scale)] = binary_warped[:, int(150/input_scale):int(-80/input_scale)]

    # start detector or tracker to find the lanes
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    if left_lane.detected:  # start tracker
        tracker(binary_sub, ploty, visualization)
    else:  # start detector
        detector(binary_sub, ploty, visualization)

    # average among the previous N frames to get the averaged lanes
    left_lane.process(ploty)
    right_lane.process(ploty)

    # measure the lane curvature
    curvature, curve_direction = measure_lane_curvature(ploty, left_lane.mean_fitx, right_lane.mean_fitx)

    # compute the car's off-center in meters
    offcenter, pts = compute_car_offcenter(ploty, left_lane.mean_fitx, right_lane.mean_fitx, img_undist)

    # compute the processing frame rate
    end = timer()
    fps = 1.0 / (end - start)

    # combine all images into final video output (only for visualization purpose)
    _, single_view, lane_info = create_output_frame(offcenter, pts, img_undist_, fps, curvature, curve_direction, binary_sub)

    return img_undist_, single_view, lane_info
Example #14
Source File: img_dim.py From sanet_relocal_demo with GNU General Public License v3.0 | 5 votes |
def undistort(img, K, k1, k2, p1=0., p2=0.):
    """
    Undistort the image according to the given intrinsics.
    :param img: input image
    :param k1: distortion param
    :param k2: distortion param
    :return: undistorted image
    """
    return cv2.undistort(img, K, (k1, k2, p1, p2))
Example #15
Source File: camera.py From camera.py with MIT License | 5 votes |
def undistort(self, distorted_image_coords, Kundistortion=None):
    """
    Remove distortion from image coordinates.

    :param distorted_image_coords: real image coordinates
    :type distorted_image_coords: numpy.ndarray, shape=(2, n)
    :param Kundistortion: camera matrix for undistorted view, None for self.K
    :type Kundistortion: array-like, shape=(3, 3)
    :return: linear image coordinates
    :rtype: numpy.ndarray, shape=(2, n)
    """
    assert distorted_image_coords.shape[0] == 2
    assert distorted_image_coords.ndim == 2
    if Kundistortion is None:
        Kundistortion = self.K
    if self.calibration_type == 'division':
        A = self.get_A(Kundistortion)
        Ainv = np.linalg.inv(A)
        undistorted_image_coords = p2e(A.dot(e2p(
            self._undistort_division(p2e(Ainv.dot(e2p(distorted_image_coords)))))))
    elif self.calibration_type == 'opencv':
        undistorted_image_coords = cv2.undistortPoints(
            distorted_image_coords.T.reshape((1, -1, 2)),
            self.K, self.opencv_dist_coeff,
            P=Kundistortion).reshape(-1, 2).T
    elif self.calibration_type == 'opencv_fisheye':
        undistorted_image_coords = cv2.fisheye.undistortPoints(
            distorted_image_coords.T.reshape((1, -1, 2)),
            self.K, self.opencv_dist_coeff,
            P=Kundistortion).reshape(-1, 2).T
    else:
        warn('undistortion not implemented')
        undistorted_image_coords = distorted_image_coords
    assert undistorted_image_coords.shape[0] == 2
    assert undistorted_image_coords.ndim == 2
    return undistorted_image_coords
Example #16
Source File: camera.py From camera.py with MIT License | 5 votes |
def undistort_image(self, img, Kundistortion=None):
    """
    Transform grayscale image such that radial distortion is removed.

    :param img: input image
    :type img: np.ndarray, shape=(n, m) or (n, m, 3)
    :param Kundistortion: camera matrix for undistorted view, None for self.K
    :type Kundistortion: array-like, shape=(3, 3)
    :return: transformed image
    :rtype: np.ndarray, shape=(n, m) or (n, m, 3)
    """
    if Kundistortion is None:
        Kundistortion = self.K
    if self.calibration_type == 'opencv':
        return cv2.undistort(img, self.K, self.opencv_dist_coeff, newCameraMatrix=Kundistortion)
    elif self.calibration_type == 'opencv_fisheye':
        return cv2.fisheye.undistortImage(img, self.K, self.opencv_dist_coeff, Knew=Kundistortion)
    else:
        xx, yy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))
        img_coords = np.array([xx.ravel(), yy.ravel()])
        y_l = self.undistort(img_coords, Kundistortion)
        if img.ndim == 2:
            return griddata(y_l.T, img.ravel(), (xx, yy), fill_value=0, method='linear')
        else:
            channels = [griddata(y_l.T, img[:, :, i].ravel(), (xx, yy), fill_value=0, method='linear')
                        for i in xrange(img.shape[2])]
            return np.dstack(channels)
Example #17
Source File: vo.py From Monocular-Visual-Inertial-Odometry with MIT License | 5 votes |
def undistort(img, dist, mtx):
    # undistortion routine
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
    x, y, w, h = roi
    dst = dst[y:y+h, x:x+w]
    return dst

# testing undistort function
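The trailing comment suggests the original file follows this helper with a quick test. A minimal sketch of such a test, with a hypothetical image path and placeholder calibration values (not taken from the project):

# Hypothetical quick check of the undistort() helper above; the image path,
# intrinsics, and distortion coefficients are placeholders.
import cv2
import numpy as np

mtx = np.array([[700.0, 0.0, 320.0],
                [0.0, 700.0, 240.0],
                [0.0, 0.0, 1.0]])
dist = np.array([-0.25, 0.07, 0.0, 0.0, 0.0])

img = cv2.imread("test_frame.png")
undistorted = undistort(img, dist, mtx)
cv2.imshow("undistorted", undistorted)
cv2.waitKey(0)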
Example #18
Source File: image_functions.py From niryo_one_ros with GNU General Public License v3.0 | 5 votes |
def undistort_image(img, mtx, dist, newcameramtx=None):
    """
    Use camera intrinsics to undistort raw image

    :param img: Raw Image
    :param mtx: Camera Intrinsics matrix
    :param dist: Distortion Coefficient
    :param newcameramtx: Camera Intrinsics matrix after correction
    :return: Undistorted image
    """
    return cv2.undistort(src=img, cameraMatrix=mtx, distCoeffs=dist, newCameraMatrix=newcameramtx)
Example #19
Source File: camera.py From soccerontable with BSD 2-Clause "Simplified" License | 5 votes |
def set_mask(self, mask_name, undistort=True):
    if os.path.exists(mask_name):
        mask = cv2.imread(mask_name, 0)
        if undistort:
            mask = cv2.undistort(mask, self.A, self.dist_coeff)
        mask = cv2.resize(mask, None, fx=1.0 / self.scale_factor, fy=1.0 / self.scale_factor,
                          interpolation=cv2.INTER_NEAREST)
        self.mask = mask / 255
    else:
        self.mask = np.ones((self.height, self.width), dtype=int)
Example #20
Source File: camera.py From soccerontable with BSD 2-Clause "Simplified" License | 5 votes |
def set_view(self, img_name, undistort=True):
    img1 = cv2.imread(img_name)
    self.org_height, self.org_width = img1.shape[0:2]
    if undistort:
        img1 = cv2.undistort(img1, self.A, self.dist_coeff)
    img1 = cv2.resize(img1, None, fx=1.0 / self.scale_factor, fy=1.0 / self.scale_factor)
    self.view = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB) / 255.
    self.height = self.view.shape[0]
    self.width = self.view.shape[1]
Example #21
Source File: base_camera.py From Color-Tracker with MIT License | 5 votes |
def _undistort_image(self, image):
    if self._camera_matrix is None or self._distortion_coefficients is None:
        import warnings
        warnings.warn("Undistortion has no effect because <camera_matrix>/<distortion_coefficients> is None!")
        return image

    h, w = image.shape[:2]
    new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(self._camera_matrix,
                                                           self._distortion_coefficients,
                                                           (w, h), 1, (w, h))
    undistorted = cv2.undistort(image, self._camera_matrix, self._distortion_coefficients, None,
                                new_camera_matrix)
    return undistorted
Example #22
Source File: blob_detect.py From pc-drone with MIT License | 5 votes |
def undistort_crop2(orig_img):
    # undistort and crop
    # cv2.undistort(src, cameraMatrix, distCoeffs[, dst[, newCameraMatrix]]) -> dst
    dst = cv2.undistort(orig_img, mtx, dist, None, newcameramtx)
    x, y, w, h = roi
    crop_frame = dst[y:y+h, x:x+w]
    return crop_frame

# create maps for undistortion
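The trailing comment introduces map-based undistortion, which this excerpt cuts off. A sketch of that approach, assuming mtx, dist, newcameramtx, roi, and the frame size (w, h) are already defined as in the function above: the maps are precomputed once and applied per frame with cv2.remap(), which avoids redoing the mapping computation on every cv2.undistort() call.

# Sketch: precompute undistortion maps once (assumes mtx, dist, newcameramtx,
# roi, w, h already exist); cv2.remap() then applies them per frame.
mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), cv2.CV_32FC1)

def undistort_crop_remap(orig_img):
    dst = cv2.remap(orig_img, mapx, mapy, cv2.INTER_LINEAR)
    x, y, roi_w, roi_h = roi
    return dst[y:y + roi_h, x:x + roi_w]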
Example #23
Source File: webcam_track_blobs.py From pc-drone with MIT License | 5 votes |
def undistort_crop(orig_img):
    # undistort and crop
    dst = cv2.undistort(orig_img, mtx, dist, None, newcameramtx)
    x, y, w, h = roi
    crop_frame = dst[y:y+h, x:x+w]
    return crop_frame
Example #24
Source File: webcam_track_blobs_2.py From pc-drone with MIT License | 5 votes |
def undistort_crop(orig_img):
    # undistort and crop
    dst = cv2.undistort(orig_img, mtx, dist, None, newcameramtx)
    x, y, w, h = roi
    crop_frame = dst[y:y+h, x:x+w]
    return crop_frame
Example #25
Source File: demo.py From pytorch_mpiigaze with MIT License | 5 votes |
def run(self) -> None:
    while True:
        if self.config.demo.display_on_screen:
            self._wait_key()
            if self.stop:
                break

        ok, frame = self.cap.read()
        if not ok:
            break

        undistorted = cv2.undistort(
            frame, self.gaze_estimator.camera.camera_matrix,
            self.gaze_estimator.camera.dist_coefficients)

        self.visualizer.set_image(frame.copy())
        faces = self.gaze_estimator.detect_faces(undistorted)
        for face in faces:
            self.gaze_estimator.estimate_gaze(undistorted, face)
            self._draw_face_bbox(face)
            self._draw_head_pose(face)
            self._draw_landmarks(face)
            self._draw_face_template_model(face)
            self._draw_gaze_vector(face)
            self._display_normalized_image(face)

        if self.config.demo.use_camera:
            self.visualizer.image = self.visualizer.image[:, ::-1]
        if self.writer:
            self.writer.write(self.visualizer.image)
        if self.config.demo.display_on_screen:
            cv2.imshow('frame', self.visualizer.image)
    self.cap.release()
    if self.writer:
        self.writer.release()
Example #26
Source File: CalibrationObject.py From niryo_one_ros with GNU General Public License v3.0 | 5 votes |
def undistort_image(self, img):
    return cv2.undistort(src=img, cameraMatrix=self.__mtx,
                         distCoeffs=self.__dist, newCameraMatrix=None)
Example #27
Source File: lane.py From vehicle-detection with GNU General Public License v3.0 | 5 votes |
def lane_process(img, visualization=False):

    start = timer()

    # resize the input image according to scale
    img_undist_ = cv2.undistort(img, mtx, dist, None, mtx)
    img_undist = cv2.resize(img_undist_, (0, 0), fx=1/input_scale, fy=1/input_scale)

    # find the binary image of lane/edges
    img_binary = find_edges(img_undist)

    # warp the image to bird view
    binary_warped = warper(img_binary, M)  # get binary image contains edges

    # crop the binary image
    binary_sub = np.zeros_like(binary_warped)
    binary_sub[:, int(150/input_scale):int(-80/input_scale)] = binary_warped[:, int(150/input_scale):int(-80/input_scale)]

    # start detector or tracker to find the lanes
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    if left_lane.detected:  # start tracker
        tracker(binary_sub, ploty, visualization)
    else:  # start detector
        detector(binary_sub, ploty, visualization)

    # average among the previous N frames to get the averaged lanes
    left_lane.process(ploty)
    right_lane.process(ploty)

    # measure the lane curvature
    curvature, curve_direction = measure_lane_curvature(ploty, left_lane.mean_fitx, right_lane.mean_fitx)

    # compute the car's off-center in meters
    offcenter, pts = compute_car_offcenter(ploty, left_lane.mean_fitx, right_lane.mean_fitx, img_undist)

    # compute the processing frame rate
    end = timer()
    fps = 1.0 / (end - start)

    # combine all images into final video output (only for visualization purpose)
    _, single_view, lane_info = create_output_frame(offcenter, pts, img_undist_, fps, curvature, curve_direction, binary_sub)

    return img_undist_, single_view, lane_info
Example #28
Source File: calibration.py From vehicle-detection with GNU General Public License v3.0 | 5 votes |
def undistort_image(imagepath, calib_file, visulization_flag):
    """ undistort the image and visualize it

    :param imagepath: image path
    :param calib_file: includes calibration matrix and distortion coefficients
    :param visulization_flag: flag to plot the image
    :return: none
    """
    mtx, dist = load_calibration(calib_file)

    img = cv2.imread(imagepath)

    # undistort the image
    img_undist = cv2.undistort(img, mtx, dist, None, mtx)
    img_undistRGB = cv2.cvtColor(img_undist, cv2.COLOR_BGR2RGB)

    if visulization_flag:
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        f, (ax1, ax2) = plt.subplots(1, 2)
        ax1.imshow(imgRGB)
        ax1.set_title('Original Image', fontsize=30)
        ax1.axis('off')
        ax2.imshow(img_undistRGB)
        ax2.set_title('Undistorted Image', fontsize=30)
        ax2.axis('off')
        plt.show()

    return img_undistRGB
Example #29
Source File: PrecisionLand.py From Precland with GNU General Public License v3.0 | 4 votes |
def __init__(self, config):

    # Unpack Config file
    self.config = config

    # simulator
    self.use_simulator = self.config.get_boolean('simulator', 'use_simulator', True)

    # general
    # Run the program no matter what mode or location; Useful for debug purposes
    self.always_run = self.config.get_boolean('general', 'always_run', True)
    self.target_size = self.config.get_float('general', 'target_internal_diameter', 0.60325)
    self.resize_alt_thres = self.config.get_float('general', 'resize_alt_thres', 7)

    # logging
    # create a logger and set log levels
    self.logger = Logger("precland", self.config.get_string('logging', 'location', '~/precland/'))
    self.logger.set_print_level(self.config.get_string('logging', 'print_level', 'general, debug'))
    self.logger.set_log_level(self.config.get_string('logging', 'log_level', 'general, debug, aircraft'))
    self.logger.set_display_level(self.config.get_string('logging', 'display_level', 'gui, raw'))
    self.logger.set_record_level(self.config.get_string('logging', 'record_level', 'gui, raw'))
    self.logger.set_record_type(self.config.get_string('logging', 'record_type', 'video'))

    # video
    self.camera_src = self.config.get_string('camera', 'source', "0")
    self.background_capture = self.config.get_boolean('camera', 'background_capture', False)
    self.hfov = config.get_float('camera', 'hfov', 72.3)
    self.vfov = config.get_float('camera', 'vfov', 46)
    self.video = Video(self.camera_src, self.background_capture)
    self.has_gimbal = self.config.get_boolean('camera', 'has_gimbal', False)
    self.undistort = self.config.get_boolean('camera', 'undistort', True)
    self.matrix = self.config.get_array('camera', 'matrix', None)
    self.distortion = self.config.get_array('camera', 'distortion', None)
    if self.matrix is None or self.distortion is None:
        raise StandardError("Please fix config file. Add matrix and distortion arrays")

    # PX4flow as capture device
    if self.camera_src == 'PX4Flow':
        from Flow_Camera import FlowCamera
        flow_cam = FlowCamera()
        self.video.set_camera(flow_cam)

    # gopro as capture device
    if self.camera_src == 'gopro':
        from Solo_Camera import SoloCamera
        solo_cam = SoloCamera()
        self.video.set_camera(solo_cam)
        # clear extra video frames in the background
        video_clear = Threader(target=solo_cam.clear, args=None, iterations=-1)
        video_clear.start()
Example #30
Source File: camera.py From camera.py with MIT License | 4 votes |
def calibrate_division_model(line_coordinates, y0, z_n, focal_length=1):
    """
    Calibrate division model by making lines straight.

    :param line_coordinates: coordinates of points on lines
    :type line_coordinates: np.ndarray, shape=(nlines, npoints_per_line, 2)
    :param y0: radial distortion center xy coordinates
    :type y0: array-like, len=2
    :param z_n: distance to boundary (pincushion: image width / 2, barrel: image diagonal / 2)
    :type z_n: float
    :param focal_length: focal length of the camera (optional)
    :type focal_length: float
    :return: Camera object with calibrated division model parameter lambda
    :rtype: Camera
    """
    def lines_fit_error(p, line_coordinates, cam):
        if not (-1 < p < 1):
            return np.inf
        assert line_coordinates.ndim == 3
        cam.division_lambda = p
        error = 0.
        for line in xrange(line_coordinates.shape[0]):
            xy = cam.undistort(line_coordinates[line].T)
            mc = fit_line(xy)
            d = line_point_distance(xy, mc)
            nearest_xy = nearest_point_on_line(xy, mc)
            line_length_sq = np.sum((nearest_xy[:, 0] - nearest_xy[:, -1]) ** 2)
            error += np.sum(d ** 2) / line_length_sq / line_coordinates.shape[1]
            # plt.plot(x, mc[0] * x + mc[1], 'y')
            # plt.plot(nx, ny, 'y+')
            # plt.plot(x, y, 'r+')
            # plt.show()
        return error

    c = Camera()
    c.set_K_elements(u0_px=y0[0], v0_px=y0[1], f=focal_length)
    c.calibration_type = 'division'
    c.division_z_n = z_n
    res = minimize_scalar(lambda p: lines_fit_error(p, line_coordinates, c))
    c.division_lambda = float(res.x)
    return c