Python cv2.HoughLines() Examples
The following are 14 code examples of cv2.HoughLines(), taken from open-source projects. The source file, project, and license are noted above each example.
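Before the project examples, here is a minimal standalone sketch of the cv2.HoughLines() call itself: run Canny edge detection, then draw each detected (rho, theta) line onto the input. The image path is a placeholder.

import cv2
import numpy as np

img = cv2.imread('input.png')            # placeholder path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)

# rho resolution = 1 px, theta resolution = 1 degree, accumulator threshold = 200 votes
lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)

if lines is not None:
    for line in lines:                   # each element is [[rho, theta]] in OpenCV 3+
        rho, theta = line[0]
        a, b = np.cos(theta), np.sin(theta)
        x0, y0 = a * rho, b * rho
        pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * a))
        pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * a))
        cv2.line(img, pt1, pt2, (0, 0, 255), 2)

cv2.imwrite('lines.png', img)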
Example #1
Source File: imgproc.py From pdftabextract with Apache License 2.0 | 9 votes |
def detect_lines(self, canny_low_thresh, canny_high_thresh, canny_kernel_size,
                 hough_rho_res, hough_theta_res, hough_votes_thresh,
                 gray_conversion=cv2.COLOR_BGR2GRAY):
    """
    Detect lines in input image using hough transform.
    Return detected lines as list with tuples:
    (rho, theta, normalized theta with 0 <= theta_norm < np.pi, DIRECTION_VERTICAL or DIRECTION_HORIZONTAL)
    """
    self.gray_img = cv2.cvtColor(self.input_img, gray_conversion)
    self.edges = cv2.Canny(self.gray_img, canny_low_thresh, canny_high_thresh,
                           apertureSize=canny_kernel_size)

    # detect lines with hough transform
    lines = cv2.HoughLines(self.edges, hough_rho_res, hough_theta_res, hough_votes_thresh)
    if lines is None:
        lines = []

    self.lines_hough = self._generate_hough_lines(lines)

    return self.lines_hough
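This method belongs to pdftabextract's image-processing wrapper class. The following usage sketch is an assumption rather than part of the snippet: the ImageProc class name, its file-path constructor, and the img_w attribute are taken from pdftabextract's documented imgproc module and may differ in detail.

import numpy as np
from pdftabextract import imgproc

# Assumed API: ImageProc wraps an image file and exposes detect_lines() as above.
iproc = imgproc.ImageProc('page_scan.png')
lines_hough = iproc.detect_lines(canny_low_thresh=50, canny_high_thresh=150,
                                 canny_kernel_size=3,
                                 hough_rho_res=1,
                                 hough_theta_res=np.pi / 500,
                                 hough_votes_thresh=round(0.2 * iproc.img_w))
print('detected %d lines' % len(lines_hough))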
Example #2
Source File: imgproc.py From pdftabextract with Apache License 2.0 | 6 votes |
def _generate_hough_lines(self, lines):
    """
    From a list of lines in <lines> detected by cv2.HoughLines, create a list
    with a tuple per line containing:
    (rho, theta, normalized theta with 0 <= theta_norm < np.pi, DIRECTION_VERTICAL or DIRECTION_HORIZONTAL)
    """
    lines_hough = []
    for l in lines:
        rho, theta = l[0]  # they come like this from OpenCV's hough transform
        theta_norm = normalize_angle(theta)

        if abs(PIHLF - theta_norm) > PI4TH:  # vertical
            line_dir = DIRECTION_VERTICAL
        else:
            line_dir = DIRECTION_HORIZONTAL

        lines_hough.append((rho, theta, theta_norm, line_dir))

    return lines_hough
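The helpers normalize_angle, PIHLF, PI4TH, and the DIRECTION_* constants are defined elsewhere in pdftabextract. A minimal sketch of plausible definitions, assuming PIHLF is pi/2, PI4TH is pi/4, and normalize_angle wraps an angle into [0, pi) as the docstring implies; the real module may differ:

import numpy as np

PIHLF = np.pi / 2            # assumed: half pi
PI4TH = np.pi / 4            # assumed: quarter pi
DIRECTION_HORIZONTAL = 'h'   # assumed sentinel values
DIRECTION_VERTICAL = 'v'

def normalize_angle(theta):
    """Map an angle in radians into [0, pi), as the docstring above requires."""
    return theta % np.pi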
Example #3
Source File: page.py From doc2text with MIT License | 6 votes |
def estimate_skew(image):
    edges = auto_canny(image)
    lines = cv2.HoughLines(edges, 1, np.pi / 90, 200)
    new = edges.copy()

    thetas = []

    for line in lines:
        for rho, theta in line:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * (a))
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * (a))
            if theta > np.pi / 3 and theta < np.pi * 2 / 3:
                thetas.append(theta)
                new = cv2.line(new, (x1, y1), (x2, y2), (255, 255, 255), 1)

    theta_mean = np.mean(thetas)
    theta = rad_to_deg(theta_mean) if len(thetas) > 0 else 0

    return theta
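auto_canny and rad_to_deg are helpers defined elsewhere in doc2text. A hedged sketch of what they plausibly do, using the common median-based Canny recipe and a plain radians-to-degrees conversion; the project's own helpers may differ:

import numpy as np
import cv2

def auto_canny(image, sigma=0.33):
    """Canny with thresholds derived from the median intensity (an assumed recipe)."""
    v = np.median(image)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)

def rad_to_deg(theta):
    """Convert radians to degrees."""
    return theta * 180.0 / np.pi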
Example #4
Source File: ImageMiniLab.py From ImageMiniLab with GNU General Public License v3.0 | 6 votes |
def hough_line(self):
    src = self.cv_read_img(self.src_file)
    if src is None:
        return

    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    edges = cv.Canny(gray, 50, 150, apertureSize=3)
    lines = cv.HoughLines(edges, 1, np.pi/180, 200)
    for line in lines:
        rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        cv.line(src, (x1, y1), (x2, y2), (0, 0, 255), 2)
    self.decode_and_show_dst(src)

# circle detection
Example #5
Source File: parse_primitives.py From geosolver with Apache License 2.0 | 6 votes |
def _get_lines(image_segment, params):
    lines = []
    temp = cv2.HoughLines(image_segment.binarized_segmented_image,
                          params.rho, params.theta, params.threshold)
    if temp is None:
        return lines
    rho_theta_pairs = [temp[idx][0] for idx in range(len(temp))]
    if len(rho_theta_pairs) > params.max_num:
        rho_theta_pairs = rho_theta_pairs[:params.max_num]
    nms_rho_theta_pairs = dimension_wise_non_maximum_suppression(
        rho_theta_pairs, (params.nms_rho, params.nms_theta),
        _dimension_wise_distances_between_rho_theta_pairs)
    # Note: the suppressed list is computed above, but the loop below iterates
    # over the un-suppressed rho_theta_pairs.
    for rho_theta_pair in rho_theta_pairs:
        curr_lines = _segment_line(image_segment, rho_theta_pair, params)
        lines.extend(curr_lines)
    return lines
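dimension_wise_non_maximum_suppression, _dimension_wise_distances_between_rho_theta_pairs, and _segment_line are defined elsewhere in geosolver. As a rough, assumed sketch, the distance helper presumably returns per-dimension distances between two (rho, theta) pairs so the NMS can apply separate rho and theta tolerances; the real implementation may differ:

import numpy as np

def _dimension_wise_distances_between_rho_theta_pairs(pair0, pair1):
    """Assumed behaviour: return (|drho|, |dtheta|), with theta treated as
    periodic over pi; geosolver's actual helper may compute this differently."""
    rho0, theta0 = pair0
    rho1, theta1 = pair1
    dtheta = abs(theta0 - theta1)
    dtheta = min(dtheta, np.pi - dtheta)
    return abs(rho0 - rho1), dtheta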
Example #6
Source File: line_detect_1.py From crop_row_detection with GNU General Public License v3.0 | 5 votes |
def crop_point_hough(crop_points):
    height = len(crop_points)
    width = len(crop_points[0])

    #crop_line_data = cv2.HoughLinesP(crop_points, 1, math.pi/180, 2, 10, 10)
    crop_line_data = cv2.HoughLines(crop_points, HOUGH_RHO, HOUGH_ANGLE, HOUGH_THRESH)

    crop_lines = np.zeros((height, width, 3), dtype=np.uint8)

    if crop_line_data is not None:
        # OpenCV 2.x returned lines as a (1, N, 2) array, hence the [0] index here
        crop_line_data = crop_line_data[0]
        #print(crop_line_data)

        if len(crop_line_data[0]) == 2:
            for [rho, theta] in crop_line_data:
                #print(rho, theta)
                if (theta <= ANGLE_THRESH) or (theta >= math.pi - ANGLE_THRESH):
                    a = math.cos(theta)
                    b = math.sin(theta)
                    x0 = a * rho
                    y0 = b * rho
                    point1 = (int(round(x0 + 1000 * (-b))), int(round(y0 + 1000 * (a))))
                    point2 = (int(round(x0 - 1000 * (-b))), int(round(y0 - 1000 * (a))))
                    cv2.line(crop_lines, point1, point2, (0, 0, 255), 2)

        elif len(crop_line_data[0]) == 4:
            for [x0, y0, x1, y1] in crop_line_data:
                cv2.line(crop_lines, (x0, y0), (x1, y1), (0, 0, 255), 2)
    else:
        print("No lines found")

    return crop_lines
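HOUGH_RHO, HOUGH_ANGLE, HOUGH_THRESH, and ANGLE_THRESH are module-level constants in line_detect_1.py that the snippet does not show. The values below are hypothetical placeholders, added only so the function can be run in isolation; the crop_row_detection project's actual settings may differ:

import math

# Hypothetical values (not from the original project)
HOUGH_RHO = 2                 # rho resolution in pixels
HOUGH_ANGLE = math.pi / 180   # theta resolution in radians (1 degree)
HOUGH_THRESH = 8              # accumulator votes needed to accept a line
ANGLE_THRESH = math.pi / 4    # keep only near-vertical lines (theta near 0 or pi)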
Example #7
Source File: HoughLine.py From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License | 5 votes |
def main():
    capture = cv2.VideoCapture(0)

    while True:
        ret, frame = capture.read()

        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        edges_detec = cv2.Canny(gray_frame, 50, 250, apertureSize=5, L2gradient=True)

        hough_lines = cv2.HoughLines(edges_detec, 1, np.pi / 180, 200)

        if hough_lines is not None:
            # hough_lines[0] follows the old (1, N, 2) layout of OpenCV 2.x;
            # with OpenCV 3+ each element of hough_lines is a single [[rho, theta]] row.
            for rho, theta in hough_lines[0]:
                x0 = rho * np.cos(theta)
                y0 = rho * np.sin(theta)

                ptsX = (int(x0 + 1000 * (-np.sin(theta))), int(y0 + 1000 * (np.cos(theta))))
                ptsY = (int(x0 - 1000 * (-np.sin(theta))), int(y0 - 1000 * (np.cos(theta))))

                cv2.line(frame, ptsX, ptsY, (0, 255, 0), 2)

        cv2.imshow("Capture Frame", frame)

        if cv2.waitKey(1) == 27:
            break

    cv2.destroyAllWindows()
    capture.release()
Example #8
Source File: vanishing_point.py From vanishing-point-detection with MIT License | 5 votes |
def hough_transform(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)              # Convert image to grayscale
    kernel = np.ones((15, 15), np.uint8)
    opening = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)  # Open (erode, then dilate)
    edges = cv2.Canny(opening, 50, 150, apertureSize=3)       # Canny edge detection
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)        # Hough line detection
    hough_lines = []
    # Lines are represented by rho, theta; converted to endpoint notation
    if lines is not None:
        for line in lines:
            hough_lines.extend(list(starmap(endpoints, line)))
    return hough_lines
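The endpoints helper, applied here via itertools.starmap, is defined elsewhere in the project; per the comment above, it turns each (rho, theta) pair into endpoint notation. A minimal sketch of what such a helper could look like, assuming the same 1000-pixel extension used in the other examples on this page:

from itertools import starmap
import numpy as np

def endpoints(rho, theta):
    """Convert a Hough (rho, theta) line into two endpoints roughly 1000 px apart
    (a sketch; the project's own helper may scale or order points differently)."""
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * a))
    pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * a))
    return pt1, pt2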
Example #9
Source File: extract.py From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International | 5 votes |
def extractGrid(img,
                nvertical,
                nhorizontal,
                threshold1=50,
                threshold2=150,
                apertureSize=3,
                hough_threshold_step=20,
                hough_threshold_min=50,
                hough_threshold_max=150):
    """Finds the grid lines in a board image.

    :param img: board image
    :param nvertical: number of vertical lines
    :param nhorizontal: number of horizontal lines
    :returns: a pair (horizontal, vertical). Both elements are lists with the
              lines' positions.
    """

    w, h, _ = img.shape
    close_threshold_v = (w / nvertical) / 4
    close_threshold_h = (h / nhorizontal) / 4

    im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thresh, im_bw = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    im_canny = cv2.Canny(im_bw, threshold1, threshold2, apertureSize=apertureSize)

    # Lower the Hough threshold step by step until enough lines are found.
    # Integer division so range() receives an int.
    for i in range((hough_threshold_max - hough_threshold_min + 1) // hough_threshold_step):
        lines = cv2.HoughLines(im_canny, 1, np.pi / 180,
                               hough_threshold_max - (hough_threshold_step * i))
        if lines is None:
            continue

        lines = [Line(l[0], l[1]) for l in lines[0]]
        horizontal, vertical = partitionLines(lines)
        vertical = filterCloseLines(vertical, horizontal=False, threshold=close_threshold_v)
        horizontal = filterCloseLines(horizontal, horizontal=True, threshold=close_threshold_h)

        if len(vertical) >= nvertical and \
           len(horizontal) >= nhorizontal:
            return (horizontal, vertical)
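Line, partitionLines, and filterCloseLines are helpers from the same project. A hedged sketch of the partitioning step, assuming lines are split by their Hough angle into near-horizontal and near-vertical groups; the project's actual Line class is richer (it also supports getCenter, intersect, and draw, as Example #13 shows) and its criteria may differ:

import numpy as np

class Line:
    """Minimal stand-in for the project's Line class (rho/theta only)."""
    def __init__(self, rho, theta):
        self.rho = rho
        self.theta = theta

def partitionLines(lines):
    """Split lines into (horizontal, vertical) by Hough angle.
    Assumption: theta near pi/2 means horizontal, theta near 0 or pi means vertical."""
    horizontal = [l for l in lines if abs(l.theta - np.pi / 2) < np.pi / 4]
    vertical = [l for l in lines if abs(l.theta - np.pi / 2) >= np.pi / 4]
    return horizontal, vertical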
Example #10
Source File: extractBoard.py From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International | 5 votes |
def extractGrid(img,
                nvertical,
                nhorizontal,
                threshold1=50,
                threshold2=150,
                apertureSize=3,
                hough_threshold_step=20,
                hough_threshold_min=50,
                hough_threshold_max=150):
    """Finds the grid lines in a board image.

    :param img: board image
    :param nvertical: number of vertical lines
    :param nhorizontal: number of horizontal lines
    :returns: a pair (horizontal, vertical). Both elements are lists with the
              lines' positions.
    """

    w, h, _ = img.shape
    close_threshold_v = (w / nvertical) / 4
    close_threshold_h = (h / nhorizontal) / 4

    im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thresh, im_bw = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    im_canny = cv2.Canny(im_bw, threshold1, threshold2, apertureSize=apertureSize)

    # Lower the Hough threshold step by step until enough lines are found.
    # Integer division so range() receives an int.
    for i in range((hough_threshold_max - hough_threshold_min + 1) // hough_threshold_step):
        lines = cv2.HoughLines(im_canny, 1, np.pi / 180,
                               hough_threshold_max - (hough_threshold_step * i))
        if lines is None:
            continue

        lines = [Line(l[0], l[1]) for l in lines[0]]
        horizontal, vertical = partitionLines(lines)
        vertical = filterCloseLines(vertical, horizontal=False, threshold=close_threshold_v)
        horizontal = filterCloseLines(horizontal, horizontal=True, threshold=close_threshold_h)

        if len(vertical) >= nvertical and \
           len(horizontal) >= nhorizontal:
            return (horizontal, vertical)
Example #11
Source File: python_houghlines.py From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International | 5 votes |
def get_horizontal_lines(img):

    #=====[ Step 1: set parameters ]=====
    num_peaks = 5
    theta_buckets_horz = [-90, -89]
    theta_resolution_horz = 0.0175  # radians
    rho_resolution_horz = 6
    threshold_horz = 5

    #=====[ Step 2: find lines in (rho, theta) ]=====
    # [H, theta, rho] = hough (corners_img, 'Theta', theta_buckets_horz, 'RhoResolution', rho_resolution_horz);
    # peaks = houghpeaks(H, num_peaks);
    lines_rt = cv2.HoughLines(deepcopy(img), rho_resolution_horz, theta_resolution_horz, threshold_horz)[0]
    print(lines_rt)

    #####[ DEBUG: draw lines in (rho, theta) ]#####
    img = draw_lines_rho_theta(img, lines_rt)
    cv2.imshow('HORIZONTAL LINES', img)
    key = 0
    while key != 27:
        key = cv2.waitKey(30)

    #=====[ Step 3: convert peaks to rho, theta ]=====
    # theta_rad = fromDegrees ('radians', theta);
    # rhos = rho(peaks(:, 1));
    # thetas = theta_rad(peaks(:, 2));
    # lines = [rhos; thetas];

    #=====[ Step 4: figure out which lines they are ]=====
    # indexed_lines = horizontal_ransac (lines);

    #####[ DEBUG: show lines ]#####
    # draw_lines (corners_img, indexed_lines(1:2, :));
Example #12
Source File: extract.py From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International | 5 votes |
def extractGrid(img,
                nvertical,
                nhorizontal,
                threshold1=50,
                threshold2=150,
                apertureSize=3,
                hough_threshold_step=20,
                hough_threshold_min=50,
                hough_threshold_max=150):
    """Finds the grid lines in a board image.

    :param img: board image
    :param nvertical: number of vertical lines
    :param nhorizontal: number of horizontal lines
    :returns: a pair (horizontal, vertical). Both elements are lists with the
              lines' positions.
    """

    w, h, _ = img.shape
    close_threshold_v = (w / nvertical) / 4
    close_threshold_h = (h / nhorizontal) / 4

    im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thresh, im_bw = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    im_canny = cv2.Canny(im_bw, threshold1, threshold2, apertureSize=apertureSize)

    # Lower the Hough threshold step by step until enough lines are found.
    # Integer division so range() receives an int.
    for i in range((hough_threshold_max - hough_threshold_min + 1) // hough_threshold_step):
        lines = cv2.HoughLines(im_canny, 1, np.pi / 180,
                               hough_threshold_max - (hough_threshold_step * i))
        if lines is None:
            continue

        lines = [Line(l[0], l[1]) for l in lines[0]]
        horizontal, vertical = partitionLines(lines)
        vertical = filterCloseLines(vertical, horizontal=False, threshold=close_threshold_v)
        horizontal = filterCloseLines(horizontal, horizontal=True, threshold=close_threshold_h)

        if len(vertical) >= nvertical and \
           len(horizontal) >= nhorizontal:
            return (horizontal, vertical)
Example #13
Source File: perspective.py From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International | 4 votes |
def getPerspective(image, points):
    yy, xx, _ = image.shape

    tmp = np.zeros(image.shape[0:2], np.uint8)
    drawContour(tmp, points, (255,), 1)

    houghRatio = houghThreshold // hough_threshold_step

    grid = None
    for i in range(houghRatio):
        lines = cv2.HoughLines(tmp, 1, np.pi / 180, houghThreshold - (i * hough_threshold_step))
        if lines is None:
            continue

        lines = [Line(l[0], l[1]) for l in lines[0]]
        (horizontal, vertical) = partitionLines(lines)
        vertical = filterCloseLines(vertical, horizontal=False)
        horizontal = filterCloseLines(horizontal, horizontal=True)

        if len(vertical) == 2 and len(horizontal) == 2:
            grid = (vertical, horizontal)
            break

    if grid is None:
        return None

    if vertical[0].getCenter()[0] > vertical[1].getCenter()[0]:
        v2, v1 = vertical
    else:
        v1, v2 = vertical

    if horizontal[0].getCenter()[1] > horizontal[1].getCenter()[1]:
        h2, h1 = horizontal
    else:
        h1, h2 = horizontal

    perspective = (h1.intersect(v1),
                   h1.intersect(v2),
                   h2.intersect(v2),
                   h2.intersect(v1))

    ## Doc ##
    #tmp = cv2.cvtColor(tmp, cv2.COLOR_GRAY2BGR)
    #drawContour(tmp, points, (0,0,255), 3)
    #writeDocumentationImage(tmp, "contour_individual_bw")
    #tmp_bw = tmp
    #tmp_orig = image.copy()
    #for tmp in (tmp_bw, tmp_orig):
    #    for l in (v1,v2,h1,h2): l.draw(tmp, (0,255,0), 2)
    #    for p in perspective: drawPoint(tmp, p, (255,0,0), 3)
    #writeDocumentationImage(tmp_bw, "contour_lines_bw")
    #writeDocumentationImage(tmp_orig, "contour_lines_orig")
    ## Doc ##

    return perspective
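The calls h1.intersect(v1) and so on rely on the project's Line class. For two Hough lines (rho1, theta1) and (rho2, theta2), the intersection point solves a 2x2 linear system. A standalone sketch of that computation follows; the project's own Line.intersect may be implemented differently:

import numpy as np

def intersect_hough_lines(rho1, theta1, rho2, theta2):
    """Intersection of the lines x*cos(t) + y*sin(t) = rho for two Hough lines.
    Returns (x, y), or None if the lines are (nearly) parallel."""
    A = np.array([[np.cos(theta1), np.sin(theta1)],
                  [np.cos(theta2), np.sin(theta2)]])
    b = np.array([rho1, rho2])
    if abs(np.linalg.det(A)) < 1e-9:
        return None
    x, y = np.linalg.solve(A, b)
    return x, y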
Example #14
Source File: CVAnalysis_old.py From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International | 4 votes |
def get_chessboard_lines(corners, image):
    """
    Function: get_chessboard_lines
    ------------------------------
    given a list of corners represented as tuples, this returns
    (horizontal_lines, vertical_lines) represented as (a, b, c) pairs
    """
    #=====[ Step 1: get lines via Hough transform on corners ]=====
    corners_img = np.zeros(image.shape[:2], dtype=np.uint8)
    for corner in corners:
        corners_img[int(corner[1])][int(corner[0])] = 255
    lines = cv2.HoughLines(corners_img, 3, np.pi/180, 4)[0]

    #=====[ Step 2: get vertical lines ]=====
    # lines = avg_close_lines_vert (lines)
    lines = [rho_theta_to_abc(l) for l in lines]
    vert_lines = filter_by_slope(lines, lambda slope: (slope > 1) or (slope < -1))
    vert_lines_rt = [abc_to_rho_theta(l) for l in vert_lines]

    #=====[ Step 3: snap points to grid ]===
    points_grid = snap_points_to_lines(vert_lines_rt, corners)

    #=====[ Step 6: hough transform on points in grid to get horizontal lines ]=====
    all_points = [p for l in points_grid for p in l]
    corners_img = np.zeros(image.shape[:2], dtype=np.uint8)
    for p in all_points:
        corners_img[int(p[1])][int(p[0])] = 255
    lines = cv2.HoughLines(corners_img, 3, np.pi/180, 2)[0]
    lines = [rho_theta_to_abc(l) for l in lines]
    horz_lines = filter_by_slope(lines, lambda slope: (slope < 0.1) and (slope > -0.1))
    lines = [abc_to_rho_theta(l) for l in horz_lines]
    horz_lines_rt = avg_close_lines_2(lines)
    # horz_lines_rt = lines

    print(horz_lines_rt)
    print(vert_lines_rt)

    horz_lines = [rho_theta_to_abc(l) for l in horz_lines_rt]
    vert_lines = [rho_theta_to_abc(l) for l in vert_lines_rt]

    return horz_lines, vert_lines
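rho_theta_to_abc and abc_to_rho_theta convert between the Hough (rho, theta) parameterisation and an (a, b, c) coefficient form whose slope (-a/b) is tested by filter_by_slope. A hedged sketch of that conversion, assuming the line equation a*x + b*y = c with a = cos(theta), b = sin(theta), c = rho; the project may normalise the coefficients differently:

import numpy as np

def rho_theta_to_abc(line):
    """(rho, theta) -> (a, b, c) with a*x + b*y = c (an assumed convention)."""
    rho, theta = line
    return (np.cos(theta), np.sin(theta), rho)

def abc_to_rho_theta(line):
    """Inverse of the above for lines produced by rho_theta_to_abc."""
    a, b, c = line
    return (c, np.arctan2(b, a))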