Python cv2.ADAPTIVE_THRESH_GAUSSIAN_C Examples
The following are 30 code examples of cv2.ADAPTIVE_THRESH_GAUSSIAN_C. Note that ADAPTIVE_THRESH_GAUSSIAN_C is a constant (flag) passed to cv2.adaptiveThreshold, not a function that is called on its own. You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the cv2 module.
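Before the examples, here is a minimal sketch of typical usage, assuming a hypothetical input file sample.png. cv2.adaptiveThreshold expects an 8-bit single-channel image, and blockSize must be an odd number greater than 1.

import cv2

# Load a grayscale image (sample.png is a placeholder file name).
gray = cv2.imread("sample.png", cv2.IMREAD_GRAYSCALE)

# For each pixel, the threshold is the Gaussian-weighted sum of its
# blockSize x blockSize neighbourhood minus the constant C.
binary = cv2.adaptiveThreshold(
    gray,                            # source: 8-bit single-channel image
    255,                             # maxValue assigned to pixels that pass the threshold
    cv2.ADAPTIVE_THRESH_GAUSSIAN_C,  # adaptiveMethod: Gaussian-weighted neighbourhood
    cv2.THRESH_BINARY,               # thresholdType (THRESH_BINARY_INV inverts the result)
    11,                              # blockSize: odd neighbourhood size
    2,                               # C: constant subtracted from the weighted mean
)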
Example #1
Source File: temp.py From aggregation with Apache License 2.0 | 6 votes |
def read_file(fname):
    image = cv2.imread(fname,0)
    image = cv2.adaptiveThreshold(image,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
    # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    # cv2.imshow('image',image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # image = cv2.adaptiveThreshold(image,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    # cv2.imwrite("/home/ggdhines/temp.jpg",image)
    # assert False
    # _,image = cv2.threshold(image,200,255,cv2.THRESH_BINARY)
    # image = 255 - image
    # image = image > 0
    image = image.astype(np.float)
    return image
Example #2
Source File: parse_image_segments.py From geosolver with Apache License 2.0 | 6 votes |
def _get_image_segments(image, kernel, block_size, c):
    binarized_image = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                            cv2.THRESH_BINARY_INV, block_size, c)
    labeled, nr_objects = ndimage.label(binarized_image, structure=kernel)
    slices = ndimage.find_objects(labeled)
    image_segments = {}
    for idx, slice_ in enumerate(slices):
        offset = instantiators['point'](slice_[1].start, slice_[0].start)
        sliced_image = image[slice_]
        boolean_array = labeled[slice_] == (idx+1)
        segmented_image = 255 - (255-sliced_image) * boolean_array
        pixels = set(instantiators['point'](x, y)
                     for x, y in np.transpose(np.nonzero(np.transpose(boolean_array))))
        binarized_segmented_image = cv2.adaptiveThreshold(segmented_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                          cv2.THRESH_BINARY_INV, block_size, c)
        image_segment = ImageSegment(segmented_image, sliced_image, binarized_segmented_image,
                                     pixels, offset, idx)
        image_segments[idx] = image_segment
    return image_segments
Example #3
Source File: class_PlateDetection.py From ALPR_System with Apache License 2.0 | 6 votes |
def clean_plate(self, plate):
    gray = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    if contours:
        areas = [cv2.contourArea(c) for c in contours]
        max_index = np.argmax(areas)  # index of the largest contour in the area array

        max_cnt = contours[max_index]
        max_cntArea = areas[max_index]
        x,y,w,h = cv2.boundingRect(max_cnt)
        rect = cv2.minAreaRect(max_cnt)
        rotatedPlate = self.crop_rotated_contour(plate, rect)

        if not self.ratioCheck(max_cntArea, rotatedPlate.shape[1], rotatedPlate.shape[0]):
            return plate, False, None
        return rotatedPlate, True, [x, y, w, h]
    else:
        return plate, False, None
Example #4
Source File: SudokuExtractor.py From SolveSudoku with MIT License | 6 votes |
def pre_process_image(img, skip_dilate=False):
    """Uses a blurring function, adaptive thresholding and dilation to expose the main features of an image."""

    # Gaussian blur with a kernel size (height, width) of 9.
    # Note that kernel sizes must be positive and odd and the kernel must be square.
    proc = cv2.GaussianBlur(img.copy(), (9, 9), 0)

    # Adaptive threshold using 11 nearest neighbour pixels
    proc = cv2.adaptiveThreshold(proc, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

    # Invert colours, so gridlines have non-zero pixel values.
    # Necessary to dilate the image, otherwise will look like erosion instead.
    proc = cv2.bitwise_not(proc, proc)

    if not skip_dilate:
        # Dilate the image to increase the size of the grid lines.
        kernel = np.array([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]], np.uint8)
        proc = cv2.dilate(proc, kernel)

    return proc
Example #5
Source File: mainDetect.py From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International | 6 votes |
def processFile(self, img, debug=False):
    """
    Converts input image to grayscale & applies adaptive thresholding.
    """
    img = cv2.GaussianBlur(img, (5, 5), 0)

    # Convert to HSV
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # HSV Thresholding
    res, hsvThresh = cv2.threshold(hsv[:, :, 0], 25, 250, cv2.THRESH_BINARY_INV)

    # Show adaptively thresholded image
    adaptiveThresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)

    # Show both thresholded images
    # cv2.imshow("HSV Thresholded",hsvThresh)
    if debug:
        cv2.imshow("Adaptive Thresholding", adaptiveThresh)

    return img, adaptiveThresh
Example #6
Source File: Cartoonlization.py From rabbitVE with GNU General Public License v3.0 | 6 votes |
def cartoonise(self, img_rgb, num_down, num_bilateral, medianBlur, D, sigmaColor, sigmaSpace):
    # Downsample with a Gaussian pyramid
    img_color = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
    for _ in range(num_down):
        img_color = cv2.pyrDown(img_color)
    # Apply several small bilateral filters instead of one large one
    for _ in range(num_bilateral):
        img_color = cv2.bilateralFilter(img_color, d=D, sigmaColor=sigmaColor, sigmaSpace=sigmaSpace)
    # Upsample back to the original size
    for _ in range(num_down):
        img_color = cv2.pyrUp(img_color)
    if not self.Save_Edge:
        img_cartoon = img_color
    else:
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, medianBlur)
        img_edge = cv2.adaptiveThreshold(img_blur, 255,
                                         cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                         cv2.THRESH_BINARY,
                                         blockSize=self.Adaptive_Threshold_Block_Size,
                                         C=self.C)
        img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
        img_edge = cv2.resize(img_edge, img_color.shape[:2][::-1])
        img_cartoon = cv2.bitwise_and(img_color, img_edge)
    return cv2.cvtColor(img_cartoon, cv2.COLOR_RGB2BGR)
Example #7
Source File: platesOCR.py From LicensePlates-OCR with MIT License | 6 votes |
def adaptiveThreshold(plates):
    for i, plate in enumerate(plates):
        img = cv2.imread(plate)
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        cv2.imshow('gray', gray)

        ret, thresh = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
        # cv2.imshow('thresh', thresh)

        threshMean = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 5, 10)
        # cv2.imshow('threshMean', threshMean)

        threshGauss = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 51, 27)
        cv2.imshow('threshGauss', threshGauss)

        cv2.imwrite("processed\\plate{}.png".format(i), threshGauss)
        cv2.waitKey(0)
Example #8
Source File: main.py From WannaPark with GNU General Public License v3.0 | 6 votes |
def processImageForNeuralNet(arg1, image=False):
    """
    Receives as parameter arg1 the path of the image to be converted or the image
    already captured with cv2 (in that case, pass image=True as a parameter).
    The return of this function (x) should be passed as input to a Network object
    by network.feedforward(x)
    """
    SIDE_SIZE = 10
    TOTAL_SIZE = 100
    img = arg1
    if(not image):
        img = cv2.imread(arg1,0)
    img = cv2.resize(img,(SIDE_SIZE,SIDE_SIZE))
    img = cv2.adaptiveThreshold(img,1,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    img = np.reshape(img, (TOTAL_SIZE, 1))
    return np.array(img, dtype='f')
Example #9
Source File: utils.py From WannaPark with GNU General Public License v3.0 | 6 votes |
def processImageForNeuralNet(arg1, image=False):
    """
    Receives as parameter arg1 the path of the image to be converted or the image
    already captured with cv2 (in that case, pass image=True as a parameter).
    The return of this function (x) should be passed as input to a Network object
    by network.feedforward(x)
    """
    SIDE_SIZE = 10
    TOTAL_SIZE = 100
    img = arg1
    if(not image):
        img = cv2.imread(arg1,0)
    img = cv2.resize(img,(SIDE_SIZE,SIDE_SIZE))
    img = cv2.adaptiveThreshold(img,1,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    img = np.reshape(img, (TOTAL_SIZE, 1))
    return np.array(img, dtype='f')
Example #10
Source File: omr.py From omr with MIT License | 6 votes |
def normalize(im):
    """Converts `im` to black and white.

    Applying a threshold to a grayscale image will make every pixel either
    fully black or fully white. Before doing so, a common technique is to
    get rid of noise (or super high frequency color change) by blurring the
    grayscale image with a Gaussian filter."""
    im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

    # Filter the grayscale image with a 3x3 kernel
    blurred = cv2.GaussianBlur(im_gray, (3, 3), 0)

    # Applies a Gaussian adaptive thresholding. In practice, adaptive thresholding
    # seems to work better than applying a single, global threshold to the image.
    # This is particularly important if there could be shadows or non-uniform
    # lighting on the answer sheet. In those scenarios, using a global thresholding
    # technique might yield particularly bad results.
    # The choice of the parameters blockSize = 77 and C = 10 is as much an art
    # as a science and is domain-dependent.
    # In practice, you might want to try different values for your specific answer
    # sheet.
    return cv2.adaptiveThreshold(
        blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 77, 10)
Example #11
Source File: recongnize_chinese_pdf.py From CRNN_Tensorflow with MIT License | 6 votes |
def locate_text_area(pdf_image_row_block):
    """
    locate the text area of the image row block
    :param pdf_image_row_block: color pdf image block
    :return:
    """
    gray_image = cv2.cvtColor(pdf_image_row_block, cv2.COLOR_BGR2GRAY)

    binarized_image = cv2.adaptiveThreshold(
        src=gray_image,
        maxValue=255,
        adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        thresholdType=cv2.THRESH_BINARY,
        blockSize=11,
        C=2
    )

    # sum along the col axis
    col_sum = np.sum(binarized_image, axis=0)
    idx_col_sum = np.argwhere(col_sum < col_sum.max())[:, 0]

    start_col = idx_col_sum[0] if idx_col_sum[0] > 0 else 0
    end_col = idx_col_sum[-1]
    end_col = end_col if end_col < pdf_image_row_block.shape[1] else pdf_image_row_block.shape[1] - 1

    return pdf_image_row_block[:, start_col:end_col, :]
Example #12
Source File: cv_functions.py From R-CNN_LIGHT with MIT License | 5 votes |
def adaptiveThresholding(gray=None, neighbor=5, blur=False, k_size=3):
    if(blur):
        gray = cv2.GaussianBlur(gray, (k_size, k_size), 0)

    return cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, neighbor, 1)
Example #13
Source File: image_dataset.py From Comicolorization with MIT License | 5 votes |
def convert_to_linedrawing(self, luminous_image_data):
    linedrawing = cv2.adaptiveThreshold(luminous_image_data, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                        cv2.THRESH_BINARY, 11, 2)
    return linedrawing
Example #14
Source File: recongnize_chinese_pdf.py From CRNN_Tensorflow with MIT License | 5 votes |
def split_pdf_image_into_row_image_block(pdf_image):
    """
    split the whole pdf image into row image block
    :param pdf_image: the whole color pdf image
    :return:
    """
    gray_image = cv2.cvtColor(pdf_image, cv2.COLOR_BGR2GRAY)

    binarized_image = cv2.adaptiveThreshold(
        src=gray_image,
        maxValue=255,
        adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        thresholdType=cv2.THRESH_BINARY,
        blockSize=11,
        C=2
    )

    # sum along the row axis
    row_sum = np.sum(binarized_image, axis=1)
    idx_row_sum = np.argwhere(row_sum < row_sum.max())[:, 0]

    split_idx = []
    start_idx = idx_row_sum[0]
    for index, idx in enumerate(idx_row_sum[:-1]):
        if idx_row_sum[index + 1] - idx > 5:
            end_idx = idx
            split_idx.append((start_idx, end_idx))
            start_idx = idx_row_sum[index + 1]
    split_idx.append((start_idx, idx_row_sum[-1]))

    pdf_image_splits = []
    for index in range(len(split_idx)):
        idx = split_idx[index]
        pdf_image_split = pdf_image[idx[0]:idx[1], :, :]
        pdf_image_splits.append(pdf_image_split)

    return pdf_image_splits
Example #15
Source File: get_time.py From edusense with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_timestamp(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cropped_image = gray[80:150, 3000:3800]
    cropped_image = cv2.resize(cropped_image, (800, 100))
    binary = cv2.adaptiveThreshold(cropped_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 60)
    text = pytesseract.image_to_string(binary, config='--psm 13 -c tessedit_char_whitelist=:-0123456789APM" " ')
    return text
Example #16
Source File: get_time.py From edusense with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_timestamp(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cropped_image = gray[80:150, 3000:3800]
    cropped_image = cv2.resize(cropped_image, (800, 100))
    binary = cv2.adaptiveThreshold(cropped_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 60)
    text = pytesseract.image_to_string(binary, config='--psm 13 -c tessedit_char_whitelist=:-0123456789APM" " ')
    return text
Example #17
Source File: image_utils.py From ImageProcessingProjects with MIT License | 5 votes |
def adaptive_threshold(image, option=cv2.THRESH_BINARY):
    return cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, option, 11, 2)
Example #18
Source File: threshold_methods.py From plantcv with MIT License | 5 votes |
def gaussian(gray_img, max_value, object_type="light"):
    """Creates a binary image from a grayscale image based on the Gaussian adaptive threshold method.

    Inputs:
    gray_img     = Grayscale image data
    max_value    = value to apply above threshold (usually 255 = white)
    object_type  = "light" or "dark" (default: "light")
                   - If object is lighter than the background then standard thresholding is done
                   - If object is darker than the background then inverse thresholding is done

    Returns:
    bin_img      = Thresholded, binary image

    :param gray_img: numpy.ndarray
    :param max_value: int
    :param object_type: str
    :return bin_img: numpy.ndarray
    """
    # Set the threshold method
    threshold_method = ""
    if object_type.upper() == "LIGHT":
        threshold_method = cv2.THRESH_BINARY
    elif object_type.upper() == "DARK":
        threshold_method = cv2.THRESH_BINARY_INV
    else:
        fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')

    params.device += 1

    bin_img = _call_adaptive_threshold(gray_img, max_value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, threshold_method,
                                       "_gaussian_threshold_")

    return bin_img


# Mean adaptive threshold
Example #19
Source File: fake_util.py From CRAFT_keras with Apache License 2.0 | 5 votes |
def watershed(src):
    """
    Performs a marker-based image segmentation using the watershed algorithm.
    :param src: 8-bit 1-channel image.
    :return: 32-bit single-channel image (map) of markers.
    """
    # cv2.imwrite('{}.png'.format(np.random.randint(1000)), src)
    gray = src.copy()
    img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    # h, w = gray.shape[:2]
    # block_size = (min(h, w) // 4 + 1) * 2 + 1
    # thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block_size, 0)
    _ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    # dist_transform = opening & gray
    # cv2.imshow('dist_transform', dist_transform)
    # _ret, sure_bg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY_INV)
    _ret, sure_fg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY)

    # Finding unknown region
    # sure_bg = np.uint8(sure_bg)
    sure_fg = np.uint8(sure_fg)
    # cv2.imshow('sure_fg', sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, marker_map = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    marker_map = marker_map + 1

    # Now, mark the region of unknown with zero
    marker_map[unknown == 255] = 0

    marker_map = cv2.watershed(img, marker_map)

    return marker_map
Example #20
Source File: Video_find_square_0702.py From PyCV-time with MIT License | 5 votes |
def adap_threshold(frame_in):
    # blur the image to reduce noise
    frame_blur = cv2.blur(frame_in, (3, 3))

    # threshold
    thresh = cv2.adaptiveThreshold(frame_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

    frame_out = thresh
    return frame_out
Example #21
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 5 votes |
def get_sex(img):
    _, _, red = cv2.split(img)
    print('sex')
    red = cv2.UMat(red)
    red = hist_equal(red)
    red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
    # red = cv2.medianBlur(red, 3)
    # cv2.imwrite('address.png', img)
    # img2 = Image.open('address.png')
    red = img_resize(red, 150)
    # cv2.imwrite('sex.png', red)
    # img = Image.fromarray(cv2.UMat.get(red).astype('uint8'))
    # return get_result_fix_length(red, 1, 'sex', '-psm 10')
    return get_result_fix_length(red, 1, 'chi_sim', '--psm 10')
    # return pytesseract.image_to_string(img, lang='sex', config='-psm 10').replace(" ","")
Example #22
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 5 votes |
def get_address(img):
    # _, _, red = cv2.split(img)
    # red = cv2.medianBlur(red, 3)
    print('address')
    _, _, red = cv2.split(img)
    red = cv2.UMat(red)
    red = hist_equal(red)
    red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
    red = img_resize(red, 300)
    # img = img_resize(img, 300)
    # cv2.imwrite('address_red.png', red)
    img = Image.fromarray(cv2.UMat.get(red).astype('uint8'))
    # return punc_filter(get_result_vary_length(red, 'chi_sim', img, '-psm 6'))
    return punc_filter(get_result_vary_length(red, 'chi_sim', img, '--psm 6'))
    # return punc_filter(pytesseract.image_to_string(img, lang='chi_sim', config='-psm 3').replace(" ",""))
Example #23
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 5 votes |
def get_idnum_and_birth(img):
    _, _, red = cv2.split(img)
    print('idnum')
    red = cv2.UMat(red)
    red = hist_equal(red)
    red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
    red = img_resize(red, 150)
    # cv2.imwrite('idnum_red.png', red)
    # idnum_str = get_result_fix_length(red, 18, 'idnum', '-psm 8')
    # idnum_str = get_result_fix_length(red, 18, 'eng', '--psm 8 ')
    img = Image.fromarray(cv2.UMat.get(red).astype('uint8'))
    idnum_str = get_result_vary_length(red, 'eng', img, '--psm 8 ')
    return idnum_str, idnum_str[6:14]
Example #24
Source File: damage_parser.py From gym-mupen64plus with MIT License | 5 votes |
def _get_damage_outline_from_pixels(self, player_num, pixels):
    assert player_num == 1 or player_num == 2
    pixels = self._get_damage_screen_section(player_num, pixels)
    x_len = len(pixels[0])
    assert len(pixels) == _HEIGHT

    # Use OpenCV to find the outlines of the numbers in black and white.
    bw = cv2.cvtColor(pixels, cv2.COLOR_BGR2GRAY)
    thresh = cv2.adaptiveThreshold(bw, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 3, 2)
    dilated = cv2.dilate(thresh, np.ones((2, 2), np.uint8), iterations=1)
    return dilated == 0  # True where the pixels are black, False where white.

# The first time we detect a zero, record its inner pixels. Later, we
# can determine whether a zero is a true zero or not based on whether
# it is the correct color.
Example #25
Source File: words.py From 12306-captcha with Apache License 2.0 | 5 votes |
def binary_text(im):
    """
    Binarization.
    :param im: original image to process
    :return: binarized image
    """
    dst = cv2.adaptiveThreshold(im, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 7, 0)
    return dst
Example #26
Source File: webcam_gui.py From PyCV-time with MIT License | 5 votes |
def adap_threshold(frame_in):
    # convert into gray scale
    frame_gray = cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)

    # blur the image to reduce noise
    frame_blur = cv2.blur(frame_gray, (3, 3))

    # threshold
    thresh = cv2.adaptiveThreshold(frame_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

    frame_out = thresh
    return frame_out
Example #27
Source File: Contours.py From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License | 5 votes |
def main():
    image = cv2.imread("../data/detect_blob.png", 1)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    binay_thresh = cv2.adaptiveThreshold(gray_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)

    _, contours, _ = cv2.findContours(binay_thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(image, contours, -1, (0, 255, 0), 3)

    new_image = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)

    for cnt in contours:
        cv2.drawContours(new_image, [cnt], -1, (255, 0, 255), -1)

        # get contour area using 'contourArea' method
        area_cnt = cv2.contourArea(cnt)

        # get the perimeter of any contour using 'arcLength'
        perimeter_cnt = cv2.arcLength(cnt, True)

        # get centroid of contour using moments
        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        cv2.circle(new_image, (cx, cy), 3, (0, 255, 0), -1)

        print("Area : {}, Perimeter : {}".format(area_cnt, perimeter_cnt))

    cv2.imshow("Contoured Image", new_image)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #28
Source File: solver.py From airport with Apache License 2.0 | 5 votes |
def PrepareImage(image):
    """Converts color image to black and white"""
    # work on gray scale
    bw = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # remove noise, preserve edges
    bw = cv2.bilateralFilter(bw, 9, 75, 75)
    # binary threshold
    bw = cv2.adaptiveThreshold(bw, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    return bw
Example #29
Source File: toolbox.py From stagesepx with MIT License | 5 votes |
def turn_binary(old: np.ndarray) -> np.ndarray:
    grey = turn_grey(old).astype("uint8")
    return cv2.adaptiveThreshold(
        grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2
    )
Example #30
Source File: extract_lines.py From hazymaze with Apache License 2.0 | 5 votes |
def find_items(maze_image):
    # Preprocessing to find the contour of the shapes
    h, w = maze_image.shape[0], maze_image.shape[1]
    dim = (h+w)//2
    b_and_w = cv2.cvtColor(maze_image, cv2.COLOR_BGR2GRAY)
    edges = cv2.GaussianBlur(b_and_w, (11, 11), 0)
    edges = cv2.adaptiveThreshold(edges, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 9, 2)
    cv2.rectangle(edges, (0, 0), (w-1, h-1), (255, 255, 255), 16)
    contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # cv2.imshow('d', edges)

    items = []

    if contours:
        item_mask = np.zeros(edges.shape, np.uint8)
        conts = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=False)
        for cnt in conts:
            if cv2.contourArea(cnt) > 0.35*dim:
                return items, item_mask
            elif cv2.contourArea(cnt) > 0.05*dim:
                d = np.mean(cnt, axis=0)
                d[0][0], d[0][1] = int(round(d[0][0])), int(round(d[0][1]))
                # TODO adjust the size here?
                if cv2.contourArea(cnt) < 0.1*dim:
                    items.append((d, 'smol'))
                    cv2.drawContours(item_mask, [cnt], -1, (255, 255, 255), -1)
                else:
                    items.append((d, 'big'))
                    cv2.drawContours(item_mask, [cnt], -1, (255, 255, 255), -1)
        return items, item_mask