Python cv2.THRESH_BINARY Examples
The following are 30 code examples of cv2.THRESH_BINARY(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the cv2 module.
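Before the examples, here is a minimal sketch of the call they all build on; the file names and the threshold value 127 are placeholders, not taken from any of the projects below. cv2.threshold returns a pair of the threshold actually used and the output image; with cv2.THRESH_BINARY, every pixel above the threshold becomes the given maximum value and every other pixel becomes 0.

import cv2

# Minimal THRESH_BINARY sketch; 'input.png' and the threshold 127 are placeholders.
img = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)  # threshold expects a single-channel image
ret, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)  # pixels > 127 -> 255, else 0
cv2.imwrite('binary.png', binary)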
Example #1
Source File: pycv2.py From vrequest with MIT License

def laplacian(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    s = cv2.Laplacian(s, cv2.CV_16S, ksize=3)
    s = cv2.convertScaleAbs(s)
    cv2.imshow('nier', s)
    return s

    # ret, binary = cv2.threshold(s, 40, 255, cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x, y, w, h = cv2.boundingRect(c)
    #     if w > 5 and h > 10:
    #         cv2.rectangle(v, (x, y), (x+w, y+h), (155, 155, 0), 1)
    # cv2.imshow('nier2', v)
    # cv2.waitKey()
    # cv2.destroyAllWindows()
Example #2
Source File: segment.py From gesture-recognition with MIT License

def segment(image, threshold=25):
    global bg
    # find the absolute difference between background and current frame
    diff = cv2.absdiff(bg.astype("uint8"), image)

    # threshold the diff image so that we get the foreground
    thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]

    # get the contours in the thresholded image
    (_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # return None, if no contours detected
    if len(cnts) == 0:
        return
    else:
        # based on contour area, get the maximum contour which is the hand
        segmented = max(cnts, key=cv2.contourArea)
        return (thresholded, segmented)

#-----------------
# MAIN FUNCTION
#-----------------
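Note that segment() reads a module-level background model bg. As a rough usage sketch (the run_avg() helper and its accumWeight default are assumptions modeled on this style of project; the original main function is not shown in the snippet), the background would be calibrated over the first few frames before segment() is called:

# Hypothetical background calibration; run_avg() is an assumption, not part of the snippet above.
bg = None

def run_avg(image, accumWeight=0.5):
    global bg
    if bg is None:
        bg = image.copy().astype("float")
        return
    # fold the new frame into the running background average
    cv2.accumulateWeighted(image, bg, accumWeight)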
Example #3
Source File: motion.py From object-detection with MIT License

def prediction(self, image):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.GaussianBlur(image, (21, 21), 0)
    if self.avg is None:
        self.avg = image.copy().astype(float)
    cv2.accumulateWeighted(image, self.avg, 0.5)
    frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
    thresh = cv2.threshold(
        frameDelta, DELTA_THRESH, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(
        thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    self.avg = image.copy().astype(float)
    return cnts
Example #4
Source File: squares.py From OpenCV-Python-Tutorial with MIT License

def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4]) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
Example #5
Source File: plate_locate.py From EasyPR-python with Apache License 2.0

def sobelOperT(self, img, blursize, morphW, morphH):
    '''
        Is this any different from sobelOper?
    '''
    blur = cv2.GaussianBlur(img, (blursize, blursize), 0, 0, cv2.BORDER_DEFAULT)

    if len(blur.shape) == 3:
        gray = cv2.cvtColor(blur, cv2.COLOR_RGB2GRAY)
    else:
        gray = blur

    x = cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=3)
    absX = cv2.convertScaleAbs(x)
    grad = cv2.addWeighted(absX, 1, 0, 0, 0)

    _, threshold = cv2.threshold(grad, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

    element = cv2.getStructuringElement(cv2.MORPH_RECT, (morphW, morphH))
    threshold = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, element)

    return threshold
Example #6
Source File: util.py From smashscan with MIT License

def get_image_and_mask(img_location, gray_flag):

    # Load image from file with alpha channel (UNCHANGED flag). If an alpha
    # channel does not exist, just return the base image.
    img = cv2.imread(img_location, cv2.IMREAD_UNCHANGED)
    if img.shape[2] <= 3:
        return img, None

    # Create an alpha channel matrix with values between 0-255. Then
    # threshold the alpha channel to create a binary mask.
    channels = cv2.split(img)
    mask = np.array(channels[3])
    _, mask = cv2.threshold(mask, 250, 255, cv2.THRESH_BINARY)

    # Convert image and mask to grayscale or BGR based on input flag.
    if gray_flag:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

    return img, mask


# Resize an image and mask based on an input scale ratio.
Example #7
Source File: pycv2.py From vrequest with MIT License

def canny(filepathname, left=70, right=140):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    s = cv2.Canny(s, left, right)
    cv2.imshow('nier', s)
    return s

    # Draw the minimal bounding rectangles. After Canny the edges are all
    # white lines, so a threshold range of 127-255 is enough.
    # ret, binary = cv2.threshold(s, 127, 255, cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x, y, w, h = cv2.boundingRect(c)
    #     if w > 5 and h > 10:  # only draw boxes that meet the size constraint
    #         cv2.rectangle(v, (x, y), (x+w, y+h), (155, 155, 0), 1)
    # # cv2.drawContours(s, contours, -1, (0, 0, 255), 3)  # draw all contours
    # cv2.imshow('nier2', v)
    # cv2.waitKey()
    # cv2.destroyAllWindows()
Example #8
Source File: line_detect_2.py From crop_row_detection with GNU General Public License v3.0

def skeletonize(image_in):
    '''Takes a grayscale image and outputs a binary skeleton image.'''
    size = np.size(image_in)
    skel = np.zeros(image_in.shape, np.uint8)

    ret, image_edit = cv2.threshold(image_in, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False

    while not done:
        eroded = cv2.erode(image_edit, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(image_edit, temp)
        skel = cv2.bitwise_or(skel, temp)
        image_edit = eroded.copy()

        zeros = size - cv2.countNonZero(image_edit)
        if zeros == size:
            done = True

    return skel
Example #9
Source File: camera_test.py From crop_row_detection with GNU General Public License v3.0

def main():
    capture = cv2.VideoCapture(0)
    _, image = capture.read()
    previous = image.copy()

    while cv2.waitKey(1) < 0:
        _, image = capture.read()
        diff = cv2.absdiff(image, previous)
        #image = cv2.flip(image, 3)
        #image = cv2.norm(image)
        _, diff = cv2.threshold(diff, 32, 0, cv2.THRESH_TOZERO)
        _, diff = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY)
        diff = cv2.medianBlur(diff, 5)
        cv2.imshow('video', diff)
        previous = image.copy()

    capture.release()
    cv2.destroyAllWindows()
Example #10
Source File: result_detail.py From IkaLog with Apache License 2.0

def is_entry_me(self, img_entry):
    # Judge from the histogram whether the input entry is the player's own.
    if len(img_entry.shape) > 2 and img_entry.shape[2] != 1:
        img_me = cv2.cvtColor(img_entry[:, 0:43], cv2.COLOR_BGR2GRAY)
    else:
        img_me = img_entry[:, 0:43]
    img_me = cv2.threshold(img_me, 230, 255, cv2.THRESH_BINARY)[1]

    me_score = np.sum(img_me)
    me_score_normalized = 0
    try:
        me_score_normalized = me_score / (43 * 45 * 255 / 10)
    except ZeroDivisionError as e:
        me_score_normalized = 0

    #print("score=%3.3f" % me_score_normalized)

    return (me_score_normalized > 1)
Example #11
Source File: saliency.py From OpenCV-Computer-Vision-Projects-with-Python with MIT License

def get_proto_objects_map(self, use_otsu=True):
    """Returns the proto-objects map of an RGB image

        This method generates a proto-objects map of an RGB image.
        Proto-objects are saliency hot spots, generated by thresholding
        the saliency map.

        :param use_otsu: flag whether to use Otsu thresholding (True) or
                         a hardcoded threshold value (False)
        :returns: proto-objects map
    """
    saliency = self.get_saliency_map()

    if use_otsu:
        _, img_objects = cv2.threshold(np.uint8(saliency*255), 0, 255,
                                       cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    else:
        thresh = np.mean(saliency)*255*3
        _, img_objects = cv2.threshold(np.uint8(saliency*255), thresh, 255,
                                       cv2.THRESH_BINARY)
    return img_objects
Example #12
Source File: helpers.py From hazymaze with Apache License 2.0

def blend_non_transparent(sprite, background_img):
    gray_overlay = cv2.cvtColor(background_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    background_mask = 255 - overlay_mask

    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    sprite_part = (sprite * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (background_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    return np.uint8(cv2.addWeighted(sprite_part, 255.0, overlay_part, 255.0, 0.0))
Example #13
Source File: line_detect_1.py From crop_row_detection with GNU General Public License v3.0

def crop_row_detect(image_in):
    save_image('0_image_in', image_in)

    ### Grayscale Transform ###
    image_edit = grayscale_transform(image_in)
    save_image('1_image_gray', image_edit)

    ### Binarization ###
    _, image_edit = cv2.threshold(image_edit, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    save_image('2_image_bin', image_edit)

    ### Stripping ###
    crop_points = strip_process(image_edit)
    save_image('8_crop_points', crop_points)

    ### Hough Transform ###
    crop_lines = crop_point_hough(crop_points)
    save_image('9_image_hough', cv2.addWeighted(image_in, 1, crop_lines, 1, 0.0))

    return crop_lines
Example #14
Source File: thresholding.py From smashscan with MIT License

def param_filter(self, frame):

    # Apply pre-blur according to trackbar value.
    if self.pre_blur_val == 1:
        frame = cv2.GaussianBlur(frame, (5, 5), 0)
    elif self.pre_blur_val == 2:
        frame = cv2.medianBlur(frame, 5)

    # Apply a thresholding method according to trackbar value.
    if self.thresh_flag:
        _, frame = cv2.threshold(frame, 127, 255, cv2.THRESH_BINARY)
    else:
        _, frame = cv2.threshold(frame, 127, 255, cv2.THRESH_OTSU)

    # Apply post-blur according to trackbar value.
    if self.post_blur_val:
        frame = cv2.medianBlur(frame, 5)

    return frame


# Apply filters to frame according to contour parameters.
Example #15
Source File: plate_locate.py From EasyPR-python with Apache License 2.0

def colorSearch(self, src, color, out_rect):
    """
    :param src:
    :param color:
    :param out_rect: minAreaRect
    :return: binary
    """
    color_morph_width = 10
    color_morph_height = 2

    match_gray = colorMatch(src, color, False)

    _, src_threshold = cv2.threshold(match_gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

    element = cv2.getStructuringElement(cv2.MORPH_RECT, (color_morph_width, color_morph_height))
    src_threshold = cv2.morphologyEx(src_threshold, cv2.MORPH_CLOSE, element)

    out = src_threshold.copy()

    _, contours, _ = cv2.findContours(src_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    for cnt in contours:
        mr = cv2.minAreaRect(cnt)
        if self.verifySizes(mr):
            out_rect.append(mr)

    return out
Example #16
Source File: SudokuExtractor.py From SolveSudoku with MIT License

def pre_process_image(img, skip_dilate=False):
    """Uses a blurring function, adaptive thresholding and dilation to expose the main features of an image."""

    # Gaussian blur with a kernel size (height, width) of 9.
    # Note that kernel sizes must be positive and odd and the kernel must be square.
    proc = cv2.GaussianBlur(img.copy(), (9, 9), 0)

    # Adaptive threshold using 11 nearest neighbour pixels
    proc = cv2.adaptiveThreshold(proc, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

    # Invert colours, so gridlines have non-zero pixel values.
    # Necessary to dilate the image, otherwise will look like erosion instead.
    proc = cv2.bitwise_not(proc, proc)

    if not skip_dilate:
        # Dilate the image to increase the size of the grid lines.
        kernel = np.array([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]], np.uint8)
        proc = cv2.dilate(proc, kernel)

    return proc
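A possible driver for pre_process_image() (the file name is a placeholder; the image must be loaded as grayscale, since cv2.adaptiveThreshold requires a single-channel 8-bit source):

# Hypothetical usage; 'sudoku.jpg' is a placeholder path.
img = cv2.imread('sudoku.jpg', cv2.IMREAD_GRAYSCALE)
processed = pre_process_image(img)  # blur, adaptive threshold, invert, dilate
cv2.imwrite('sudoku_processed.png', processed)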
Example #17
Source File: pycv2.py From vrequest with MIT License

def sobel(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    x, y = cv2.Sobel(s, cv2.CV_16S, 1, 0), cv2.Sobel(s, cv2.CV_16S, 0, 1)
    s = cv2.convertScaleAbs(cv2.subtract(x, y))
    s = cv2.blur(s, (9, 9))
    cv2.imshow('nier', s)
    return s

    # ret, binary = cv2.threshold(s, 40, 255, cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x, y, w, h = cv2.boundingRect(c)
    #     if w > 5 and h > 10:
    #         cv2.rectangle(v, (x, y), (x+w, y+h), (155, 155, 0), 1)
    # cv2.imshow('nier2', v)
    # cv2.waitKey()
    # cv2.destroyAllWindows()
Example #18
Source File: recognize.py From gesture-recognition with MIT License

def segment(image, threshold=25):
    global bg
    # find the absolute difference between background and current frame
    diff = cv2.absdiff(bg.astype("uint8"), image)

    # threshold the diff image so that we get the foreground
    thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]

    # get the contours in the thresholded image
    (_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # return None, if no contours detected
    if len(cnts) == 0:
        return
    else:
        # based on contour area, get the maximum contour which is the hand
        segmented = max(cnts, key=cv2.contourArea)
        return (thresholded, segmented)

#--------------------------------------------------------------
# To count the number of fingers in the segmented hand region
#--------------------------------------------------------------
Example #19
Source File: plate_locate.py From EasyPR-python with Apache License 2.0

def DeleteNotArea(self, in_img):
    input_gray = cv2.cvtColor(in_img, cv2.COLOR_BGR2GRAY)
    w = in_img.shape[1]
    h = in_img.shape[0]

    tmp_mat = in_img[int(h * 0.1):int(h * 0.85), int(w * 0.15):int(w * 0.85)]
    plateType = getPlateType(tmp_mat, True)

    if plateType == 'BLUE':
        tmp = in_img[int(h * 0.1):int(h * 0.85), int(w * 0.15):int(w * 0.85)]
        threadHoldV = ThresholdOtsu(tmp)
        _, img_threshold = cv2.threshold(input_gray, threadHoldV, 255, cv2.THRESH_BINARY)
    elif plateType == 'YELLOW':
        tmp = in_img[int(h * 0.1):int(h * 0.85), int(w * 0.15):int(w * 0.85)]
        threadHoldV = ThresholdOtsu(tmp)
        _, img_threshold = cv2.threshold(input_gray, threadHoldV, 255, cv2.THRESH_BINARY_INV)
    else:
        _, img_threshold = cv2.threshold(input_gray, 10, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

    top, bottom = clearLiuDing(img_threshold, 0, img_threshold.shape[0] - 1)
    posLeft, posRight, flag = bFindLeftRightBound1(img_threshold)

    if flag:
        in_img = in_img[int(top):int(bottom), int(posLeft):int(w)]
Example #20
Source File: dead.py From IkaLog with Apache License 2.0

def recoginize_and_vote_death_reason(self, context):
    if self.deadly_weapon_recoginizer is None:
        return False

    lang_short = Localization.get_game_languages()[0][0:2]
    try:
        c = self.choordinates[lang_short]
    except KeyError:
        c = self.choordinates['en']

    img_weapon = context['engine']['frame'][
        c['top']:c['top'] + 51,
        c['left']:c['left'] + 410
    ]

    img_weapon_gray = cv2.cvtColor(img_weapon, cv2.COLOR_BGR2GRAY)
    img_weapon_hsv = cv2.cvtColor(img_weapon, cv2.COLOR_BGR2HSV)
    img_weapon_gray[img_weapon_hsv[:, :, 1] > 32] = 0

    ret, img_weapon_b = cv2.threshold(
        img_weapon_gray, 220, 255, cv2.THRESH_BINARY)

    # (Note) This is the data to save for training. Change the 0 below to 1 for training.
    if 0:  # (self.time_last_write + 5000 < context['engine']['msec']):
        import time
        filename = os.path.join(  # training/ directory must already exist
            'training', '_deadly_weapons.%s.png' % time.time())
        cv2.imwrite(filename, img_weapon_b)
        self.time_last_write = context['engine']['msec']

    # Workaround for languages that deadly_weapons is not trained for
    if not Localization.get_game_languages()[0] in ['ja', 'en_NA']:
        return

    img_weapon_b_bgr = cv2.cvtColor(img_weapon_b, cv2.COLOR_GRAY2BGR)
    weapon_id = self.deadly_weapon_recoginizer.match(img_weapon_b_bgr)

    # Cast a vote (the votes are tallied later).
    votes = self._cause_of_death_votes
    votes[weapon_id] = votes.get(weapon_id, 0) + 1
Example #21
Source File: result_judge.py From IkaLog with Apache License 2.0

def match_no_cache(self, context):
    if self.is_another_scene_matched(context, 'GameTimerIcon'):
        return False

    frame = context['engine']['frame']
    if frame is None:
        return False

    match_win = self.mask_win.match(frame)
    match_lose = self.mask_lose.match(frame)
    match_win_or_lose = (match_win and (not match_lose)) or (match_lose and (not match_win))
    if not match_win_or_lose:
        return False

    img_bar = context['engine']['frame'][600:600 + 30, 126:126 + 1028, :]
    img_bar_hsv = cv2.cvtColor(img_bar, cv2.COLOR_BGR2HSV)
    ret, img_bar_b = cv2.threshold(
        img_bar_hsv[:, :, 2], 48, 255, cv2.THRESH_BINARY)

    img_bar_b_hist = cv2.calcHist([img_bar_b], [0], None, [3], [0, 256])
    ratio = img_bar_b_hist[2] / np.sum(img_bar_b_hist)
    # print('%s: win %s lose %s ratio %s' % (self, match_win, match_lose, ratio))

    if ratio < 0.9:
        return False

    context['game']['judge'] = 'win' if match_win else 'lose'

    if not self.matched_in(context, 30 * 1000, attr='_last_event_msec'):
        context['game']['image_judge'] = \
            copy.deepcopy(context['engine']['frame'])
        self._analyze(context)
        self._call_plugins('on_result_judge')
        self._last_event_msec = context['engine']['msec']

    return True
Example #22
Source File: final.py From Sign-Language-Interpreter-using-Deep-Learning with MIT License

def get_img_contour_thresh(img):
    img = cv2.flip(img, 1)
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    cv2.filter2D(dst, -1, disc, dst)
    blur = cv2.GaussianBlur(dst, (11, 11), 0)
    blur = cv2.medianBlur(blur, 15)
    thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    thresh = cv2.merge((thresh, thresh, thresh))
    thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
    thresh = thresh[y:y+h, x:x+w]
    contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[0]
    return img, contours, thresh
Example #23
Source File: character.py From IkaLog with Apache License 2.0

def match1(self, img):
    if (img.shape[0] != self.sample_width) or (img.shape[1] != self.sample_height):
        img = cv2.resize(
            img, (self.sample_width, self.sample_height),
            interpolation=cv2.INTER_NEAREST)

    if len(img.shape) > 2:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    ret, img = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)

    raito = np.sum(img) / (img.shape[0] * img.shape[1]) if np.sum(img) != 0 else 0.0
    if raito < 0.1:
        # Almost completely black.
        return 0

    sample = img.reshape((1, img.shape[0] * img.shape[1]))
    sample = np.array(sample, np.float32)

    k = 3
    retval, results, neigh_resp, dists = self.model.findNearest(sample, k)

    # E.g., when collecting training data:
    if 0:
        import time
        cv2.imwrite('training/numbers/%s.%s.png' % (retval, time.time()), img)

    d = int(results.ravel())
    return d
Example #24
Source File: set_hand_histogram.py From Sign-Language-Interpreter-using-Deep-Learning with MIT License

def get_hand_hist():
    cam = cv2.VideoCapture(1)
    if cam.read()[0] == False:
        cam = cv2.VideoCapture(0)
    x, y, w, h = 300, 100, 300, 300
    flagPressedC, flagPressedS = False, False
    imgCrop = None
    while True:
        img = cam.read()[1]
        img = cv2.flip(img, 1)
        img = cv2.resize(img, (640, 480))
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        keypress = cv2.waitKey(1)
        if keypress == ord('c'):
            hsvCrop = cv2.cvtColor(imgCrop, cv2.COLOR_BGR2HSV)
            flagPressedC = True
            hist = cv2.calcHist([hsvCrop], [0, 1], None, [180, 256], [0, 180, 0, 256])
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
        elif keypress == ord('s'):
            flagPressedS = True
            break

        if flagPressedC:
            dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
            dst1 = dst.copy()
            disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
            cv2.filter2D(dst, -1, disc, dst)
            blur = cv2.GaussianBlur(dst, (11, 11), 0)
            blur = cv2.medianBlur(blur, 15)
            ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            thresh = cv2.merge((thresh, thresh, thresh))
            #cv2.imshow("res", res)
            cv2.imshow("Thresh", thresh)
        if not flagPressedS:
            imgCrop = build_squares(img)
        #cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)
        cv2.imshow("Set hand histogram", img)

    cam.release()
    cv2.destroyAllWindows()
    with open("hist", "wb") as f:
        pickle.dump(hist, f)
Example #25
Source File: mrcnn_dataset.py From segmentation-unet-maskrcnn with MIT License

def load_mask(self, image_id):
    info = self.image_info[image_id]
    mask_path = info['mask_path']
    valid_mask = []
    for _mask_path in mask_path:
        _mask = cv2.imread(_mask_path, 0)
        if _mask.max() == _mask.min():
            pass
        else:
            valid_mask.append(_mask_path)

    count = len(valid_mask)
    mask = np.zeros([info['height'], info['width'], count], 'uint8')
    shapes = []
    for i in range(count):
        img_array = cv2.imread(valid_mask[i], 0)
        (thresh, im_bw) = cv2.threshold(img_array, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        mask_array = (img_array < thresh).astype('uint8')
        mask[:, :, i:i+1] = np.expand_dims(mask_array, axis=2)

        fn_img, ext = os.path.splitext(valid_mask[i])
        if fn_img.split('_')[-1] == 'merged':
            shapes.append(fn_img.split('/')[-1].split('_')[0])
        else:
            shapes.append(fn_img.split('_')[-1])

    # Map class names to class IDs.
    class_ids = np.array([self.class_names.index(s) for s in shapes])
    return mask, class_ids
Example #26
Source File: markers_detection.py From niryo_one_ros with GNU General Public License v3.0

def extract_img_markers(img, workspace_ratio=1.0):
    """
    Extract the working area from an image using the 4 Niryo markers.

    :param img: OpenCV image containing the 4 Niryo markers
    :param workspace_ratio: ratio between the width and the height of the area represented by the markers
    :return: extracted and warped working area image
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_thresh = cv2.adaptiveThreshold(gray, maxValue=255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,
                                       thresholdType=cv2.THRESH_BINARY, blockSize=15, C=25)

    list_good_candidates = find_markers_from_img_thresh(img_thresh)
    if not list_good_candidates or len(list_good_candidates) > 6:
        return None

    if len(list_good_candidates) == 4:
        list_good_candidates = sort_markers_detection(list_good_candidates)
    else:
        list_good_candidates = complicated_sort_markers(list_good_candidates, workspace_ratio=workspace_ratio)
        if list_good_candidates is None:
            return None

    im_cut = extract_sub_img(img, list_good_candidates, ratio_w_h=workspace_ratio)
    return im_cut
Example #27
Source File: main_gui_shadow_draw_sketchy.py From iSketchNFill with GNU General Public License v3.0

def browse(self, pos_y, pos_x):
    num_rows = int(opt.num_interpolate / 2)
    num_cols = 2
    div_rows = int(self.img_size / num_rows)
    div_cols = int(self.img_size / num_cols)
    which_row = int(pos_x / div_rows)
    which_col = int(pos_y / div_cols)

    cv2_gallery = cv2.imread('imgs/fake_B_gallery.png')
    cv2_gallery = cv2.resize(cv2_gallery, (self.img_size, self.img_size))
    cv2_gallery = cv2.rectangle(cv2_gallery,
                                (which_col * div_cols, which_row * div_rows),
                                ((which_col + 1) * div_cols, (which_row + 1) * div_rows),
                                (0, 255, 0), 8)
    self.visWidget.update_vis_cv2(cv2_gallery)

    cv2_img = cv2.imread('imgs/test_fake_B_shadow.png')
    cv2_img = cv2.resize(cv2_img, (self.img_size, self.img_size))

    which_highlight = which_row * 2 + which_col
    img_gray = cv2.imread('imgs/test_%d_L_fake_B_inter.png' % (which_highlight), cv2.IMREAD_GRAYSCALE)
    img_gray = cv2.resize(img_gray, (self.img_size, self.img_size))
    (thresh, im_bw) = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    cv2_img[np.where(im_bw == [0])] = [0, 255, 0]
    self.drawWidget.setShadowImage(cv2_img)
Example #28
Source File: handling_image.py From PythonPilot with Apache License 2.0

def mask_overlay(src, mask):
    """Image overlaying.

    Args:
        src (int): Input image BGR.
                   numpy.ndarray, (720, 1280, 3), 0~255
        mask (int): Input image BGR.
                    numpy.ndarray, (720, 1280, 3), 0~255

    Returns:
        dst (int): Output image BGR.
                   numpy.ndarray, (720, 1280, 3), 0~255
    """
    # binarize
    gray = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY)
    ret, mask_bynary = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
    mask_bynary = mask_bynary // 255

    # resize
    if src.shape[0] != mask.shape[0] or src.shape[1] != mask.shape[1]:
        mask = cv2.resize(mask, (src.shape[1], src.shape[0]), interpolation=cv2.INTER_LINEAR)
        mask_bynary = cv2.resize(mask_bynary, (src.shape[1], src.shape[0]), interpolation=cv2.INTER_LINEAR)

    # prepare white image
    mask_white = np.dstack((mask_bynary*255, mask_bynary*255, mask_bynary*255))

    # combine src and thresholded image
    not_mask = cv2.bitwise_not(mask_white)
    src_masked = cv2.bitwise_and(src, not_mask)
    dst = cv2.bitwise_or(src_masked, mask)

    return dst
Example #29
Source File: download_dataset.py From ID-Card-Segmentation with MIT License

def read_image(img, label):
    image = cv2.imread(img)
    mask = np.zeros(image.shape, dtype=np.uint8)

    quad = json.load(open(label, 'r'))
    coords = np.array(quad['quad'], dtype=np.int32)
    cv2.fillPoly(mask, coords.reshape(-1, 4, 2), color=(255, 255, 255))
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    mask = cv2.resize(mask, (mask.shape[1]//2, mask.shape[0]//2))
    image = cv2.resize(image, (image.shape[1]//2, image.shape[0]//2))
    mask = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]

    mask = cv2.resize(mask, (256, 256))
    image = cv2.resize(image, (256, 256))

    return image, mask
Example #30
Source File: plate_locate.py From EasyPR-python with Apache License 2.0

def sobelOper(self, img, blursize, morphW, morphH):
    blur = cv2.GaussianBlur(img, (blursize, blursize), 0, 0, cv2.BORDER_DEFAULT)

    if len(blur.shape) == 3:
        gray = cv2.cvtColor(blur, cv2.COLOR_RGB2GRAY)
    else:
        gray = blur

    x = cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)
    absX = cv2.convertScaleAbs(x)
    grad = cv2.addWeighted(absX, 1, 0, 0, 0)

    _, threshold = cv2.threshold(grad, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

    element = cv2.getStructuringElement(cv2.MORPH_RECT, (morphW, morphH))
    threshold = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, element)

    return threshold