Python cv2.countNonZero() Examples
The following are 30 code examples of cv2.countNonZero().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module cv2, or try the search function.
Example #1
Source File: line_detect_2.py From crop_row_detection with GNU General Public License v3.0 | 7 votes |
def skeletonize(image_in):
    """Reduce a grayscale image to a one-pixel-wide binary skeleton.

    The input is binarized with Otsu's threshold, then repeatedly eroded;
    each iteration's "residue" (pixels removed by an open operation) is
    OR-ed into the skeleton until the working image is empty.
    """
    total_pixels = np.size(image_in)
    skeleton = np.zeros(image_in.shape, np.uint8)
    _, working = cv2.threshold(image_in, 0, 255,
                               cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    while True:
        eroded = cv2.erode(working, cross)
        reopened = cv2.dilate(eroded, cross)
        residue = cv2.subtract(working, reopened)
        skeleton = cv2.bitwise_or(skeleton, residue)
        working = eroded.copy()
        # Stop once every pixel has been eroded away.
        if total_pixels - cv2.countNonZero(working) == total_pixels:
            break
    return skeleton
Example #2
Source File: functions.py From malayalam-character-recognition with MIT License | 7 votes |
def skeletize(img):
    """Morphologically skeletonize a binary image and return the skeleton."""
    pixel_count = np.size(img)
    skeleton = np.zeros(img.shape, np.uint8)
    cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    while True:
        eroded = cv2.erode(img, cross)
        reopened = cv2.dilate(eroded, cross)
        residue = cv2.subtract(img, reopened)
        skeleton = cv2.bitwise_or(skeleton, residue)
        img = eroded.copy()
        # All pixels gone -> skeleton is complete.
        if pixel_count - cv2.countNonZero(img) == pixel_count:
            break
    return skeleton
Example #3
Source File: chars_segment.py From EasyPR-python with Apache License 2.0 | 6 votes |
def verifyCharSizes(self, r):
    """Heuristically accept/reject a candidate character patch.

    Checks the patch's aspect ratio, height range, and the fraction of
    lit pixels inside its bounding box. Returns True when all checks pass.
    """
    base_aspect = 0.5
    tolerance = 0.7
    min_height, max_height = 10, 35
    min_aspect = 0.05          # permissive lower bound, e.g. the digit "1"
    max_aspect = base_aspect + base_aspect * tolerance
    patch_aspect = r.shape[1] / r.shape[0]
    lit = cv2.countNonZero(r)
    box_area = r.shape[0] * r.shape[1]
    lit_fraction = lit / box_area
    return (lit_fraction <= 1
            and min_aspect < patch_aspect < max_aspect
            and min_height <= r.shape[0] < max_height)
Example #4
Source File: helloopencvtests.py From Mastering-OpenCV-4-with-Python with MIT License | 6 votes |
def test_write_image_to_disk():
    """Round-trip test: a written image must re-load pixel-identical."""
    print("testing write_image_to_disk")
    # Load a reference image, write it back out, and re-load the copy.
    bgr_image = load_image("images/logo.png")
    write_image_to_disk("images/temp.png", bgr_image)
    temp = load_image("images/temp.png")
    # Shapes must match before a pixel-wise comparison makes sense.
    assert bgr_image.shape == temp.shape
    # A zero difference in every channel means the images are identical.
    difference = cv2.subtract(bgr_image, temp)
    b, g, r = cv2.split(difference)
    assert cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0
Example #5
Source File: happycolour.py From SaltwashAR with GNU General Public License v3.0 | 6 votes |
def _thread(self, args):
    """Worker: react emotionally to how much of a target colour is visible."""
    image = args
    # Colour masking is done in HSV space.
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    colour_mask = cv2.inRange(hsv, self.lower_colour, self.upper_colour)
    colour_count = cv2.countNonZero(colour_mask)
    # Bail out quietly if the robot was asked to stop mid-computation.
    if self.is_stop:
        return
    # Low colour presence -> sad; high -> happy; in between -> no reaction.
    if colour_count < self.lower_threshold:
        self._text_to_speech("I just feel sad")
        self._display_emotion(SAD)
    elif colour_count > self.upper_threshold:
        self._text_to_speech("I'm so happy!")
        self._display_emotion(HAPPY)
Example #6
Source File: getFoodContourMorph.py From tierpsy-tracker with MIT License | 6 votes |
def skeletonize(img):
    """Return a morphological skeleton of *img* (a binary Mat).

    Based on http://felix.abecassis.me/2011/09/opencv-morphological-skeleton/
    """
    img = img.copy()          # work on a copy; never clobber the caller's image
    skel = img.copy()
    skel[:, :] = 0            # start from an all-black skeleton
    cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    while True:
        eroded = cv2.morphologyEx(img, cv2.MORPH_ERODE, cross)
        opened = cv2.morphologyEx(eroded, cv2.MORPH_DILATE, cross)
        residue = cv2.subtract(img, opened)
        skel = cv2.bitwise_or(skel, residue)
        img[:, :] = eroded[:, :]
        # Done when erosion has removed every foreground pixel.
        if cv2.countNonZero(img) == 0:
            break
    return skel
Example #7
Source File: Input.py From DanceCV with BSD 3-Clause "New" or "Revised" License | 6 votes |
def checkDifference(self, roi, n):
    """Return True when region *n* shows enough foreground motion.

    The ROI is grayscaled and blurred, then fed to the n-th background
    subtractor; the foreground pixel count is compared to a threshold.
    """
    roi = cv2.cvtColor(roi, 6)              # 6 == cv2.COLOR_BGR2GRAY
    roi = cv2.GaussianBlur(roi, (7, 7), 0)
    foreground = self.bsmog[n].apply(roi, None, self.bgAdapt[n])
    # Optionally display the foreground mask in the matching debug window.
    if self.debug:
        if n == 0:
            cv2.imshow(self.debugWindow0, foreground)
        elif n == 1:
            cv2.imshow(self.debugWindow1, foreground)
        elif n == 2:
            cv2.imshow(self.debugWindow2, foreground)
        elif n == 3:
            cv2.imshow(self.debugWindow3, foreground)
    return cv2.countNonZero(foreground) > Constants.ACTIVE_THRESHOLD
Example #8
Source File: readScreen2.py From Smart-Traffic-Signals-in-India-using-Deep-Reinforcement-Learning-and-Advanced-Computer-Vision with MIT License | 5 votes |
def tail_length(mask):
    """Estimate the vehicle-queue length from a binary lane mask.

    Scans 80 ten-pixel-high strips upward from row 800; a strip counts as
    occupied when more than 1250 white pixels fall in its 500-px-wide band.
    The tail ends at the first run of 15 consecutive empty strips; results
    under 5 strips are treated as no queue at all.
    """
    occupied = np.zeros((80), dtype=int)
    row = 800
    for strip in range(80):
        band = mask[row - 10:row, 0:500]
        occupied[strip] = 1 if cv2.countNonZero(band) > 1250 else 0
        row -= 10
    tail = 80
    for start_idx in range(65):
        if not any(occupied[j] == 1 for j in range(start_idx, start_idx + 15)):
            tail = start_idx
            break
    return 0 if tail < 5 else tail
Example #9
Source File: readScreen.py From Smart-Traffic-Signals-in-India-using-Deep-Reinforcement-Learning-and-Advanced-Computer-Vision with MIT License | 5 votes |
def tail_length(mask):
    """Estimate the vehicle-queue length from a binary lane mask.

    Same strip-scan approach as the sibling readers, with a 1000-pixel
    occupancy threshold and diagnostic printing left enabled.
    """
    occupied = np.zeros((80), dtype=int)
    row = 800
    for strip in range(80):
        band = mask[row - 10:row, 0:500]
        white = cv2.countNonZero(band)
        print(" ", white)
        occupied[strip] = 1 if white > 1000 else 0
        row -= 10
    tail = 80
    for start_idx in range(65):
        # A run of 15 empty strips marks the end of the queue.
        if not any(occupied[j] == 1 for j in range(start_idx, start_idx + 15)):
            tail = start_idx
            break
    print(occupied)
    print(tail)
    return 0 if tail < 5 else tail
Example #10
Source File: blinkDetect.py From Fatigue-Detection-System-Based-On-Behavioural-Characteristics-Of-Driver with MIT License | 5 votes |
def checkEyeStatus(landmarks):
    """Return 1 when the eyes look open, 0 when closed.

    Builds convex-polygon masks for both eyes (mask is computed but the
    decision itself uses only the eye aspect ratio, EAR), then compares
    the averaged EAR against the module-level `thresh`.
    """
    mask = np.zeros(frame.shape[:2], dtype=np.float32)
    left_hull = [(landmarks[idx][0], landmarks[idx][1]) for idx in leftEyeIndex]
    cv2.fillConvexPoly(mask, np.int32(left_hull), 255)
    right_hull = [(landmarks[idx][0], landmarks[idx][1]) for idx in rightEyeIndex]
    cv2.fillConvexPoly(mask, np.int32(right_hull), 255)
    # Average the per-eye aspect ratios; low EAR means a closed eye.
    ear = (eye_aspect_ratio(left_hull) + eye_aspect_ratio(right_hull)) / 2.0
    return 0 if ear < thresh else 1
Example #11
Source File: readscreen3.py From Smart-Traffic-Signals-in-India-using-Deep-Reinforcement-Learning-and-Advanced-Computer-Vision with MIT License | 5 votes |
def tail_length(mask):
    """Estimate the vehicle-queue length from a binary lane mask.

    Scans 80 ten-pixel strips upward from row 800; a strip is occupied
    when its white count exceeds `width_threshold`. The queue ends at the
    first run of `length_threshold` consecutive empty strips; anything
    shorter than 5 strips is reported as 0.
    """
    width_threshold = 200
    length_threshold = 30
    occupied = np.zeros((80), dtype=int)
    row = 800
    for strip in range(80):
        band = mask[row - 10:row, 0:500]
        occupied[strip] = 1 if cv2.countNonZero(band) > width_threshold else 0
        row -= 10
    tail = 80
    for start_idx in range(80 - length_threshold):
        if not any(occupied[j] == 1
                   for j in range(start_idx, start_idx + length_threshold)):
            tail = start_idx
            break
    return 0 if tail < 5 else tail
Example #12
Source File: cross_read.py From Smart-Traffic-Signals-in-India-using-Deep-Reinforcement-Learning-and-Advanced-Computer-Vision with MIT License | 5 votes |
def tail_length(mask):
    """Estimate the vehicle-queue length from a binary lane mask.

    Identical strategy to the other readers: strip-scan upward from row
    800, mark occupancy against `width_threshold`, and stop at the first
    `length_threshold`-strip gap. Results under 5 strips become 0.
    """
    width_threshold = 200
    length_threshold = 30
    occupied = np.zeros((80), dtype=int)
    row = 800
    for strip in range(80):
        band = mask[row - 10:row, 0:500]
        occupied[strip] = 1 if cv2.countNonZero(band) > width_threshold else 0
        row -= 10
    tail = 80
    for start_idx in range(80 - length_threshold):
        if not any(occupied[j] == 1
                   for j in range(start_idx, start_idx + length_threshold)):
            tail = start_idx
            break
    return 0 if tail < 5 else tail
Example #13
Source File: cross_read_sequential.py From Smart-Traffic-Signals-in-India-using-Deep-Reinforcement-Learning-and-Advanced-Computer-Vision with MIT License | 5 votes |
def tail_length(mask):
    """Estimate the vehicle-queue length from a binary lane mask.

    Strip-scans 80 ten-pixel bands upward from row 800 and returns the
    index of the first `length_threshold`-long run of empty strips;
    queues shorter than 5 strips are reported as 0.
    """
    width_threshold = 200
    length_threshold = 30
    occupied = np.zeros((80), dtype=int)
    row = 800
    for strip in range(80):
        band = mask[row - 10:row, 0:500]
        occupied[strip] = 1 if cv2.countNonZero(band) > width_threshold else 0
        row -= 10
    tail = 80
    for start_idx in range(80 - length_threshold):
        if not any(occupied[j] == 1
                   for j in range(start_idx, start_idx + length_threshold)):
            tail = start_idx
            break
    return 0 if tail < 5 else tail
Example #14
Source File: color_vel.py From ros_book_programs with BSD 2-Clause "Simplified" License | 5 votes |
def get_colored_area(self, cv_image, lower, upper):
    """Return (pixel_count, extracted_image) for an HSV colour range.

    *lower*/*upper* are HSV bounds; the extracted image keeps only the
    pixels of *cv_image* that fall inside the range.
    """
    hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    in_range = cv2.inRange(hsv, lower, upper)
    extracted = cv2.bitwise_and(cv_image, cv_image, mask=in_range)
    return (cv2.countNonZero(in_range), extracted)
Example #15
Source File: nox.py From Yugioh-bot with MIT License | 5 votes |
def check_if_battle(self, img):
    """Return True when the battle indicator region is mostly white.

    Crops a fixed screen region and counts near-white pixels; more than
    50*200 of them means the battle UI is showing.
    """
    region = np.array(img)[750:800, 0:400]
    white_lo = np.array([250, 250, 250], np.uint8)
    white_hi = np.array([255, 255, 255], np.uint8)
    white_mask = cv2.inRange(region, white_lo, white_hi)
    return cv2.countNonZero(white_mask) > (50 * 200)
Example #16
Source File: ocrd_anybaseocr_cropping.py From ocrd_anybaseocr with Apache License 2.0 | 5 votes |
def detect_textarea(self, arg):
    """Detect candidate text areas in a page image.

    Pipeline: gray -> morphological gradient -> Otsu binarization ->
    horizontal closing to merge characters into lines -> contour
    filtering by fill ratio and size. Accepted boxes are drawn on *arg*.
    Returns (textarea boxes, annotated image, height, width).
    """
    textarea = []
    gray = cv2.cvtColor(arg, cv2.COLOR_RGB2GRAY)
    height, width, _ = arg.shape
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    grad = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, ellipse)
    _, bw = cv2.threshold(
        grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # Wide, flat kernel joins neighbouring glyphs into text lines
    # (tuned for historical documents).
    line_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 1))
    connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, line_kernel)
    contours, _ = cv2.findContours(
        connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    mask = np.zeros(bw.shape, dtype=np.uint8)
    for idx, contour in enumerate(contours):
        x, y, w, h = cv2.boundingRect(contour)
        mask[y:y + h, x:x + w] = 0
        cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
        # Fraction of the bounding box actually covered by the contour.
        fill_ratio = float(cv2.countNonZero(mask[y:y + h, x:x + w])) / (w * h)
        if fill_ratio > 0.45 and (width * 0.9) > w > 15 and (height * 0.5) > h > 15:
            textarea.append([x, y, x + w - 1, y + h - 1])
            cv2.rectangle(arg, (x, y), (x + w - 1, y + h - 1), (0, 0, 255), 2)
    if len(textarea) > 1:
        textarea = self.filter_noisebox(textarea, height, width)
    return textarea, arg, height, width
Example #17
Source File: provider.py From Yugioh-bot with MIT License | 5 votes |
def get_current_page(self, img):
    """Return the 1-based index of the active page-indicator quadrant.

    Masks near-white pixels in the page-indicator area, then checks
    which horizontal quarter contains any lit pixels.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    indicator = crop_image(gray, **self.predefined.page_area)
    indicator = mask_image([254], [255], indicator)
    height, width = indicator.shape
    active = 0
    for quarter in range(4):
        segment = crop_image(indicator, (quarter * width / 4), 0,
                             ((quarter + 1) * width / 4), height)
        if cv2.countNonZero(segment) > 0:
            active = quarter
            break
    return active + 1
Example #18
Source File: idcardocr.py From idmatch with MIT License | 5 votes |
def recognize_card(idcard):
    """OCR an ID-card image and return (saved image path, text items).

    Text-like contours are found on the grayscale card, filtered by size
    and fill ratio, OCR'd with Tesseract, and the accepted regions are
    outlined on the card image before it is written to the static folder.
    """
    result = []
    # TODO:
    # process_image(original_image, cropped_image)
    # In some cases a resized image gives worse results.
    gray = cv2.cvtColor(idcard, cv2.COLOR_BGR2GRAY)
    denoised = cv2.fastNlMeansDenoising(gray, None, 3, 7, 21)
    contours, hierarchy = recognize_text(gray)
    mask = np.zeros(gray.shape, np.uint8)
    for index, contour in enumerate(contours):
        x, y, w, h = cv2.boundingRect(contour)
        # Skip regions too small to hold readable text.
        if h < 16 or w < 16:
            continue
        # roi_view is a view into `mask`, so drawing the filled contour
        # below is reflected in the subsequent non-zero count.
        roi_view = mask[y:y + h, x:x + w]
        cv2.drawContours(mask, [contour], 0, 255, -1)
        filled = cv2.countNonZero(roi_view)
        ratio = float(filled) / float(h * w)
        # Empirically tuned fill-ratio band for genuine text blocks.
        if 0.55 < ratio < 0.9:
            roi = denoised[y:y + h, x:x + w]
            text = pytesseract.image_to_string(
                Image.fromarray(roi), lang="kir+eng", config="-psm 7")
            if text:
                result.append({'x': x, 'y': y, 'w': w, 'h': h, 'text': text})
                cv2.rectangle(idcard, (x, y), (x + w, y + h), (255, 0, 255), 2)
    # Name the output after the content hash of the annotated card.
    hex_dig = hashlib.sha256(idcard).hexdigest()
    cv2.imwrite("/webapp/web/static/" + hex_dig + ".jpeg", idcard)
    return "static/" + hex_dig + ".jpeg", result
Example #19
Source File: detection.py From SaltwashAR with GNU General Public License v3.0 | 5 votes |
def get_active_cell(self, image):
    """Return the index (0-2) of the bottom-row cell with the most motion.

    Thresholds the frame-to-frame difference against the previous gray
    frame, splits the lower half of the image into three columns, and
    returns the busiest column if it meets self.THRESHOLD, else None.
    """
    # obtain motion between previous and current image
    current_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    delta = cv2.absdiff(self.previous_gray, current_gray)
    threshold_image = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    # set cell height and width
    # BUG FIX: use floor division — under Python 3, `/` yields floats,
    # and float slice indices raise TypeError when indexing the array.
    height, width = threshold_image.shape[:2]
    cell_height = height // 2
    cell_width = width // 3
    # store motion level for each cell of the bottom half
    cells = np.array([0, 0, 0])
    cells[0] = cv2.countNonZero(threshold_image[cell_height:height, 0:cell_width])
    cells[1] = cv2.countNonZero(threshold_image[cell_height:height, cell_width:cell_width * 2])
    cells[2] = cv2.countNonZero(threshold_image[cell_height:height, cell_width * 2:width])
    # obtain the most active cell
    top_cell = np.argmax(cells)
    # return the most active cell, if threshold met
    if cells[top_cell] >= self.THRESHOLD:
        return top_cell
    return None
Example #20
Source File: motion_detection.py From pynvr with BSD 3-Clause "New" or "Revised" License | 5 votes |
def motionDetected(self, new_frame):
    """Return True when the changed-pixel percentage exceeds the threshold."""
    frame = self.preprocessInputFrame(new_frame)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    gray = cv.GaussianBlur(gray, (21, 21), 0)
    # First frame ever: nothing to compare against yet.
    if self.prevFrame is None:
        self.prevFrame = gray
        return False
    frameDiff = cv.absdiff(gray, self.prevFrame)
    # kernel = np.ones((5, 5), np.uint8)
    opening = cv.morphologyEx(frameDiff, cv.MORPH_OPEN, None)  # noqa
    closing = cv.morphologyEx(frameDiff, cv.MORPH_CLOSE, None)  # noqa
    ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)
    height = np.size(th1, 0)
    width = np.size(th1, 1)
    nb = cv.countNonZero(th1)
    # Percentage of white (changed) pixels in the thresholded difference.
    avg = (nb * 100) / (height * width)
    self.prevFrame = gray
    triggered = avg > self.threshold
    if triggered:
        self.updateMotionDetectionDts()
    return triggered
Example #21
Source File: convenience.py From imutils with MIT License | 5 votes |
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    """Return the morphological skeleton of a binary *image*.

    *size* is the structuring-element size; *structuring* its shape.
    Erosion residues are accumulated until no white pixels remain.
    """
    # total pixel count, output buffer, and the structuring element
    area = image.shape[0] * image.shape[1]
    skeleton = np.zeros(image.shape, dtype="uint8")
    elem = cv2.getStructuringElement(structuring, size)
    # keep eroding until the working image is completely black
    while True:
        eroded = cv2.erode(image, elem)
        temp = cv2.dilate(eroded, elem)
        # the residue of this iteration joins the skeleton
        temp = cv2.subtract(image, temp)
        skeleton = cv2.bitwise_or(skeleton, temp)
        image = eroded.copy()
        # equivalent to the original `area == area - countNonZero(image)`
        if cv2.countNonZero(image) == 0:
            break
    return skeleton
Example #22
Source File: core_func.py From EasyPR-python with Apache License 2.0 | 5 votes |
def plateColorJudge(src, r, adaptive_minsv):
    """Return (match_percent, is_plate_colour) for a candidate plate.

    Runs the colour matcher and checks whether more than 45% of the
    resulting mask pixels are lit.
    """
    threshold = 0.45
    matched = colorMatch(src, r, adaptive_minsv)
    percent = cv2.countNonZero(matched) / (matched.shape[0] * matched.shape[1])
    return percent, percent > threshold
Example #23
Source File: motion.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def motion_detection(t_minus, t_now, t_plus):
    """Return the number of changed pixels across three consecutive frames.

    The three-frame difference is thresholded, normalized, converted to
    grayscale, and its non-zero pixels counted as the motion measure.
    """
    delta_view = delta_images(t_minus, t_now, t_plus)
    retval, delta_view = cv2.threshold(delta_view, 16, 255, 3)
    cv2.normalize(delta_view, delta_view, 0, 255, cv2.NORM_MINMAX)
    img_count_view = cv2.cvtColor(delta_view, cv2.COLOR_RGB2GRAY)
    # Dead stores removed: `dst` (a blend with the module-level `screen`)
    # and `delta_count_last` were assigned but never used or returned.
    return cv2.countNonZero(img_count_view)
Example #24
Source File: motion.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def motion_detection(t_minus, t_now, t_plus):
    """Return the number of changed pixels across three consecutive frames.

    The three-frame difference is thresholded, normalized, converted to
    grayscale, and its non-zero pixels counted as the motion measure.
    """
    delta_view = delta_images(t_minus, t_now, t_plus)
    retval, delta_view = cv2.threshold(delta_view, 16, 255, 3)
    cv2.normalize(delta_view, delta_view, 0, 255, cv2.NORM_MINMAX)
    img_count_view = cv2.cvtColor(delta_view, cv2.COLOR_RGB2GRAY)
    # Dead stores removed: `dst` (a blend with the module-level `screen`)
    # and `delta_count_last` were assigned but never used or returned.
    return cv2.countNonZero(img_count_view)
Example #25
Source File: motion.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def motion_detection(t_minus, t_now, t_plus):
    """Return the number of changed pixels across three consecutive frames.

    The three-frame difference is thresholded, normalized, converted to
    grayscale, and its non-zero pixels counted as the motion measure.
    """
    delta_view = delta_images(t_minus, t_now, t_plus)
    retval, delta_view = cv2.threshold(delta_view, 16, 255, 3)
    cv2.normalize(delta_view, delta_view, 0, 255, cv2.NORM_MINMAX)
    img_count_view = cv2.cvtColor(delta_view, cv2.COLOR_RGB2GRAY)
    # Dead stores removed: `dst` (a blend with the module-level `screen`)
    # and `delta_count_last` were assigned but never used or returned.
    return cv2.countNonZero(img_count_view)
Example #26
Source File: motion_detection.py From pynvr with BSD 3-Clause "New" or "Revised" License | 5 votes |
def motionDetected(self, new_frame):
    """Return True when enough pixels changed between recent frames.

    Supports an optional three-frame mode (self.multiFrameDetection) that
    keeps two history frames; the diff image is thresholded, dilated and
    eroded, and its white-pixel count compared to self.threshold.
    """
    frame = self.preprocessInputFrame(new_frame)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    gray = cv.GaussianBlur(gray, (11, 11), 0)
    # Warm-up: fill the history frames before any detection is possible.
    if (self.multiFrameDetection) and (self.prevPrevFrame is None):
        self.prevPrevFrame = gray
        return False
    if self.prevFrame is None:
        self.prevFrame = gray
        return False
    cv.normalize(gray, gray, 0, 255, cv.NORM_MINMAX)
    frameDiff = self.diffImg(self.prevPrevFrame, self.prevFrame, gray)
    ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)
    th1 = cv.dilate(th1, None, iterations=8)
    th1 = cv.erode(th1, None, iterations=4)
    delta_count = cv.countNonZero(th1)
    # NOTE(review): the source this was extracted from had ambiguous
    # formatting here; history appears to be advanced both before the
    # threshold test and again on the success path — confirm upstream.
    if self.multiFrameDetection:
        self.prevPrevFrame = self.prevFrame
    self.prevFrame = gray
    if delta_count < self.threshold:
        return False
    if self.multiFrameDetection:
        self.prevPrevFrame = self.prevFrame
    self.prevFrame = gray
    self.updateMotionDetectionDts()
    return True
Example #27
Source File: motion_detection.py From pynvr with BSD 3-Clause "New" or "Revised" License | 5 votes |
def motionDetected(self, new_frame):
    """Return True when the three-frame difference exceeds the threshold.

    Keeps two history frames (prevPrevFrame, prevFrame), diffs them
    against the current frame, thresholds and cleans the result, and
    compares the white-pixel count to self.threshold.
    """
    frame = self.preprocessInputFrame(new_frame)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    gray = cv.GaussianBlur(gray, (11, 11), 0)
    # Warm-up: fill both history frames before detecting anything.
    if self.prevPrevFrame is None:
        self.prevPrevFrame = gray
        return False
    if self.prevFrame is None:
        self.prevFrame = gray
        return False
    cv.normalize(gray, gray, 0, 255, cv.NORM_MINMAX)
    frameDiff = self.diffImg(self.prevPrevFrame, self.prevFrame, gray)
    ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)
    # BUG FIX: cv.dilate/cv.erode return the processed image; the original
    # discarded both return values, making the calls no-ops (the sibling
    # multi-frame detector in this module assigns them).
    th1 = cv.dilate(th1, None, iterations=15)
    th1 = cv.erode(th1, None, iterations=1)
    delta_count = cv.countNonZero(th1)
    cv.imshow("frame_th1", th1)
    # Advance the frame history.
    self.prevPrevFrame = self.prevFrame
    self.prevFrame = gray
    ret = delta_count > self.threshold
    if ret:
        self.updateMotionDetectionDts()
    return ret
Example #28
Source File: process.py From deepdiy with MIT License | 4 votes |
def process(frame):
    """Locate the dark ball region inside the bright-field area of a frame.

    Returns (annotated image, ellipse angle): the frame resized to
    256x256 with the fitted ellipse drawn in green, and the orientation
    of that ellipse.
    """
    # step 1: normalize, resize and convert to grayscale
    frame = nomalize_to_8_bit_BGR(frame)
    frame = cv2.resize(frame, (256, 256))
    img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # median blur suppresses salt-and-pepper noise
    MEDIUM_BLUR_RADIUM = 5
    img_blur = cv2.medianBlur(img_gray, MEDIUM_BLUR_RADIUM)
    # Otsu threshold to a binary image
    _, thresh = cv2.threshold(img_blur, 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # step 2: collect masks of candidate bright regions
    img = thresh.copy()
    bright_area_masks = []
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        candidate_mask = np.zeros(img.shape, np.uint8)
        # use the convex hull in case a ball touches the bright-field edge
        hull = cv2.convexHull(cnt)
        cv2.drawContours(candidate_mask, [hull], 0, 255, -1)
        # keep regions whose mean intensity is bright (> 128)
        mean_val = cv2.mean(img, mask=candidate_mask)[0]
        if mean_val > 128:
            bright_area_masks.append(candidate_mask)
    # the bright field is the region with the highest total brightness
    idx = np.argmax([cv2.mean(img, mask=area)[0] * cv2.countNonZero(area)
                     for area in bright_area_masks])
    bright_field_mask = bright_area_masks[idx]
    # step 3: find dark blobs inside the bright field
    img_balls = thresh + 255 - bright_field_mask  # whiten everything outside
    # invert so the dark blobs become foreground contours
    contours, hierarchy = cv2.findContours(255 - img_balls, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    cnt = max(contours, key=cv2.contourArea)      # largest blob wins
    blob_mask = np.zeros(img_balls.shape, np.uint8)
    cv2.drawContours(blob_mask, [cnt], 0, 255, -1)
    # fit and draw an ellipse around the blob
    ellipse = cv2.fitEllipse(cnt)
    img_label = cv2.ellipse(frame, ellipse, (0, 255, 0), 2)
    return img_label, ellipse[2]
Example #29
Source File: augmentation.py From synthetic-occlusion with GNU General Public License v3.0 | 4 votes |
def load_occluders(pascal_voc_root_path):
    """Collect RGBA occluder cutouts from a PASCAL VOC dataset.

    Walks the VOC annotations, keeps segmented images, extracts each
    qualifying object's pixels plus a soft-edged alpha mask, downscales
    the cutouts, and returns them as a list of RGBA arrays.
    """
    occluders = []
    structuring_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
    annotation_paths = list_filepaths(
        os.path.join(pascal_voc_root_path, 'Annotations'))
    for annotation_path in annotation_paths:
        xml_root = xml.etree.ElementTree.parse(annotation_path).getroot()
        # only images with instance segmentation masks are usable
        if xml_root.find('segmented').text == '0':
            continue
        boxes = []
        for i_obj, obj in enumerate(xml_root.findall('object')):
            is_person = (obj.find('name').text == 'person')
            is_difficult = (obj.find('difficult').text != '0')
            is_truncated = (obj.find('truncated').text != '0')
            # keep clean non-person objects as occluders
            if not is_person and not is_difficult and not is_truncated:
                bndbox = obj.find('bndbox')
                box = [int(bndbox.find(s).text)
                       for s in ['xmin', 'ymin', 'xmax', 'ymax']]
                boxes.append((i_obj, box))
        if not boxes:
            continue
        im_filename = xml_root.find('filename').text
        seg_filename = im_filename.replace('jpg', 'png')
        im_path = os.path.join(pascal_voc_root_path, 'JPEGImages', im_filename)
        seg_path = os.path.join(
            pascal_voc_root_path, 'SegmentationObject', seg_filename)
        im = np.asarray(PIL.Image.open(im_path))
        labels = np.asarray(PIL.Image.open(seg_path))
        for i_obj, (xmin, ymin, xmax, ymax) in boxes:
            # segmentation label i_obj+1 marks this object's pixels
            object_mask = (labels[ymin:ymax, xmin:xmax] == i_obj + 1).astype(np.uint8) * 255
            object_image = im[ymin:ymax, xmin:xmax]
            # Ignore small objects
            if cv2.countNonZero(object_mask) < 500:
                continue
            # Reduce the opacity of the mask along the border for smoother blending
            eroded = cv2.erode(object_mask, structuring_element)
            object_mask[eroded < object_mask] = 192
            object_with_mask = np.concatenate(
                [object_image, object_mask[..., np.newaxis]], axis=-1)
            # Downscale for efficiency
            occluders.append(resize_by_factor(object_with_mask, 0.5))
    return occluders
Example #30
Source File: steam.py From Yugioh-bot with MIT License | 4 votes |
def check_if_battle(self, img):
    """Return True when the configured page area is saturated with white.

    Crops the predefined page area and counts near-white pixels; more
    than 50*200 lit pixels means the battle screen is showing.
    """
    region = crop_image(np.array(img), **self.predefined.page_area)
    white_lo = np.array([250, 250, 250], np.uint8)
    white_hi = np.array([255, 255, 255], np.uint8)
    white_mask = cv2.inRange(region, white_lo, white_hi)
    return cv2.countNonZero(white_mask) > (50 * 200)