Python cv2.subtract() Examples
The following are 30 code examples of cv2.subtract(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
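Unlike plain NumPy subtraction, cv2.subtract performs saturating arithmetic: for uint8 images, results below 0 clamp to 0 and results above 255 clamp to 255 instead of wrapping around. Many of the examples below rely on this. A minimal sketch of the difference (the snippets on this page are excerpts, so they assume the usual import cv2 / import numpy as np at the top of their source files):

import cv2
import numpy as np

a = np.uint8([[10]])
b = np.uint8([[20]])

print(cv2.subtract(a, b))  # [[0]]   -> saturates at 0
print(a - b)               # [[246]] -> plain uint8 arithmetic wraps around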
Example #1
Source File: line_detect_2.py From crop_row_detection with GNU General Public License v3.0 | 7 votes |
def skeletonize(image_in):
    '''Inputs a grayscale image and outputs a binary skeleton image'''
    size = np.size(image_in)
    skel = np.zeros(image_in.shape, np.uint8)

    ret, image_edit = cv2.threshold(image_in, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False

    while not done:
        eroded = cv2.erode(image_edit, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(image_edit, temp)
        skel = cv2.bitwise_or(skel, temp)
        image_edit = eroded.copy()

        zeros = size - cv2.countNonZero(image_edit)
        if zeros == size:
            done = True

    return skel
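A minimal usage sketch for the function above (the file name is hypothetical); the input should be a grayscale image, which the function binarizes with Otsu's threshold before thinning:

import cv2

image = cv2.imread('crop_rows.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
skeleton = skeletonize(image)
cv2.imwrite('crop_rows_skeleton.png', skeleton)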
Example #2
Source File: Preprocess.py From ALPR-Indonesia with MIT License | 7 votes |
def maximizeContrast(imgGrayscale):
    height, width = imgGrayscale.shape

    imgTopHat = np.zeros((height, width, 1), np.uint8)
    imgBlackHat = np.zeros((height, width, 1), np.uint8)

    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT, structuringElement)
    imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT, structuringElement)

    imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)
    imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

    return imgGrayscalePlusTopHatMinusBlackHat
# end function
Example #3
Source File: functions.py From malayalam-character-recognition with MIT License | 7 votes |
def skeletize(img):
    size = np.size(img)
    skel = np.zeros(img.shape, np.uint8)

    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False

    while not done:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img = eroded.copy()

        zeroes = size - cv2.countNonZero(img)
        if zeroes == size:
            done = True

    return skel
Example #4
Source File: single_roi_tracker.py From ethoscope with GNU General Public License v3.0 | 7 votes |
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
    if self._buff_grey is None:
        self._buff_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if mask is None:
            mask = np.ones_like(self._buff_grey) * 255

    cv2.cvtColor(img, cv2.COLOR_BGR2GRAY, self._buff_grey)
    cv2.erode(self._buff_grey, self._erode_kern, dst=self._buff_grey)

    if darker_fg:
        cv2.subtract(255, self._buff_grey, self._buff_grey)

    if mask is not None:
        cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)

    return self._buff_grey
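Note the scalar first operand: cv2.subtract(255, img, img) computes 255 - pixel in place, inverting the image so that a darker foreground becomes brighter. A standalone sketch:

import cv2
import numpy as np

gray = np.uint8([[0, 100, 255]])
print(cv2.subtract(255, gray))  # [[255 155   0]]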
Example #5
Source File: run.py From Auto-Lianliankan with Apache License 2.0 | 6 votes |
def getAllSquareRecord(all_square_list, types):
    print("Comparing all squares against the stored types, transposing into a numeric matrix...")
    record = []  # 2-D array for the whole record
    line = []    # one row of the record
    for square in all_square_list:         # compare every square with all of the saved types
        num = 0
        for type in types:                 # all types
            res = cv2.subtract(square, type)  # do the comparison
            if not np.any(res):            # if the two images are identical
                line.append(num)           # record this type's number into the row
                break                      # and break out of the loop
            num += 1                       # no match, so move on to the next type number
        if len(line) == V_NUM:             # once this row has 11 entries, start a new row
            record.append(line)
            line = []
    print(record)
    return record

# automatic elimination
Example #6
Source File: cvutils.py From ai-smarthome with BSD 2-Clause "Simplified" License | 6 votes |
def calculate_diff(self, frame):
    if self.avgframe is not None:
        subframe = cv2.subtract(frame, self.avgframe)
        grayscaled = cv2.cvtColor(subframe, cv2.COLOR_BGR2GRAY)
        retval2, th1 = cv2.threshold(grayscaled, 35, 255, cv2.THRESH_BINARY)
        self.avgframe = cv2.addWeighted(frame, 0.1, self.avgframe, 0.9, 0.0)
        th1 = th1 / 255
        w, h = th1.shape
        sum = cv2.sumElems(th1)[0] / (w * h)
        return sum
    else:
        self.avgframe = frame
        return 0.0

# Test the code.
Example #7
Source File: photometric.py From mmcv with Apache License 2.0 | 6 votes |
def imnormalize_(img, mean, std, to_rgb=True):
    """Inplace normalize an image with mean and std.

    Args:
        img (ndarray): Image to be normalized.
        mean (ndarray): The mean to be used for normalize.
        std (ndarray): The std to be used for normalize.
        to_rgb (bool): Whether to convert to rgb.

    Returns:
        ndarray: The normalized image.
    """
    # cv2 inplace normalization does not accept uint8
    assert img.dtype != np.uint8
    mean = np.float64(mean.reshape(1, -1))
    stdinv = 1 / np.float64(std.reshape(1, -1))
    if to_rgb:
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)  # inplace
    cv2.subtract(img, mean, img)  # inplace
    cv2.multiply(img, stdinv, img)  # inplace
    return img
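A usage sketch for the helper above; the statistics and file name are hypothetical (ImageNet-style values are a common choice), and the image must be converted out of uint8 first, as the assert enforces:

import cv2
import numpy as np

mean = np.array([123.675, 116.28, 103.53])  # hypothetical per-channel stats
std = np.array([58.395, 57.12, 57.375])

img = cv2.imread('sample.jpg').astype(np.float32)  # hypothetical file; must not stay uint8
img = imnormalize_(img, mean, std, to_rgb=True)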
Example #8
Source File: BackgroundSubtractor.py From tierpsy-tracker with MIT License | 6 votes |
def subtract_bgnd(self, image):
    # new method using bitwise not
    def _remove_func(_img, _func, _bg):
        # the reason to use opencv2 instead of numpy is to avoid buffer overflow
        # https://stackoverflow.com/questions/45817037/opencv-image-subtraction-vs-numpy-subtraction/45817868
        new_img = np.zeros_like(_img)  # maybe can do this in place
        if image.ndim == 2:
            _func(_img, _bg, new_img)
        else:
            for ii, this_frame in enumerate(_img):
                _func(this_frame, _bg, new_img[ii])
        return new_img

    bg = self.bgnd.astype(np.uint8)

    if self.is_light_background:
        notbg = ~bg
        ss = _remove_func(image, cv2.add, notbg)
    else:  # fluorescence
        ss = _remove_func(image, cv2.subtract, bg)

    ss = np.clip(ss, 1, 255)
    return ss
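The bitwise-not trick works because, under uint8 saturation, img + (255 - bg) equals 255 - max(bg - img, 0): the light-background difference with its polarity preserved. A quick check (a sketch, not from the project):

import cv2
import numpy as np

bg = np.uint8([[200, 50]])
img = np.uint8([[180, 60]])

lhs = cv2.add(img, cv2.bitwise_not(bg))       # img + (255 - bg), saturated
rhs = cv2.bitwise_not(cv2.subtract(bg, img))  # 255 - max(bg - img, 0)
print(lhs, rhs)  # both [[235 255]]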
Example #9
Source File: barcodeD&D_zbar.py From Barcode-Detection-and-Decoding with Apache License 2.0 | 6 votes |
def preprocess(image):
    # load the image
    image = cv2.imread(args["image"])
    # resize image
    image = cv2.resize(image, None, fx=0.7, fy=0.7, interpolation=cv2.INTER_CUBIC)
    # convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # calculate x & y gradient
    gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
    # subtract the y-gradient from the x-gradient
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)
    # blur the image
    blurred = cv2.blur(gradient, (3, 3))
    # threshold the image
    (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
    # note: this reassignment discards the threshold result and returns the grayscale image instead
    thresh = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return thresh
Example #10
Source File: pysift.py From PythonSIFT with MIT License | 6 votes |
def generateDoGImages(gaussian_images):
    """Generate Difference-of-Gaussians image pyramid
    """
    logger.debug('Generating Difference-of-Gaussian images...')
    dog_images = []

    for gaussian_images_in_octave in gaussian_images:
        dog_images_in_octave = []
        for first_image, second_image in zip(gaussian_images_in_octave, gaussian_images_in_octave[1:]):
            # ordinary subtraction will not work because the images are unsigned integers
            dog_images_in_octave.append(subtract(second_image, first_image))
        dog_images.append(dog_images_in_octave)
    return array(dog_images)

###############################
# Scale-space extrema related #
###############################
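The quoted comment matters: with uint8 inputs, saturating subtraction would clip all negative DoG responses to zero, so the pyramid is built on float images (subtract here is presumably cv2.subtract imported at the top of pysift.py). A minimal standalone sketch of the same idea, with a hypothetical file and scale steps:

import cv2
import numpy as np

image = cv2.imread('scene.jpg', cv2.IMREAD_GRAYSCALE).astype(np.float32)  # hypothetical file

sigmas = [1.0, 1.6, 2.56]  # hypothetical scale steps
blurred = [cv2.GaussianBlur(image, (0, 0), s) for s in sigmas]
dog = [cv2.subtract(b2, b1) for b1, b2 in zip(blurred, blurred[1:])]  # keeps negative responses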
Example #11
Source File: getFoodContourMorph.py From tierpsy-tracker with MIT License | 6 votes |
def skeletonize(img):
    """ OpenCV function to return a skeletonized version of img, a Mat object"""
    # hat tip to http://felix.abecassis.me/2011/09/opencv-morphological-skeleton/

    img = img.copy()  # don't clobber original
    skel = img.copy()
    skel[:, :] = 0

    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

    while True:
        eroded = cv2.morphologyEx(img, cv2.MORPH_ERODE, kernel)
        temp = cv2.morphologyEx(eroded, cv2.MORPH_DILATE, kernel)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img[:, :] = eroded[:, :]
        if cv2.countNonZero(img) == 0:
            break

    return skel
Example #12
Source File: helloopencvtests.py From Mastering-OpenCV-4-with-Python with MIT License | 6 votes |
def test_write_image_to_disk():
    """Test for write_image_to_disk
    """
    print("testing write_image_to_disk")
    # load the image from disk
    bgr_image = load_image("images/logo.png")
    # write image to disk
    write_image_to_disk("images/temp.png", bgr_image)
    # load the image temp from disk
    temp = load_image("images/temp.png")
    # now we check that the two images are equal
    assert bgr_image.shape == temp.shape
    difference = cv2.subtract(bgr_image, temp)
    b, g, r = cv2.split(difference)
    assert cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0
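One caveat, since cv2.subtract saturates negatives to zero: a one-sided difference can miss pixels where the second image is brighter. For a lossless PNG round-trip the test above is fine, but a direction-independent check (a sketch, not part of the original test) would use cv2.absdiff:

import cv2
import numpy as np

def images_identical(img_a, img_b):
    # absdiff avoids the one-sided saturation of cv2.subtract
    return img_a.shape == img_b.shape and not np.any(cv2.absdiff(img_a, img_b))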
Example #13
Source File: ImageFusion.py From ImageStitch with MIT License | 5 votes |
def LaplacianPyramid(self, img, level):
    gp = self.GaussianPyramid(img, level)
    lp = [gp[level - 1]]
    # note: when i reaches 0, gp[i - 1] wraps around to gp[-1] (Python negative indexing)
    for i in range(level - 1, -1, -1):
        GE = cv2.pyrUp(gp[i])
        GE = cv2.resize(GE, (gp[i - 1].shape[1], gp[i - 1].shape[0]), interpolation=cv2.INTER_CUBIC)
        L = cv2.subtract(gp[i - 1], GE)
        lp.append(L)
    return lp, gp
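For context, a Laplacian pyramid built this way can be inverted by upsampling and adding the layers back. A minimal sketch, assuming lp[0] is the coarsest Gaussian level as returned above (with uint8 saturation the round trip is only approximate):

import cv2

def reconstruct(lp):
    image = lp[0]  # coarsest level
    for layer in lp[1:]:
        image = cv2.pyrUp(image)
        image = cv2.resize(image, (layer.shape[1], layer.shape[0]), interpolation=cv2.INTER_CUBIC)
        image = cv2.add(image, layer)  # undo the cv2.subtract from construction
    return image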
Example #14
Source File: BackgroundSubtractor.py From tierpsy-tracker with MIT License | 5 votes |
def _subtract_bgnd_from_mask(self, img):
    ss = np.zeros_like(img)
    if self.is_light_background:
        cv2.subtract(self.bgnd, img, ss)
    else:
        cv2.subtract(img, self.bgnd, ss)
    ss[img == 0] = 0
    return ss
Example #15
Source File: fake_util.py From CRAFT_keras with Apache License 2.0 | 5 votes |
def watershed(src):
    """
    Performs a marker-based image segmentation using the watershed algorithm.
    :param src: 8-bit 1-channel image.
    :return: 32-bit single-channel image (map) of markers.
    """
    # cv2.imwrite('{}.png'.format(np.random.randint(1000)), src)
    gray = src.copy()
    img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    # h, w = gray.shape[:2]
    # block_size = (min(h, w) // 4 + 1) * 2 + 1
    # thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block_size, 0)
    _ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    # dist_transform = opening & gray
    # cv2.imshow('dist_transform', dist_transform)
    # _ret, sure_bg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY_INV)
    _ret, sure_fg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY)

    # Finding unknown region
    # sure_bg = np.uint8(sure_bg)
    sure_fg = np.uint8(sure_fg)
    # cv2.imshow('sure_fg', sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, marker_map = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    marker_map = marker_map + 1
    # Now, mark the region of unknown with zero
    marker_map[unknown == 255] = 0

    marker_map = cv2.watershed(img, marker_map)
    return marker_map
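Here cv2.subtract acts as a set difference on the two binary masks: the generous background estimate minus the strict foreground estimate leaves only the uncertain rim that cv2.watershed must resolve. A toy illustration:

import cv2
import numpy as np

sure_bg = np.uint8([[255, 255, 255, 0]])  # dilated mask: generous estimate
sure_fg = np.uint8([[0, 255, 0, 0]])      # thresholded distance transform: strict estimate
print(cv2.subtract(sure_bg, sure_fg))     # [[255   0 255   0]] -> the unknown rim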
Example #16
Source File: mask_morphology.py From NucleiDetectron with Apache License 2.0 | 5 votes |
def opencv_segmentation(mask, kernel=k_3x3, k=3):
    # noise removal
    opening = cv.morphologyEx(mask, cv.MORPH_OPEN, kernel, iterations=k)

    # sure background area
    sure_bg = cv.dilate(opening, kernel, iterations=k)

    # Finding sure foreground area
    dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)
    ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown > 0] = 0

    labels_ws = cv.watershed(cv.cvtColor(mask, cv.COLOR_GRAY2RGB), markers)

    if labels_ws.max() - 1 < 2:
        return [mask], labels_ws

    res_masks = []
    for idx in range(2, labels_ws.max() + 1):
        m = labels_ws == idx
        if m.sum() > 5:
            m = cv.dilate(m.astype(np.uint8), kernel, iterations=1)
            res_masks.append(m)
    return res_masks, labels_ws
Example #17
Source File: colordescriptor.py From flask-image-search with MIT License | 5 votes |
def describe(self, image):
    # convert the image to the HSV color space and initialize
    # the features used to quantify the image
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    features = []

    # grab the dimensions and compute the center of the image
    (h, w) = image.shape[:2]
    (cX, cY) = (int(w * 0.5), int(h * 0.5))

    # divide the image into four rectangles/segments (top-left,
    # top-right, bottom-right, bottom-left)
    segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h), (0, cX, cY, h)]

    # construct an elliptical mask representing the center of the
    # image (integer division so cv2.ellipse receives integer axes)
    (axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
    ellipMask = np.zeros(image.shape[:2], dtype="uint8")
    cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)

    # loop over the segments
    for (startX, endX, startY, endY) in segments:
        # construct a mask for each corner of the image, subtracting
        # the elliptical center from it
        cornerMask = np.zeros(image.shape[:2], dtype="uint8")
        cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
        cornerMask = cv2.subtract(cornerMask, ellipMask)

        # extract a color histogram from the image, then update the
        # feature vector
        hist = self.histogram(image, cornerMask)
        features.extend(hist)

    # extract a color histogram from the elliptical region and
    # update the feature vector
    hist = self.histogram(image, ellipMask)
    features.extend(hist)

    # return the feature vector
    return features
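Because both masks hold only 0 or 255, the cv2.subtract call behaves as a set difference: the corner mask keeps only rectangle pixels that fall outside the central ellipse. A standalone sketch:

import cv2
import numpy as np

rect = np.zeros((100, 100), dtype="uint8")
cv2.rectangle(rect, (0, 0), (50, 50), 255, -1)

ellip = np.zeros((100, 100), dtype="uint8")
cv2.ellipse(ellip, (50, 50), (30, 30), 0, 0, 360, 255, -1)

corner = cv2.subtract(rect, ellip)  # 255 only where rect is set and ellip is not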
Example #18
Source File: mqtt-cam.py From ai-smarthome with BSD 2-Clause "Simplified" License | 5 votes |
def diff_filter(self, frame, avgframe):
    subframe = cv2.subtract(frame, avgframe)
    grayscaled = cv2.cvtColor(subframe, cv2.COLOR_BGR2GRAY)
    retval2, th1 = cv2.threshold(grayscaled, 35, 255, cv2.THRESH_BINARY)
    avgframe = cv2.addWeighted(frame, 0.1, avgframe, 0.9, 0.0)
    if self.show:
        cv2.imshow('Treshold diff', th1)
    th1 = th1 / 255
    w, h = th1.shape
    sum = cv2.sumElems(th1)[0] / (w * h)
    return avgframe, sum
Example #19
Source File: mqtt_cam.py From ai-smarthome with BSD 2-Clause "Simplified" License | 5 votes |
def diff_filter(self, frame, avgframe):
    subframe = cv2.subtract(frame, avgframe)
    grayscaled = cv2.cvtColor(subframe, cv2.COLOR_BGR2GRAY)
    retval2, th1 = cv2.threshold(grayscaled, 35, 255, cv2.THRESH_BINARY)
    avgframe = cv2.addWeighted(frame, 0.1, avgframe, 0.9, 0.0)
    if self.show:
        cv2.imshow('Treshold diff', th1)
    th1 = th1 / 255
    w, h = th1.shape
    sum = cv2.sumElems(th1)[0] / (w * h)
    return avgframe, sum
Example #20
Source File: amplify_color.py From Heart-rate-measurement-using-camera with Apache License 2.0 | 5 votes |
def build_laplacian_pyramid(self, src, levels=3):
    gaussianPyramid = self.build_gaussian_pyramid(src, levels)
    pyramid = []
    for i in range(levels, 0, -1):
        GE = cv2.pyrUp(gaussianPyramid[i])
        L = cv2.subtract(gaussianPyramid[i - 1], GE)
        pyramid.append(L)
    return pyramid

# reconstruct video from laplacian pyramid
Example #21
Source File: run.py From Auto-Lianliankan with Apache License 2.0 | 5 votes |
def isImageExist(img, img_list):
    for existed_img in img_list:
        b = np.subtract(existed_img, img)  # compare the image arrays; returns the per-pixel differences
        if not np.any(b):                  # if every entry is 0, the two images are exactly the same
            return True
        else:
            continue
    return False

# get all of the square types
Example #22
Source File: convenience.py From imutils with MIT License | 5 votes |
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    # determine the area (i.e. total number of pixels in the image),
    # initialize the output skeletonized image, and construct the
    # morphological structuring element
    area = image.shape[0] * image.shape[1]
    skeleton = np.zeros(image.shape, dtype="uint8")
    elem = cv2.getStructuringElement(structuring, size)

    # keep looping until the erosions remove all pixels from the
    # image
    while True:
        # erode and dilate the image using the structuring element
        eroded = cv2.erode(image, elem)
        temp = cv2.dilate(eroded, elem)

        # subtract the temporary image from the original, eroded
        # image, then take the bitwise 'or' between the skeleton
        # and the temporary image
        temp = cv2.subtract(image, temp)
        skeleton = cv2.bitwise_or(skeleton, temp)
        image = eroded.copy()

        # if there are no more 'white' pixels in the image, then
        # break from the loop
        if area == area - cv2.countNonZero(image):
            break

    # return the skeletonized image
    return skeleton
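A minimal usage sketch for the helper above (the file name is hypothetical); the input is expected to be a grayscale image with a bright foreground:

import cv2

gray = cv2.imread('glyph.png', cv2.IMREAD_GRAYSCALE)  # hypothetical file
skeleton = skeletonize(gray, size=(3, 3))
cv2.imwrite('glyph_skeleton.png', skeleton)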
Example #23
Source File: Pyramids.py From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License | 5 votes |
def main():
    image = cv2.imread("../data/4.2.03.tiff", 1)

    first_layer_down = cv2.pyrDown(image)
    first_layer_up = cv2.pyrUp(first_layer_down)

    laplasian = cv2.subtract(image, first_layer_up)

    cv2.imshow("Orignal Image", image)
    cv2.imshow("Laplasian Image", laplasian)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #24
Source File: multi_fly_tracker.py From ethoscope with GNU General Public License v3.0 | 5 votes |
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
    blur_rad = int(self._object_expected_size * np.max(img.shape) / 2.0)

    if blur_rad % 2 == 0:
        blur_rad += 1

    if self._buff_grey is None:
        self._buff_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if mask is None:
            mask = np.ones_like(self._buff_grey) * 255

    cv2.cvtColor(img, cv2.COLOR_BGR2GRAY, self._buff_grey)
    # cv2.imshow("dbg",self._buff_grey)
    cv2.GaussianBlur(self._buff_grey, (blur_rad, blur_rad), 1.2, self._buff_grey)
    if darker_fg:
        cv2.subtract(255, self._buff_grey, self._buff_grey)

    mean = cv2.mean(self._buff_grey, mask)
    scale = 128. / mean[0]
    cv2.multiply(self._buff_grey, scale, dst=self._buff_grey)

    if mask is not None:
        cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)

    return self._buff_grey
Example #25
Source File: adaptive_bg_tracker.py From ethoscope with GNU General Public License v3.0 | 5 votes |
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
    blur_rad = int(self._object_expected_size * np.max(img.shape) / 2.0)

    if blur_rad % 2 == 0:
        blur_rad += 1

    if self._buff_grey is None:
        self._buff_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if mask is None:
            mask = np.ones_like(self._buff_grey) * 255

    cv2.cvtColor(img, cv2.COLOR_BGR2GRAY, self._buff_grey)
    # cv2.imshow("dbg",self._buff_grey)
    cv2.GaussianBlur(self._buff_grey, (blur_rad, blur_rad), 1.2, self._buff_grey)
    if darker_fg:
        cv2.subtract(255, self._buff_grey, self._buff_grey)

    mean = cv2.mean(self._buff_grey, mask)
    scale = 128. / mean[0]
    cv2.multiply(self._buff_grey, scale, dst=self._buff_grey)

    if mask is not None:
        cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)

    return self._buff_grey
Example #26
Source File: adaptive_bg_tracker.py From ethoscope with GNU General Public License v3.0 | 5 votes |
def distance(self, features, time):
    if time - self._last_updated_time > self._max_unupdated_duration:
        logging.warning("FG model not updated for too long. Resetting.")
        self.__init__(self._history_length)
        return 0

    if not self._is_ready:
        last_row = self._ring_buff_idx + 1
    else:
        last_row = self._history_length

    means = np.mean(self._ring_buff[:last_row], 0)

    np.subtract(self._ring_buff[:last_row], means, self._std_buff[:last_row])
    np.abs(self._std_buff[:last_row], self._std_buff[:last_row])

    stds = np.mean(self._std_buff[:last_row], 0)
    if (stds == 0).any():
        return 0

    a = 1 / (stds * self._sqrt_2_pi)
    b = np.exp(- (features - means) ** 2 / (2 * stds ** 2))
    likelihoods = a * b

    if np.any(likelihoods == 0):
        return 0
    # print features, means
    logls = np.sum(np.log10(likelihoods)) / len(likelihoods)
    return -1.0 * logls
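In formula form, with mu_i and sigma_i the per-feature history mean and mean absolute deviation, the distance returned above is the negated average log10 Gaussian likelihood of the feature vector:

d(x) = -\frac{1}{n} \sum_{i=1}^{n} \log_{10}\!\left[ \frac{1}{\sigma_i \sqrt{2\pi}} \exp\!\left( -\frac{(x_i - \mu_i)^2}{2\sigma_i^2} \right) \right]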
Example #27
Source File: colordescriptor.py From image-search-engine with MIT License | 5 votes |
def describe(self, image):
    # convert the image to the HSV color space and initialize
    # the features used to quantify the image
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    features = []

    # grab the dimensions and compute the center of the image
    (h, w) = image.shape[:2]
    (cX, cY) = (int(w * 0.5), int(h * 0.5))

    # divide the image into four rectangles/segments (top-left,
    # top-right, bottom-right, bottom-left)
    segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h), (0, cX, cY, h)]

    # construct an elliptical mask representing the center of the
    # image
    (axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
    ellipMask = np.zeros(image.shape[:2], dtype="uint8")
    cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)

    # loop over the segments
    for (startX, endX, startY, endY) in segments:
        # construct a mask for each corner of the image, subtracting
        # the elliptical center from it
        cornerMask = np.zeros(image.shape[:2], dtype="uint8")
        cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
        cornerMask = cv2.subtract(cornerMask, ellipMask)

        # extract a color histogram from the image, then update the
        # feature vector
        hist = self.histogram(image, cornerMask)
        features.extend(hist)

    # extract a color histogram from the elliptical region and
    # update the feature vector
    hist = self.histogram(image, ellipMask)
    features.extend(hist)

    # return the feature vector
    return features
Example #28
Source File: cut_part.py From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License | 5 votes |
def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'):  # binarize the grayscale image; the last two parameters are for debugging
    """
    Compute the gradient and binarize.
    :param img_blurred: the filtered image
    :param image_name: image name, for testing
    :param save_path: save path, for testing
    :return: the binarized image
    """
    gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0)
    gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)
    img_gradient = cv2.subtract(gradX, gradY)
    img_gradient = cv2.convertScaleAbs(img_gradient)  # Sobel operator, computes the gradient; a Canny operator could be substituted

    # changed to an adaptive threshold here; it did not seem to help
    img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3)
    # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh)  # binarization; threshold not tuned yet

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)
    img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel)
    img_closed = cv2.erode(img_closed, None, iterations=9)
    img_closed = cv2.dilate(img_closed, None, iterations=9)  # erosion and dilation
    # after reducing the kernel size and increasing the erode/dilate iterations, the error rate dropped sharply
    return img_closed
Example #29
Source File: watershed.py From python-image-processing with MIT License | 4 votes |
def watershed(src):
    # Change color to gray scale
    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

    # Use the Otsu's binarization
    thresh, bin_img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # print(thresh)  # print threshold

    # Noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(bin_img, cv2.MORPH_OPEN, kernel, iterations=2)

    # Sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    # Apply watershed
    markers = cv2.watershed(src, markers)
    src[markers == -1] = [255, 0, 0]

    # Check marker (If check markers, please import matplotlib)
    # plt.imshow(markers)
    # plt.show()

    # Check markers data
    # print(np.unique(markers, return_counts=True))

    return markers, src
Example #30
Source File: adaptive_bg_tracker.py From ethoscope with GNU General Public License v3.0 | 4 votes |
def update(self, img_t, t, fg_mask=None):
    dt = float(t - self.last_t)
    if dt < 0:
        # raise EthoscopeException("Negative time interval between two consecutive frames")
        raise NoPositionError("Negative time interval between two consecutive frames")

    # clip the half life to possible value:
    self._current_half_life = np.clip(self._current_half_life, self._min_half_life, self._max_half_life)

    # ensure preallocated buffers exist. otherwise, initialise them
    if self._bg_mean is None:
        self._bg_mean = img_t.astype(np.float32)
        # self._bg_sd = np.zeros_like(img_t)
        # self._bg_sd.fill(128)

    if self._buff_alpha_matrix is None:
        self._buff_alpha_matrix = np.ones_like(img_t, dtype=np.float32)

    # the learning rate, alpha, is an exponential function of half life;
    # it corresponds to how much the present frame should account for the background
    lam = np.log(2) / self._current_half_life
    # how much the current frame should be accounted for
    alpha = 1 - np.exp(-lam * dt)

    # set up a matrix of learning rates. it is 0 where the foreground map is true
    self._buff_alpha_matrix.fill(alpha)
    if fg_mask is not None:
        cv2.dilate(fg_mask, None, fg_mask)
        cv2.subtract(self._buff_alpha_matrix, self._buff_alpha_matrix, self._buff_alpha_matrix, mask=fg_mask)

    if self._buff_invert_alpha_mat is None:
        self._buff_invert_alpha_mat = 1 - self._buff_alpha_matrix
    else:
        np.subtract(1, self._buff_alpha_matrix, self._buff_invert_alpha_mat)

    np.multiply(self._buff_alpha_matrix, img_t, self._buff_alpha_matrix)
    np.multiply(self._buff_invert_alpha_mat, self._bg_mean, self._buff_invert_alpha_mat)
    np.add(self._buff_alpha_matrix, self._buff_invert_alpha_mat, self._bg_mean)

    self.last_t = t
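The update above is per-pixel exponential smoothing whose learning rate is derived from a half-life: when dt equals the current half-life, alpha is exactly 1/2, so the old background loses half its weight. In formula form (pixels under the dilated foreground mask have alpha forced to 0):

\alpha = 1 - e^{-\ln(2)\,\Delta t / T_{1/2}}, \qquad B_t = (1 - \alpha)\, B_{t-1} + \alpha\, I_t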