Python cv2.convertScaleAbs() Examples

The following are 30 code examples of cv2.convertScaleAbs(), collected from open-source projects. Each example notes its source file, the project it comes from, and that project's license.
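Before the examples, a minimal usage sketch (not taken from any of the projects below; the file name input.jpg is a placeholder): cv2.convertScaleAbs(src, alpha=1, beta=0) computes abs(src*alpha + beta), saturates the result to the 0-255 range and returns an 8-bit image. That is why it so often follows Sobel or Laplacian filtering, and why it is occasionally used for simple brightness/contrast adjustment.

import cv2

img = cv2.imread('input.jpg', cv2.IMREAD_GRAYSCALE)  # placeholder input file

# Typical use 1: bring a signed derivative (e.g. 16-bit Sobel output) back to uint8
sobel_x = cv2.Sobel(img, cv2.CV_16S, 1, 0, ksize=3)
abs_sobel_x = cv2.convertScaleAbs(sobel_x)

# Typical use 2: simple linear brightness/contrast adjustment
adjusted = cv2.convertScaleAbs(img, alpha=1.2, beta=30)
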
Example #1
Source File: pycv2.py    From vrequest with MIT License
def laplacian(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    s = cv2.Laplacian(s, cv2.CV_16S, ksize=3)
    s = cv2.convertScaleAbs(s)
    cv2.imshow('nier',s)
    return s

    # ret, binary = cv2.threshold(s,40,255,cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x,y,w,h = cv2.boundingRect(c)
    #     if w>5 and h>10:
    #         cv2.rectangle(v,(x,y),(x+w,y+h),(155,155,0),1)
    # cv2.imshow('nier2',v)

    # cv2.waitKey()
    # cv2.destroyAllWindows() 
Example #2
Source File: motion.py    From object-detection with MIT License
def prediction(self, image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.GaussianBlur(image, (21, 21), 0)
        if self.avg is None:
            self.avg = image.copy().astype(float)
        cv2.accumulateWeighted(image, self.avg, 0.5)
        frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
        thresh = cv2.threshold(
                frameDelta, DELTA_THRESH, 255,
                cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(
                thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        self.avg = image.copy().astype(float)
        return cnts 
Example #3
Source File: plate_locate.py    From EasyPR-python with Apache License 2.0
def sobelOperT(self, img, blursize, morphW, morphH):
        '''
            No different from sobelOper?
        '''
        blur = cv2.GaussianBlur(img, (blursize, blursize), 0, 0, cv2.BORDER_DEFAULT)

        if len(blur.shape) == 3:
            gray = cv2.cvtColor(blur, cv2.COLOR_RGB2GRAY)
        else:
            gray = blur

        x = cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=3)
        absX = cv2.convertScaleAbs(x)
        # the second term has zero weight, so this is effectively just absX
        grad = cv2.addWeighted(absX, 1, absX, 0, 0)

        _, threshold = cv2.threshold(grad, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

        element = cv2.getStructuringElement(cv2.MORPH_RECT, (morphW, morphH))
        threshold = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, element)

        return threshold 
Example #4
Source File: normalized.py    From virtual-dressing-room with Apache License 2.0
def normalized(self):
               
#        t1=time.time()
        b = self.down[:, :, 0].astype(np.float32)
        g = self.down[:, :, 1].astype(np.float32)
        r = self.down[:, :, 2].astype(np.float32)

        # cast to float before summing so the uint8 channels do not overflow,
        # and avoid shadowing the built-in sum()
        total = b + g + r

        self.norm[:, :, 0] = b / total * 255.0
        self.norm[:, :, 1] = g / total * 255.0
        self.norm[:, :, 2] = r / total * 255.0
        
 #       print "conversion time",time.time()-t1
        
        #self.norm=cv2.merge([self.norm1,self.norm2,self.norm3])
        self.norm_rgb=cv2.convertScaleAbs(self.norm)
        #self.norm.dtype=np.uint8
        return self.norm_rgb 
Example #5
Source File: functional.py    From opencv_transforms with MIT License
def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.
    Returns:
        numpy ndarray: Brightness adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    table = np.array([ i*brightness_factor for i in range (0,256)]).clip(0,255).astype('uint8')
    # same thing but a bit slower
    # cv2.convertScaleAbs(img, alpha=brightness_factor, beta=0)
    if img.shape[2]==1:
        return cv2.LUT(img, table)[:,:,np.newaxis]
    else:
        return cv2.LUT(img, table) 
Example #6
Source File: amplify_color.py    From Heart-rate-measurement-using-camera with Apache License 2.0
def mainLoop(self):
        frame = self.webcam.get_frame()
        f1 = imutils.resize(frame, width = 256)
        #crop_frame = frame[100:228,200:328]
        self.data_buffer.append(f1)
        self.run_color()
        #print(frame)
        
        #if len(self.vidmag_frames) > 0:
            #print(self.vidmag_frames[0])
        cv2.putText(frame, "FPS "+str(float("{:.2f}".format(self.fps))),
                       (20,420), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0),2)
            
        #frame[100:228,200:328] = cv2.convertScaleAbs(self.vidmag_frames[-1])
        cv2.imshow("Original",frame)
        #f2 = imutils.resize(cv2.convertScaleAbs(self.vidmag_frames[-1]), width = 640)
        f2 = imutils.resize(cv2.convertScaleAbs(self.frame_out), width = 640)
            
        cv2.imshow("Color amplification",f2)
            
            
        self.key_handler()  # without this the GUI can't show anything
Example #7
Source File: opencv_functional.py    From ss-ood with MIT License
def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.
    Returns:
        numpy ndarray: Brightness adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    table = np.array([ i*brightness_factor for i in range (0,256)]).clip(0,255).astype('uint8')
    # same thing but a bit slower
    # cv2.convertScaleAbs(img, alpha=brightness_factor, beta=0)
    if img.shape[2]==1:
        return cv2.LUT(img, table)[:,:,np.newaxis]
    else:
        return cv2.LUT(img, table) 
Example #8
Source File: opencv_functional.py    From ss-ood with MIT License
def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.
    Returns:
        numpy ndarray: Brightness adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    table = np.array([ i*brightness_factor for i in range (0,256)]).clip(0,255).astype('uint8')
    # same thing but a bit slower
    # cv2.convertScaleAbs(img, alpha=brightness_factor, beta=0)
    if img.shape[2]==1:
        return cv2.LUT(img, table)[:,:,np.newaxis]
    else:
        return cv2.LUT(img, table) 
Example #9
Source File: utils.py    From answer-sheet-scan with MIT License
def get_init_process_img(roi_img):
    """
    对图片进行初始化处理,包括,梯度化,高斯模糊,二值化,腐蚀,膨胀和边缘检测
    :param roi_img: ndarray
    :return: ndarray
    """
    h = cv2.Sobel(roi_img, cv2.CV_32F, 0, 1, ksize=-1)
    v = cv2.Sobel(roi_img, cv2.CV_32F, 1, 0, ksize=-1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)
    return img 
Example #10
Source File: ImageProcessing.py    From PyDesignPattern with GNU General Public License v3.0
def differentialDerivativeOpenCv():
    img = cv2.imread("E:\\TestImages\\person.jpg")

    # convert to a single-channel grayscale image
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
    y = cv2.Sobel(img, cv2.CV_16S, 0, 1)
    # differentiation can produce negative values, so take absolute values and saturate
    absX = cv2.convertScaleAbs(x)  # convert back to uint8
    absY = cv2.convertScaleAbs(y)
    # img = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)

    cv2.imshow("First order differential X", absX)
    cv2.imshow("First order differential Y", absY)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example #11
Source File: vis.py    From OCHumanApi with MIT License
def draw_mask(img, mask, thickness=3, color=(255, 0, 0)):
    def _get_edge(mask, thickness=3):
        dtype = mask.dtype
        x=cv2.Sobel(np.float32(mask),cv2.CV_16S,1,0, ksize=thickness) 
        y=cv2.Sobel(np.float32(mask),cv2.CV_16S,0,1, ksize=thickness)
        absX=cv2.convertScaleAbs(x)
        absY=cv2.convertScaleAbs(y)  
        edge = cv2.addWeighted(absX,0.5,absY,0.5,0)
        return edge.astype(dtype)
    
    img = img.copy()
    canvas = np.zeros(img.shape, img.dtype) + color
    img[mask > 0] = img[mask > 0] * 0.8 + canvas[mask > 0] * 0.2
    edge = _get_edge(mask, thickness)
    img[edge > 0] = img[edge > 0] * 0.2 + canvas[edge > 0] * 0.8
    return img 
Example #12
Source File: Tshirt.py    From virtual-dressing-room with Apache License 2.0
def detect_shirt(self):
        
        
        #self.dst=cv2.inRange(self.norm_rgb,np.array([self.lb,self.lg,self.lr],np.uint8),np.array([self.b,self.g,self.r],np.uint8))
        self.dst=cv2.inRange(self.norm_rgb,np.array([20,20,20],np.uint8),np.array([255,110,80],np.uint8))
        cv2.threshold(self.dst,0,255,cv2.THRESH_OTSU+cv2.THRESH_BINARY)
        fg=cv2.erode(self.dst,None,iterations=2)
        #cv2.imshow("fore",fg)  
        bg=cv2.dilate(self.dst,None,iterations=3)
        _,bg=cv2.threshold(bg, 1,128,1)
        #cv2.imshow("back",bg)
        
        mark=cv2.add(fg,bg)
        mark32=np.int32(mark)
        cv2.watershed(self.norm_rgb,mark32)
        self.m=cv2.convertScaleAbs(mark32)
        _,self.m=cv2.threshold(self.m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        #cv2.imshow("final_tshirt",self.m)
        
        cntr,h=cv2.findContours(self.m,cv2.cv.CV_RETR_EXTERNAL,cv2.cv.CV_CHAIN_APPROX_SIMPLE)
               
        return self.m,cntr 
Example #13
Source File: barcodeD&D_zbar.py    From Barcode-Detection-and-Decoding with Apache License 2.0 6 votes vote down vote up
def preprocess(image):
	# load the image
	image = cv2.imread(args["image"])

	#resize image
	image = cv2.resize(image,None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)

	#convert to grayscale
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

	#calculate x & y gradient
	gradX = cv2.Sobel(gray, ddepth = cv2.CV_32F, dx = 1, dy = 0, ksize = -1)
	gradY = cv2.Sobel(gray, ddepth = cv2.CV_32F, dx = 0, dy = 1, ksize = -1)

	# subtract the y-gradient from the x-gradient
	gradient = cv2.subtract(gradX, gradY)
	gradient = cv2.convertScaleAbs(gradient)

	# blur the image
	blurred = cv2.blur(gradient, (3, 3))

	# threshold the image
	(_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
	thresh = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	return thresh 
Example #14
Source File: reduce_image_by_seam_carving.py    From OpenCV-3-x-with-Python-By-Example with MIT License
def compute_energy_matrix(img): 
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
 
    # Compute X derivative of the image 
    sobel_x = cv2.Sobel(gray,cv2.CV_64F, 1, 0, ksize=3) 
 
    # Compute Y derivative of the image 
    sobel_y = cv2.Sobel(gray,cv2.CV_64F, 0, 1, ksize=3) 
 
    abs_sobel_x = cv2.convertScaleAbs(sobel_x) 
    abs_sobel_y = cv2.convertScaleAbs(sobel_y) 
 
    # Return weighted summation of the two images i.e. 0.5*X + 0.5*Y 
    return cv2.addWeighted(abs_sobel_x, 0.5, abs_sobel_y, 0.5, 0) 
 
Example #15
Source File: opencv_functional.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License
def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.
    Returns:
        numpy ndarray: Brightness adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    table = np.array([ i*brightness_factor for i in range (0,256)]).clip(0,255).astype('uint8')
    # same thing but a bit slower
    # cv2.convertScaleAbs(img, alpha=brightness_factor, beta=0)
    if img.shape[2] == 1:
        return cv2.LUT(img, table)[:,:,np.newaxis]
    else:
        return cv2.LUT(img, table) 
Example #16
Source File: pycv2.py    From vrequest with MIT License
def sobel(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v,cv2.COLOR_BGR2GRAY)
    x, y = cv2.Sobel(s,cv2.CV_16S,1,0), cv2.Sobel(s,cv2.CV_16S,0,1)
    s = cv2.convertScaleAbs(cv2.subtract(x,y))
    s = cv2.blur(s,(9,9))
    cv2.imshow('nier',s)
    return s

    # ret, binary = cv2.threshold(s,40,255,cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x,y,w,h = cv2.boundingRect(c)
    #     if w>5 and h>10:
    #         cv2.rectangle(v,(x,y),(x+w,y+h),(155,155,0),1)
    # cv2.imshow('nier2',v)

    # cv2.waitKey()
    # cv2.destroyAllWindows() 
Example #17
Source File: FocusMask.py    From BlurDetection with MIT License
def blur_mask(img):
    assert isinstance(img, numpy.ndarray), 'img_col must be a numpy array'
    assert img.ndim == 3, 'img_col must be a color image ({0} dimensions currently)'.format(img.ndim)
    msk, val, blurry = main.blur_detector(img)
    logger.debug('inverting img_fft')
    msk = cv2.convertScaleAbs(255-(255*msk/numpy.max(msk)))
    msk[msk < 50] = 0
    msk[msk > 127] = 255
    logger.debug('removing border')
    msk = remove_border(msk)
    logger.debug('applying erosion and dilation operators')
    msk = morphology(msk)
    logger.debug('evaluation complete')
    result = numpy.sum(msk)/(255.0*msk.size)
    logger.info('{0}% of input image is blurry'.format(int(100*result)))
    return msk, result, blurry 
Example #18
Source File: Back_sub.py    From virtual-dressing-room with Apache License 2.0
def subtract_back(self,frm):
        #dst=self.__back__-self.__foreground__
        temp=np.zeros((config.height,config.width),np.uint8)
        
        self.__foreground__=cv2.blur(self.__foreground__,(3,3))
        dst=cv2.absdiff(self.__back__,self.__foreground__)
        
        #dst=cv2.adaptiveThreshold(dst,255,cv.CV_THRESH_BINARY,cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,5,10)
        val,dst=cv2.threshold(dst,0,255,cv.CV_THRESH_BINARY+cv.CV_THRESH_OTSU)
        
        fg=cv2.erode(dst,None,iterations=1)
        bg=cv2.dilate(dst,None,iterations=4)
        
        _,bg=cv2.threshold(bg,1,128,1)
        
        mark=cv2.add(fg,bg)
        mark32=np.int32(mark)
        #dst.copy(temp)
        
        #seq=cv.FindContours(cv.fromarray(dst),self.mem,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE)
        #cntr,h=cv2.findContours(dst,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE)
        #print cntr,h
        #cv.DrawContours(cv.fromarray(temp),seq,(255,255,255),(255,255,255),1,cv.CV_FILLED)
        cv2.watershed(frm, mark32)
        self.final_mask=cv2.convertScaleAbs(mark32)
        #print temp
        
        #--outputs---
        #cv2.imshow("subtraction",fg)
        #cv2.imshow("thres",dst)
        #cv2.imshow("thres1",bg)
        #cv2.imshow("mark",mark)
        #cv2.imshow("final",self.final_mask) 
Example #19
Source File: fix_img_address_unit.py    From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License
def preprocess_img(img, name):
    resize_img = cv2.resize(img, (int(2.0 * img.shape[1]), int(2.0 * img.shape[0])), interpolation=cv2.INTER_CUBIC)
    # upscale 2x, which makes recognition easier
    resize_img = cv2.convertScaleAbs(resize_img, alpha=0.35, beta=20)
    resize_img = cv2.normalize(resize_img, dst=None, alpha=300, beta=10, norm_type=cv2.NORM_MINMAX)
    img_blurred = cv2.medianBlur(resize_img, 7)  # median filtering
    img_blurred = cv2.medianBlur(img_blurred, 3)
    # the alpha and beta parameters here can all be tuned; the current results look acceptable but could probably be improved

    return img_blurred 
Example #20
Source File: TrackSet.py    From SimpleCV2 with BSD 3-Clause "New" or "Revised" License
def getBackground(self):
        """
        **SUMMARY**

        Get Background of the Image. For more info read
        http://opencvpython.blogspot.in/2012/07/background-extraction-using-running.html

        **PARAMETERS**
        No Parameters

        **RETURNS**

        Image - SimpleCV.ImageClass.Image

        **EXAMPLE**

        >>> while (some_condition):
        ...     img1 = cam.getImage()
        ...     ts = img1.track("camshift", ts1, img, bb)
        ...     img = img1
        >>> ts.getBackground().show()
        """
        imgs = self.trackImages(cv2_numpy=True)
        f = imgs[0]
        avg = np.float32(f)
        for img in imgs[1:]:
            f = img
            cv2.accumulateWeighted(f,avg,0.01)
            res = cv2.convertScaleAbs(avg)
        return Image(res, cv2image=True) 
Example #21
Source File: ChangeDetector.py    From NaturewatchCameraServer with GNU General Public License v3.0
def detect_change_contours(self, img):
        """
        Detect changed contours in frame
        :param img: current image
        :return: True if it's time to capture
        """
        # convert to gray
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.avg is None:
            self.avg = gray.copy().astype("float")
            return False

        # add to accumulation model and find the change
        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frame_delta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))

        # threshold, dilate and find contours
        thresh = cv2.threshold(frame_delta, self.config["delta_threshold"], 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # find largest contour
        largest_contour = self.get_largest_contour(cnts)

        if largest_contour is None:
            return False

        (x, y, w, h) = cv2.boundingRect(largest_contour)

        # if the contour is outside the allowed size range, return False
        if w > self.maxWidth or w < self.minWidth or h > self.maxHeight or h < self.minHeight:
            return False
        else:
            if self.get_fake_time() - self.lastPhotoTime >= self.config['min_photo_interval_s']:
                return True

        return False 
Example #22
Source File: cut_part.py    From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License
def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'):  # binarize the grayscale image; the last two parameters are for debugging
    """
    Compute the gradient and binarize.
    :param img_blurred: filtered image
    :param image_name: image name, for testing
    :param save_path: save path, for testing
    :return: binarized image
    """
    gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0)
    gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)
    img_gradient = cv2.subtract(gradX, gradY)
    img_gradient = cv2.convertScaleAbs(img_gradient)  # Sobel operator to compute the gradient; Canny could be used instead

    # switched to adaptive thresholding here; it does not seem to make much difference
    img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3)
    # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh)  # binarization; threshold not tuned yet

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)
    img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel)
    img_closed = cv2.erode(img_closed, None, iterations=9)
    img_closed = cv2.dilate(img_closed, None, iterations=9)  # erosion and dilation
    # after shrinking the kernel and increasing the erosion/dilation iterations, the error rate dropped significantly

    return img_closed 
Example #23
Source File: plate_locate.py    From EasyPR-python with Apache License 2.0
def sobelOper(self, img, blursize, morphW, morphH):
        blur = cv2.GaussianBlur(img, (blursize, blursize), 0, 0, cv2.BORDER_DEFAULT)

        if len(blur.shape) == 3:
            gray = cv2.cvtColor(blur, cv2.COLOR_RGB2GRAY)
        else:
            gray = blur

        x = cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)
        absX = cv2.convertScaleAbs(x)
        # the second term has zero weight, so this is effectively just absX
        grad = cv2.addWeighted(absX, 1, absX, 0, 0)

        _, threshold = cv2.threshold(grad, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

        element = cv2.getStructuringElement(cv2.MORPH_RECT, (morphW, morphH))
        threshold = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, element)

        return threshold 
Example #24
Source File: ImageProcessing.py    From PyDesignPattern with GNU General Public License v3.0
def preProcessing(self, img):
        print("梯度化处理...")
        x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
        y = cv2.Sobel(img, cv2.CV_16S, 0, 1)
        absX = cv2.convertScaleAbs(x)  # convert back to uint8
        absY = cv2.convertScaleAbs(y)
        return cv2.addWeighted(absX, 0.5, absY, 0.5, 0)



# First-order differential operator
#======================================================================================================================= 
Example #25
Source File: active_weather.py    From aggregation with Apache License 2.0
def __sobel_image__(self,image,horizontal):
        """
        apply the sobel operator to a given image on either the vertical or horizontal axis
        basically copied from
        http://stackoverflow.com/questions/10196198/how-to-remove-convexity-defects-in-a-sudoku-square
        :param horizontal:
        :return:
        """
        if horizontal:
            dy = cv2.Sobel(image,cv2.CV_16S,0,2)
            dy = cv2.convertScaleAbs(dy)
            cv2.normalize(dy,dy,0,255,cv2.NORM_MINMAX)
            ret,close = cv2.threshold(dy,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

            kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(10,2))
        else:
            dx = cv2.Sobel(image,cv2.CV_16S,2,0)
            dx = cv2.convertScaleAbs(dx)
            cv2.normalize(dx,dx,0,255,cv2.NORM_MINMAX)
            ret,close = cv2.threshold(dx,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

            kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,10))

        close = cv2.morphologyEx(close,cv2.MORPH_CLOSE,kernel)

        return close 
Example #26
Source File: pretreatment.py    From captcha_trainer with Apache License 2.0
def laplacian(self, value, modify=False) -> np.ndarray:
        if not value:
            return self.origin
        _laplacian = cv2.convertScaleAbs(cv2.Laplacian(self.origin, cv2.CV_16S, ksize=3))
        if modify:
            self.origin = _laplacian
        return _laplacian 
Example #27
Source File: 6_dm_video.py    From stereopi-tutorial with GNU General Public License v3.0
def stereo_depth_map(rectified_pair):
    dmLeft = rectified_pair[0]
    dmRight = rectified_pair[1]
    disparity = sbm.compute(dmLeft, dmRight)
    local_max = disparity.max()
    local_min = disparity.min()
    disparity_grayscale = (disparity-local_min)*(65535.0/(local_max-local_min))
    disparity_fixtype = cv2.convertScaleAbs(disparity_grayscale, alpha=(255.0/65535.0))
    disparity_color = cv2.applyColorMap(disparity_fixtype, cv2.COLORMAP_JET)
    cv2.imshow("Image", disparity_color)
    key = cv2.waitKey(1) & 0xFF   
    if key == ord("q"):
        quit();
    return disparity_color 
Example #28
Source File: Tshirt.py    From virtual-dressing-room with Apache License 2.0
def detect_shirt2(self):
        self.hsv=cv2.cvtColor(self.norm_rgb,cv.CV_BGR2HSV)
        self.hue,s,_=cv2.split(self.hsv)
        
        _,self.dst=cv2.threshold(self.hue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        self.fg=cv2.erode(self.dst,None,iterations=3)
        self.bg=cv2.dilate(self.dst,None,iterations=1)
        _,self.bg=cv2.threshold(self.bg,1,128,1)
        mark=cv2.add(self.fg,self.bg)
        mark32=np.int32(mark)
        cv2.watershed(self.norm_rgb,mark32)
        
        m=cv2.convertScaleAbs(mark32)
        _,m=cv2.threshold(m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        
        cntr,h=cv2.findContours(m,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE)
        print len(cntr)
        #print cntr[0].shape
        #cntr[1].dtype=np.float32
        #ret=cv2.contourArea(np.array(cntr[1]))
        #print ret
        #cntr[0].dtype=np.uint8
        cv2.drawContours(m,cntr,-1,(255,255,255),3)
        cv2.imshow("mask_fg",self.fg)
        cv2.imshow("mask_bg",self.bg)
        cv2.imshow("mark",m) 
Example #29
Source File: BackgroundRemove.py    From vidpipe with GNU General Public License v3.0
def processFrame( self, frame_in ):
        # version 1 - moving average
        if self._avg is None:  # 'is None' avoids NumPy's element-wise comparison once _avg is an array
            self._avg = np.float32( frame_in )
        cv2.accumulateWeighted( frame_in, self._avg, self._speed )
        background = cv2.convertScaleAbs( self._avg )
        active_area = cv2.absdiff( frame_in, background )

        #version 2 - MOG - Gausian Mixture-based Background/Foreground Segmentation Algorithm
        fgmask = self._fgbg.apply( frame_in ,learningRate = 0.01 )
        #active_area = cv2.bitwise_and( frame_in, frame_in, mask = fgmask )

        return fgmask 
Example #30
Source File: img_tools.py    From crossgap_il_rl with GNU General Public License v2.0
def float_img_to_display(_img):
        img = _img
        max_value = 1000
        # clip values above max_value in place (vectorized equivalent of the original per-pixel loop)
        np.minimum(img, max_value, out=img)
        dist1 = cv2.convertScaleAbs(img)
        dist2 = cv2.normalize(dist1, None, 255, 0, cv2.NORM_MINMAX, cv2.CV_8UC1)
        return dist1
        # return dist2