Python cv2.add() Examples

The following are 28 code examples of cv2.add(), drawn from open-source projects. The source file, project, and license are noted above each example.
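A note on semantics before the examples: unlike NumPy's + operator, which wraps around on overflow for uint8 arrays, cv2.add() performs saturating arithmetic, clamping the result to the range of the output type. A minimal sketch of the difference (values chosen for illustration):

import cv2
import numpy as np

x = np.uint8([250])
y = np.uint8([10])

print(x + y)          # [4]     NumPy wraps around: (250 + 10) % 256
print(cv2.add(x, y))  # [[255]] OpenCV saturates: min(250 + 10, 255)

cv2.add() also accepts a scalar second operand (several examples below use this to shift a whole HSV channel) and an optional dtype argument selecting the output depth.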
Example #1
Source File: train.py    From kaggle_carvana_segmentation with MIT License
def random_hue_saturation_value(image,
                                hue_shift_limit=(-180, 180),
                                sat_shift_limit=(-255, 255),
                                val_shift_limit=(-255, 255)):

    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(image)
    hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
    h = cv2.add(h, hue_shift)
    sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
    s = cv2.add(s, sat_shift)
    val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
    v = cv2.add(v, val_shift)
    image = cv2.merge((h, s, v))
    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image 
Example #2
Source File: Preprocess.py    From ALPR-Indonesia with MIT License
def maximizeContrast(imgGrayscale):

    height, width = imgGrayscale.shape

    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT, structuringElement)
    imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT, structuringElement)

    # gray + tophat - blackhat brightens light details and suppresses dark ones
    imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)
    imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

    return imgGrayscalePlusTopHatMinusBlackHat
# end function 
Example #3
Source File: watermark.py    From dreampower with GNU General Public License v3.0
def _execute(self, *args):
        """
        Create a watermark image.

        :param args: <RGB> image to watermark
        :return: <RGB> watermarked image
        """
        img = args[0]
        # Add an alpha channel if it is missing; assigning img first avoids a
        # NameError below when the input already has 4 channels
        if img.shape[2] < 4:
            img = np.dstack([img, np.ones((512, 512), dtype="uint8") * 255])

        f1 = np.asarray([0, 0, 0, 250])  # any colour with alpha >= 250 (opaque watermark pixels)
        f2 = np.asarray([255, 255, 255, 255])
        mask = cv2.bitwise_not(cv2.inRange(self.__watermark, f1, f2))
        mask_inv = cv2.bitwise_not(mask)

        res1 = cv2.bitwise_and(img, img, mask=mask)
        res2 = cv2.bitwise_and(self.__watermark, self.__watermark, mask=mask_inv)
        res = cv2.add(res1, res2)

        alpha = 0.6
        return cv2.addWeighted(res, alpha, img, 1 - alpha, 0) 
Example #4
Source File: OCR.py    From display_ocr with GNU General Public License v2.0
def getthresholdedimg(hsv):
    yellow = cv2.inRange(hsv, np.array((20, 100, 100)), np.array((30, 255, 255)))
    blue = cv2.inRange(hsv, np.array((100, 100, 100)), np.array((120, 255, 255)))
    both = cv2.add(yellow, blue)  # union of the two binary masks (saturates at 255)
    return both 
Example #5
Source File: Tshirt.py    From virtual-dressing-room with Apache License 2.0
def detect_shirt(self):
        #self.dst=cv2.inRange(self.norm_rgb,np.array([self.lb,self.lg,self.lr],np.uint8),np.array([self.b,self.g,self.r],np.uint8))
        self.dst = cv2.inRange(self.norm_rgb, np.array([20, 20, 20], np.uint8), np.array([255, 110, 80], np.uint8))
        cv2.threshold(self.dst, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
        fg = cv2.erode(self.dst, None, iterations=2)
        #cv2.imshow("fore",fg)
        bg = cv2.dilate(self.dst, None, iterations=3)
        _, bg = cv2.threshold(bg, 1, 128, cv2.THRESH_BINARY_INV)
        #cv2.imshow("back",bg)

        mark = cv2.add(fg, bg)
        mark32 = np.int32(mark)
        cv2.watershed(self.norm_rgb, mark32)
        self.m = cv2.convertScaleAbs(mark32)
        _, self.m = cv2.threshold(self.m, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        #cv2.imshow("final_tshirt",self.m)

        # OpenCV 2.x constants; on OpenCV 3+ use cv2.RETR_EXTERNAL and cv2.CHAIN_APPROX_SIMPLE
        cntr, h = cv2.findContours(self.m, cv2.cv.CV_RETR_EXTERNAL, cv2.cv.CV_CHAIN_APPROX_SIMPLE)

        return self.m, cntr 
Example #6
Source File: train.py    From Kaggle-Carvana-Image-Masking-Challenge with MIT License
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image 
Example #7
Source File: client.py    From Walk-Assistant with GNU General Public License v3.0
def render(img, res):
    res *= 255
    res = res.astype(np.uint8)

    res = cv2.cvtColor(res, cv2.COLOR_GRAY2BGR)  # grayscale to 3-channel BGR
    res[:, :, 0] = 0  # remove blue channel
    res[:, :, 2] = 0  # remove red channel

    res = cv2.resize(res, (WIDTH, HEIGHT), interpolation=cv2.INTER_LINEAR)  # resize 15x8 to 1280x720
    org = cv2.resize(img, (WIDTH, HEIGHT))
    added = cv2.add(org, res)  # combine input, output

    cv2.imshow('result', added)
    cv2.waitKey(0)
    # if cv2.waitKey(1) & 0xFF == ord('q'):
    #     print('User Interrupted')
    #     exit(1) 
Example #8
Source File: annotation.py    From Walk-Assistant with GNU General Public License v3.0
def visualize(img):
    global grid

    grid = np.round(grid)

    h, w, c = img.shape
    box = np.zeros((h, w), dtype=np.uint8)
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            box[i*KERNEL : (i+1)*KERNEL, j*KERNEL : (j+1)*KERNEL] = grid[i][j] * 255

    box = cv2.cvtColor(box, cv2.COLOR_GRAY2BGR)
    box[:,:,0]=0
    box[:,:,2]=0

    visual = cv2.add(img, box)

    for i in range(0, HEIGHT, KERNEL):  # horizontal grid lines
        cv2.line(visual, (0, i), (WIDTH, i), color=(255,255,255))
    for i in range(0, WIDTH, KERNEL):  # vertical grid lines
        cv2.line(visual, (i, 0), (i, HEIGHT), color=(255,255,255))

    return visual 
Example #9
Source File: BackgroundSubtractor.py    From tierpsy-tracker with MIT License
def subtract_bgnd(self, image):
        # new method using bitwise not
        def _remove_func(_img, _func, _bg):
            # Use OpenCV arithmetic rather than NumPy to avoid uint8 overflow:
            # https://stackoverflow.com/questions/45817037/opencv-image-subtraction-vs-numpy-subtraction/45817868
            new_img = np.zeros_like(_img)  # maybe can do this in place
            if _img.ndim == 2:
                _func(_img, _bg, new_img)
            else:
                for ii, this_frame in enumerate(_img):
                    _func(this_frame, _bg, new_img[ii])
            return new_img
        
        bg = self.bgnd.astype(np.uint8)
        if self.is_light_background:
            notbg = ~bg
            ss = _remove_func(image, cv2.add, notbg)
        else: # fluorescence
            ss = _remove_func(image, cv2.subtract, bg)
            
        ss = np.clip(ss, 1, 255)
        
        return ss 
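In the light-background branch above, adding the inverted background is a saturated, inverted subtraction: where bg >= image, cv2.add(image, ~bg) equals 255 - (bg - image), and it clamps at 255 elsewhere. A small numeric check of that identity (values are illustrative):

import cv2
import numpy as np

frame = np.uint8([[100]])  # hypothetical pixel of a dark object
bg = np.uint8([[200]])     # lighter background pixel

print(cv2.add(frame, ~bg))  # [[155]] == 255 - (200 - 100)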
Example #10
Source File: thug.py    From thug-memes with MIT License
def _draw_on_top(self, img, x, y, sub_img, sub_name=''):
        h, w, _ = sub_img.shape
        mask = sub_img[:, :, 3]
        mask_inv = cv2.bitwise_not(mask)
        sub_img_ = sub_img[:, :, :3]

        background = img[y:y + h, x:x + w]
        try:
            background = cv2.bitwise_and(background, background, mask=mask_inv)
        except cv2.error as e:
            raise ThugError(
                'Can not draw {}, please try with smaller {}.'.format(
                    sub_name, sub_name))
        foreground = cv2.bitwise_and(sub_img_, sub_img_, mask=mask)
        sum_ = cv2.add(background, foreground)

        img[y:y + h, x:x + w] = sum_ 
Example #11
Source File: utils.py    From answer-sheet-scan with MIT License
def get_init_process_img(roi_img):
    """
    对图片进行初始化处理,包括,梯度化,高斯模糊,二值化,腐蚀,膨胀和边缘检测
    :param roi_img: ndarray
    :return: ndarray
    """
    h = cv2.Sobel(roi_img, cv2.CV_32F, 0, 1, -1)
    v = cv2.Sobel(roi_img, cv2.CV_32F, 1, 0, -1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)
    return img 
Example #12
Source File: functional.py    From albumentations with MIT License
def _shift_hsv_non_uint8(img, hue_shift, sat_shift, val_shift):
    dtype = img.dtype
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    hue, sat, val = cv2.split(img)

    if hue_shift != 0:
        hue = cv2.add(hue, hue_shift)
        hue = np.where(hue < 0, hue + 180, hue)
        hue = np.where(hue > 180, hue - 180, hue)
        hue = hue.astype(dtype)

    if sat_shift != 0:
        sat = clip(cv2.add(sat, sat_shift), dtype, 255 if dtype == np.uint8 else 1.0)

    if val_shift != 0:
        val = clip(cv2.add(val, val_shift), dtype, 255 if dtype == np.uint8 else 1.0)

    img = cv2.merge((hue, sat, val)).astype(dtype)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    return img 
Example #13
Source File: video.py    From OpenCV-Snapchat-DogFilter with BSD 3-Clause "New" or "Revised" License
def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3) 
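The dtype argument above lets cv2.add() mix input depths: the rendered frame is uint8 while the Gaussian noise is signed int8, and cv2.CV_8UC3 forces the saturated sum back into an 8-bit, 3-channel image. A minimal sketch of the same pattern (shapes and values are illustrative):

import cv2
import numpy as np

scene = np.full((2, 2, 3), 200, np.uint8)       # stand-in 8-bit frame
noise = np.zeros((2, 2, 3), np.int8)
cv2.randn(noise, np.zeros(3), np.ones(3) * 64)  # zero-mean signed noise

noisy = cv2.add(scene, noise, dtype=cv2.CV_8UC3)
print(noisy.dtype, noisy.min(), noisy.max())    # uint8, clamped to [0, 255]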
Example #14
Source File: video.py    From PyCV-time with MIT License
def read(self, dst=None):
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
        return True, buf 
Example #15
Source File: mask.py    From dreampower with GNU General Public License v3.0
def _execute(self, *args):
        """
        Create mask ref.

        :param args: <[RGB, RGB]> corrected image, mask image
        :return: <RGB> image
        """
        # Create a solid green image
        green = np.zeros((512, 512, 3), np.uint8)
        green[:, :, :] = (0, 255, 0)  # (B, G, R)

        # Define the green color filter
        f1 = np.asarray([0, 250, 0])  # green color filter
        f2 = np.asarray([10, 255, 10])

        # From mask, extrapolate only the green mask
        green_mask = cv2.inRange(args[1], f1, f2)  # green is 0

        # (OPTIONAL) Apply dilate and open to mask
        kernel = np.ones((5, 5), np.uint8)  # Try change it?
        green_mask = cv2.dilate(green_mask, kernel, iterations=1)
        # green_mask = cv2.morphologyEx(green_mask, cv2.MORPH_OPEN, kernel)

        # Create an inverted mask
        green_mask_inv = cv2.bitwise_not(green_mask)

        # Cut correct and green image, using the green_mask & green_mask_inv
        res1 = cv2.bitwise_and(args[0], args[0], mask=green_mask_inv)
        res2 = cv2.bitwise_and(green, green, mask=green_mask)

        # Compose the corrected image with the green mask:
        return cv2.add(res1, res2) 
Example #16
Source File: video.py    From OpenCV-Snapchat-DogFilter with BSD 3-Clause "New" or "Revised" License
def read(self, dst=None):
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
        return True, buf 
Example #17
Source File: augmentations.py    From segmentation-networks-benchmark with MIT License
def __call__(self, image):
        if random.random() < self.prob:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            h, s, v = cv2.split(image)
            hue_shift = np.random.uniform(self.hue_shift_limit[0], self.hue_shift_limit[1])
            h = cv2.add(h, hue_shift)
            sat_shift = np.random.uniform(self.sat_shift_limit[0], self.sat_shift_limit[1])
            s = cv2.add(s, sat_shift)
            val_shift = np.random.uniform(self.val_shift_limit[0], self.val_shift_limit[1])
            v = cv2.add(v, val_shift)
            image = cv2.merge((h, s, v))
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        return image 
Example #18
Source File: size_detector.py    From gaps with MIT License
def _split_channel_images(self):
        blue, green, red = cv2.split(self._image)

        split_channel_images = [
            red,
            green,
            blue,
            cv2.add(red, green),
            cv2.add(red, blue),
            cv2.add(green, blue)
        ]

        return split_channel_images 
Example #19
Source File: aruco_detect_markers_augmented_reality.py    From Mastering-OpenCV-4-with-Python with MIT License
def draw_augmented_overlay(pts_1, overlay_image, image):
    """Overlay the image 'overlay_image' onto the image 'image'"""

    # Define the corner points of the overlay_image to be drawn:
    pts_2 = np.float32([[0, 0], [overlay_image.shape[1], 0], [overlay_image.shape[1], overlay_image.shape[0]],
                        [0, overlay_image.shape[0]]])

    # Draw border to see the limits of the image:
    cv2.rectangle(overlay_image, (0, 0), (overlay_image.shape[1], overlay_image.shape[0]), (255, 255, 0), 10)

    # Create the transformation matrix:
    M = cv2.getPerspectiveTransform(pts_2, pts_1)

    # Transform the overlay_image image using the transformation matrix M:
    dst_image = cv2.warpPerspective(overlay_image, M, (image.shape[1], image.shape[0]))
    # cv2.imshow("dst_image", dst_image)

    # Create the mask:
    dst_image_gray = cv2.cvtColor(dst_image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(dst_image_gray, 0, 255, cv2.THRESH_BINARY_INV)

    # Compute bitwise conjunction using the calculated mask:
    image_masked = cv2.bitwise_and(image, image, mask=mask)
    # cv2.imshow("image_masked", image_masked)

    # Add the two images to create the resulting image:
    result = cv2.add(dst_image, image_masked)
    return result 
Example #20
Source File: blob.py    From WPAL-network with GNU General Public License v3.0
def prep_img_for_blob(img, pixel_means, target_size, max_area, min_size):
    """Mean subtract and scale an image for use in a blob."""
    img = img.astype(np.float32, copy=False)
    img -= pixel_means
    img_shape = img.shape
    img_size_min = np.min(img_shape[0:2])
    img_size_max = np.max(img_shape[0:2])
    img_scale = float(target_size) / float(img_size_max)

    # Prevent the shorter side from being less than MIN_SIZE
    if np.round(img_scale * img_size_min) < min_size:
        img_scale = np.round(min_size / img_size_min) + 1

    # Prevent the scaled area from being more than MAX_AREA
    if np.round(img_scale * img_size_min * img_scale * img_size_max) > max_area:
        img_scale = math.sqrt(float(max_area) / float(img_size_min * img_size_max))

    # Resize the sample.
    img = cv2.resize(img, None, None, fx=img_scale, fy=img_scale, interpolation=cv2.INTER_LINEAR)

    # Randomly rotate the sample.
    img = cv2.warpAffine(img,
                         cv2.getRotationMatrix2D((img.shape[1] / 2, img.shape[0] / 2),
                                                 np.random.randint(-15, 15), 1),
                         (img.shape[1], img.shape[0]))

    # Perform RGB jittering.
    h, w, c = img.shape
    jitter = np.zeros_like(img)
    for i in range(c):
        jitter[:, :, i] = np.random.randint(0, cfg.TRAIN.RGB_JIT, (h, w)) - cfg.TRAIN.RGB_JIT / 2
    img = cv2.add(img, jitter)

    return img, img_scale 
Example #21
Source File: factory.py    From pibooth with MIT License
def _build_final_image(self, image):
        """See upper class description.
        """
        if self._overlay_image:
            overlay = cv2.cvtColor(cv2.imread(self._overlay_image, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGBA)
            overlay, _, _ = self._image_resize_keep_ratio(overlay, self.width, self.height, True)

            # Fix the overlay. Why do we have to do this? If we don't, pixels are
            # marked as opaque when they shouldn't be. See:
            # https://www.pyimagesearch.com/2016/04/25/watermarking-images-with-opencv-and-python
            RR, GG, BB, A = cv2.split(overlay)
            RR = cv2.bitwise_and(RR, RR, mask=A)
            GG = cv2.bitwise_and(GG, GG, mask=A)
            BB = cv2.bitwise_and(BB, BB, mask=A)
            overlay = cv2.merge([RR, GG, BB, A])

            # Add an extra channel to the image (i.e., the alpha transparency)
            if image.shape[2] == 3:
                image = cv2.cvtColor(image, cv2.COLOR_RGB2RGBA)

            # Now create a mask of overlay and create its inverse mask also
            img2gray = cv2.cvtColor(overlay, cv2.COLOR_RGB2GRAY)
            _ret, mask = cv2.threshold(img2gray, 30, 255, cv2.THRESH_BINARY)
            mask_inv = cv2.bitwise_not(mask)
            # Now black out the area of the overlay in the ROI (i.e. the image)
            img1_bg = cv2.bitwise_and(image, image, mask=mask_inv)
            # Take only region of overlay from overlay image
            img2_fg = cv2.bitwise_and(overlay, overlay, mask=mask)
            # Generate the main image
            image = cv2.add(img1_bg, img2_fg)
            # Remove alpha dimension
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)

        return Image.fromarray(image) 
Example #22
Source File: video.py    From TecoGAN with Apache License 2.0
def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3) 
Example #23
Source File: cv.py    From deepstar with BSD 3-Clause Clear License
def alpha_blend(background_, foreground_, mask_):
    background = background_.copy()
    foreground = foreground_.copy()
    mask = mask_.copy()

    background = background.astype(float)
    foreground = foreground.astype(float)
    mask = mask.astype(float) / 255
    foreground = cv2.multiply(mask, foreground)
    background = cv2.multiply(1.0 - mask, background)
    image = cv2.add(foreground, background)

    return image 
Example #24
Source File: video.py    From TecoGAN with Apache License 2.0
def read(self, dst=None):
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv.add(buf, noise, dtype=cv.CV_8UC3)
        return True, buf 
Example #25
Source File: video.py    From pi-tracking-telescope with MIT License
def read(self, dst=None):
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
        return True, buf 
Example #26
Source File: video.py    From OpenCV-Python-Tutorial with MIT License
def read(self, dst=None):
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
        return True, buf 
Example #27
Source File: video.py    From OpenCV-Python-Tutorial with MIT License
def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3) 
Example #28
Source File: webcam.py    From mobile-hair-segmentation-pytorch with MIT License
def alpha_image(image, mask, alpha=0.1):
    color = np.zeros((mask.shape[0], mask.shape[1], 3))
    color[np.where(mask != 0)] = [0, 130, 255]
    alpha_hand = ((1 - alpha) * image + alpha * color).astype(np.uint8)
    alpha_hand = cv2.bitwise_and(alpha_hand, alpha_hand, mask=mask)

    return cv2.add(alpha_hand, image)