Python cv2.erode() Examples

The following are 30 code examples of cv2.erode(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module cv2, or try the search function.
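
Before the project examples, here is a minimal, self-contained sketch of the call itself; the file names and the 3x3 kernel are placeholders rather than values taken from any example below:

import cv2
import numpy as np

# Erosion replaces each pixel with the minimum over its neighborhood,
# so bright (foreground) regions shrink and thin specks disappear.
img = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)  # placeholder path
kernel = np.ones((3, 3), np.uint8)
eroded = cv2.erode(img, kernel, iterations=1)
cv2.imwrite('eroded.png', eroded)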
Example #1
Source File: line_detect_2.py    From crop_row_detection with GNU General Public License v3.0
def skeletonize(image_in):
    '''Takes a grayscale image and outputs a binary skeleton image.'''
    size = np.size(image_in)
    skel = np.zeros(image_in.shape, np.uint8)

    ret, image_edit = cv2.threshold(image_in, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
    done = False

    while not done:
        eroded = cv2.erode(image_edit, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(image_edit, temp)
        skel = cv2.bitwise_or(skel, temp)
        image_edit = eroded.copy()

        zeros = size - cv2.countNonZero(image_edit)
        if zeros == size:
            done = True

    return skel 
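
A minimal way to exercise this helper, assuming cv2 and numpy are imported as in the snippet (the file name is a placeholder):

binary_in = cv2.imread('shapes.png', cv2.IMREAD_GRAYSCALE)  # placeholder path
skeleton = skeletonize(binary_in)  # one-pixel-wide ridges of the thresholded foreground
cv2.imwrite('skeleton.png', skeleton)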
Example #2
Source File: single_roi_tracker.py    From ethoscope with GNU General Public License v3.0
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)

        cv2.erode(self._buff_grey, self._erode_kern, dst=self._buff_grey)

        if darker_fg:
            cv2.subtract(255, self._buff_grey, self._buff_grey)


        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Example #3
Source File: picam.py    From PiCamNN with MIT License
def movement(mat_1,mat_2):
    mat_1_gray     = cv2.cvtColor(mat_1.copy(),cv2.COLOR_BGR2GRAY)
    mat_1_gray     = cv2.blur(mat_1_gray,(blur1,blur1))
    _,mat_1_gray   = cv2.threshold(mat_1_gray,100,255,0)
    mat_2_gray     = cv2.cvtColor(mat_2.copy(),cv2.COLOR_BGR2GRAY)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur1,blur1))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,100,255,0)
    mat_2_gray     = cv2.bitwise_xor(mat_1_gray,mat_2_gray)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur2,blur2))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,70,255,0)
    mat_2_gray     = cv2.erode(mat_2_gray,np.ones((erodeval,erodeval)))
    mat_2_gray     = cv2.dilate(mat_2_gray,np.ones((4,4)))
    _, contours,__ = cv2.findContours(mat_2_gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0: return True  # movement was detected
    return False                       # no movement


#Pedestrian Recognition Thread 
Example #4
Source File: generate_sub_final_ensemble.py    From kaggle_carvana_segmentation with MIT License
def check_if_top_is_unreliable(mean_pred, albu_pred):
    unreliable = np.zeros_like(albu_pred)
    rows, cols = unreliable.shape
    unreliable[(albu_pred > 30) & (albu_pred < 210)] = 255
    unreliable = cv2.erode(unreliable, (55, 55), iterations=10)
    unreliable = unreliable[0:rows // 2, ...]
    biggest = biggest_contour(unreliable)
    if biggest is None:
        return None
    if cv2.contourArea(biggest) > 40000:
        x, y, w, h = cv2.boundingRect(biggest)
        x, y, w, h = max(x - 50, 0), y - 50, w + 100, h + 100
        mask = (albu_pred > 55).astype(np.uint8) * 255
        c = biggest_contour(mask[y:y + h, x:x + w])
        c = cv2.convexHull(c)
        mask[y:y + h, x:x + w] = cv2.drawContours(mask[y:y + h, x:x + w], [c], -1, 255, -1)
        result = (mean_pred > 127).astype(np.uint8) * 255
        result[y:y + h, x:x + w] = mask[y:y + h, x:x + w]
        return result
    return None 
Example #5
Source File: helpers.py    From hazymaze with Apache License 2.0
def blend_non_transparent(sprite, background_img):
    gray_overlay = cv2.cvtColor(background_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    background_mask = 255 - overlay_mask

    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    sprite_part = (sprite * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (background_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    return np.uint8(cv2.addWeighted(sprite_part, 255.0, overlay_part, 255.0, 0.0)) 
Example #6
Source File: coherence.py    From OpenCV-Python-Tutorial with MIT License
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
    h, w = img.shape[:2]

    for i in range(iter_n):
        print(i)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img 
Example #7
Source File: data_gen.py    From Mobile-Image-Matting with MIT License
def process(im_name, bg_name):
    im = cv.imread(fg_path + im_name)
    a = cv.imread(a_path + im_name, 0)
    h, w = im.shape[:2]
    bg = cv.imread(bg_path + bg_name)
    bh, bw = bg.shape[:2]
    wratio = w / bw
    hratio = h / bh
    ratio = wratio if wratio > hratio else hratio
    if ratio > 1:
        bg = cv.resize(src=bg, dsize=(math.ceil(bw * ratio), math.ceil(bh * ratio)), interpolation=cv.INTER_CUBIC)

    return composite4(im, bg, a, w, h)


# def gen_trimap(alpha):
#     fg = np.array(np.equal(alpha, 255).astype(np.float32))
#     # fg = cv.erode(fg, kernel, iterations=np.random.randint(1, 3))
#     unknown = np.array(np.not_equal(alpha, 0).astype(np.float32))
#     unknown = cv.dilate(unknown, kernel, iterations=np.random.randint(1, 20))
#     trimap = fg * 255 + (unknown - fg) * 128
#     trimap = np.clip(trimap, 0, 255.0)
#     return trimap.astype(np.uint8) 
Example #8
Source File: lucidDream.py    From pyLucid with MIT License
def SeamlessClone_trimap(srcIm,dstIm,imMask,offX,offY):
    dstIm=dstIm.copy()
    bimsk=imMask>0

    new_msk=np.zeros(dstIm.shape[:2],dtype='uint8')
    new_msk[offY:offY+imMask.shape[0],offX:offX+imMask.shape[1]]=imMask

    dstIm[new_msk>0]=srcIm[imMask>0]

    kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    bimsk=bimsk.astype('uint8')
    bdmsk=cv2.dilate(bimsk,kernel)-cv2.erode(bimsk,kernel)
    mask255=bdmsk>0
    mask255=(mask255*255).astype('uint8')

    offCenter=(int(offX+imMask.shape[1]/2),int(offY+imMask.shape[0]/2))

    if np.any(bdmsk>0):
        outputIm=cv2.seamlessClone(srcIm,dstIm,mask255,offCenter,cv2.MIXED_CLONE)
    else:
        outputIm=dstIm
        # when an object has very few pixels, bdmsk can be all zeros, which would make seamlessClone crash with a segmentation fault.

    return outputIm,new_msk 
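
The dilate-minus-erode band computed above is exactly a morphological gradient, so the same boundary mask can be produced in one call; a sketch reusing the kernel and bimsk from the snippet:

bdmsk = cv2.morphologyEx(bimsk, cv2.MORPH_GRADIENT, kernel)  # dilation minus erosion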
Example #9
Source File: nclt.py    From hierarchical_loc with BSD 3-Clause "New" or "Revised" License
def __init__(self, fin, scale=1.0, fmask=None):
            self.fin = fin
            # read in distort
            with open(fin, 'r') as f:
                header = f.readline().rstrip()
                chunks = re.sub(r'[^0-9,]', '', header).split(',')
                self.mapu = np.zeros((int(chunks[1]), int(chunks[0])),
                                     dtype=np.float32)
                self.mapv = np.zeros((int(chunks[1]), int(chunks[0])),
                                     dtype=np.float32)
                for line in f.readlines():
                    chunks = line.rstrip().split(' ')
                    self.mapu[int(chunks[0]), int(chunks[1])] = float(chunks[3])
                    self.mapv[int(chunks[0]), int(chunks[1])] = float(chunks[2])
            # generate a mask
            self.mask = np.ones(self.mapu.shape, dtype=np.uint8)
            self.mask = cv2.remap(self.mask, self.mapu, self.mapv, cv2.INTER_LINEAR)
            kernel = np.ones((30, 30), np.uint8)
            self.mask = cv2.erode(self.mask, kernel, iterations=1)
            # crop black regions out
            h, w = self.mask.shape
            self.x_lim = [f(np.where(self.mask[int(h/2), :])[0])
                          for f in [np.min, np.max]]
            self.y_lim = [f(np.where(self.mask[:, int(w/2)])[0])
                          for f in [np.min, np.max]] 
Example #10
Source File: DataSet.py    From ext_portrait_segmentation with MIT License
def __getitem__(self, idx):
        '''

        :param idx: Index of the image file
        :return: returns the image and corresponding label file.
        '''
        image_name = self.imList[idx]
        label_name = self.labelList[idx]
        image = cv2.imread(image_name)
        label = cv2.imread(label_name, 0)
        label_bool = 255 * ((label > 200).astype(np.uint8))

        if self.transform:
            [image, label] = self.transform(image, label_bool)
        if self.edge:
            np_label = 255 * label.data.numpy().astype(np.uint8)
            kernel = np.ones((self.kernel_size , self.kernel_size ), np.uint8)
            erosion = cv2.erode(np_label, kernel, iterations=1)
            dilation = cv2.dilate(np_label, kernel, iterations=1)
            boundary = dilation - erosion
            edgemap = 255 * torch.ones_like(label)
            edgemap[torch.from_numpy(boundary) > 0] = label[torch.from_numpy(boundary) > 0]
            return (image, label, edgemap)
        else:
            return (image, label) 
Example #11
Source File: Tshirt.py    From virtual-dressing-room with Apache License 2.0
def detect_shirt(self):
        
        
        #self.dst=cv2.inRange(self.norm_rgb,np.array([self.lb,self.lg,self.lr],np.uint8),np.array([self.b,self.g,self.r],np.uint8))
        self.dst=cv2.inRange(self.norm_rgb,np.array([20,20,20],np.uint8),np.array([255,110,80],np.uint8))
        _,self.dst=cv2.threshold(self.dst,0,255,cv2.THRESH_OTSU+cv2.THRESH_BINARY)  # keep the result; cv2.threshold does not modify its input in place
        fg=cv2.erode(self.dst,None,iterations=2)
        #cv2.imshow("fore",fg)  
        bg=cv2.dilate(self.dst,None,iterations=3)
        _,bg=cv2.threshold(bg, 1,128,1)
        #cv2.imshow("back",bg)
        
        mark=cv2.add(fg,bg)
        mark32=np.int32(mark)
        cv2.watershed(self.norm_rgb,mark32)
        self.m=cv2.convertScaleAbs(mark32)
        _,self.m=cv2.threshold(self.m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        #cv2.imshow("final_tshirt",self.m)
        
        cntr,h=cv2.findContours(self.m,cv2.cv.CV_RETR_EXTERNAL,cv2.cv.CV_CHAIN_APPROX_SIMPLE)
               
        return self.m,cntr 
Example #12
Source File: data_generator.py    From GCA-Matting with MIT License
def __call__(self, sample):
        alpha = sample['alpha']
        # Adobe 1K
        fg_width = np.random.randint(1, 30)
        bg_width = np.random.randint(1, 30)
        fg_mask = (alpha + 1e-5).astype(np.int32).astype(np.uint8)
        bg_mask = (1 - alpha + 1e-5).astype(np.int32).astype(np.uint8)
        fg_mask = cv2.erode(fg_mask, self.erosion_kernels[fg_width])
        bg_mask = cv2.erode(bg_mask, self.erosion_kernels[bg_width])

        trimap = np.ones_like(alpha) * 128
        trimap[fg_mask == 1] = 255
        trimap[bg_mask == 1] = 0

        sample['trimap'] = trimap
        return sample 
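
This transform assumes a precomputed self.erosion_kernels list indexed by the sampled widths; one plausible construction (an assumption for illustration, not necessarily GCA-Matting's exact setup) is a list of elliptical structuring elements of increasing size:

# Hypothetical setup: index 0 is unused, sizes 1..29 cover the randint range above.
erosion_kernels = [None] + [cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size))
                            for size in range(1, 30)]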
Example #13
Source File: helpers.py    From songoku with MIT License
def blend_non_transparent(face_img, overlay_img):
    # Let's find a mask covering all the non-black (foreground) pixels
    # NB: We need to do this on grayscale version of the image
    gray_overlay = cv2.cvtColor(overlay_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    # Let's shrink and blur it a little to make the transitions smoother...
    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    # And the inverse mask, that covers all the black (background) pixels
    background_mask = 255 - overlay_mask

    # Turn the masks into three channel, so we can use them as weights
    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    # Create a masked out face image, and masked out overlay
    # We convert the images to floating point in range 0.0 - 1.0
    face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    # And finally just add them together, and rescale it back to an 8bit integer image
    return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0)) 
Example #14
Source File: image_comp_tool.py    From HalloPy with MIT License
def get_max_area_contour(input_image):
        # Get the contours.
        expected_gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(expected_gray, (41, 41), 0)
        thresh = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=2)
        _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        # Find the biggest area
        try:
            if len(contours) > 0:
                max_area_contour = max(contours, key=cv2.contourArea)
                return max_area_contour
        except ValueError as error:
            print(error) 
Example #15
Source File: foundation.py    From Virtual-Makeup with Apache License 2.0
def checkForSkin(IMG10):
    high,widt=IMG10.shape[:2]

    B1=np.reshape(np.float32(IMG10[:,:,0]),high*widt)#B
    G1=np.reshape(np.float32(IMG10[:,:,1]),high*widt)#G
    R1=np.reshape(np.float32(IMG10[:,:,2]),high*widt)#R

    #print high,widt
    h3=np.zeros((high,widt,3),np.uint8)

    #cv2.imshow("onetime",h)
    

    # classic RGB skin rule: R>95, G>40, B>20, max-min>15, |R-G|>15, R>B, R>G
    tem=np.logical_and(
        np.logical_and(
            np.logical_and(np.logical_and(R1 > 95, G1 > 40),
                           np.logical_and(B1 > 20,
                                          (np.maximum(np.maximum(R1,G1),B1) - np.minimum(np.minimum(R1,G1),B1)) > 15)),
            R1 > B1),
        np.logical_and(np.absolute(R1-G1) > 15, R1 > G1))
    h5=np.array(tem).astype(np.uint8,order='C',casting='unsafe')

    h5=np.reshape(h5,(high,widt))
    h3[:,:,0]=h5
    h3[:,:,1]=h5
    h3[:,:,2]=h5
    #cv2.imshow("thirdtime",h3)
    kernel1 = np.ones((3,3),np.uint8)
    closedH3=np.copy(h3)
    for i in range(5):
        closedH3 = cv2.erode(closedH3,kernel1)
    for i in range(5):
        closedH3 = cv2.dilate(closedH3,kernel1)
    #cv2.imshow("closedH3",closedH3)
    # closedH3 = cv2.cvtColor(closedH3, cv2.COLOR_BGR2RGB)
    return closedH3 
Example #16
Source File: misc.py    From form2fit with MIT License
def process_mask(mask, erode=True, kernel_size=3):
    """Cleans up a binary mask.
    """
    mask = mask.astype("float32")
    kernel = np.ones((kernel_size, kernel_size), "uint8")
    if erode:
        mask = cv2.erode(mask, np.ones((3, 3), "uint8"), iterations=2)
    mask = cv2.dilate(mask, kernel, iterations=2).astype("float32")
    return mask 
Example #17
Source File: gen_pic.py    From uai-sdk with Apache License 2.0
def generate(size,char,char_index,save_path,font_path,times):
    #kernel = np.ones((3,3),np.uint8) #kernel of erode or dilate 
    char=str(char)
    l_char=len(char)
    
    #char=unicode(char,'utf-8')
    #l_char=len(char)
    width=l_char*size
    img = Image.new('RGB',(width,size+20),'white')
    draw = ImageDraw.Draw(img)
    #char=unicode(char,'utf-8')
    #maxsize=min(width/l_char/height,0.8)
    #minsize=maxsize-0.1
    
    #sizeofchar=np.arange(minsize,maxsize+0.05,0.05)
    #print(round(width/l_char/height,2),maxsize,minsize)
    #sizeofchar=[0.4,0.45]
    #size= sizeofchar[random.randint(0,len(sizeofchar)-1)]
    
    font = ImageFont.truetype(font_path,size)
    draw.text((0,0),char,(0,0,0),font=font)   
    IMG_SAVEPATH = os.path.join(save_path,str(char_index))
    if (not os.path.exists(IMG_SAVEPATH)):
        os.makedirs(IMG_SAVEPATH)        
    rotate = random.randint(-2, 2)
    img = img.rotate(rotate)
    img_0 = np.array(img)
    noise_modes=['gaussian','poisson','salt','pepper']
    noise_mode=noise_modes[random.randint(0,len(noise_modes)-1)]
    img_1 = skimage.util.random_noise(img_0, mode=noise_mode,seed=None, clip=True)
    #img_2 = cv2.erode(img_0,kernel,iterations = 1)  
    #img_3 = cv2.dilate(img_0,kernel,iterations = 1) 
    #img_3 = skimage.util.random_noise(img_2, mode=noise_mode,seed=None, clip=True)
    #img_5 = skimage.util.random_noise(img_3, mode=noise_mode,seed=None, clip=True)
    for index in range(2):
        io.imsave(os.path.join(IMG_SAVEPATH,str(times)+'_'+str(index)+'.jpg'),eval('img_'+str(index))) 
Example #18
Source File: image_transformation.py    From Sign-Language-Recognition with MIT License
def make_background_black(frame):
    """
    Makes everything apart from the main object of interest to be
    black in color.
    """
    logger.debug("Making background black...")

    # Convert from RGB to HSV
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Prepare the first mask.
    # Tuned parameters to match the skin color of the input images...
    lower_boundary = np.array([0, 40, 30], dtype="uint8")
    upper_boundary = np.array([43, 255, 254], dtype="uint8")
    skin_mask = cv2.inRange(frame, lower_boundary, upper_boundary)

    # Apply a series of erosions and dilations to the mask using an
    # elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    skin_mask = cv2.erode(skin_mask, kernel, iterations=2)
    skin_mask = cv2.dilate(skin_mask, kernel, iterations=2)

    # Prepare the second mask
    lower_boundary = np.array([170, 80, 30], dtype="uint8")
    upper_boundary = np.array([180, 255, 250], dtype="uint8")
    skin_mask2 = cv2.inRange(frame, lower_boundary, upper_boundary)

    # Combine the effect of both the masks to create the final frame.
    skin_mask = cv2.addWeighted(skin_mask, 0.5, skin_mask2, 0.5, 0.0)
    # Blur the mask to help remove noise.
    # skin_mask = cv2.medianBlur(skin_mask, 5)
    frame_skin = cv2.bitwise_and(frame, frame, mask=skin_mask)
    frame = cv2.addWeighted(frame, 1.5, frame_skin, -0.5, 0)
    frame_skin = cv2.bitwise_and(frame, frame, mask=skin_mask)

    logger.debug("Done!")
    return frame_skin 
Example #19
Source File: data_gen.py    From Mobile-Image-Matting with MIT License
def gen_trimap(alpha):
    k_size = random.choice(range(1, 5))
    iterations = np.random.randint(1, 20)
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (k_size, k_size))
    dilated = cv.dilate(alpha, kernel, iterations=iterations)
    eroded = cv.erode(alpha, kernel, iterations=iterations)
    trimap = np.zeros(alpha.shape)
    trimap.fill(128)
    trimap[eroded >= 255] = 255
    trimap[dilated <= 0] = 0
    return trimap


# Randomly crop (image, trimap) pairs centered on pixels in the unknown regions. 
Example #20
Source File: data_generator.py    From Deep-Image-Matting with MIT License
def generate_trimap(alpha):
    fg = np.array(np.equal(alpha, 255).astype(np.float32))
    # fg = cv.erode(fg, kernel, iterations=np.random.randint(1, 3))
    unknown = np.array(np.not_equal(alpha, 0).astype(np.float32))
    unknown = cv.dilate(unknown, kernel, iterations=np.random.randint(1, 20))
    trimap = fg * 255 + (unknown - fg) * 128
    return trimap.astype(np.uint8)


# Randomly crop (image, trimap) pairs centered on pixels in the unknown regions. 
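
Note that generate_trimap reads a module-level kernel that is not part of this snippet; a plausible stand-in for running it in isolation (an assumption, the project defines its own):

kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))  # hypothetical kernel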
Example #21
Source File: main.py    From sbb_textline_detection with Apache License 2.0
def get_textlines_for_each_textregions(self, textline_mask_tot, boxes):
        ########textline_mask_tot = cv2.erode(textline_mask_tot, self.kernel, iterations=1) ####should be changed
        self.area_of_cropped = []
        self.all_text_region_raw = []
        for jk in range(len(boxes)):
            crop_img, crop_coor = self.crop_image_inside_box(boxes[jk],
                                                             np.repeat(textline_mask_tot[:, :, np.newaxis], 3, axis=2))
            crop_img=crop_img.astype(np.uint8)
            self.all_text_region_raw.append(crop_img[:, :, 0])
            self.area_of_cropped.append(crop_img.shape[0] * crop_img.shape[1]) 
Example #22
Source File: __init__.py    From pylgbst with MIT License
def _find_color(self):
        # from https://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
        # and https://thecodacus.com/opencv-object-tracking-colour-detection-python/#.W2DHFd_IQsM

        blurred = cv2.GaussianBlur(self.cur_img, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

        try:
            # having color values in file allows finding values easier
            with open(os.path.join(os.path.dirname(__file__), "color.json")) as fhd:
                data = json.loads(fhd.read())
                lower = tuple(data[0])
                upper = tuple(data[1])
        except BaseException:
            logging.debug("%s", traceback.format_exc())
            lower = (100, 100, 100,)
            upper = (130, 255, 255,)
        mask = cv2.inRange(hsv, lower, upper)
        mask = cv2.erode(mask, None, iterations=5)
        mask = cv2.dilate(mask, None, iterations=5)

        # if not (int(time.time()) % 2):
        #    self.cur_img = mask

        ret, thresh = cv2.threshold(mask, 20, 255, 0)
        cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        return self._reduce([cv2.boundingRect(c) for c in cnts]) 
Example #23
Source File: test_extractor.py    From HalloPy with MIT License
def test_drawn_correctly(self):
        """Test if zero point is drawn correctly.

        zero point is the point that responsible for forward/backward commands extraction.
        """
        # setup
        # todo: use mockito here to mock preprocessing elements
        flags_handler = FlagsHandler()
        detector = Detector(flags_handler)
        extractor = Extractor(flags_handler)

        # Background model preparations.
        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)

        cap = cv2.VideoCapture(0)
        while flags_handler.quit_flag is False:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)

            # Remove background from input frame.
            fgmask = bg_model.apply(frame, learningRate=0)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(frame, frame, mask=fgmask)

            # Clip frames ROI.
            back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                                 {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})

            if flags_handler.background_capture_required is True:
                bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
                flags_handler.background_capture_required = False

            detector.input_frame_for_feature_extraction = back_ground_removed_clipped
            extractor.extract = detector

            # run
            image = extractor.get_drawn_extreme_contour_points()
            cv2.imshow('test_drawn_correctly', image)
            flags_handler.keyboard_input = cv2.waitKey(1) 
Example #24
Source File: test_extractor.py    From HalloPy with MIT License
def test_contour_extreme_point_tracking(self):
        """Test for tracking extreme_points without optical flow (e.g until calibrated).  """
        # setup
        test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')
        test_image = cv2.imread(test_path)

        # todo: use mockito here to mock preprocessing elements
        flags_handler = FlagsHandler()
        detector = Detector(flags_handler)
        extractor = Extractor(flags_handler)

        # Background model preparations.
        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)

        cap = cv2.VideoCapture(0)
        while flags_handler.quit_flag is False:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)

            # Remove background from input frame.
            fgmask = bg_model.apply(frame, learningRate=0)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(frame, frame, mask=fgmask)

            # Clip frames ROI.
            back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                                 {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})

            if flags_handler.background_capture_required is True:
                bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
                flags_handler.background_capture_required = False

            detector.input_frame_for_feature_extraction = back_ground_removed_clipped
            extractor.extract = detector

            image = extractor.get_drawn_extreme_contour_points()
            cv2.imshow('test_contour_extreme_point_tracking', image)
            flags_handler.keyboard_input = cv2.waitKey(1) 
Example #25
Source File: controller.py    From HalloPy with MIT License
def input_frame_for_feature_extraction(self, input_frame_with_background_removed):
        """Function for finding hand contour. """
        # Preparation
        # Update threshold
        self.raw_input_frame = input_frame_with_background_removed
        if self.flags_handler.make_threshold_thinner is True and self._threshold >= 0:
            self.flags_handler.make_threshold_thinner = False
            self._threshold = self._threshold - 1
        elif self.flags_handler.make_threshold_thicker is True and self._threshold <= 100:
            self.flags_handler.make_threshold_thicker = False
            self._threshold = self._threshold + 1

        temp_detected_gray = cv2.cvtColor(input_frame_with_background_removed, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(temp_detected_gray, (self._blur_Value, self._blur_Value), 0)
        thresh = cv2.threshold(blur, self._threshold, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=2)

        # Get the contours.
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        try:
            # Find the biggest area.
            self.max_area_contour = max(contours, key=cv2.contourArea)
            if self.max_area_contour is None:
                self.max_area_contour = [[(0, 0)]]
            self.detected_out_put_center = self._draw_axes(input_frame_with_background_removed)
        except (AttributeError, ValueError) as error:
            self.logger.debug("something went wrong when Detector object received input_frame!: {}".format(error)) 
Example #26
Source File: controller.py    From HalloPy with MIT License
def detected_frame(self, preprocessed_faced_covered_input_frame):
        """Function for removing background from input frame. """
        if self.flag_handler.background_capture_required is True:
            self._bg_model = cv2.createBackgroundSubtractorMOG2(0, self._bg_Sub_Threshold)
            self.flag_handler.background_capture_required = False
        if self._bg_model is not None:
            fgmask = self._bg_model.apply(preprocessed_faced_covered_input_frame, learningRate=self._learning_Rate)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(preprocessed_faced_covered_input_frame, preprocessed_faced_covered_input_frame,
                                  mask=fgmask)
            self._input_frame_with_hand = res[
                                          0:int(
                                              self._cap_region_y_end * preprocessed_faced_covered_input_frame.shape[0]),
                                          int(self._cap_region_x_begin * preprocessed_faced_covered_input_frame.shape[
                                              1]):
                                          preprocessed_faced_covered_input_frame.shape[
                                              1]]  # clip the ROI 
Example #27
Source File: judge_multi_color.py    From Python-Code with MIT License
def getColor(frame, count_color):

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    color = []

    color_dict = color_list.getColorList()

    # number of colors in the color dictionary
    num_color = len(color_dict)
    sum = [0] * num_color
    search_color_list = []
    # check each color; d is the color name string (e.g. 'red')
    for (d, i) in zip(color_dict, range(num_color)):
        
        search_color_list.append(d)
        # build a mask from the color's threshold range
        mask = cv2.inRange(hsv, color_dict[d][0], color_dict[d][1])
        # erosion
        mask = cv2.erode(mask, None, iterations=2)
        # dilation; erosion followed by dilation is an opening, which removes noise
        mask = cv2.dilate(mask, None, iterations=2)
        img, cnts, hiera = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_SIMPLE)
        # only run the later checks if contours were found
        
        if len(cnts) > 0:
            for c in cnts:
                sum[i] += cv2.contourArea(c)

    find_color_list = heapq.nlargest(count_color, sum)
    for j in range(count_color):     
        color.append(search_color_list[sum.index(find_color_list[j])])

    return color 
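
As the comments note, erosion followed by dilation is a morphological opening, so the two denoising calls can be collapsed into one; a sketch with an explicit 3x3 kernel (cv2.erode and cv2.dilate default to a 3x3 rectangle when the kernel is None):

kernel = np.ones((3, 3), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)  # erode twice, then dilate twice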
Example #28
Source File: judge_single_color.py    From Python-Code with MIT License
def getColor(frame):
	hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
	maxsum = 0
	color = None
	color_dict = color_list.getColorList()

	# check each color
	for d in color_dict:
		# build a mask from the color's threshold range
		mask = cv2.inRange(hsv, color_dict[d][0], color_dict[d][1])
		# erosion
		mask = cv2.erode(mask, None, iterations=2)
		# dilation; erosion followed by dilation is an opening, which removes noise
		mask = cv2.dilate(mask, None, iterations=2)	
		img, cnts, hiera = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
		
		# only run the later checks if contours were found
		if len(cnts) > 0:	
			# compute the total area of the detected regions
			sum = 0
			for c in cnts:
				sum += cv2.contourArea(c)
			
			# track the color with the largest total area
			if sum > maxsum :
				maxsum = sum	
				if maxsum != 0:
					color = d
				else:
					color = None
 
	return color 
Example #29
Source File: judge_color_center.py    From Python-Code with MIT License
def getColor(frame):
	hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
	maxsum = 0
	color = None
	color_dict = colorList.getColorList()

	# check each color
	for d in color_dict:
		# build a mask from the color's threshold range
		mask = cv2.inRange(hsv, color_dict[d][0], color_dict[d][1])
		# erosion
		mask = cv2.erode(mask, None, iterations=2)
		# dilation; erosion followed by dilation is an opening, which removes noise
		mask = cv2.dilate(mask, None, iterations=2)	
		img, cnts, hiera = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
		
		# only run the later checks if contours were found
		if len(cnts) > 0:	
			# compute the total area of the detected regions
			sum = 0
			for c in cnts:
				sum += cv2.contourArea(c)
			
			# track the largest area and find its centroid
			if sum > maxsum :
				maxsum = sum	
				if maxsum != 0:
					color = d
				else:
					color = None
				# find the contour with the largest area
				c = max(cnts, key = cv2.contourArea)
				# fit the minimum enclosing circle around that contour
				((x, y), radius) = cv2.minEnclosingCircle(c)
				# compute the contour's moments
				M = cv2.moments(c)
				# compute the centroid
				center = (int(M["m10"]/M["m00"]), int(M["m01"]/M["m00"]))
 
	return color, center 
Example #30
Source File: motion_detection.py    From pynvr with BSD 3-Clause "New" or "Revised" License
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (11, 11), 0)

        if (self.multiFrameDetection) and (self.prevPrevFrame is None):
            self.prevPrevFrame = gray
            return False

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        cv.normalize(gray, gray, 0, 255, cv.NORM_MINMAX)

        frameDiff = self.diffImg(self.prevPrevFrame, self.prevFrame, gray)
        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        th1 = cv.dilate(th1, None, iterations=8)
        th1 = cv.erode(th1, None, iterations=4)

        delta_count = cv.countNonZero(th1)

        if self.multiFrameDetection:
            self.prevPrevFrame = self.prevFrame

        self.prevFrame = gray
        if delta_count < self.threshold:
            return False

        self.updateMotionDetectionDts()
        return True
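
self.diffImg is not shown on this page; a common three-frame difference that fits its call signature (an assumed reconstruction, not pynvr's verified code):

def diffImg(self, t0, t1, t2):
    # With no second-previous frame, fall back to a single absolute difference;
    # otherwise keep only motion that both consecutive differences agree on.
    if t0 is None:
        return cv.absdiff(t2, t1)
    return cv.bitwise_and(cv.absdiff(t2, t1), cv.absdiff(t1, t0))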