Python cv2.CHAIN_APPROX_TC89_KCOS Examples

The following are 11 code examples of cv2.CHAIN_APPROX_TC89_KCOS(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
Example #1
Source File: ChickenVision.py    From ChickenVision with MIT License 6 votes vote down vote up
def findTargets(frame, mask):
    """Find vision-tape targets in ``mask`` and draw them onto a copy of ``frame``.

    :param frame: BGR source image (used for its size and as the drawing canvas)
    :param mask: binary mask searched for contours
    :return: copy of ``frame`` with any detected tape drawn on it
    """
    # findContours returns (image, contours, hierarchy) in OpenCV 3 but only
    # (contours, hierarchy) in OpenCV 2/4 — taking [-2:] works on every version.
    contours, _ = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)[-2:]
    # Gets the shape of the video frame
    screenHeight, screenWidth, _ = frame.shape
    # Pixel-grid center of the image (-.5 lands on the center of a pixel)
    centerX = (screenWidth / 2) - .5
    centerY = (screenHeight / 2) - .5
    # Draw on a copy so the original frame stays untouched
    image = frame.copy()
    # Processes the contours, takes in (contours, output_image, centerOfImage)
    if len(contours) != 0:
        image = findTape(contours, image, centerX, centerY)
    else:
        # pushes that it doesn't see a vision target to network tables
        networkTable.putBoolean("tapeDetected", False)

    # Shows the contours overlayed on the original video
    return image

# Finds the balls from the masked image and displays them on original stream + network tables 
Example #2
Source File: ChickenVision.py    From ChickenVision with MIT License 6 votes vote down vote up
def findCargo(frame, mask):
    """Find cargo (orange balls) in ``mask`` and draw them onto a copy of ``frame``.

    :param frame: BGR source image (used for its size and as the drawing canvas)
    :param mask: binary mask searched for contours
    :return: copy of ``frame`` with any detected cargo drawn on it
    """
    # findContours returns (image, contours, hierarchy) in OpenCV 3 but only
    # (contours, hierarchy) in OpenCV 2/4 — taking [-2:] works on every version.
    contours, _ = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)[-2:]
    # Gets the shape of the video frame
    screenHeight, screenWidth, _ = frame.shape
    # Pixel-grid center of the image (-.5 lands on the center of a pixel)
    centerX = (screenWidth / 2) - .5
    centerY = (screenHeight / 2) - .5
    # Draw on a copy so the original frame stays untouched
    image = frame.copy()
    # Processes the contours, takes in (contours, output_image, centerOfImage)
    if len(contours) != 0:
        image = findBall(contours, image, centerX, centerY)
    else:
        # pushes that it doesn't see cargo to network tables
        networkTable.putBoolean("cargoDetected", False)
    # Shows the contours overlayed on the original video
    return image


# Draws Contours and finds center and yaw of orange ball
# centerX is center x coordinate of image
# centerY is center y coordinate of image 
Example #3
Source File: util.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0 5 votes vote down vote up
def contours_hierarchy(mask):
    """Extract contours and their hierarchy from a class-1 label mask.

    cv2 is used first because it is much faster than shapely.

    :param mask: integer label mask; pixels equal to 1 are treated as foreground
    :return: ``(contours, hierarchy)`` as produced by ``cv2.findContours``
    """
    binary = ((mask == 1) * 255).astype(np.uint8)
    # findContours returns (image, contours, hierarchy) in OpenCV 3 but only
    # (contours, hierarchy) in OpenCV 2/4 — taking [-2:] works on every version.
    contours, hierarchy = cv2.findContours(
        binary,
        cv2.RETR_CCOMP,
        cv2.CHAIN_APPROX_TC89_KCOS)[-2:]
    return contours, hierarchy
Example #4
Source File: binarized_filter_result.py    From DVCNN_Lane_Detection with Apache License 2.0 5 votes vote down vote up
def binarized_whatlike_filtered_image(self, img):
        """
        Do normalization and thresholding on the result of the weighted hat-like
        filter image to extract line candidates
        :param img: input image (first channel holds the filter response)
        :return: tuple (result, norm_thresh_img) where result is a list of roi
                 pairs (top_roi, fv_roi) — classes defined in imdb.py — and
                 norm_thresh_img is the thresholded binary image
        :raises ValueError: if img is None
        """
        if img is None:
            raise ValueError('Image data is invalid')
        # intensity-normalize the image and threshold it: every first-channel
        # pixel above 650 becomes foreground (255)
        image = img[:, :, 0]
        inds = np.where(image[:, :] > 650)
        norm_thresh_img = np.zeros(image.shape).astype(np.uint8)
        norm_thresh_img[inds] = 255

        # find connected components. findContours returns (image, contours,
        # hierarchy) in OpenCV 3 but (contours, hierarchy) in OpenCV 2/4 —
        # taking [-2:] is version independent
        contours, hierarchy = cv2.findContours(image=norm_thresh_img, mode=cv2.RETR_CCOMP,
                                               method=cv2.CHAIN_APPROX_TC89_KCOS)[-2:]
        response_points = self.__find_response_points_in_contours(contours=contours, image=norm_thresh_img)

        # find the rotated rect of each contour and check whether it fits the
        # condition; if so save the bounding rectangle of the contour
        result = []
        valid_contours = 0
        for index, contour in enumerate(contours):
            rotrect = cv2.minAreaRect(contour)
            if self.__is_rrect_valid(rotrect):
                # the contour is valid and can be saved; collapse the redundant
                # middle axis: (N, 1, 2) -> (N, 2)
                roi_contours = np.reshape(contour, newshape=(contour.shape[0], contour.shape[2]))
                top_roi_db = imdb.Roidb(roi_index=valid_contours, roi_contours=roi_contours,
                                        roi_response_points=response_points[index])
                valid_contours += 1
                fv_roi_db, roi_is_valid = self.__map_roi_to_front_view(roidb=top_roi_db)
                if roi_is_valid:
                    result.append((top_roi_db, fv_roi_db))
        return result, norm_thresh_img
Example #5
Source File: extract_candidate.py    From DVCNN_Lane_Detection with Apache License 2.0 5 votes vote down vote up
def __extract_line_from_filtered_image(img):
        """
        Do normalization and thresholding on the result of the weighted hat-like
        filter image to extract line candidates
        :param img: input image (first channel holds the filter response)
        :return: dict with keys 'rotate_rect_list' and 'bounding_rect_list'
        """
        # threshold the first channel: pixels above 300 become foreground (255)
        image = img[:, :, 0]
        inds = np.where(image[:, :] > 300)
        norm_thresh_image = np.zeros(image.shape).astype(np.uint8)
        norm_thresh_image[inds] = 255

        # find connected components. findContours returns (image, contours,
        # hierarchy) in OpenCV 3 but (contours, hierarchy) in OpenCV 2/4 —
        # taking [-2:] is version independent
        contours, hierarchy = cv2.findContours(image=norm_thresh_image, mode=cv2.RETR_CCOMP,
                                               method=cv2.CHAIN_APPROX_TC89_KCOS)[-2:]

        # find the rotated rect of each contour and check whether it fits the
        # condition; if so also save the bounding rectangle of the contour
        rotate_rect_list = []
        bounding_rect_list = []
        for contour in contours:
            rotrect = cv2.minAreaRect(contour)
            if RoiExtractor.__is_rrect_valid(rotrect):
                rotate_rect_list.append(rotrect)
                bounding_rect_list.append(cv2.boundingRect(contour))
        return {
            'rotate_rect_list': rotate_rect_list,
            'bounding_rect_list': bounding_rect_list
        }
Example #6
Source File: TableRecognition.py    From OTR with GNU General Public License v3.0 5 votes vote down vote up
def __init__(self, img, **kwargs):
        """Find every contour in ``img`` and keep it, its hierarchy, and the image shape."""
        found, tree = _find_contours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS, **kwargs)
        self.hierarchy = tree
        self.contours = found
        self.imgshape = img.shape
Example #7
Source File: TableRecognition.py    From OTR with GNU General Public License v3.0 5 votes vote down vote up
def compute_missing_cell_contours(self, missing_cells_mask):
        """Return the contours found in ``missing_cells_mask`` (hierarchy is discarded)."""
        found_contours, _unused_hierarchy = _find_contours(
            missing_cells_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
        return found_contours
Example #8
Source File: main.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International 4 votes vote down vote up
def extractPiece(tile, margin=0.05):
   """Segment a candidate chess piece from a single board tile.

   :param tile: BGR image of one board square
   :param margin: border fraction to ignore (accepted for compatibility;
                  the cropping that used it is currently disabled)
   :returns: list of intermediate debug images; the last one shows the
             detected convex hulls drawn over the binarized tile
   """
   imgs = [tile]
   # NOTE(review): tile.shape is (rows, cols, channels), so these are really
   # (h, w) — harmless while the margin crop stays disabled, but worth fixing
   # if it is ever re-enabled.
   w, h, _ = tile.shape

   im_gray = cv2.cvtColor(tile, cv2.COLOR_BGR2GRAY)
   imgs.append(cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR))

   # median blur suppresses salt-and-pepper noise before thresholding
   im_gray = cv2.medianBlur(im_gray, 3)
   imgs.append(cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR))

   # binarize around the mean brightness (im_bw intentionally aliases
   # im_gray; im_gray is not used again afterwards)
   bright = np.mean(im_gray)
   im_bw = im_gray
   im_bw[np.where(im_gray < bright)] = 0
   im_bw[np.where(im_gray >= bright)] = 255
   imgs.append(cv2.cvtColor(im_bw, cv2.COLOR_GRAY2BGR))

   # make the piece (the minority colour) white
   if np.mean(im_bw) < 128:
      im_bw = 255 - im_bw

   imgs.append(cv2.cvtColor(im_bw, cv2.COLOR_GRAY2BGR))

   # findContours returns (image, contours, hierarchy) in OpenCV 3 but only
   # (contours, hierarchy) in OpenCV 2/4 — taking [-2:] works on every
   # version; the plain 2-tuple unpack crashed on OpenCV 3.
   contours, hierarchy = cv2.findContours(
      im_bw.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)[-2:]

   hulls = [cv2.convexHull(c) for c in contours]
   ids = ignoreContours(im_bw, hulls, max_area_percentage=0.75, min_area_percentage=0.2)

   im_bw = cv2.cvtColor(im_bw, cv2.COLOR_GRAY2BGR)
   tmp = im_bw.copy()
   for i in ids:
      # collapse the redundant middle axis: (N, 1, 2) -> (N, 2)
      c = np.squeeze(hulls[i], 1)
      drawContour(tmp, c, randomColor(), thickness=1)

   imgs.append(tmp)

   return imgs
Example #9
Source File: extractBoard.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International 4 votes vote down vote up
def extractBoards(img, w, h):
    """Extracts all boards from an image. This function applies perspective correction.
    :param img: source image
    :param w: output width
    :param h: output height
    :returns: a list of the extracted board images
    """
    im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Otsu picks the threshold automatically; 128 is only the fallback value.
    (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # findContours returns (image, contours, hierarchy) in OpenCV 3 but only
    # (contours, hierarchy) in OpenCV 2/4 — taking [-2:] works on every
    # version; the plain 2-tuple unpack crashed on OpenCV 3.
    contours, hierarchy = cv2.findContours(
        im_bw, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)[-2:]

    contour_ids = ignoreContours(im_bw, contours, hierarchy)
    boards = []
    for i in contour_ids:
        # collapse the redundant middle axis: (N, 1, 2) -> (N, 2)
        c = np.squeeze(contours[i], 1)

        perspective = getPerspective(img, c)
        if perspective is not None:
            boards.append(extractPerspective(img, perspective, w, h))

    return boards
Example #10
Source File: main.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International 4 votes vote down vote up
def extractPiece(tile, margin=0.05):
   """Segment a candidate chess piece from a single board tile.

   :param tile: BGR image of one board square
   :param margin: border fraction to ignore (accepted for compatibility;
                  the cropping that used it is currently disabled)
   :returns: list of intermediate debug images; the last one shows the
             detected convex hulls drawn over the binarized tile
   """
   imgs = [tile]
   # NOTE(review): tile.shape is (rows, cols, channels), so these are really
   # (h, w) — harmless while the margin crop stays disabled, but worth fixing
   # if it is ever re-enabled.
   w, h, _ = tile.shape

   im_gray = cv2.cvtColor(tile, cv2.COLOR_BGR2GRAY)
   imgs.append(cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR))

   # median blur suppresses salt-and-pepper noise before thresholding
   im_gray = cv2.medianBlur(im_gray, 3)
   imgs.append(cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR))

   # binarize around the mean brightness (im_bw intentionally aliases
   # im_gray; im_gray is not used again afterwards)
   bright = np.mean(im_gray)
   im_bw = im_gray
   im_bw[np.where(im_gray < bright)] = 0
   im_bw[np.where(im_gray >= bright)] = 255
   imgs.append(cv2.cvtColor(im_bw, cv2.COLOR_GRAY2BGR))

   # make the piece (the minority colour) white
   if np.mean(im_bw) < 128:
      im_bw = 255 - im_bw

   imgs.append(cv2.cvtColor(im_bw, cv2.COLOR_GRAY2BGR))

   # findContours returns (image, contours, hierarchy) in OpenCV 3 but only
   # (contours, hierarchy) in OpenCV 2/4 — taking [-2:] works on every
   # version; the plain 2-tuple unpack crashed on OpenCV 3.
   contours, hierarchy = cv2.findContours(
      im_bw.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)[-2:]

   hulls = [cv2.convexHull(c) for c in contours]
   ids = ignoreContours(im_bw, hulls, max_area_percentage=0.75, min_area_percentage=0.2)

   im_bw = cv2.cvtColor(im_bw, cv2.COLOR_GRAY2BGR)
   tmp = im_bw.copy()
   for i in ids:
      # collapse the redundant middle axis: (N, 1, 2) -> (N, 2)
      c = np.squeeze(hulls[i], 1)
      drawContour(tmp, c, randomColor(), thickness=1)

   imgs.append(tmp)

   return imgs
Example #11
Source File: extract.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International 4 votes vote down vote up
def extractBoards(img, w, h):
    """Extracts all boards from an image. This function applies perspective correction.
    :param img: source image
    :param w: output width
    :param h: output height
    :returns: a list of the extracted board images
    """
    im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Otsu picks the threshold automatically; 128 is only the fallback value.
    (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # findContours returns (image, contours, hierarchy) in OpenCV 3 but only
    # (contours, hierarchy) in OpenCV 2/4 — taking [-2:] works on every
    # version; the plain 2-tuple unpack crashed on OpenCV 3.
    contours, hierarchy = cv2.findContours(
        im_bw, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)[-2:]

    contour_ids = ignoreContours(im_bw, contours, hierarchy)
    boards = []
    for i in contour_ids:
        # collapse the redundant middle axis: (N, 1, 2) -> (N, 2)
        c = np.squeeze(contours[i], 1)

        perspective = getPerspective(img, c)
        if perspective is not None:
            boards.append(extractPerspective(img, perspective, w, h))

    return boards