Python cv2.TM_CCORR_NORMED Examples

The following are 7 code examples of cv2.TM_CCORR_NORMED(), drawn from the open-source projects credited above each example. Each snippet passes the constant as the method argument of cv2.matchTemplate().
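cv2.TM_CCORR_NORMED is a normalized cross-correlation method: scores lie in [0, 1] and higher means a better match. As a quick orientation before the examples, a minimal sketch (the file names are placeholders):

import cv2

# Load a search image and a template in grayscale (paths are placeholders).
img = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
templ = cv2.imread('patch.png', cv2.IMREAD_GRAYSCALE)

# The result matrix holds one score per candidate top-left position.
res = cv2.matchTemplate(img, templ, cv2.TM_CCORR_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(res)
print(max_val, max_loc)  # best score and the top-left corner of the match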
Example #1
Source File: thresholding.py    From smashscan with MIT License
def match_dmg_templates(self, frame):
        match_mat, max_val, tl = [None]*10, [0]*10, [(0, 0)]*10
        # Match each of the ten digit templates (index i) against the frame.
        for i in range(0, 10):
            match_mat[i] = cv2.matchTemplate(frame, self.num_img[i],
                cv2.TM_CCORR_NORMED, mask=self.num_mask[i])
            _, max_val[i], _, tl[i] = cv2.minMaxLoc(match_mat[i])
        # print(max_val[0])
        br = (tl[0][0] + self.num_w, tl[0][1] + self.num_h)
        frame = cv2.rectangle(frame, tl[0], br, (255, 255, 255), 2)

        # Multi-template result searching
        # _, max_val_1, _, tl_1 = cv2.minMaxLoc(np.array(match_mat))
        # print(tl_1)


    # A number of methods corresponding to the various trackbars available. 
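Example #1 relies on the mask argument so that only the digit pixels contribute to the score. A common way to build such a mask is the template's alpha channel; a hedged sketch, assuming digit.png carries one (all file names are placeholders):

import cv2

frame = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)
templ_rgba = cv2.imread('digit.png', cv2.IMREAD_UNCHANGED)  # keeps the alpha plane
templ = cv2.cvtColor(templ_rgba, cv2.COLOR_BGRA2GRAY)
mask = templ_rgba[:, :, 3]  # non-zero where the template pixel should count

# Note: historically only TM_SQDIFF and TM_CCORR_NORMED honor the mask.
res = cv2.matchTemplate(frame, templ, cv2.TM_CCORR_NORMED, mask=mask)
_, max_val, _, top_left = cv2.minMaxLoc(res)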
Example #2
Source File: match_template.py    From OpenCV-Python-Tutorial with MIT License
def MatchingMethod(param):

   global match_method
   match_method = param

   ## [copy_source]
   img_display = img.copy()
   ## [copy_source]
   ## [match_template]
   method_accepts_mask = (cv2.TM_SQDIFF == match_method or match_method == cv2.TM_CCORR_NORMED)
   if (use_mask and method_accepts_mask):
       result = cv2.matchTemplate(img, templ, match_method, None, mask)
   else:
       result = cv2.matchTemplate(img, templ, match_method)
   ## [match_template]

   ## [normalize]
   cv2.normalize( result, result, 0, 1, cv2.NORM_MINMAX, -1 )
   ## [normalize]
   ## [best_match]
   minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result, None)
   ## [best_match]

   ## [match_loc]
   if (match_method == cv2.TM_SQDIFF or match_method == cv2.TM_SQDIFF_NORMED):
       matchLoc = minLoc
   else:
       matchLoc = maxLoc
   ## [match_loc]

   ## [imshow]
   # templ.shape is (rows, cols), so shape[1] is the x (width) offset.
   cv2.rectangle(img_display, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0,0,0), 2, 8, 0 )
   cv2.rectangle(result, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0,0,0), 2, 8, 0 )
   cv2.imshow(image_window, img_display)
   cv2.imshow(result_window, result)
   ## [imshow]
   pass 
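MatchingMethod() is written as a trackbar callback: the trackbar position (0-5) happens to equal the value of the corresponding TM_* constant. A sketch of how it is typically wired up, reusing the function above (window names and file paths are placeholders):

import cv2

img = cv2.imread('scene.png', cv2.IMREAD_COLOR)
templ = cv2.imread('patch.png', cv2.IMREAD_COLOR)
use_mask = False
mask = None
image_window = 'Source Image'
result_window = 'Result window'

cv2.namedWindow(image_window, cv2.WINDOW_AUTOSIZE)
cv2.namedWindow(result_window, cv2.WINDOW_AUTOSIZE)
# Positions 0..5 map to TM_SQDIFF .. TM_CCOEFF_NORMED.
cv2.createTrackbar('Method (0-5)', image_window, 0, 5, MatchingMethod)
MatchingMethod(0)
cv2.waitKey(0)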
Example #3
Source File: CutImageClass.py    From water-meter-system-complete with MIT License
def getRefCoordinate(self, image, template):
        # Only the last uncommented assignment takes effect; the alternatives
        # are kept for reference.
#        method = cv2.TM_SQDIFF                     #2
#        method = cv2.TM_SQDIFF_NORMED              #1
#        method = cv2.TM_CCORR_NORMED               #3
        method = cv2.TM_CCOEFF_NORMED               #4
        res = cv2.matchTemplate(image, template, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
#        bottom_right = (top_left[0] + w, top_left[1] + h)
        return top_left 
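The min/max choice above generalizes to all six methods; a small sketch comparing the location each one reports (paths are placeholders):

import cv2

image = cv2.imread('meter.png', 0)
template = cv2.imread('ref.png', 0)

# Square-difference methods report the best match as the minimum;
# correlation and coefficient methods report it as the maximum.
for method in (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED,
               cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF_NORMED):
    res = cv2.matchTemplate(image, template, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = min_loc if method in (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED) else max_loc
    print(method, top_left)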
Example #4
Source File: percent_matching.py    From smashscan with MIT License
def get_calibrate_results(self, frame):
        h, w = self.orig_pct_img.shape[:2]
        opt_max_val, opt_top_left, opt_w, opt_h = 0, 0, 0, 0

        # Assuming W-360p (640×360), only search the bottom of the frame.
        frame = frame[270:, :]

        # Iterate over a num. of widths, and rescale the img/mask accordingly.
        for new_w in range(self.calib_w_range[0], self.calib_w_range[1]):
            new_h = int(new_w * h / w)
            pct_img = cv2.resize(self.orig_pct_img, (new_w, new_h))
            pct_mask = cv2.resize(self.orig_pct_mask, (new_w, new_h))

            # Calculate the confidence and location of the current rescale.
            match_mat = cv2.matchTemplate(frame, pct_img,
                cv2.TM_CCORR_NORMED, mask=pct_mask)
            _, max_val, _, top_left = cv2.minMaxLoc(match_mat)

            # Store the results if the confidence is larger than the previous.
            if max_val > opt_max_val:
                opt_max_val, opt_top_left = max_val, top_left
                opt_w, opt_h = new_w, new_h

        # Compensate the point location for the ROI offset (the search began
        # at row 270 of the full frame).
        opt_top_left = (opt_top_left[0], opt_top_left[1] + 270)

        # Format the bounding box and return.
        bbox = (opt_top_left, (opt_top_left[0]+opt_w, opt_top_left[1]+opt_h))
        return bbox, opt_max_val, opt_w, opt_h


    # Given a list of expected widths, return the optimal dimensions of the
    # template bounding box by calculating the median of the list. 
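Example #4 handles scale by resizing the template rather than the frame, keeping the best score across widths. A condensed sketch of the same idea, with an assumed width range and placeholder inputs:

import cv2

frame = cv2.imread('frame.png', 0)
templ = cv2.imread('pct.png', 0)
h, w = templ.shape[:2]

best = (0.0, None, None)              # (confidence, top-left, width)
for new_w in range(24, 48):           # assumed calibration range
    new_h = int(new_w * h / w)        # preserve the aspect ratio
    scaled = cv2.resize(templ, (new_w, new_h))  # cv2.resize takes (width, height)
    res = cv2.matchTemplate(frame, scaled, cv2.TM_CCORR_NORMED)
    _, max_val, _, top_left = cv2.minMaxLoc(res)
    if max_val > best[0]:
        best = (max_val, top_left, new_w)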
Example #5
Source File: bot.py    From bot with MIT License
def find_from_targeted(self, left, right):

        # @TODO: ignore red targets - they are already being attacked or dead
        template = cv2.imread('img/template_target.png', 0)

        # print template.shape
        roi = get_screen(
            self.window_info["x"],
            self.window_info["y"],
            self.window_info["x"] + self.window_info["width"],
            self.window_info["y"] + self.window_info["height"] - 300
        )

        roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        ret, th1 = cv2.threshold(roi, 224, 255, cv2.THRESH_TOZERO_INV)
        ret, th2 = cv2.threshold(th1, 135, 255, cv2.THRESH_BINARY)
        ret, tp1 = cv2.threshold(template, 224, 255, cv2.THRESH_TOZERO_INV)
        ret, tp2 = cv2.threshold(tp1, 135, 255, cv2.THRESH_BINARY)
        if not hasattr(th2, 'shape'):
            return False
        hth, wth = th2.shape  # numpy shape is (rows, cols) = (height, width)
        htp, wtp = tp2.shape
        if hth > htp and wth > wtp:
            res = cv2.matchTemplate(th2, tp2, cv2.TM_CCORR_NORMED)
            if res.any():
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
                if max_val > 0.7:
                    return True
                else:
                    return False
        return False 
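The two-stage thresholding above makes the correlation respond to shape rather than raw brightness. A stripped-down sketch of the same pipeline (the paths are placeholders; the threshold values and the 0.7 confidence cutoff are taken from the example):

import cv2

roi = cv2.imread('screen.png', 0)
template = cv2.imread('target.png', 0)

# Clamp bright pixels to zero, then binarize what remains.
_, roi_bin = cv2.threshold(roi, 224, 255, cv2.THRESH_TOZERO_INV)
_, roi_bin = cv2.threshold(roi_bin, 135, 255, cv2.THRESH_BINARY)
_, tpl_bin = cv2.threshold(template, 224, 255, cv2.THRESH_TOZERO_INV)
_, tpl_bin = cv2.threshold(tpl_bin, 135, 255, cv2.THRESH_BINARY)

res = cv2.matchTemplate(roi_bin, tpl_bin, cv2.TM_CCORR_NORMED)
_, max_val, _, _ = cv2.minMaxLoc(res)
found = max_val > 0.7  # same confidence cutoff as the example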
Example #6
Source File: newProcessCorrelations.py    From Python_DIC with Apache License 2.0
def shiftDetection(filePath, imageList, activeImages, area, filterList, thread):

    largeDisp = np.zeros((len(imageList),2))

    initImage = cv2.imread(filePath+'/'+imageList[0].rstrip(), 0) #read the full image
    initImage = filterFunctions.applyFilterListToImage(filterList, initImage)
    nbImages = len(imageList)
    currentPercent = 1

    activeFileList = []
    for image in range(1, nbImages):
        if activeImages[image] == 1:
            activeFileList.append(image)

    template = initImage[area[1]:area[3],area[0]:area[2]] #select the template data
    width = area[2]-area[0]
    height = area[3]-area[1]

    origin = (area[0], area[1])
    startTime = time.time()
    for i in activeFileList:

        newImage = cv2.imread(filePath+'/'+imageList[i].rstrip(),0)
        newImage = filterFunctions.applyFilterListToImage(filterList, newImage)

        matchArea = cv2.matchTemplate(newImage, template, cv2.TM_CCORR_NORMED)
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(matchArea)
        # The template for the next image is updated with the region found in
        # the current picture.
        template = newImage[maxLoc[1]:maxLoc[1]+height, maxLoc[0]:maxLoc[0]+width]

        largeDisp[i][0] = maxLoc[0]-origin[0] #save the displacement
        largeDisp[i][1] = maxLoc[1]-origin[1]

        percent = i*100/nbImages
        if percent > currentPercent:
            thread.signal.threadSignal.emit([percent, i, largeDisp[i][0], largeDisp[i][1]])
            currentPercent = percent

    totalTime = time.time() - startTime
    thread.signal.threadSignal.emit([100, nbImages, largeDisp, totalTime])
    #print totalTime 
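The key trick in Example #6 is re-cropping the template from each matched frame so the reference follows gradual appearance changes. A minimal sketch of that core loop (the file list and initial area are placeholders); note that refreshing the template this way can drift if a match lands slightly off:

import cv2

frames = ['f0.png', 'f1.png', 'f2.png']     # placeholder image list
x0, y0, x1, y1 = 10, 10, 60, 60             # assumed initial template area
first = cv2.imread(frames[0], 0)
template = first[y0:y1, x0:x1]
h, w = template.shape[:2]

for path in frames[1:]:
    img = cv2.imread(path, 0)
    res = cv2.matchTemplate(img, template, cv2.TM_CCORR_NORMED)
    _, _, _, max_loc = cv2.minMaxLoc(res)
    dx, dy = max_loc[0] - x0, max_loc[1] - y0   # displacement from the origin
    # Refresh the template from the newly matched region.
    template = img[max_loc[1]:max_loc[1] + h, max_loc[0]:max_loc[0] + w]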
Example #7
Source File: percent_matching.py    From smashscan with MIT License
def get_tm_results(self, frame, num_results, conf_thresh=None):

        # Only a specific subregion of the frame is analyzed. If the template
        # ROI has been initialized, take that frame subregion. Otherwise, take
        # the bottom quarter of the frame assuming a W-360p (640x360) format.
        if self.template_roi:
            frame = frame[self.template_roi[0][1]:self.template_roi[1][1], :]
        else:
            frame = frame[270:, :]

        # Set the confidence threshold to the default, if none was input.
        if conf_thresh is None:
            conf_thresh = self.conf_thresh

        # Match the template using a normalized cross-correlation method and
        # retrieve the confidence and top-left points from the result.
        match_mat = cv2.matchTemplate(frame, self.pct_img,
            cv2.TM_CCORR_NORMED, mask=self.pct_mask)
        conf_list, tl_list = self.get_match_results(
            match_mat, num_results, conf_thresh)

        # Compensate the point locations for the ROI offset that was used.
        if self.template_roi:
            for i, _ in enumerate(tl_list):
                tl_list[i] = (tl_list[i][0],
                    tl_list[i][1] + self.template_roi[0][1])
        else:
            for i, _ in enumerate(tl_list):
                tl_list[i] = (tl_list[i][0], tl_list[i][1] + 270)

        # Create a list of bounding boxes (top-left & bottom-right points),
        # using the input template_shape given as (width, height).
        bbox_list = list()
        h, w = self.pct_img.shape[:2]
        for tl in tl_list:
            br = (tl[0] + w, tl[1] + h)
            bbox_list.append((tl, br))

        return conf_list, bbox_list


    # Take the result of cv2.matchTemplate, and find the most likely locations
    # of a template match. To find multiple locations, the region around a
    # successful match is zeroed. Return a list of confidences and locations.
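get_match_results() itself is not shown, but the closing comment describes it. A hedged reconstruction of that zeroing strategy (the function name, signature, and neighborhood size are assumptions):

import cv2

def get_match_results_sketch(match_mat, num_results, conf_thresh, tw, th):
    # Repeatedly take the best peak, then zero a template-sized
    # neighborhood around it so overlapping hits are not re-reported.
    match_mat = match_mat.copy()
    conf_list, tl_list = [], []
    for _ in range(num_results):
        _, max_val, _, top_left = cv2.minMaxLoc(match_mat)
        if max_val < conf_thresh:
            break
        conf_list.append(max_val)
        tl_list.append(top_left)
        x, y = top_left
        match_mat[max(0, y - th // 2):y + th // 2 + 1,
                  max(0, x - tw // 2):x + tw // 2 + 1] = 0
    return conf_list, tl_list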