Python cv2.calcBackProject() Examples
The following are 17 code examples of cv2.calcBackProject().
You can go to the original project or source file by following the links above each example.
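Before the examples, here is a minimal, self-contained sketch of the back-projection workflow most of these snippets share (the file name sample.jpg and the ROI coordinates are illustrative placeholders, not taken from any of the projects below): compute an HSV histogram of a region of interest with cv2.calcHist, normalize it, then call cv2.calcBackProject to turn the full frame into a per-pixel similarity map.

import cv2

# Minimal back-projection sketch. 'sample.jpg' and the ROI box are
# placeholder values for illustration only.
image = cv2.imread('sample.jpg')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

# Histogram of a region of interest over the hue and saturation channels.
roi = hsv[100:200, 100:200]
roi_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Back-project the ROI histogram onto the whole frame: bright pixels
# mark colors similar to those in the ROI.
back_proj = cv2.calcBackProject([hsv], [0, 1], roi_hist, [0, 180, 0, 256], 1)
cv2.imshow('Back projection', back_proj)
cv2.waitKey(0)

The examples below follow this pattern and then feed the back projection into cv2.meanShift, cv2.CamShift, or a threshold-and-contour step.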
Example #1
Source File: camshift_object_tracker.py From automl-video-ondevice with Apache License 2.0
def run(self, frame):
    """Processes a single frame.

    Args:
      frame: The np.array image frame.
    """
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0, 1], self.roi_hist, [0, 180, 0, 255], 1)
    _, self.box = cv2.CamShift(dst, self.box, self.term_crit)

    (x, y, x2, y2) = self.glob_to_relative(
        (self.box[0], self.box[1],
         self.box[0] + self.box[2], self.box[1] + self.box[3]))

    self.annotation.bbox.left = x
    self.annotation.bbox.top = y
    self.annotation.bbox.right = x2
    self.annotation.bbox.bottom = y2

    self.age = self.age + 1
    self.degrade()
Example #2
Source File: cvutils.py From 1ZLAB_PyEspCar with GNU General Public License v3.0
def backprojection(target, roihist):
    '''Image preprocessing'''
    hsvt = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)
    # Now convolve with a circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    cv2.filter2D(dst, -1, disc, dst)
    # Threshold and binary AND
    ret, binary = cv2.threshold(dst, 80, 255, 0)
    # Create the kernel
    kernel = np.ones((5, 5), np.uint8)
    iter_time = 1
    # Morphological closing
    binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel, iterations=iter_time)
    thresh = cv2.merge((binary, binary, binary))
    target_filter = cv2.bitwise_and(target, thresh)
    return binary, target_filter
Example #3
Source File: HandRecognition.py From hand-gesture-recognition-opencv with MIT License
def hand_threshold(frame_in, hand_hist):
    frame_in = cv2.medianBlur(frame_in, 3)
    hsv = cv2.cvtColor(frame_in, cv2.COLOR_BGR2HSV)
    hsv[0:int(cap_region_y_end * hsv.shape[0]), 0:int(cap_region_x_begin * hsv.shape[1])] = 0  # Right half screen only
    hsv[int(cap_region_y_end * hsv.shape[0]):hsv.shape[0], 0:hsv.shape[1]] = 0
    back_projection = cv2.calcBackProject([hsv], [0, 1], hand_hist, [0, 180, 0, 256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_elem_size, morph_elem_size))
    cv2.filter2D(back_projection, -1, disc, back_projection)
    back_projection = cv2.GaussianBlur(back_projection, (gaussian_ksize, gaussian_ksize), gaussian_sigma)
    back_projection = cv2.medianBlur(back_projection, median_ksize)
    ret, thresh = cv2.threshold(back_projection, hsv_thresh_lower, 255, 0)
    return thresh

# 3. Find hand contour
Example #4
Source File: tracking.py From OpenCV-Computer-Vision-Projects-with-Python with MIT License
def _append_boxes_from_meanshift(self, frame, box_all):
    """Adds to the list all bounding boxes found with mean-shift tracking

    Mean-shift tracking is used to track objects from frame to frame.
    This information is combined with a saliency map to discard
    false-positives and focus only on relevant objects that move.

    :param frame: current RGB image frame
    :param box_all: append bounding boxes from tracking to this list
    :returns: new list of all collected bounding boxes
    """
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    for i in xrange(len(self.object_roi)):
        roi_hist = copy.deepcopy(self.object_roi[i])
        box_old = copy.deepcopy(self.object_box[i])
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        ret, box_new = cv2.meanShift(dst, tuple(box_old), self.term_crit)
        self.object_box[i] = copy.deepcopy(box_new)

        # discard boxes that don't move
        (xo, yo, wo, ho) = box_old
        (xn, yn, wn, hn) = box_new
        co = [xo + wo/2, yo + ho/2]
        cn = [xn + wn/2, yn + hn/2]
        if (co[0] - cn[0])**2 + (co[1] - cn[1])**2 >= self.min_shift2:
            box_all.append(box_new)
    return box_all
Example #5
Source File: object_detection_using_color.py From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License
def locate_object(frame, object_hist):
    # convert to HSV
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # apply back projection to image using object_hist as
    # the model histogram
    object_segment = cv2.calcBackProject(
        [hsv_frame], [0, 1], object_hist, [0, 180, 0, 256], 1)

    # find the contours
    img, contours, _ = cv2.findContours(
        object_segment, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    flag = None
    max_area = 0

    # find the contour with the greatest area
    for (i, c) in enumerate(contours):
        area = cv2.contourArea(c)
        if area > max_area:
            max_area = area
            flag = i

    # get the rectangle
    if flag is not None and max_area > 1000:
        cnt = contours[flag]
        coords = cv2.boundingRect(cnt)
        return coords

    return None

# compute the color histogram
Example #6
Source File: set_hand_histogram.py From Sign-Language-Interpreter-using-Deep-Learning with MIT License
def get_hand_hist():
    cam = cv2.VideoCapture(1)
    if cam.read()[0] == False:
        cam = cv2.VideoCapture(0)
    x, y, w, h = 300, 100, 300, 300
    flagPressedC, flagPressedS = False, False
    imgCrop = None
    while True:
        img = cam.read()[1]
        img = cv2.flip(img, 1)
        img = cv2.resize(img, (640, 480))
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        keypress = cv2.waitKey(1)
        if keypress == ord('c'):
            hsvCrop = cv2.cvtColor(imgCrop, cv2.COLOR_BGR2HSV)
            flagPressedC = True
            hist = cv2.calcHist([hsvCrop], [0, 1], None, [180, 256], [0, 180, 0, 256])
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
        elif keypress == ord('s'):
            flagPressedS = True
            break
        if flagPressedC:
            dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
            dst1 = dst.copy()
            disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
            cv2.filter2D(dst, -1, disc, dst)
            blur = cv2.GaussianBlur(dst, (11, 11), 0)
            blur = cv2.medianBlur(blur, 15)
            ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            thresh = cv2.merge((thresh, thresh, thresh))
            #cv2.imshow("res", res)
            cv2.imshow("Thresh", thresh)
        if not flagPressedS:
            imgCrop = build_squares(img)
        #cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)
        cv2.imshow("Set hand histogram", img)
    cam.release()
    cv2.destroyAllWindows()
    with open("hist", "wb") as f:
        pickle.dump(hist, f)
Example #7
Source File: final.py From Sign-Language-Interpreter-using-Deep-Learning with MIT License
def get_img_contour_thresh(img):
    img = cv2.flip(img, 1)
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    cv2.filter2D(dst, -1, disc, dst)
    blur = cv2.GaussianBlur(dst, (11, 11), 0)
    blur = cv2.medianBlur(blur, 15)
    thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    thresh = cv2.merge((thresh, thresh, thresh))
    thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
    thresh = thresh[y:y+h, x:x+w]
    contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[0]
    return img, contours, thresh
Example #8
Source File: camshift.py From PyCV-time with MIT License
def run(self):
    while True:
        ret, self.frame = self.cam.read()
        vis = self.frame.copy()
        hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

        if self.selection:
            x0, y0, x1, y1 = self.selection
            self.track_window = (x0, y0, x1-x0, y1-y0)
            hsv_roi = hsv[y0:y1, x0:x1]
            mask_roi = mask[y0:y1, x0:x1]
            hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.hist = hist.reshape(-1)
            self.show_hist()

            vis_roi = vis[y0:y1, x0:x1]
            cv2.bitwise_not(vis_roi, vis_roi)
            vis[mask == 0] = 0

        if self.tracking_state == 1:
            self.selection = None
            prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
            prob &= mask
            term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
            track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)

            if self.show_backproj:
                vis[:] = prob[..., np.newaxis]
            try:
                cv2.ellipse(vis, track_box, (0, 0, 255), 2)
            except:
                print track_box

        cv2.imshow('camshift', vis)

        ch = 0xFF & cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord('b'):
            self.show_backproj = not self.show_backproj
    cv2.destroyAllWindows()
Example #9
Source File: red_blob_detection.py From visual-followme with GNU General Public License v3.0
def filter_by_color(hist, hsv):
    prob = cv2.calcBackProject([hsv], [0], hist, [0, 180], 1)
    return prob
Example #10
Source File: color_detection.py From deepgaze with MIT License
def returnMask(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):
    """Given an input frame in BGR return the black/white mask.

    @param frame the original frame (color)
    @param morph_opening an erosion followed by a dilation to remove noise
    @param blur if True a Gaussian blur is applied to smooth the image
    @param kernel_size the kernel dimension used for morph and blur
    """
    if(len(self.template_hsv_list) == 0):
        return None
    frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = np.zeros((frame.shape[0], frame.shape[1]))
    for template_hsv in self.template_hsv_list:
        # Set the template histogram
        template_hist = cv2.calcHist([template_hsv], [0, 1], None, [256, 256], [0, 256, 0, 256])
        # Normalize the template histogram and apply backprojection
        cv2.normalize(template_hist, template_hist, 0, 255, cv2.NORM_MINMAX)
        frame_hsv_back = cv2.calcBackProject([frame_hsv], [0, 1], template_hist, [0, 256, 0, 256], 1)
        # Get the kernel and apply a convolution
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
        frame_hsv_clean = cv2.filter2D(frame_hsv_back, -1, kernel)
        # Apply the morph open operation (erosion followed by dilation)
        if(morph_opening == True):
            kernel = np.ones((kernel_size, kernel_size), np.uint8)
            frame_hsv_clean = cv2.morphologyEx(frame_hsv_clean, cv2.MORPH_OPEN, kernel, iterations=iterations)
        # Apply Gaussian blur
        if(blur == True):
            frame_hsv_clean = cv2.GaussianBlur(frame_hsv_clean, (kernel_size, kernel_size), 0)
        # Get the threshold
        ret, frame_hsv_threshold = cv2.threshold(frame_hsv_clean, 50, 255, 0)
        mask = np.add(mask, frame_hsv_threshold)  # Add the threshold to the mask
    # Normalizing the mask accumulated in the loop is not necessary here:
    # the astype(np.uint8) cast already bounds each value at 255.
    # cv2.normalize(mask, mask, 0, 255, cv2.NORM_MINMAX)  # Not necessary
    ret, mask = cv2.threshold(mask.astype(np.uint8), 50, 255, 0)
    return cv2.merge((mask, mask, mask))
Example #11
Source File: color_detection.py From deepgaze with MIT License
def returnMask(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):
    """Given an input frame in BGR return the black/white mask.

    @param frame the original frame (color)
    @param morph_opening an erosion followed by a dilation to remove noise
    @param blur if True a Gaussian blur is applied to smooth the image
    @param kernel_size the kernel dimension used for morph and blur
    """
    if(self.template_hsv is None):
        return None
    # Convert the input frame from BGR -> HSV
    frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Set the template histogram
    template_hist = cv2.calcHist([self.template_hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    # Normalize the template histogram and apply backprojection
    cv2.normalize(template_hist, template_hist, 0, 255, cv2.NORM_MINMAX)
    frame_hsv = cv2.calcBackProject([frame_hsv], [0, 1], template_hist, [0, 180, 0, 256], 1)
    # Get the kernel and apply a convolution
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    frame_hsv = cv2.filter2D(frame_hsv, -1, kernel)
    # Apply the morph open operation (erosion followed by dilation)
    if(morph_opening == True):
        kernel = np.ones((kernel_size, kernel_size), np.uint8)
        frame_hsv = cv2.morphologyEx(frame_hsv, cv2.MORPH_OPEN, kernel, iterations=iterations)
    # Apply Gaussian blur
    if(blur == True):
        frame_hsv = cv2.GaussianBlur(frame_hsv, (kernel_size, kernel_size), 0)
    # Get the threshold
    ret, frame_threshold = cv2.threshold(frame_hsv, 50, 255, 0)
    # Merge the threshold matrices
    return cv2.merge((frame_threshold, frame_threshold, frame_threshold))
Example #12
Source File: FingerDetection.py From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def hist_masking(frame, hist):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)

    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
    cv2.filter2D(dst, -1, disc, dst)

    ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
    # thresh = cv2.dilate(thresh, None, iterations=5)
    thresh = cv2.merge((thresh, thresh, thresh))

    return cv2.bitwise_and(frame, thresh)
Example #13
Source File: FaceBlurring.py From ImageProcessingProjects with MIT License
def camshift_track(prev, box, termination):
    hsv = cv2.cvtColor(prev, cv2.COLOR_BGR2HSV)
    x, y, w, h = box
    # Take the ROI from the HSV image so the hue histogram matches the
    # space used for the back projection (the original sampled the BGR
    # frame here, which back-projects the wrong channel).
    roi = hsv[y:y+h, x:x+w]
    hist = cv2.calcHist([roi], [0], None, [16], [0, 180])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    backProj = cv2.calcBackProject([hsv], [0], hist, [0, 180], 1)
    (r, box) = cv2.CamShift(backProj, tuple(box), termination)
    return box
Example #14
Source File: object-tracking.py From rpi-opencv with GNU General Public License v3.0
def run_main():
    cap = cv2.VideoCapture('crash-480.mp4')
    #cap.set(3,320)
    #cap.set(4,240)

    # Read the first frame of the video
    ret, frame = cap.read()

    # Set the ROI (Region of Interest). Actually, this is a
    # rectangle of the building that we're tracking
    c, r, w, h = 427, 240, 50, 50
    track_window = (c, r, w, h)

    # Create mask and normalized histogram
    roi = frame[r:r+h, c:c+w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)

    while True:
        t = cv2.getTickCount()
        ret, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.putText(frame, 'Tracked', (x-25, y-10), cv2.FONT_HERSHEY_SIMPLEX,
                    .5, (255, 255, 255), 1, cv2.CV_AA)

        t = cv2.getTickCount() - t
        print "detection time = %gms" % (t / (cv2.getTickFrequency() * 1000.))

        cv2.imshow('Tracking', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #15
Source File: object_tracker.py From OpenCV-3-x-with-Python-By-Example with MIT License
def start_tracking(self):
    # Iterate until the user presses the Esc key
    while True:
        # Capture the frame from webcam
        ret, self.frame = self.cap.read()

        # Resize the input frame
        self.frame = cv2.resize(self.frame, None,
                                fx=self.scaling_factor, fy=self.scaling_factor,
                                interpolation=cv2.INTER_AREA)
        vis = self.frame.copy()

        # Convert to HSV colorspace
        hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)

        # Create the mask based on predefined thresholds
        mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

        if self.selection:
            x0, y0, x1, y1 = self.selection
            self.track_window = (x0, y0, x1-x0, y1-y0)
            hsv_roi = hsv[y0:y1, x0:x1]
            mask_roi = mask[y0:y1, x0:x1]

            # Compute the histogram
            hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])

            # Normalize and reshape the histogram
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.hist = hist.reshape(-1)

            vis_roi = vis[y0:y1, x0:x1]
            cv2.bitwise_not(vis_roi, vis_roi)
            vis[mask == 0] = 0

        if self.tracking_state == 1:
            print('tracking')
            self.selection = None

            # Compute the histogram back projection
            prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
            prob &= mask
            term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

            # Apply CAMShift on 'prob'
            track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)

            # Draw an ellipse around the object
            cv2.ellipse(vis, track_box, (0, 255, 0), 2)

        cv2.imshow('Object Tracker', vis)

        c = cv2.waitKey(delay=5)
        if c == 27:
            break

    cv2.destroyAllWindows()
Example #16
Source File: create_gestures.py From Sign-Language-Interpreter-using-Deep-Learning with MIT License
def store_images(g_id):
    total_pics = 1200
    hist = get_hand_hist()
    cam = cv2.VideoCapture(1)
    if cam.read()[0] == False:
        cam = cv2.VideoCapture(0)
    x, y, w, h = 300, 100, 300, 300

    create_folder("gestures/" + str(g_id))
    pic_no = 0
    flag_start_capturing = False
    frames = 0

    while True:
        img = cam.read()[1]
        img = cv2.flip(img, 1)
        imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
        disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
        cv2.filter2D(dst, -1, disc, dst)
        blur = cv2.GaussianBlur(dst, (11, 11), 0)
        blur = cv2.medianBlur(blur, 15)
        thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
        thresh = cv2.merge((thresh, thresh, thresh))
        thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
        thresh = thresh[y:y+h, x:x+w]
        contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]

        if len(contours) > 0:
            contour = max(contours, key=cv2.contourArea)
            if cv2.contourArea(contour) > 10000 and frames > 50:
                x1, y1, w1, h1 = cv2.boundingRect(contour)
                pic_no += 1
                save_img = thresh[y1:y1+h1, x1:x1+w1]
                if w1 > h1:
                    save_img = cv2.copyMakeBorder(save_img, int((w1-h1)/2), int((w1-h1)/2),
                                                  0, 0, cv2.BORDER_CONSTANT, (0, 0, 0))
                elif h1 > w1:
                    save_img = cv2.copyMakeBorder(save_img, 0, 0, int((h1-w1)/2),
                                                  int((h1-w1)/2), cv2.BORDER_CONSTANT, (0, 0, 0))
                save_img = cv2.resize(save_img, (image_x, image_y))
                rand = random.randint(0, 10)
                if rand % 2 == 0:
                    save_img = cv2.flip(save_img, 1)
                cv2.putText(img, "Capturing...", (30, 60), cv2.FONT_HERSHEY_TRIPLEX, 2, (127, 255, 255))
                cv2.imwrite("gestures/" + str(g_id) + "/" + str(pic_no) + ".jpg", save_img)

        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(img, str(pic_no), (30, 400), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (127, 127, 255))
        cv2.imshow("Capturing gesture", img)
        cv2.imshow("thresh", thresh)
        keypress = cv2.waitKey(1)
        if keypress == ord('c'):
            if flag_start_capturing == False:
                flag_start_capturing = True
            else:
                flag_start_capturing = False
                frames = 0
        if flag_start_capturing == True:
            frames += 1
        if pic_no == total_pics:
            break
Example #17
Source File: camshift.py From OpenCV-Python-Tutorial with MIT License
def run(self):
    while True:
        ret, self.frame = self.cam.read()
        vis = self.frame.copy()
        hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

        if self.selection:
            x0, y0, x1, y1 = self.selection
            hsv_roi = hsv[y0:y1, x0:x1]
            mask_roi = mask[y0:y1, x0:x1]
            hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.hist = hist.reshape(-1)
            self.show_hist()

            vis_roi = vis[y0:y1, x0:x1]
            cv2.bitwise_not(vis_roi, vis_roi)
            vis[mask == 0] = 0

        if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
            self.selection = None
            prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
            prob &= mask
            term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
            track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)

            if self.show_backproj:
                vis[:] = prob[..., np.newaxis]
            try:
                cv2.ellipse(vis, track_box, (0, 0, 255), 2)
            except:
                print(track_box)

        cv2.imshow('camshift', vis)

        ch = cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord('b'):
            self.show_backproj = not self.show_backproj
    cv2.destroyAllWindows()