Python cv2.createBackgroundSubtractorMOG2() Examples
The following are 13 code examples of cv2.createBackgroundSubtractorMOG2(). You can go to the original project or source file by following the links above each example, or check out all other available functions and classes of the cv2 module.
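Before the examples, a minimal sketch of the typical MOG2 loop may help orient readers. The video path below is a placeholder, and the keyword values shown are OpenCV's documented defaults; apply() updates the model and returns the per-pixel foreground mask.

import cv2

# Minimal MOG2 sketch: read frames, update the model, show the mask.
# 'video.mp4' is a placeholder path; the keyword values are OpenCV's defaults.
cap = cv2.VideoCapture('video.mp4')
subtractor = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16, detectShadows=True)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Mask values: 255 = foreground, 127 = shadow (with detectShadows=True), 0 = background.
    fgmask = subtractor.apply(frame)
    cv2.imshow('foreground mask', fgmask)
    if cv2.waitKey(30) & 0xFF == 27:  # esc quits
        break

cap.release()
cv2.destroyAllWindows()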
Example #1
Source File: shanghaitech.py From novelty-detection with MIT License
def create_background(video_frames):
    # type: (np.ndarray) -> np.ndarray
    """
    Create the background of a video via MOGs.

    :param video_frames: list of ordered frames (i.e., a video).
    :return: the estimated background of the video.
    """
    mog = cv2.createBackgroundSubtractorMOG2()
    for frame in video_frames:
        img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        mog.apply(img)

    # Get background
    background = mog.getBackgroundImage()
    return cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
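As a hypothetical usage sketch (the clip path is a placeholder, and gathering frames this way is an assumption rather than part of the original project), the input array could be built with cv2.VideoCapture:

import cv2
import numpy as np

# Hypothetical caller for create_background; 'clip.mp4' is a placeholder path.
cap = cv2.VideoCapture('clip.mp4')
frames = []
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))  # create_background expects RGB frames
cap.release()

background = create_background(np.stack(frames))  # (n_frames, h, w, 3) in, (h, w, 3) background out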
Example #2
Source File: detect_background.py From zmMagik with GNU General Public License v2.0
def __init__(self, min_accuracy, min_blend_area, kernel_fill=20, dist_threshold=15000, history=400):
    self.min_accuracy = max(min_accuracy, 0.7)
    self.min_blend_area = min_blend_area
    self.kernel_clean = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    self.kernel_fill = np.ones((kernel_fill, kernel_fill), np.uint8)
    self.dist_threshold = dist_threshold
    self.history = history

    # read https://docs.opencv.org/3.3.0/d2/d55/group__bgsegm.html#gae561c9701970d0e6b35ec12bae149814
    try:
        self.fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(
            history=self.history, nmixtures=5, backgroundRatio=0.7, noiseSigma=0)
    except AttributeError as error:
        print('It looks like your OpenCV version does not include bgsegm. '
              'Switching to createBackgroundSubtractorMOG2')
        self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False, history=self.history)

    # Alternative subtractors kept for reference:
    # self.fgbg = cv2.bgsegm.createBackgroundSubtractorGMG(decisionThreshold=0.98, initializationFrames=10)
    # self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False, history=self.history)
    # self.fgbg = cv2.bgsegm.createBackgroundSubtractorGSOC(noiseRemovalThresholdFacBG=0.01, noiseRemovalThresholdFacFG=0.0001)
    # self.fgbg = cv2.bgsegm.createBackgroundSubtractorCNT(minPixelStability=5, useHistory=True, maxPixelStability=5 * 60, isParallel=True)
    # self.fgbg = cv2.createBackgroundSubtractorKNN(detectShadows=False, history=self.history, dist2Threshold=self.dist_threshold)
    # fgbg = cv2.bgsegm.createBackgroundSubtractorLSBP()

    utils.success_print('Background subtraction initialized')
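The try/except above is effectively a capability probe: cv2.bgsegm ships with the opencv-contrib-python package, so a plain opencv-python install raises AttributeError on the first call, and the code falls back to the built-in createBackgroundSubtractorMOG2 with shadow detection disabled.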
Example #3
Source File: detectors.py From kalman_filter_multi_object_tracking with MIT License
def __init__(self):
    """Initialize variables used by Detectors class

    Args:
        None
    Return:
        None
    """
    self.fgbg = cv2.createBackgroundSubtractorMOG2()
Example #4
Source File: violation_detection.py From Traffic-Rules-Violation-Detection with GNU General Public License v3.0
def __init__(self, vid_file):
    # vid_file = 'videos/traffic.avi'
    self.cnt_up = 0
    self.cnt_down = 0

    self.zone1 = (100, 200)
    self.zone2 = (450, 100)

    self.cap = cv2.VideoCapture(vid_file)  # insane
    # Capture the properties of VideoCapture to console
    # for i in range(19):
    #     print(i, self.cap.get(i))

    self.w = self.cap.get(3)
    self.h = self.cap.get(4)
    self.frameArea = self.h * self.w
    self.areaTH = self.frameArea / 200
    print('Area Threshold', self.areaTH)

    # Input/Output Lines
    self.line_up = int(2 * (self.h / 5))
    self.line_down = int(3 * (self.h / 5))
    self.up_limit = int(1 * (self.h / 5))
    self.down_limit = int(4 * (self.h / 5))

    self.line_down_color = (255, 0, 0)
    self.line_up_color = (0, 0, 255)

    self.pt1 = [0, self.line_down]
    self.pt2 = [self.w, self.line_down]
    self.pts_L1 = np.array([self.pt1, self.pt2], np.int32)
    self.pts_L1 = self.pts_L1.reshape((-1, 1, 2))

    self.pt3 = [0, self.line_up]
    self.pt4 = [self.w, self.line_up]
    self.pts_L2 = np.array([self.pt3, self.pt4], np.int32)
    self.pts_L2 = self.pts_L2.reshape((-1, 1, 2))

    self.pt5 = [0, self.up_limit]
    self.pt6 = [self.w, self.up_limit]
    self.pts_L3 = np.array([self.pt5, self.pt6], np.int32)
    self.pts_L3 = self.pts_L3.reshape((-1, 1, 2))

    self.pt7 = [0, self.down_limit]
    self.pt8 = [self.w, self.down_limit]
    self.pts_L4 = np.array([self.pt7, self.pt8], np.int32)
    self.pts_L4 = self.pts_L4.reshape((-1, 1, 2))

    # Create the background subtractor
    self.fgbg = cv2.createBackgroundSubtractorMOG2()

    self.kernelOp = np.ones((3, 3), np.uint8)
    self.kernelOp2 = np.ones((5, 5), np.uint8)
    self.kernelCl = np.ones((11, 11), np.uint8)

    # Variables
    self.font = cv2.FONT_HERSHEY_SIMPLEX
    self.vehicles = []
    self.max_p_age = 5
    self.pid = 1
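Note that self.cap.get(3) and self.cap.get(4) use the raw numeric property IDs; they are equivalent to cv2.CAP_PROP_FRAME_WIDTH and cv2.CAP_PROP_FRAME_HEIGHT, and the named constants read more clearly.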
Example #5
Source File: controller.py From HalloPy with MIT License
def detected_frame(self, preprocessed_faced_covered_input_frame):
    """Function for removing background from input frame. """
    if self.flag_handler.background_capture_required is True:
        self._bg_model = cv2.createBackgroundSubtractorMOG2(0, self._bg_Sub_Threshold)
        self.flag_handler.background_capture_required = False
    if self._bg_model is not None:
        fgmask = self._bg_model.apply(preprocessed_faced_covered_input_frame,
                                      learningRate=self._learning_Rate)
        kernel = np.ones((3, 3), np.uint8)
        fgmask = cv2.erode(fgmask, kernel, iterations=1)
        res = cv2.bitwise_and(preprocessed_faced_covered_input_frame,
                              preprocessed_faced_covered_input_frame, mask=fgmask)
        # clip the ROI
        self._input_frame_with_hand = res[
            0:int(self._cap_region_y_end * preprocessed_faced_covered_input_frame.shape[0]),
            int(self._cap_region_x_begin * preprocessed_faced_covered_input_frame.shape[1]):
            preprocessed_faced_covered_input_frame.shape[1]]
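Two details are worth noting here: the subtractor is built with positional arguments (history=0 plus a custom varThreshold), and apply() is called with an explicit learningRate. In the test examples below the learning rate is 0, which freezes the background model after calibration so that a hand entering the frame stays classified as foreground.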
Example #6
Source File: test_extractor.py From HalloPy with MIT License
def test_contour_extreme_point_tracking(self):
    """Test for tracking extreme_points without optical flow (e.g. until calibrated). """
    # setup
    test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')
    test_image = cv2.imread(test_path)
    # todo: use mockito here to mock preprocessing elements
    flags_handler = FlagsHandler()
    detector = Detector(flags_handler)
    extractor = Extractor(flags_handler)
    # Background model preparations.
    bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
    cap = cv2.VideoCapture(0)
    while flags_handler.quit_flag is False:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        # Remove background from input frame.
        fgmask = bg_model.apply(frame, learningRate=0)
        kernel = np.ones((3, 3), np.uint8)
        fgmask = cv2.erode(fgmask, kernel, iterations=1)
        res = cv2.bitwise_and(frame, frame, mask=fgmask)
        # Clip frames ROI.
        back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                             {'cap_region_x_begin': 0.6,
                                                              'cap_region_y_end': 0.6})
        if flags_handler.background_capture_required is True:
            bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
            flags_handler.background_capture_required = False
        detector.input_frame_for_feature_extraction = back_ground_removed_clipped
        extractor.extract = detector
        image = extractor.get_drawn_extreme_contour_points()
        cv2.imshow('test_contour_extreme_point_tracking', image)
        flags_handler.keyboard_input = cv2.waitKey(1)
Example #7
Source File: test_extractor.py From HalloPy with MIT License
def test_max_distance_between_top_ext_point_and_palm_center_point(self):
    """Test if max distance is found correctly. """
    # setup
    # todo: use mockito here to mock preprocessing elements
    flags_handler = FlagsHandler()
    detector = Detector(flags_handler)
    extractor = Extractor(flags_handler)
    # Background model preparations.
    bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
    cap = cv2.VideoCapture(0)
    while flags_handler.quit_flag is False:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        # Remove background from input frame.
        fgmask = bg_model.apply(frame, learningRate=0)
        kernel = np.ones((3, 3), np.uint8)
        fgmask = cv2.erode(fgmask, kernel, iterations=1)
        res = cv2.bitwise_and(frame, frame, mask=fgmask)
        # Clip frames ROI.
        back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                             {'cap_region_x_begin': 0.6,
                                                              'cap_region_y_end': 0.6})
        if flags_handler.background_capture_required is True:
            bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
            flags_handler.background_capture_required = False
        detector.input_frame_for_feature_extraction = back_ground_removed_clipped
        extractor.extract = detector

        # run
        image = extractor.get_drawn_extreme_contour_points()
        cv2.line(image, extractor.palm_center_point,
                 (extractor.ext_top[0],
                  extractor.palm_center_point[1] - extractor.max_distance_from_ext_top_point_to_palm_center),
                 (255, 255, 255), thickness=2)
        cv2.imshow('test_max_distance_between_top_ext_point_and_palm_center_point', image)
        flags_handler.keyboard_input = cv2.waitKey(1)
Example #8
Source File: test_extractor.py From HalloPy with MIT License
def test_palm_angle_calculation(self):
    """Test if angle is calculated correctly.

    Usage:
        1. press 'b': to calibrate back_ground_remover.
        2. insert hand into frame, so that middle_finger is aligned with the Y axis.
        3. rotate hand 15 degrees left (degrees should go above 90).
        4. rotate hand 15 degrees right (degrees should go below 90).
        5. press esc when done.
    """
    # setup
    # todo: use mockito here to mock preprocessing elements
    flags_handler = FlagsHandler()
    detector = Detector(flags_handler)
    extractor = Extractor(flags_handler)
    # Background model preparations.
    bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
    cap = cv2.VideoCapture(0)
    while flags_handler.quit_flag is False:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        # Remove background from input frame.
        fgmask = bg_model.apply(frame, learningRate=0)
        kernel = np.ones((3, 3), np.uint8)
        fgmask = cv2.erode(fgmask, kernel, iterations=1)
        res = cv2.bitwise_and(frame, frame, mask=fgmask)
        # Clip frames ROI.
        back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                             {'cap_region_x_begin': 0.6,
                                                              'cap_region_y_end': 0.6})
        if flags_handler.background_capture_required is True:
            bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
            flags_handler.background_capture_required = False
        detector.input_frame_for_feature_extraction = back_ground_removed_clipped
        extractor.extract = detector

        # run
        image = extractor.get_drawn_extreme_contour_points()
        cv2.imshow('test_contour_extreme_point_tracking', image)
        print(extractor.palm_angle_in_degrees)
        flags_handler.keyboard_input = cv2.waitKey(1)
Example #9
Source File: test_extractor.py From HalloPy with MIT License
def test_5_second_calibration_time(self):
    """Test if the 5-second calibration time works correctly according to flags_handler.

    Usage:
        1. press 'b': to calibrate back_ground_remover.
        2. insert hand into frame, center palms_center (white dot) with the axes crossing.
        3. wait for #calibration_time (default 5 sec).
        4. press esc.

    Test: after calibration_time, the center circle should be green.
    """
    # setup
    # todo: use mockito here to mock preprocessing elements
    flags_handler = FlagsHandler()
    detector = Detector(flags_handler)
    extractor = Extractor(flags_handler)
    # Background model preparations.
    bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
    cap = cv2.VideoCapture(0)
    while flags_handler.quit_flag is False:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        # Remove background from input frame.
        fgmask = bg_model.apply(frame, learningRate=0)
        kernel = np.ones((3, 3), np.uint8)
        fgmask = cv2.erode(fgmask, kernel, iterations=1)
        res = cv2.bitwise_and(frame, frame, mask=fgmask)
        # Clip frames ROI.
        back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                             {'cap_region_x_begin': 0.6,
                                                              'cap_region_y_end': 0.6})
        if flags_handler.background_capture_required is True:
            bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
            flags_handler.background_capture_required = False
        detector.input_frame_for_feature_extraction = back_ground_removed_clipped
        extractor.extract = detector

        # run
        image = extractor.get_drawn_extreme_contour_points()
        cv2.imshow('test_contour_extreme_point_tracking', image)
        flags_handler.keyboard_input = cv2.waitKey(1)
Example #10
Source File: test_detector.py From HalloPy with MIT License
def test_detector_extract_and_track(self):
    """Test if Detector uses tracker object correctly. """
    # setup
    # Input from camera.
    cv2.namedWindow('test_detector_extract_and_track')
    cap = cv2.VideoCapture(0)
    flags_handler = FlagsHandler()
    detector = Detector(flags_handler)
    extractor = Extractor(flags_handler)
    bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
    while flags_handler.quit_flag is False:
        """
        Inside loop, update self._threshold according to flags_handler.

        Pressing 'c': toggle control (supposed to change contour's color between green and red).
        Pressing 'l': raise 'land' flag in flags_handler, in order to be able to break loop (with esc).
        Pressing 'z': will make threshold thinner.
        Pressing 'x': will make threshold thicker.
        Pressing esc: break loop.
        """
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        # Remove background from input frame.
        fgmask = bg_model.apply(frame, learningRate=0)
        kernel = np.ones((3, 3), np.uint8)
        fgmask = cv2.erode(fgmask, kernel, iterations=1)
        res = cv2.bitwise_and(frame, frame, mask=fgmask)
        # Clip frames ROI.
        roi = {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6}
        back_ground_removed_clipped = ImageTestTool.clip_roi(res, roi)
        if flags_handler.background_capture_required is True:
            bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
            flags_handler.background_capture_required = False
        # Pipe:
        detector.input_frame_for_feature_extraction = back_ground_removed_clipped
        extractor.extract = detector
        cv2.imshow('test_detector_extract_and_track', extractor.get_drawn_extreme_contour_points())
        keyboard_input = cv2.waitKey(1)
        flags_handler.keyboard_input = keyboard_input

    # teardown
    cap.release()
    cv2.destroyAllWindows()
Example #11
Source File: test_tracker.py From HalloPy with MIT License
def test_track(self):
    """Test if tracker object tracks correctly after given set of points to track, and a frame."""
    # setup
    cv2.namedWindow('test_track')
    flags_handler = FlagsHandler()
    tracker = None
    bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
    cap = cv2.VideoCapture(0)
    while flags_handler.quit_flag is False:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        # Remove background from input frame.
        fgmask = bg_model.apply(frame, learningRate=0)
        kernel = np.ones((3, 3), np.uint8)
        fgmask = cv2.erode(fgmask, kernel, iterations=1)
        res = cv2.bitwise_and(frame, frame, mask=fgmask)
        # Clip frames ROI.
        back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                             {'cap_region_x_begin': 0.6,
                                                              'cap_region_y_end': 0.6})
        if flags_handler.background_capture_required is True:
            bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
            flags_handler.background_capture_required = False

        max_area_contour = ImageTestTool.get_max_area_contour(back_ground_removed_clipped)
        extLeft, extRight, extTop, extBot = ImageTestTool.get_contour_extreme_points(max_area_contour)
        palm_center = ImageTestTool.get_center_of_mass(max_area_contour)
        if tracker is None:
            points = np.array([extTop, palm_center])
        else:
            points = tracker.points_to_track
            tracker.track(points, back_ground_removed_clipped)
            points = tracker.points_to_track

        ImageTestTool.draw_tracking_points(back_ground_removed_clipped, points)
        cv2.circle(back_ground_removed_clipped, palm_center, 8, (255, 255, 255), thickness=-1)
        cv2.imshow('test_track', back_ground_removed_clipped)
        keyboard_input = cv2.waitKey(1)
        flags_handler.keyboard_input = keyboard_input

        # run
        if flags_handler.background_capture_required is True:
            tracker = None
        if keyboard_input == ord('t'):
            tracker = Tracker(flags_handler, points, back_ground_removed_clipped)

    # teardown
    cap.release()
    cv2.destroyAllWindows()
Example #12
Source File: background_subtraction.py From plantcv with MIT License
def background_subtraction(background_image, foreground_image):
    """Creates a binary image from a background subtraction of the foreground using
    cv2.createBackgroundSubtractorMOG2(). The binary image returned is a mask that should
    contain mostly foreground pixels.

    The background image should show the same background as the foreground image, but without
    the object of interest.

    Images must be of the same size and type. If the sizes differ, the larger image is
    downsampled to the smaller image's size; if the types differ, an error is raised.

    Inputs:
    background_image = img object, RGB or binary/grayscale/single-channel
    foreground_image = img object, RGB or binary/grayscale/single-channel

    Returns:
    fgmask = background subtracted foreground image (mask)

    :param background_image: numpy.ndarray
    :param foreground_image: numpy.ndarray
    :return fgmask: numpy.ndarray
    """
    params.device += 1
    # Copying images to make sure not to alter the originals
    bg_img = np.copy(background_image)
    fg_img = np.copy(foreground_image)
    # Checking if images need to be resized or an error raised
    if bg_img.shape != fg_img.shape:
        # If the images are not both 3-channel or both single-channel, raise an error.
        if len(bg_img.shape) != len(fg_img.shape):
            fatal_error("Images must both be single-channel/grayscale/binary or RGB")
        # Forcibly resizing the larger image to the smaller image's size
        print("WARNING: Images are not of same size.\nResizing")
        if bg_img.shape > fg_img.shape:
            width, height = fg_img.shape[1], fg_img.shape[0]
            bg_img = cv2.resize(bg_img, (width, height), interpolation=cv2.INTER_AREA)
        else:
            width, height = bg_img.shape[1], bg_img.shape[0]
            fg_img = cv2.resize(fg_img, (width, height), interpolation=cv2.INTER_AREA)
    bgsub = cv2.createBackgroundSubtractorMOG2()
    # Applying the background image to the background subtractor first.
    # Anything added after is subtracted from the previous iterations.
    _ = bgsub.apply(bg_img)
    # Applying the foreground image to the background subtractor (therefore removing the background)
    fgmask = bgsub.apply(fg_img)
    # Debug options
    if params.debug == "print":
        print_image(fgmask, os.path.join(params.debug_outdir, str(params.device) + "_background_subtraction.png"))
    elif params.debug == "plot":
        plot_image(fgmask, cmap="gray")
    return fgmask
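A hypothetical call, with placeholder image paths (per the docstring, the two images should share size and channel count):

import cv2

# Hypothetical usage of background_subtraction; both paths are placeholders.
bg = cv2.imread('empty_scene.png')
fg = cv2.imread('scene_with_plant.png')
mask = background_subtraction(background_image=bg, foreground_image=fg)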
Example #13
Source File: violation_detection.py From Traffic-Rules-Violation-Detection-System with GNU General Public License v3.0
def __init__(self, vid_file):
    # vid_file = 'videos/traffic.avi'
    self.cnt_up = 0
    self.cnt_down = 0

    self.zone1 = (100, 200)
    self.zone2 = (450, 100)

    self.cap = cv2.VideoCapture(vid_file)  # insane
    # Capture the properties of VideoCapture to console
    # for i in range(19):
    #     print(i, self.cap.get(i))

    self.w = self.cap.get(3)
    self.h = self.cap.get(4)
    self.frameArea = self.h * self.w
    self.areaTH = self.frameArea / 200
    print('Area Threshold', self.areaTH)

    # Input/Output Lines
    self.line_up = int(2 * (self.h / 5))
    self.line_down = int(3 * (self.h / 5))
    self.up_limit = int(1 * (self.h / 5))
    self.down_limit = int(4 * (self.h / 5))

    self.line_down_color = (255, 0, 0)
    self.line_up_color = (0, 0, 255)

    self.pt1 = [0, self.line_down]
    self.pt2 = [self.w, self.line_down]
    self.pts_L1 = np.array([self.pt1, self.pt2], np.int32)
    self.pts_L1 = self.pts_L1.reshape((-1, 1, 2))

    self.pt3 = [0, self.line_up]
    self.pt4 = [self.w, self.line_up]
    self.pts_L2 = np.array([self.pt3, self.pt4], np.int32)
    self.pts_L2 = self.pts_L2.reshape((-1, 1, 2))

    self.pt5 = [0, self.up_limit]
    self.pt6 = [self.w, self.up_limit]
    self.pts_L3 = np.array([self.pt5, self.pt6], np.int32)
    self.pts_L3 = self.pts_L3.reshape((-1, 1, 2))

    self.pt7 = [0, self.down_limit]
    self.pt8 = [self.w, self.down_limit]
    self.pts_L4 = np.array([self.pt7, self.pt8], np.int32)
    self.pts_L4 = self.pts_L4.reshape((-1, 1, 2))

    # Create the background subtractor
    self.fgbg = cv2.createBackgroundSubtractorMOG2()

    self.kernelOp = np.ones((3, 3), np.uint8)
    self.kernelOp2 = np.ones((5, 5), np.uint8)
    self.kernelCl = np.ones((11, 11), np.uint8)

    # Variables
    self.font = cv2.FONT_HERSHEY_SIMPLEX
    self.vehicles = []
    self.max_p_age = 5
    self.pid = 1