Python cv2.accumulateWeighted() Examples

The following are 12 code examples of cv2.accumulateWeighted(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
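cv2.accumulateWeighted(src, dst, alpha) updates a floating-point running average in place: dst = (1 - alpha) * dst + alpha * src. Before the project examples, here is a minimal sketch of the call using synthetic frames rather than a real camera:

import cv2
import numpy as np

# two synthetic 8-bit grayscale "frames"
frame1 = np.full((4, 4), 100, dtype=np.uint8)
frame2 = np.full((4, 4), 200, dtype=np.uint8)

# the accumulator must be a 32- or 64-bit float image
avg = frame1.astype(np.float32)

# in place: avg = (1 - 0.5) * avg + 0.5 * frame2
cv2.accumulateWeighted(frame2, avg, 0.5)
print(avg[0, 0])  # 150.0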
Example #1
Source File: motion.py    From object-detection with MIT License
def prediction(self, image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.GaussianBlur(image, (21, 21), 0)
        if self.avg is None:
            self.avg = image.copy().astype(float)
        cv2.accumulateWeighted(image, self.avg, 0.5)
        frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
        thresh = cv2.threshold(
                frameDelta, DELTA_THRESH, 255,
                cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(
                thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        self.avg = image.copy().astype(float)
        return cnts 
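Note that this method resets self.avg to the current frame at the end of every call, so with alpha = 0.5 the delta effectively measures the change between consecutive frames rather than against a long-running background model. DELTA_THRESH and imutils are expected to come from the surrounding motion.py module and are not shown in this excerpt.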
Example #2
Source File: recognize.py    From gesture-recognition with MIT License
def run_avg(image, accumWeight):
    global bg
    # initialize the background
    if bg is None:
        bg = image.copy().astype("float")
        return

    # compute weighted average, accumulate it and update the background
    cv2.accumulateWeighted(image, bg, accumWeight)

#---------------------------------------------
# To segment the region of hand in the image
#--------------------------------------------- 
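The segmentation function that this comment introduces is not included in the excerpt. A typical implementation for this kind of pipeline, sketched here as an assumption rather than the project's actual code, thresholds the difference against the accumulated background and keeps the largest contour:

def segment(image, threshold=25):
    # sketch only: not the project's actual segment() function
    global bg
    # absolute difference between the background model and the current frame
    diff = cv2.absdiff(bg.astype("uint8"), image)

    # threshold the difference image to obtain the foreground mask
    thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]

    # two return values assumes OpenCV 2.4 or 4.x (3.x returns three)
    cnts, _ = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
    if len(cnts) == 0:
        return None

    # take the largest contour as the hand region
    segmented = max(cnts, key=cv2.contourArea)
    return thresholded, segmented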
Example #3
Source File: segment.py    From gesture-recognition with MIT License
def run_avg(image, aWeight):
    global bg
    # initialize the background
    if bg is None:
        bg = image.copy().astype("float")
        return

    # compute weighted average, accumulate it and update the background
    cv2.accumulateWeighted(image, bg, aWeight)

#---------------------------------------------
# To segment the region of hand in the image
#--------------------------------------------- 
Example #4
Source File: demo.py    From ActionAI with GNU General Public License v3.0
def motion_detector(self, img):
        occupied = False
        # resize the frame, convert it to grayscale, and blur it
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (15, 15), 0)
     
        if self.avg is None:
            print("[INFO] starting background model...")
            self.avg = gray.copy().astype("float")
     
        # accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and running average
        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))
        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frameDelta, 5, 255,
            cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if len(cnts) == 2 else cnts[1]
     
        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < 5000:
                continue
            occupied = True
     
        return occupied 
Example #5
Source File: BackgroundRemove.py    From vidpipe with GNU General Public License v3.0
def processFrame( self, frame_in ):
        # version 1 - moving average
        if self._avg is None:
            self._avg = np.float32( frame_in )
        cv2.accumulateWeighted( frame_in, self._avg, self._speed )
        background = cv2.convertScaleAbs( self._avg )
        active_area = cv2.absdiff( frame_in, background )

        # version 2 - MOG - Gaussian Mixture-based Background/Foreground Segmentation Algorithm
        fgmask = self._fgbg.apply( frame_in ,learningRate = 0.01 )
        #active_area = cv2.bitwise_and( frame_in, frame_in, mask = fgmask )

        return fgmask 
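Here self._avg, self._speed and self._fgbg are initialized elsewhere in the class. A plausible setup, assuming OpenCV 3 or later (the original project may have used the older cv2.BackgroundSubtractorMOG API), would be:

import cv2

class BackgroundRemove:
    def __init__(self, speed=0.05):
        # assumption: names and defaults mirror how processFrame uses them
        self._avg = None      # running-average accumulator
        self._speed = speed   # alpha passed to accumulateWeighted
        # MOG2 mixture-of-Gaussians subtractor; detectShadows=False
        # keeps the foreground mask strictly binary
        self._fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)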
Example #6
Source File: ContinuousGesturePredictor.py    From Hand-Gesture-Recognition-Using-Background-Elllimination-and-Convolution-Neural-Network with MIT License
def run_avg(image, aWeight):
    global bg
    # initialize the background
    if bg is None:
        bg = image.copy().astype("float")
        return

    # compute weighted average, accumulate it and update the background
    cv2.accumulateWeighted(image, bg, aWeight) 
Example #7
Source File: PalmTracker.py    From Hand-Gesture-Recognition-Using-Background-Elllimination-and-Convolution-Neural-Network with MIT License
def run_avg(image, aWeight):
    global bg
    # initialize the background
    if bg is None:
        bg = image.copy().astype("float")
        return

    # compute weighted average, accumulate it and update the background
    cv2.accumulateWeighted(image, bg, aWeight) 
Example #8
Source File: ChangeDetector.py    From NaturewatchCameraServer with GNU General Public License v3.0
def detect_change_contours(self, img):
        """
        Detect changed contours in frame
        :param img: current image
        :return: True if it's time to capture
        """
        # convert to gray
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.avg is None:
            self.avg = gray.copy().astype("float")
            return False

        # add to accumulation model and find the change
        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frame_delta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))

        # threshold, dilate and find contours
        thresh = cv2.threshold(frame_delta, self.config["delta_threshold"], 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # find largest contour
        largest_contour = self.get_largest_contour(cnts)

        if largest_contour is None:
            return False

        (x, y, w, h) = cv2.boundingRect(largest_contour)

        # if the contour is too small, return false
        if w > self.maxWidth or w < self.minWidth or h > self.maxHeight or h < self.minHeight:
            return False
        else:
            if self.get_fake_time() - self.lastPhotoTime >= self.config['min_photo_interval_s']:
                return True

        return False 
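The two-value unpacking of cv2.findContours above assumes OpenCV 2.4 or 4.x; OpenCV 3.x returns three values, which is why other examples on this page reach for imutils.grab_contours. When imutils is unavailable, a small version-tolerant helper such as the following (a sketch, not part of any project shown here) avoids hard-coding either signature:

import cv2

def find_contours_compat(mask):
    # OpenCV 3.x returns (image, contours, hierarchy);
    # OpenCV 2.4 and 4.x return (contours, hierarchy)
    result = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                              cv2.CHAIN_APPROX_SIMPLE)
    return result[0] if len(result) == 2 else result[1]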
Example #9
Source File: TrackSet.py    From SimpleCV2 with BSD 3-Clause "New" or "Revised" License
def getBackground(self):
        """
        **SUMMARY**

        Get Background of the Image. For more info read
        http://opencvpython.blogspot.in/2012/07/background-extraction-using-running.html

        **PARAMETERS**
        No Parameters

        **RETURNS**

        Image - SimpleCV.ImageClass.Image

        **EXAMPLE**

        >>> while (some_condition):
            ... img1 = cam.getImage()
            ... ts = img1.track("camshift", ts1, img, bb)
            ... img = img1
        >>> ts.getBackground().show()
        """
        imgs = self.trackImages(cv2_numpy=True)
        avg = np.float32(imgs[0])
        for img in imgs[1:]:
            cv2.accumulateWeighted(img, avg, 0.01)
        res = cv2.convertScaleAbs(avg)
        return Image(res, cv2image=True)
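With alpha = 0.01, each new frame contributes only 1% to the accumulator, so the average converges slowly and moving foreground objects fade out of the result, leaving an estimate of the static background.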
Example #10
Source File: motion_detector.py    From study-picamera-examples with MIT License
def process_image(self, frame):
        frame = imutils.resize(frame, width=min(500, frame.shape[1]))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.avg is None:
            print('Starting background model...')
            self.avg = gray.copy().astype('float')
            return frame

        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))
        thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)

        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)  # handles OpenCV 2, 3 and 4 return signatures

        for c in cnts:
            if cv2.contourArea(c) < 5000:
                continue

            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        
        return frame 
Example #11
Source File: camera_opencv.py    From Adeept_RaspTank with MIT License
def watchDog(self, imgInput):
        timestamp = datetime.datetime.now()
        gray = cv2.cvtColor(imgInput, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.avg is None:
            print("[INFO] starting background model...")
            self.avg = gray.copy().astype("float")
            return 'background model'

        cv2.accumulateWeighted(gray, self.avg, 0.5)
        self.frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        self.thresh = cv2.threshold(self.frameDelta, 5, 255,
            cv2.THRESH_BINARY)[1]
        self.thresh = cv2.dilate(self.thresh, None, iterations=2)
        self.cnts = cv2.findContours(self.thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        self.cnts = imutils.grab_contours(self.cnts)
        # print('x')
        # loop over the contours
        for c in self.cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < 5000:
                continue
     
            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (self.mov_x, self.mov_y, self.mov_w, self.mov_h) = cv2.boundingRect(c)
            self.drawing = 1
            
            self.motionCounter += 1
            #print(motionCounter)
            #print(text)
            self.lastMovtionCaptured = timestamp
            led.setColor(255,78,0)
            # switch.switch(1,1)
            # switch.switch(2,1)
            # switch.switch(3,1)

        if (timestamp - self.lastMovtionCaptured).seconds >= 0.5:
            led.setColor(0,78,255)
            self.drawing = 0
            # switch.switch(1,0)
            # switch.switch(2,0)
            # switch.switch(3,0)
        self.pause() 
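led, imutils and self.pause(), along with state such as self.motionCounter, self.drawing and self.lastMovtionCaptured, come from the surrounding camera_opencv.py module and are not shown in this excerpt. Note also that (timestamp - self.lastMovtionCaptured).seconds is an integer, so the 0.5-second comparison effectively fires only after a full second has elapsed.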
Example #12
Source File: FaceBlurring.py    From ImageProcessingProjects with MIT License
def camshift_face_track():
    face_cascade = cv2.CascadeClassifier('Image_Lib/Face_Data/haarcascade_frontalface_default.xml')
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    ALPHA = 0.5

    camera = cv2.VideoCapture(0)
    face_box = None

    # wait till the first face box is available
    print("Waiting to get first face frame...")
    while face_box is None:
        grabbed, frame = camera.read()
        if not grabbed:
            raise EnvironmentError("Camera read failed!")
        image_prev = cv2.pyrDown(frame)
        face_box = utils.detect_face(face_cascade, image_prev)

    print "Face found!"
    prev_frames = image_prev.astype(np.float32)
    while (True):
        _, frame = camera.read()
        image_curr = cv2.pyrDown(frame)
        cv2.accumulateWeighted(image_curr, prev_frames, ALPHA)
        image_curr = cv2.convertScaleAbs(prev_frames)
        if face_box is not None:
            face_box = camshift_track(image_curr, face_box, termination)
            cv2.rectangle(image_curr, (face_box[0], face_box[1]), (face_box[0]+face_box[2], face_box[1] + face_box[3]),
                          (255, 0,0), 2)
            # cv2.rectangle(image_curr, (box[0], box[1]), (box[0]+box[2], box[1] + box[3]),
            #               (0, 0,255), 2)

        else:
            face_box = utils.detect_face(face_cascade, image_curr)

        cv2.imshow("Output", image_curr)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break

        elif key & 0xFF == ord('r'):
            print "Reseting face detection!"
            face_box = None
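As written, the function never releases the capture; calling camera.release() and cv2.destroyAllWindows() after the loop would be a reasonable cleanup step.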