Python cv2.absdiff() Examples
The following are 30 code examples of cv2.absdiff(), drawn from open-source projects. The project, source file, and license are noted above each example. You may also want to check out all available functions and classes of the cv2 module.
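Before the examples, a minimal sketch of what cv2.absdiff computes may help: the element-wise absolute difference |a - b|, saturated to the valid range of the array's dtype. For uint8 images this avoids the wrap-around that plain NumPy subtraction produces. The two tiny arrays below are illustrative placeholders, not from any of the projects listed here.

import cv2
import numpy as np

# Two same-sized uint8 arrays standing in for consecutive video frames.
frame_a = np.array([[10, 200]], dtype=np.uint8)
frame_b = np.array([[50, 100]], dtype=np.uint8)

# cv2.absdiff computes |a - b| per element, saturating to 0..255.
diff = cv2.absdiff(frame_a, frame_b)
print(diff)               # [[ 40 100]]

# Plain uint8 subtraction wraps around instead, which is usually not wanted:
print(frame_a - frame_b)  # [[216 100]]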
Example #1
Source File: segment.py From gesture-recognition with MIT License
def segment(image, threshold=25):
    global bg
    # find the absolute difference between background and current frame
    diff = cv2.absdiff(bg.astype("uint8"), image)

    # threshold the diff image so that we get the foreground
    thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]

    # get the contours in the thresholded image
    (_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # return None, if no contours detected
    if len(cnts) == 0:
        return
    else:
        # based on contour area, get the maximum contour which is the hand
        segmented = max(cnts, key=cv2.contourArea)
        return (thresholded, segmented)

#-----------------
# MAIN FUNCTION
#-----------------
Example #2
Source File: motion.py From object-detection with MIT License
def prediction(self, image):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.GaussianBlur(image, (21, 21), 0)
    if self.avg is None:
        self.avg = image.copy().astype(float)
    cv2.accumulateWeighted(image, self.avg, 0.5)
    frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
    thresh = cv2.threshold(
        frameDelta, DELTA_THRESH, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(
        thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    self.avg = image.copy().astype(float)
    return cnts
Example #3
Source File: scene_detector.py From ATX with Apache License 2.0
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print('is_match', total, num)
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask != 0] = 0
        img2 = img2.copy()
        img2[mask != 0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print(confidence)
    return confidence
Example #4
Source File: camera_test.py From crop_row_detection with GNU General Public License v3.0
def main():
    capture = cv2.VideoCapture(0)
    _, image = capture.read()
    previous = image.copy()
    while (cv2.waitKey(1) < 0):
        _, image = capture.read()
        diff = cv2.absdiff(image, previous)
        #image = cv2.flip(image, 3)
        #image = cv2.norm(image)
        _, diff = cv2.threshold(diff, 32, 0, cv2.THRESH_TOZERO)
        _, diff = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY)
        diff = cv2.medianBlur(diff, 5)
        cv2.imshow('video', diff)
        previous = image.copy()
    capture.release()
    cv2.destroyAllWindows()
Example #5
Source File: imutils.py From ATX with Apache License 2.0
def diff_rect(img1, img2, pos=None):
    """find contours that include pos in the differences between img1 & img2 (cv2 images)"""
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (3, 3), 0)
    edges = cv2.Canny(diff, 100, 200)
    _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    contours.sort(key=lambda c: len(c))
    # no pos provided, just return the rect of the largest different area
    if pos is None:
        cnt = contours[-1]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        return (x0, y0, x1, y1)
    # else the rect should contain the pos
    x, y = pos
    for i in range(len(contours)):
        cnt = contours[-1-i]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        if x0 <= x <= x1 and y0 <= y <= y1:
            return (x0, y0, x1, y1)
Example #6
Source File: facer.py From Peppa_Pig_Face_Engine with Apache License 2.0
def diff_frames(self, previous_frame, image):
    '''
    diff value for two frames; determine whether to execute the detection
    :param previous_frame: RGB array
    :param image: RGB array
    :return: True or False
    '''
    if previous_frame is None:
        return True
    else:
        _diff = cv2.absdiff(previous_frame, image)
        diff = np.sum(_diff) / previous_frame.shape[0] / previous_frame.shape[1] / 3.
        if diff > self.diff_thres:
            return True
        else:
            return False
Example #7
Source File: image_detect_02.py From image-detect with MIT License
def matchAB(fileA, fileB):
    # read the image data
    imgA = cv2.imread(fileA)
    imgB = cv2.imread(fileB)

    # convert to grayscale
    grayA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

    # get the size of image A
    height, width = grayA.shape

    # take local windows and look for matching positions
    result_window = np.zeros((height, width), dtype=imgA.dtype)
    for start_y in range(0, height-100, 10):
        for start_x in range(0, width-100, 10):
            window = grayA[start_y:start_y+100, start_x:start_x+100]
            match = cv2.matchTemplate(grayB, window, cv2.TM_CCOEFF_NORMED)
            _, _, _, max_loc = cv2.minMaxLoc(match)
            matched_window = grayB[max_loc[1]:max_loc[1]+100, max_loc[0]:max_loc[0]+100]
            result = cv2.absdiff(window, matched_window)
            result_window[start_y:start_y+100, start_x:start_x+100] = result
    plt.imshow(result_window)
    plt.show()
Example #8
Source File: pySaliencyMap.py From aim with MIT License
def FMCenterSurroundDiff(self, GaussianMaps):
    dst = list()
    for s in range(2, 5):
        now_size = GaussianMaps[s].shape
        now_size = (now_size[1], now_size[0])  # (width, height)
        tmp = cv2.resize(GaussianMaps[s + 3], now_size, interpolation=cv2.INTER_LINEAR)
        nowdst = cv2.absdiff(GaussianMaps[s], tmp)
        dst.append(nowdst)
        tmp = cv2.resize(GaussianMaps[s + 4], now_size, interpolation=cv2.INTER_LINEAR)
        nowdst = cv2.absdiff(GaussianMaps[s], tmp)
        dst.append(nowdst)
    return dst

# Constructing a Gaussian pyramid + taking center-surround differences
Example #9
Source File: 1b-est-gyro-rates.py From ImageAnalysis with MIT License
def motion1(new_frame, base):
    motion = cv2.absdiff(base, new_frame)
    gray = cv2.cvtColor(motion, cv2.COLOR_BGR2GRAY)
    cv2.imshow('motion', gray)
    ret, motion_mask = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY_INV)

    blendsize = (3, 3)
    kernel = np.ones(blendsize, 'uint8')
    motion_mask = cv2.erode(motion_mask, kernel)

    # lots
    motion_mask /= 1.1429
    motion_mask += 16

    # medium
    #motion_mask /= 1.333
    #motion_mask += 32

    # minimal
    #motion_mask /= 2
    #motion_mask += 64

    cv2.imshow('motion1', motion_mask)
    return motion_mask
Example #10
Source File: 1a-est-gyro-rates.py From ImageAnalysis with MIT License
def motion1(new_frame, base):
    motion = cv2.absdiff(base, new_frame)
    gray = cv2.cvtColor(motion, cv2.COLOR_BGR2GRAY)
    cv2.imshow('motion', gray)
    ret, motion_mask = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY_INV)

    blendsize = (3, 3)
    kernel = np.ones(blendsize, 'uint8')
    motion_mask = cv2.erode(motion_mask, kernel)

    # lots
    motion_mask /= 1.1429
    motion_mask += 16

    # medium
    #motion_mask /= 1.333
    #motion_mask += 32

    # minimal
    #motion_mask /= 2
    #motion_mask += 64

    cv2.imshow('motion1', motion_mask)
    return motion_mask
Example #11
Source File: 1a-est-gyro-rates.py From ImageAnalysis with MIT License
def motion3(frame, counter):
    global last_frame
    global static_mask

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if last_frame is None:
        pass
    else:
        diff = cv2.absdiff(gray, last_frame)
        cv2.imshow('motion3', diff)
        if static_mask is None:
            static_mask = np.float32(diff)
        else:
            if counter > 1000:
                c = float(1000)
            else:
                c = float(counter)
            f = float(c - 1) / c
            static_mask = f*static_mask + (1.0 - f)*np.float32(diff)
        mask_uint8 = np.uint8(static_mask)
        cv2.imshow('mask3', mask_uint8)
        ret, newmask = cv2.threshold(mask_uint8, 2, 255, cv2.THRESH_BINARY)
        cv2.imshow('newmask', newmask)
    last_frame = gray

# average of frames (the stationary stuff should be the sharpest)
Example #12
Source File: 1a-est-gyro-rates.py From ImageAnalysis with MIT License
def motion1(new_frame, base):
    motion = cv2.absdiff(base, new_frame)
    gray = cv2.cvtColor(motion, cv2.COLOR_BGR2GRAY)
    cv2.imshow('motion', gray)
    ret, motion_mask = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY_INV)

    blendsize = (3, 3)
    kernel = np.ones(blendsize, 'uint8')
    motion_mask = cv2.erode(motion_mask, kernel)

    # lots
    motion_mask /= 1.1429
    motion_mask += 16

    # medium
    #motion_mask /= 1.333
    #motion_mask += 32

    # minimal
    #motion_mask /= 2
    #motion_mask += 64

    cv2.imshow('motion1', motion_mask)
    return motion_mask
Example #13
Source File: main.py From Human-detection-and-Tracking with Apache License 2.0
def background_subtraction(previous_frame, frame_resized_grayscale, min_area):
    """
    This function returns 1 for the frames in which the area after subtraction
    with the previous frame is greater than the minimum area defined. Thus the
    expensive computation of human detection, face detection, and face
    recognition is not done on all the frames. Only the frames undergoing a
    significant amount of change (which is controlled by min_area) are
    processed for detection and recognition.
    """
    frameDelta = cv2.absdiff(previous_frame, frame_resized_grayscale)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    im2, cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    temp = 0
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) > min_area:
            temp = 1
    return temp
Example #14
Source File: PalmTracker.py From Hand-Gesture-Recognition-Using-Background-Elllimination-and-Convolution-Neural-Network with MIT License
def segment(image, threshold=25):
    global bg
    # find the absolute difference between background and current frame
    diff = cv2.absdiff(bg.astype("uint8"), image)

    # threshold the diff image so that we get the foreground
    thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]

    # get the contours in the thresholded image
    (cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # return None, if no contours detected
    if len(cnts) == 0:
        return
    else:
        # based on contour area, get the maximum contour which is the hand
        segmented = max(cnts, key=cv2.contourArea)
        return (thresholded, segmented)
Example #15
Source File: ContinuousGesturePredictor.py From Hand-Gesture-Recognition-Using-Background-Elllimination-and-Convolution-Neural-Network with MIT License
def segment(image, threshold=25):
    global bg
    # find the absolute difference between background and current frame
    diff = cv2.absdiff(bg.astype("uint8"), image)

    # threshold the diff image so that we get the foreground
    thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]

    # get the contours in the thresholded image
    (cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # return None, if no contours detected
    if len(cnts) == 0:
        return
    else:
        # based on contour area, get the maximum contour which is the hand
        segmented = max(cnts, key=cv2.contourArea)
        return (thresholded, segmented)
Example #16
Source File: recognize.py From gesture-recognition with MIT License
def segment(image, threshold=25):
    global bg
    # find the absolute difference between background and current frame
    diff = cv2.absdiff(bg.astype("uint8"), image)

    # threshold the diff image so that we get the foreground
    thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]

    # get the contours in the thresholded image
    (_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # return None, if no contours detected
    if len(cnts) == 0:
        return
    else:
        # based on contour area, get the maximum contour which is the hand
        segmented = max(cnts, key=cv2.contourArea)
        return (thresholded, segmented)

#--------------------------------------------------------------
# To count the number of fingers in the segmented hand region
#--------------------------------------------------------------
Example #17
Source File: frame_diff.py From Artificial-Intelligence-with-Python with MIT License
def frame_diff(prev_frame, cur_frame, next_frame):
    # Difference between the current frame and the next frame
    diff_frames_1 = cv2.absdiff(next_frame, cur_frame)

    # Difference between the current frame and the previous frame
    diff_frames_2 = cv2.absdiff(cur_frame, prev_frame)

    return cv2.bitwise_and(diff_frames_1, diff_frames_2)

# Define a function to get the current frame from the webcam
Example #18
Source File: test_monkey.py From ATX with Apache License 2.0
def test_similar():
    from itertools import combinations
    from collections import defaultdict
    from heapq import heappush

    def sim1(img1, img2):
        h, w, d = img1.shape
        total = h*w*d
        diff = cv2.absdiff(img1, img2)
        num = (diff < 10).sum()
        return num*1.0/total

    names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = dict(zip(names, map(cv2.imread, names)))

    diffs = defaultdict(list)
    for name1, name2 in combinations(names, 2):
        img1, img2 = imgs[name1], imgs[name2]
        similarity = sim1(img1, img2)
        # print('diff', name1, name2, 'result is:', similarity)
        heappush(diffs[name1], (-similarity, name2))
        heappush(diffs[name2], (-similarity, name1))

    for k, v in diffs.items():
        print(k, v[0][1], -v[0][0])
Example #19
Source File: image_recognition_singlecam.py From hta0-horizontal-robot-arm with GNU General Public License v2.0
def calculateDifference_method1(self, img, background_img):
    # Object recognition thresholds
    diff_low_t = 45
    diff_high_t = 255

    self.previewImage("Original Image [Diff method1]", img)

    # In this approach, we are doing Gray > Difference > Blur > Threshold > Blur.
    # Background - Gray
    background_img_gray = cv2.cvtColor(background_img, cv2.COLOR_BGR2GRAY)
    self.previewImage("1 Background Gray", background_img_gray)

    # Image - Gray
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    self.previewImage("2 Image Gray", img_gray)

    # Calculate Difference
    diff_gray = cv2.absdiff(background_img_gray, img_gray)
    self.previewImage("3 Pre-Diff", diff_gray)

    # Diff Blur
    diff_gray_blur = cv2.GaussianBlur(diff_gray, (5, 5), 0)
    self.previewImage("4 Pre-Diff Blur", diff_gray_blur)

    # ========= Threshold :: this is a manual calibration point in this approach
    ret, diff_tresh = cv2.threshold(diff_gray_blur, diff_low_t, diff_high_t, cv2.THRESH_BINARY)
    self.previewImage("5 Image Threshold", diff_tresh)

    # Threshold Blur
    diff = cv2.GaussianBlur(diff_tresh, (5, 5), 0)
    self.previewImage("6 Image Threshold", diff)

    return diff
Example #20
Source File: 1a-est-gyro-rates.py From ImageAnalysis with MIT License
def motion2(new_frame, base):
    motion = cv2.absdiff(base, new_frame)
    gray = cv2.cvtColor(motion, cv2.COLOR_BGR2GRAY)
    cv2.imshow('motion', gray)
    motion_mask = 255 - gray
    motion_mask /= 2
    motion_mask += 2
    cv2.imshow('motion2', motion_mask)
    return motion_mask
Example #21
Source File: motion_detection.py From pynvr with BSD 3-Clause "New" or "Revised" License
def diffImg(self, t0, t1, t2):
    d1 = cv.absdiff(t2, t1)
    d2 = cv.absdiff(t1, t0)
    return cv.bitwise_and(d1, d2)
Example #22
Source File: motion_detection.py From pynvr with BSD 3-Clause "New" or "Revised" License
def diffImg(self, t0, t1, t2):
    if not self.multiFrameDetection:
        return cv.absdiff(t2, t1)
    d1 = cv.absdiff(t2, t1)
    # compare against the oldest frame; diffing t1 with t2 again would just duplicate d1
    d2 = cv.absdiff(t1, t0)
    return cv.bitwise_and(d1, d2)
Example #23
Source File: 图像相减3.py From OpenCV-Python-Tutorial with MIT License
def diff(img, img1):
    # returns just the difference of the two images
    return cv2.absdiff(img, img1)
Example #24
Source File: pi-timolo81.py From pi-timolo with MIT License
def trackPoint(grayimage1, grayimage2):
    movementCenterPoint = []  # initialize list of movementCenterPoints
    biggestArea = MIN_AREA
    # Get differences between the two greyed images
    differenceimage = cv2.absdiff(grayimage1, grayimage2)
    # Blur difference image to enhance motion vectors
    differenceimage = cv2.blur(differenceimage, (BLUR_SIZE, BLUR_SIZE))
    # Get threshold of blurred difference image based on THRESHOLD_SENSITIVITY variable
    retval, thresholdimage = cv2.threshold(differenceimage, THRESHOLD_SENSITIVITY, 255, cv2.THRESH_BINARY)
    try:
        thresholdimage, contours, hierarchy = cv2.findContours(thresholdimage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    except:
        contours, hierarchy = cv2.findContours(thresholdimage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        movement = False
        for c in contours:
            cArea = cv2.contourArea(c)
            if cArea > biggestArea:
                biggestArea = cArea
                (x, y, w, h) = cv2.boundingRect(c)
                cx = int(x + w/2)  # x centerpoint of contour
                cy = int(y + h/2)  # y centerpoint of contour
                movementCenterPoint = [cx, cy]
    return movementCenterPoint

#-----------------------------------------------------------------------------------------------
Example #25
Source File: motion_detection.py From deepgaze with MIT License
def returnMask(self, foreground_image, threshold=25):
    """Return the binary image after the detection process

    @param foreground_image the frame to check
    @param threshold the value used for filtering the pixels after the absdiff
    """
    if foreground_image is None:
        return None
    foreground_gray = cv2.cvtColor(foreground_image, cv2.COLOR_BGR2GRAY)
    delta_image = cv2.absdiff(self.background_gray, foreground_gray)
    threshold_image = cv2.threshold(delta_image, threshold, 255, cv2.THRESH_BINARY)[1]
    return threshold_image
Example #26
Source File: motion_detection.py From deepgaze with MIT License
def returnMask(self, foreground_image):
    """Return the binary image after the detection process

    @param foreground_image the frame to check
    """
    return self.BackgroundSubtractorMOG.apply(foreground_image)
Example #27
Source File: rodent.py From rodent with MIT License
def motion_detection(camera, folder, until):
    """
    Uses 3 frames to look for motion; can't remember where I found it,
    but it gives better results than my first try comparing 2 frames.
    """
    utils.clear_directory(folder)

    # Need to get 2 images to start with
    previous_image = cv2.cvtColor(camera.read()[1], cv2.COLOR_RGB2GRAY)
    current_image = cv2.cvtColor(camera.read()[1], cv2.COLOR_RGB2GRAY)
    purple = (140, 25, 71)

    while True:
        now = datetime.datetime.now()
        _, image = camera.read()
        gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        difference1 = cv2.absdiff(previous_image, gray_image)
        difference2 = cv2.absdiff(current_image, gray_image)
        result = cv2.bitwise_and(difference1, difference2)

        # Basic threshold, turn the bitwise_and into a black or white (haha)
        # result, white (255) being a motion
        _, result = cv2.threshold(result, 40, 255, cv2.THRESH_BINARY)

        # Let's show a square around the detected motion in the original pic
        low_point, high_point = utils.find_motion_boundaries(result.tolist())
        if low_point is not None and high_point is not None:
            cv2.rectangle(image, low_point, high_point, purple, 3)
            print('Motion detected! Taking picture')
            utils.save_image(image, folder, now)

        previous_image = current_image
        current_image = gray_image

        if utils.time_over(until, now):
            break

    del camera
Example #28
Source File: detection.py From SaltwashAR with GNU General Public License v3.0
def get_active_cell(self, image):
    # obtain motion between previous and current image
    current_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    delta = cv2.absdiff(self.previous_gray, current_gray)
    threshold_image = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]

    # set cell height and width (integer division, so the values can be used as slice indices)
    height, width = threshold_image.shape[:2]
    cell_height = height // 2
    cell_width = width // 3

    # store motion level for each cell
    cells = np.array([0, 0, 0])
    cells[0] = cv2.countNonZero(threshold_image[cell_height:height, 0:cell_width])
    cells[1] = cv2.countNonZero(threshold_image[cell_height:height, cell_width:cell_width*2])
    cells[2] = cv2.countNonZero(threshold_image[cell_height:height, cell_width*2:width])

    # obtain the most active cell
    top_cell = np.argmax(cells)

    # return the most active cell, if threshold met
    if cells[top_cell] >= self.THRESHOLD:
        return top_cell
    else:
        return None
Example #29
Source File: pi-timolo.py From pi-timolo with MIT License
def trackPoint(grayimage1, grayimage2):
    """
    Process two cropped grayscale images.
    Check for motion and return the center point
    of motion for the largest contour.
    """
    movementCenterPoint = []  # initialize list of movementCenterPoints
    biggestArea = MIN_AREA
    # Get differences between the two greyed images
    differenceimage = cv2.absdiff(grayimage1, grayimage2)
    # Blur difference image to enhance motion vectors
    differenceimage = cv2.blur(differenceimage, (BLUR_SIZE, BLUR_SIZE))
    # Get threshold of blurred difference image
    # based on THRESHOLD_SENSITIVITY variable
    retval, thresholdimage = cv2.threshold(differenceimage,
                                           THRESHOLD_SENSITIVITY,
                                           255, cv2.THRESH_BINARY)
    try:
        # opencv 2 syntax default
        contours, hierarchy = cv2.findContours(thresholdimage,
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)
    except ValueError:
        # opencv 3 syntax
        thresholdimage, contours, hierarchy = cv2.findContours(thresholdimage,
                                                               cv2.RETR_EXTERNAL,
                                                               cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        for c in contours:
            cArea = cv2.contourArea(c)
            if cArea > biggestArea:
                biggestArea = cArea
                (x, y, w, h) = cv2.boundingRect(c)
                cx = int(x + w/2)  # x center point of contour
                cy = int(y + h/2)  # y center point of contour
                movementCenterPoint = [cx, cy]
    return movementCenterPoint

#------------------------------------------------------------------------------
Example #30
Source File: frame_diff.py From cv with MIT License
def frame_diff(prev_frame, cur_frame, next_frame):
    diff_frames1 = cv2.absdiff(next_frame, cur_frame)
    diff_frames2 = cv2.absdiff(cur_frame, prev_frame)
    return cv2.bitwise_and(diff_frames1, diff_frames2)
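The frame_diff examples above (Examples #17 and #30) define the three-frame differencing function but omit the capture loop that drives it. Below is a minimal, hypothetical driver sketch, not taken from either original source: the webcam index, the grayscale conversion, and the Esc-to-quit handling are all assumptions.

# Hypothetical driver loop for the frame_diff function above (not from the original source).
import cv2

def get_frame(cap):
    # Grab a frame and convert to grayscale so absdiff works on a single channel.
    _, frame = cap.read()
    return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

if __name__ == '__main__':
    cap = cv2.VideoCapture(0)  # assumed webcam index
    prev_frame = get_frame(cap)
    cur_frame = get_frame(cap)
    next_frame = get_frame(cap)

    while True:
        cv2.imshow('Motion', frame_diff(prev_frame, cur_frame, next_frame))
        # Slide the three-frame window forward by one frame.
        prev_frame, cur_frame = cur_frame, next_frame
        next_frame = get_frame(cap)
        if cv2.waitKey(10) == 27:  # Esc to quit
            break

    cap.release()
    cv2.destroyAllWindows()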