Python cv2.calcOpticalFlowPyrLK() Examples
The following are 25 code examples of cv2.calcOpticalFlowPyrLK(), each taken from an open-source project. The project and source file for each example are noted in the header above it. You may also want to check out the other available functions and classes of the cv2 module.
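Before the project examples, here is a minimal sketch of the basic calling pattern they all build on: detect corners in one grayscale frame with cv2.goodFeaturesToTrack(), then track them into the next frame with cv2.calcOpticalFlowPyrLK(). The frame file names and parameter values below are illustrative placeholders, not taken from any of the projects that follow.

import cv2
import numpy as np

# Placeholder inputs: any two consecutive grayscale frames will do.
prev_gray = cv2.imread('frame1.png', cv2.IMREAD_GRAYSCALE)
next_gray = cv2.imread('frame2.png', cv2.IMREAD_GRAYSCALE)

# Corners to track, returned as an (N, 1, 2) float32 array,
# which is the shape calcOpticalFlowPyrLK expects.
p0 = cv2.goodFeaturesToTrack(prev_gray, maxCorners=100,
                             qualityLevel=0.3, minDistance=7)

# Track the corners into the next frame. st flags each point as
# found (1) or lost (0); err holds the per-point tracking error.
p1, st, err = cv2.calcOpticalFlowPyrLK(
    prev_gray, next_gray, p0, None,
    winSize=(15, 15), maxLevel=2,
    criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Keep only the successfully tracked points.
good_new = p1[st == 1]
good_old = p0[st == 1]

The st array is the key output: points are kept only where st == 1. Many of the examples below go one step further and run the flow backward (new frame to old frame) to reject points that fail a forward-backward consistency check.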
Example #1
Source File: optical_flow_tracker.py From face_landmark_dnn with MIT License

def update_tracks(self, img_old, img_new):
    """Update tracks."""
    # Get old points, using the latest one.
    points_old = np.float32([track[-1]
                             for track in self.tracks]).reshape(-1, 1, 2)

    # Get new points from old points.
    points_new, _st, _err = cv2.calcOpticalFlowPyrLK(
        img_old, img_new, points_old, None, **self.lk_params)

    # Get inferred old points from new points.
    points_old_inferred, _st, _err = cv2.calcOpticalFlowPyrLK(
        img_new, img_old, points_new, None, **self.lk_params)

    # Compare between old points and inferred old points.
    error_term = abs(
        points_old - points_old_inferred).reshape(-1, 2).max(-1)
    point_valid = error_term < 1

    new_tracks = []
    for track, (x, y), good_flag in zip(
            self.tracks, points_new.reshape(-1, 2), point_valid):
        # Track is good?
        if not good_flag:
            continue

        # New point is good, add to track.
        track.append((x, y))

        # Need to drop first old point?
        if len(track) > self.track_len:
            del track[0]

        # Track updated, add to track groups.
        new_tracks.append(track)

    # All tracks updated, replace the old set.
    self.tracks = new_tracks
Example #2
Source File: vidstab_utils.py From python_video_stab with MIT License

def match_keypoints(optical_flow, prev_kps):
    """Match optical flow keypoints

    :param optical_flow: output of cv2.calcOpticalFlowPyrLK
    :param prev_kps: keypoints that were passed to cv2.calcOpticalFlowPyrLK to create optical_flow

    :return: tuple of (cur_matched_kp, prev_matched_kp)
    """
    cur_kps, status, err = optical_flow

    # storage for keypoints with status 1
    prev_matched_kp = []
    cur_matched_kp = []

    if status is None:
        return cur_matched_kp, prev_matched_kp

    for i, matched in enumerate(status):
        # store coords of keypoints that appear in both
        if matched:
            prev_matched_kp.append(prev_kps[i])
            cur_matched_kp.append(cur_kps[i])

    return cur_matched_kp, prev_matched_kp
Example #3
Source File: VidStab.py From python_video_stab with MIT License

def _gen_next_raw_transform(self):
    current_frame = self.frame_queue.frames[-1]
    current_frame_gray = current_frame.gray_image
    current_frame_gray = self._resize_frame(current_frame_gray)

    # calc flow of movement
    optical_flow = cv2.calcOpticalFlowPyrLK(self.prev_gray,
                                            current_frame_gray,
                                            self.prev_kps, None)

    matched_keypoints = vidstab_utils.match_keypoints(optical_flow, self.prev_kps)
    transform_i = vidstab_utils.estimate_partial_transform(matched_keypoints)

    # update previous frame info for next iteration
    self._update_prev_frame(current_frame_gray)
    self._raw_transforms.append(transform_i[:])
    self._update_trajectory(transform_i)
Example #4
Source File: visual_odometry.py From Monocular-Visual-Inertial-Odometry with MIT License

def featureTracking(image_ref, image_cur, px_ref):
    kp2, st, err = cv2.calcOpticalFlowPyrLK(
        image_ref, image_cur, px_ref, None, **lk_params)  # shape: [k,2] [k,1] [k,1]

    st = st.reshape(st.shape[0])
    kp1 = px_ref[st == 1]
    kp2 = kp2[st == 1]

    return kp1, kp2
Example #5
Source File: vo.py From Monocular-Visual-Inertial-Odometry with MIT License

def feature_tracking(img1, img2, points1, points2, status):  # track matching features
    err = np.array([])
    winSize = (15, 15)
    maxLevel = 3
    termcriteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01)
    cv2.calcOpticalFlowPyrLK(img1, img2, points1, points2, status, err,
                             winSize, maxLevel, termcriteria, 0, 0.001)

    # remove bad points (failed status or negative coordinates)
    indexcorrection = 0
    for i in range(len(status)):
        pt = points2[i - indexcorrection]
        if status[i] == 0 or pt[0, 0] < 0 or pt[0, 1] < 0:
            if pt[0, 0] < 0 or pt[0, 1] < 0:
                status[i] = 0
            # np.delete returns a new array, so the result must be kept
            # (the original discarded it)
            points1 = np.delete(points1, i - indexcorrection, axis=0)
            points2 = np.delete(points2, i - indexcorrection, axis=0)
            indexcorrection += 1
    return points1, points2
Example #6
Source File: feature_tracking.py From OpenCV-3-x-with-Python-By-Example with MIT License

def compute_feature_points(tracking_paths, prev_img, current_img):
    feature_points = [tp[-1] for tp in tracking_paths]
    # Vector of 2D points for which the flow needs to be found
    feature_points_0 = np.float32(feature_points).reshape(-1, 1, 2)

    feature_points_1, status_1, err_1 = cv2.calcOpticalFlowPyrLK(
        prev_img, current_img, feature_points_0, None, **tracking_params)
    feature_points_0_rev, status_2, err_2 = cv2.calcOpticalFlowPyrLK(
        current_img, prev_img, feature_points_1, None, **tracking_params)

    # Compute the difference of the feature points
    diff_feature_points = abs(
        feature_points_0 - feature_points_0_rev).reshape(-1, 2).max(-1)

    # threshold and keep only the good points
    good_points = diff_feature_points < 1
    return feature_points_1.reshape(-1, 2), good_points
Example #7
Source File: feature_tracker.py From pyslam with GNU General Public License v3.0

def track(self, image_ref, image_cur, kps_ref, des_ref=None):
    kps_cur, st, err = cv2.calcOpticalFlowPyrLK(
        image_ref, image_cur, kps_ref, None, **self.lk_params)  # shape: [k,2] [k,1] [k,1]
    st = st.reshape(st.shape[0])
    res = FeatureTrackingResult()
    #res.idxs_ref = (st == 1)
    res.idxs_ref = [i for i, v in enumerate(st) if v == 1]
    res.idxs_cur = res.idxs_ref.copy()
    res.kps_ref_matched = kps_ref[res.idxs_ref]
    res.kps_cur_matched = kps_cur[res.idxs_cur]
    # with LK we follow feature trails, hence we can forget unmatched features
    res.kps_ref = res.kps_ref_matched
    res.kps_cur = res.kps_cur_matched
    res.des_cur = None
    return res

# Extract features by using the desired detector and descriptor,
# match keypoints by using the desired matcher on computed descriptors
Example #8
Source File: controller.py From HalloPy with MIT License

def _calculate_optical_flow(self, old_gray, frame_gray, p0):
    """This function tracks the edge of the middle finger.

    Points for tracking:
        expected_ext_left
        expected_ext_right
        expected_ext_top
        expected_ext_bot
        palm_center_point

    :param old_gray: old frame, gray scale
    :param frame_gray: current frame
    :return: p0 - updated tracking points
    """
    # Calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                           **self.lk_params)
    if p1 is None:
        good_new = p0[st == 1]
    else:
        good_new = p1[st == 1]

    # Now update the previous frame and previous points.
    self._old_gray = frame_gray.copy()
    self._p0 = good_new.reshape(-1, 1, 2)
Example #9
Source File: lk_track.py From OpenCV-Python-Tutorial with MIT License

def run(self):
    while True:
        ret, frame = self.cam.read()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        vis = frame.copy()

        if len(self.tracks) > 0:
            img0, img1 = self.prev_gray, frame_gray
            p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
            d = abs(p0 - p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []
            for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y))
                if len(tr) > self.track_len:
                    del tr[0]
                new_tracks.append(tr)
                cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
            self.tracks = new_tracks
            cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
            draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

        if self.frame_idx % self.detect_interval == 0:
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                cv2.circle(mask, (x, y), 5, 0, -1)
            p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    self.tracks.append([(x, y)])

        self.frame_idx += 1
        self.prev_gray = frame_gray
        cv2.imshow('lk_track', vis)

        ch = cv2.waitKey(1)
        if ch == 27:
            break
Example #10
Source File: optical_flow_tracker.py From head-pose-estimation with MIT License

def update_tracks(self, img_old, img_new):
    """Update tracks."""
    # Get old points, using the latest one.
    points_old = np.float32([track[-1]
                             for track in self.tracks]).reshape(-1, 1, 2)

    # Get new points from old points.
    points_new, _st, _err = cv2.calcOpticalFlowPyrLK(
        img_old, img_new, points_old, None, **self.lk_params)

    # Get inferred old points from new points.
    points_old_inferred, _st, _err = cv2.calcOpticalFlowPyrLK(
        img_new, img_old, points_new, None, **self.lk_params)

    # Compare between old points and inferred old points.
    error_term = abs(
        points_old - points_old_inferred).reshape(-1, 2).max(-1)
    point_valid = error_term < 1

    new_tracks = []
    for track, (x, y), good_flag in zip(
            self.tracks, points_new.reshape(-1, 2), point_valid):
        # Track is good?
        if not good_flag:
            continue

        # New point is good, add to track.
        track.append((x, y))

        # Need to drop first old point?
        if len(track) > self.track_len:
            del track[0]

        # Track updated, add to track groups.
        new_tracks.append(track)

    # All tracks updated, replace the old set.
    self.tracks = new_tracks
Example #11
Source File: lk_homography.py From PyCV-time with MIT License

def checkedTrace(img0, img1, p0, back_threshold=1.0):
    p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
    status = d < back_threshold
    return p1, status
Example #12
Source File: optical_flow.py From UE4PyServer with MIT License

def feed(self, frame):
    if self.old_gray is None:
        self.old_frame = frame.copy()
        #self.old_gray = cv2.cvtColor(self.old_frame, cv2.COLOR_BGR2GRAY)
        self.old_gray = self.old_frame[:, :, 2]
        margx = 120
        margy = 30
        self.p0 = np.array([(i, j)
                            for i in range(margx, frame.shape[1] - margx, 30)
                            for j in range(margy, frame.shape[0] - margy, 30)],
                           dtype='float32').reshape(-1, 1, 2)
        self.color = self.color[:len(self.p0)]
        self.initial_state = [self.p0, self.color]
        #import ipdb;ipdb.set_trace()

    frame_gray = frame[:, :, 2].copy()
    #frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    tic = time.time()
    p1, st, err = cv2.calcOpticalFlowPyrLK(self.old_gray, frame_gray,
                                           self.p0, None, **self.lk_params)
    #print('-------------------->', time.time() - tic)

    # Select good points
    good_new = p1[st == 1]
    good_old = self.p0[st == 1]
    self.color = self.color[(st == 1).flatten()]

    # draw the tracks
    self.old_frame = frame.copy()
    for i, new in enumerate(good_new):
        a, b = new.ravel()
        frame = cv2.circle(frame, (a, b), 2, self.color[i].tolist(), -1)

    # Now update the previous frame and previous points
    self.old_gray = frame_gray
    self.p0 = good_new.reshape(-1, 1, 2)
    return frame
Example #13
Source File: tracker.py From rpg_feature_tracking_analysis with MIT License

def track_features_on_klt(self, tracks_obj, tracker_params):
    """
    tracks features in feature_init using the dataset
    tracks must be dict with keys as ids and values as 1 x 3 arrays with x,y,t
    returns a dict with keys as track ids, and values as N x 3 arrays, with x,y,t.
    If collate is true, returns N x 4 array with id,x,y,t.
    """
    assert "reference_track" in tracker_params
    assert "frame_dataset" in tracker_params

    window_size = self.config["window_size"]
    num_pyramidal_layers = self.config["num_pyramidal_layers"]

    dataset = tracker_params["frame_dataset"]
    dataset.set_to_first_after(tracks_obj.t)

    print("Tracking with KLT parameters: [window_size=%s num_pyramidal_layers=%s]"
          % (window_size, num_pyramidal_layers))

    for i, (t, img) in enumerate(tqdm.tqdm(dataset)):
        if i == 0:
            first_img = img
            continue

        second_img = img

        if len(tracks_obj.active_features) == 0:
            break

        new_features, status, err = \
            cv2.calcOpticalFlowPyrLK(first_img, second_img,
                                     tracks_obj.active_features, None,
                                     winSize=(window_size, window_size),
                                     maxLevel=num_pyramidal_layers)

        tracks_obj.update(status[:, 0] == 1, new_features, t)
        first_img = second_img

    tracks = tracks_obj.collate()
    return tracks
Example #14
Source File: klt.py From sips2_open with GNU General Public License v3.0

def track(img_0, ips_rc_0, img_1, border_margin=15):
    """ Returns keypoint tracking status and the set of tracked keypoints.
    Keypoints given colwise and in (row, col). Status is TRACK_SUCCESS,
    BORDER_LOST or POOR_TRACK. """
    status = np.ones(ips_rc_0.shape[1], dtype=int) * TRACK_SUCCESS
    if ips_rc_0.size == 0:
        return status, np.zeros_like(ips_rc_0)
    ips_xy_0 = np.reshape(np.fliplr(ips_rc_0.T), (-1, 1, 2)).astype(np.float32)
    ips_xy_1, _, _ = cv2.calcOpticalFlowPyrLK(
        img_0, img_1, ips_xy_0, None)

    # Point lost at border.
    ips_rc_1_for_bordlost = np.fliplr(np.reshape(ips_xy_1, (-1, 2))).T
    not_bordlost = points.haveMarginToBorder(
        ips_rc_1_for_bordlost, img_1.shape, border_margin)

    # Symmetry: Reject all that don't come back to within 1 px:
    re_ips_xy_0, _, _ = cv2.calcOpticalFlowPyrLK(
        img_1, img_0, ips_xy_1, None)
    err = np.linalg.norm(re_ips_xy_0 - ips_xy_0, axis=2)
    tracked = (err < 1).ravel()

    status[np.logical_not(tracked)] = POOR_TRACK
    status[np.logical_not(not_bordlost)] = BORDER_LOST

    successful = np.logical_and(not_bordlost, tracked)
    ips_xy_1 = ips_xy_1[successful, :, :]
    ips_rc_1 = np.fliplr(np.reshape(ips_xy_1, (-1, 2))).T
    assert (np.count_nonzero(status == TRACK_SUCCESS) == ips_rc_1.shape[1])

    return status, ips_rc_1
Example #15
Source File: OptFlowEst.py From Gesture-Recognition with MIT License

def estimate_loop(self):
    opt_flow_params = dict(winSize=(15, 15), maxLevel=2,
                           criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    while not self.stopped:
        frame = self.video_feed.read()
        frame_grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Pull data from each human and body part -> put into np array
        # w/ shape (num_humans, 18, 2) and reshape to (num_humans, 18, 1, 2)
        # for use by optical flow
        with self.lock:
            all_human_points = np.asarray(
                [np.asarray([[[body_part.x * self.frame_shape[1],
                               body_part.y * self.frame_shape[0]]]
                             for key, body_part in human.body_parts.iteritems()],
                            dtype=np.float32)
                 for human in self.humans])

            for idx, human_points in enumerate(all_human_points):
                p1, st, err = cv2.calcOpticalFlowPyrLK(self.old_frame_grey, frame_grey,
                                                       human_points, None, **opt_flow_params)
                self.repack_humans(p1, idx)

                # Grab the points that have gone out of frame
                oof_points = p1[st != 1]
                if oof_points.shape[0] != 0:  # was `oof_points.shape != 0`, which is always True
                    # Get all the matches
                    tmp = np.isin(human_points, oof_points)
                    # Get the indexes of those matches
                    msng_idxz = [msng for msng in range(len(human_points)) if tmp[msng].all()]
                    #print "msng_idxz %s" % str(msng_idxz)
                    cur_part_exist = self.humans[idx].body_parts.keys()
                    for foo_idx in range(len(msng_idxz)):
                        del self.humans[idx].body_parts[cur_part_exist[msng_idxz[foo_idx]]]
                    if len(self.humans[idx].body_parts.keys()) == 0:
                        del self.humans[idx]

        self.old_frame = frame
        self.old_frame_grey = frame_grey.copy()
Example #16
Source File: klt.py From imips_open with GNU General Public License v3.0

def track(img_0, ips_rc_0, img_1, border_margin=15):
    """ Returns keypoint tracking status and the set of tracked keypoints.
    Keypoints given colwise and in (row, col). Status is TRACK_SUCCESS,
    BORDER_LOST or POOR_TRACK. """
    status = np.ones(ips_rc_0.shape[1], dtype=int) * TRACK_SUCCESS
    if ips_rc_0.size == 0:
        return status, np.zeros_like(ips_rc_0)
    ips_xy_0 = np.reshape(np.fliplr(ips_rc_0.T), (-1, 1, 2)).astype(np.float32)
    ips_xy_1, _, _ = cv2.calcOpticalFlowPyrLK(
        img_0, img_1, ips_xy_0, None)

    # Point lost at border.
    ips_rc_1_for_bordlost = np.fliplr(np.reshape(ips_xy_1, (-1, 2))).T
    not_bordlost = points.haveMarginToBorder(
        ips_rc_1_for_bordlost, img_1.shape, border_margin)

    # Symmetry: Reject all that don't come back to within 1 px:
    re_ips_xy_0, _, _ = cv2.calcOpticalFlowPyrLK(
        img_1, img_0, ips_xy_1, None)
    err = np.linalg.norm(re_ips_xy_0 - ips_xy_0, axis=2)
    tracked = (err < 1).ravel()

    status[np.logical_not(tracked)] = POOR_TRACK
    status[np.logical_not(not_bordlost)] = BORDER_LOST

    successful = np.logical_and(not_bordlost, tracked)
    ips_xy_1 = ips_xy_1[successful, :, :]
    ips_rc_1 = np.fliplr(np.reshape(ips_xy_1, (-1, 2))).T
    assert (np.count_nonzero(status == TRACK_SUCCESS) == ips_rc_1.shape[1])

    return status, ips_rc_1
Example #17
Source File: test_vidstab_utils.py From python_video_stab with MIT License

def test_none_optical_flow():
    prev_gray = np.ones((1208, 1920, 3)) * 250
    current_frame_gray = np.ones((1208, 1920, 3)) * 250

    prev_kps = np.array([], dtype='float32')
    # noinspection PyArgumentList
    prev_kps = prev_kps.reshape(0, 1, 2)

    none_optical_flow = cv2.calcOpticalFlowPyrLK(prev_gray, current_frame_gray,
                                                 prev_kps, None)
    matched_keypoints = utils.match_keypoints(none_optical_flow, prev_kps)
    transform_i = utils.estimate_partial_transform(matched_keypoints)

    assert transform_i == [0, 0, 0]
Example #18
Source File: lk_homography.py From OpenCV-Python-Tutorial with MIT License

def checkedTrace(img0, img1, p0, back_threshold=1.0):
    p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
    status = d < back_threshold
    return p1, status
Example #19
Source File: opticalflow_estimate.py From CrowdFlow with GNU General Public License v3.0

def run_parameter(config_item):
    prev_img = cv2.imread(config_item["files"]["prevImg"])
    curr_img = cv2.imread(config_item["files"]["currImg"])
    flow_method = config_item["parameter"]["flow_method"]

    estimate_base = config_item["files"]["estimatepath"] + "/"
    if not os.path.exists(estimate_base):
        os.makedirs(estimate_base)
    if os.path.exists(config_item["files"]["estflow"]):
        return

    # compute optical flow
    if flow_method.find("dual") >= 0:
        dual_proc = cv2.DualTVL1OpticalFlow_create(config_item["parameter"]["tau"],
                                                   config_item["parameter"]["lambda"],
                                                   config_item["parameter"]["theta"],
                                                   config_item["parameter"]["nscales"],
                                                   config_item["parameter"]["warps"])
        est_flow = np.zeros(shape=(prev_img.shape[0], prev_img.shape[1], 2), dtype=np.float32)
        dual_proc.calc(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY),
                       cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY),
                       est_flow)
    elif flow_method.find("farneback") >= 0:
        est_flow = cv2.calcOpticalFlowFarneback(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY),
                                                cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY),
                                                None, 0.5, 3, 15, 3, 5, 1.2, 0)
    elif flow_method.find("plk") >= 0:
        prev_pts = list()
        for r in range(prev_img.shape[0]):
            for c in range(prev_img.shape[1]):
                prev_pts.append((c, r))
        prev_pts = np.array(prev_pts, dtype=np.float32)
        curr_pts, st, err = cv2.calcOpticalFlowPyrLK(
            cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY),
            cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY),
            prev_pts, None,
            winSize=(21, 21), maxLevel=3,
            criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.001))
        est_flow = np.zeros(shape=(prev_img.shape[0], prev_img.shape[1], 2), dtype=np.float32)
        n = 0
        flow_pts = curr_pts - prev_pts
        for r in range(prev_img.shape[0]):
            for c in range(prev_img.shape[1]):
                est_flow[r, c, :] = flow_pts[n, :]
                n = n + 1
    # here alternative optical flow methods can be applied
    else:
        raise ValueError("flow method has not been implemented")

    ut.writeFlowFile(config_item["files"]["estflow"], est_flow)
    ut.drawFlowField(config_item["files"]["estflow"][:-3] + "png", est_flow)
    print("Done -> ", config_item["files"]["estflow"])
Example #20
Source File: util.py From OpenCV-Video-Label with GNU General Public License v3.0

def track(im_prev, im_gray, keypoints, THR_FB=20):
    if type(keypoints) is list:
        keypoints = keypoints_cv_to_np(keypoints)

    num_keypoints = keypoints.shape[0]

    # Status of tracked keypoint - True means successfully tracked
    status = [False] * num_keypoints

    # If at least one keypoint is active
    if num_keypoints > 0:
        # Prepare data for opencv:
        # Add singleton dimension
        # Use only first and second column
        # Make sure dtype is float32
        pts = keypoints[:, None, :2].astype(np.float32)

        # Calculate forward optical flow for prev_location
        nextPts, status, _ = cv2.calcOpticalFlowPyrLK(im_prev, im_gray, pts, None)

        # Calculate backward optical flow for prev_location
        pts_back, _, _ = cv2.calcOpticalFlowPyrLK(im_gray, im_prev, nextPts, None)

        # Remove singleton dimension
        pts_back = squeeze_pts(pts_back)
        pts = squeeze_pts(pts)
        nextPts = squeeze_pts(nextPts)
        status = status.squeeze()

        # Calculate forward-backward error
        fb_err = np.sqrt(np.power(pts_back - pts, 2).sum(axis=1))

        # Set status depending on fb_err and lk error
        large_fb = fb_err > THR_FB
        status = ~large_fb & status.astype(np.bool)

        nextPts = nextPts[status, :]
        keypoints_tracked = keypoints[status, :]
        keypoints_tracked[:, :2] = nextPts
    else:
        keypoints_tracked = np.array([])

    return keypoints_tracked, status
Example #21
Source File: tracking.py From pi-tracking-telescope with MIT License

def updateError(self, frame):
    self.frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if len(self.tracks) > 0:
        img0, img1 = self.prev_gray, self.frame_gray
        p0 = np.float32([tr[-1][:2] for tr in self.tracks]).reshape(-1, 1, 2)
        p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **self.lk_params)
        p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **self.lk_params)
        d = abs(p0 - p0r).reshape(-1, 2).max(-1)
        good = d < 1
        new_tracks = []

        self.xerror = 0.0
        self.yerror = 0.0
        self.n = 0.0
        current_time = time.time()
        for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
            if not good_flag:
                continue
            tr.append((x, y, current_time))
            if len(tr) > 500:
                del tr[0]
            new_tracks.append(tr)

            if len(tr) >= 2:
                t = np.float32([v[2] for v in tr])
                x = np.float32([v[0] for v in tr])
                y = np.float32([v[1] for v in tr])
                self.xerror = self.xerror + (x[-1] - x[0])
                self.yerror = self.yerror + (y[-1] - y[0])
                self.n = self.n + 1.0

        if self.n > 0:
            self.xerror = self.xerror / float(self.n)
            self.yerror = self.yerror / float(self.n)

        self.tracks = new_tracks

    if self.xerror == 0 and self.yerror == 0:
        current_time = time.time()
        mask = np.zeros_like(self.frame_gray)
        mask[:] = 255
        p = cv2.goodFeaturesToTrack(self.frame_gray, mask=mask, **self.feature_params)
        if p is not None:
            for x, y in np.float32(p).reshape(-1, 2):
                self.tracks.append([(x, y, current_time)])

    self.prev_gray = self.frame_gray
Example #22
Source File: opticalFlow.py From Mask-RCNN-Pedestrian-Detection with MIT License

def sparseOpticalFlow():
    # use 0 for webcam capturing
    # cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture('test/Pedestrian overpass.mp4')

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (100, 3))

    # Take first frame and find corners in it
    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)

    while(1):
        ret, frame = cap.read()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

        # Select good points
        good_new = p1[st == 1]
        good_old = p0[st == 1]

        # draw the tracks
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
            frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
        img = cv2.add(frame, mask)

        cv2.imshow('frame', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break

        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)

    cv2.destroyAllWindows()
    cap.release()

# DENSE OPTICAL FLOW
Example #23
Source File: MFTracker.py From SimpleCV2 with BSD 3-Clause "New" or "Revised" License

def lktrack(img1, img2, ptsI, nPtsI, winsize_ncc=10, win_size_lk=4,
            method=cv2.cv.CV_TM_CCOEFF_NORMED):
    """
    **SUMMARY**
    (Dev Zone)
    Lucas-Kanade Tracker with pyramids

    **PARAMETERS**
    img1 - Previous image or image containing the known bounding box (Numpy array)
    img2 - Current image
    ptsI - Points to track from the first image
           Format ptsI[0] - x1, ptsI[1] - y1, ptsI[2] - x2, ..
    nPtsI - Number of points to track from the first image
    winsize_ncc - size of the search window at each pyramid level in LK tracker (in int)
    method - Parameter specifying the comparison method for normalized cross correlation
    (see http://opencv.itseez.com/modules/imgproc/doc/object_detection.html?highlight=matchtemplate#cv2.matchTemplate)

    **RETURNS**
    fb - forward-backward confidence value (corresponds to euclidean distance between points)
    ncc - normCrossCorrelation values
    status - Indicates positive tracks. 1 = PosTrack 0 = NegTrack
    ptsJ - Calculated Points of second image
    """
    template_pt = []
    target_pt = []
    fb_pt = []
    ptsJ = [-1] * len(ptsI)

    for i in range(nPtsI):
        template_pt.append((ptsI[2*i], ptsI[2*i+1]))
        target_pt.append((ptsI[2*i], ptsI[2*i+1]))
        fb_pt.append((ptsI[2*i], ptsI[2*i+1]))

    template_pt = np.asarray(template_pt, dtype="float32")
    target_pt = np.asarray(target_pt, dtype="float32")
    fb_pt = np.asarray(fb_pt, dtype="float32")

    target_pt, status, track_error = cv2.calcOpticalFlowPyrLK(
        img1, img2, template_pt, target_pt,
        winSize=(win_size_lk, win_size_lk),
        flags=cv2.OPTFLOW_USE_INITIAL_FLOW,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    fb_pt, status_bt, track_error_bt = cv2.calcOpticalFlowPyrLK(
        img2, img1, target_pt, fb_pt,
        winSize=(win_size_lk, win_size_lk),
        flags=cv2.OPTFLOW_USE_INITIAL_FLOW,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    status = status & status_bt
    ncc = normCrossCorrelation(img1, img2, template_pt, target_pt, status,
                               winsize_ncc, method)
    fb = euclideanDistance(template_pt, target_pt)

    newfb = -1 * np.ones(len(fb))
    newncc = -1 * np.ones(len(ncc))
    for i in np.argwhere(status):
        i = i[0]
        ptsJ[2 * i] = target_pt[i][0]
        ptsJ[2 * i + 1] = target_pt[i][1]
        newfb[i] = fb[i]
        newncc[i] = ncc[i]

    return newfb, newncc, status, ptsJ
Example #24
Source File: lk_track.py From PyCV-time with MIT License

def run(self):
    while True:
        ret, frame = self.cam.read()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        vis = frame.copy()

        if len(self.tracks) > 0:
            img0, img1 = self.prev_gray, frame_gray
            p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
            d = abs(p0 - p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []
            for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y))
                if len(tr) > self.track_len:
                    del tr[0]
                new_tracks.append(tr)
                cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
            self.tracks = new_tracks
            cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
            draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

        if self.frame_idx % self.detect_interval == 0:
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                cv2.circle(mask, (x, y), 5, 0, -1)
            p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    self.tracks.append([(x, y)])

        self.frame_idx += 1
        self.prev_gray = frame_gray
        cv2.imshow('lk_track', vis)

        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break
Example #25
Source File: models.py From rainymotion with MIT License

def _sparse_sd(data_instance,
               of_params={'st_pars': dict(maxCorners=200,
                                          qualityLevel=0.2,
                                          minDistance=7,
                                          blockSize=21),
                          'lk_pars': dict(winSize=(20, 20),
                                          maxLevel=2,
                                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0))},
               lead_steps=12):
    # define penult and last frames
    penult_frame = data_instance[-2]
    last_frame = data_instance[-1]

    # find features to track
    old_corners = cv2.goodFeaturesToTrack(data_instance[0], mask=None,
                                          **of_params['st_pars'])

    # track corners by optical flow algorithm
    new_corners, st, err = cv2.calcOpticalFlowPyrLK(prevImg=penult_frame,
                                                    nextImg=last_frame,
                                                    prevPts=old_corners,
                                                    nextPts=None,
                                                    **of_params['lk_pars'])

    # select only good attempts for corner tracking
    success = st.ravel() == 1
    new_corners = new_corners[success].copy()
    old_corners = old_corners[success].copy()

    # calculate Simple Delta
    delta = new_corners.reshape(-1, 2) - old_corners.reshape(-1, 2)

    # simplify further transformations
    pts_source = new_corners.reshape(-1, 2)

    # propagate our corners through time
    pts_target_container = []
    for lead_step in range(lead_steps):
        pts_target_container.append(pts_source + delta * (lead_step + 1))

    return pts_source, pts_target_container