Python cv2.TERM_CRITERIA_COUNT Examples
The following are 28 code examples of cv2.TERM_CRITERIA_COUNT. You can go to the original project or source file by following the links above each example, or check out all available functions/classes of the module cv2.
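Note that cv2.TERM_CRITERIA_COUNT is an integer flag (an alias of cv2.TERM_CRITERIA_MAX_ITER), not a function: it is combined with cv2.TERM_CRITERIA_EPS into a (type, max_iter, epsilon) tuple that tells an iterative OpenCV routine when to stop. As a minimal, self-contained sketch of the idea, using cv2.kmeans on random data (illustrative only, not taken from any of the projects below):

import cv2
import numpy as np

# Stop after 10 iterations OR once the cluster centers move by less
# than 1.0 between iterations, whichever happens first.
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1.0)

points = np.random.rand(100, 2).astype(np.float32)
compactness, labels, centers = cv2.kmeans(points, 2, None, criteria,
                                          attempts=3,
                                          flags=cv2.KMEANS_RANDOM_CENTERS)
print(centers)  # two 2D cluster centers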
Example #1
Source File: models.py From rainymotion with MIT License
def __init__(self):
    self.of_params = {'st_pars': dict(maxCorners=200,
                                      qualityLevel=0.2,
                                      minDistance=7,
                                      blockSize=21),
                      'lk_pars': dict(winSize=(20, 20),
                                      maxLevel=2,
                                      criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0))}
    self.extrapolation = "linear"
    self.warper = "affine"
    self.input_data = None
    self.scaler = RYScaler
    self.inverse_scaler = inv_RYScaler
    self.lead_steps = 12
Example #2
Source File: tracking.py From OpenCV-Computer-Vision-Projects-with-Python with MIT License
def __init__(self, min_area=400, min_shift2=5):
    """Constructor

    This method initializes the multiple-objects tracking algorithm.

    :param min_area: Minimum area for a proto-object contour to be
        considered a real object
    :param min_shift2: Minimum distance for a proto-object to drift
        from frame to frame to be considered a real object
    """
    self.object_roi = []
    self.object_box = []

    self.min_cnt_area = min_area
    self.min_shift2 = min_shift2

    # Setup the termination criteria: either 100 iterations or move by
    # at least 1 pt
    self.term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 1)
Example #3
Source File: tracking.py From pi-tracking-telescope with MIT License
def __init__(self, piscopeController):
    Thread.__init__(self)
    self.mutex = Lock()
    self.piscopeController = piscopeController
    self.setDaemon(True)  # terminate on exit
    self.status = "Initial"
    self.reset()
    self.lk_params = dict(winSize=(15, 15),
                          maxLevel=2,
                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    self.feature_params = dict(maxCorners=5,
                               qualityLevel=0.3,
                               minDistance=7,
                               blockSize=7)
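Examples like this one only configure the parameters. For a sense of what the criteria do inside cv2.calcOpticalFlowPyrLK, here is a self-contained sketch on two synthetic frames (illustrative only, not part of pi-tracking-telescope); each corner's iterative search stops after 10 iterations or once the update falls below 0.03 pixels:

import cv2
import numpy as np

# Two synthetic frames: a blurred square that moves 3 px right, 2 px down.
frame1 = np.zeros((120, 120), dtype=np.uint8)
cv2.rectangle(frame1, (40, 40), (70, 70), 255, -1)
frame1 = cv2.GaussianBlur(frame1, (7, 7), 2)
M = np.float32([[1, 0, 3], [0, 1, 2]])
frame2 = cv2.warpAffine(frame1, M, (120, 120))

p0 = cv2.goodFeaturesToTrack(frame1, maxCorners=20, qualityLevel=0.3,
                             minDistance=7, blockSize=7)
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
p1, st, err = cv2.calcOpticalFlowPyrLK(frame1, frame2, p0, None, **lk_params)
print((p1 - p0)[st.ravel() == 1].mean(axis=0))  # roughly [[3., 2.]]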
Example #4
Source File: tracker.py From tracking_wo_bnw with GNU General Public License v3.0
def align(self, blob):
    """Aligns the positions of active and inactive tracks depending on camera motion."""
    if self.im_index > 0:
        im1 = np.transpose(self.last_image.cpu().numpy(), (1, 2, 0))
        im2 = np.transpose(blob['img'][0].cpu().numpy(), (1, 2, 0))
        im1_gray = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)
        im2_gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
        warp_matrix = np.eye(2, 3, dtype=np.float32)
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                    self.number_of_iterations, self.termination_eps)
        cc, warp_matrix = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix,
                                               self.warp_mode, criteria)
        warp_matrix = torch.from_numpy(warp_matrix)

        for t in self.tracks:
            t.pos = warp_pos(t.pos, warp_matrix)
            # t.pos = clip_boxes(Variable(pos), blob['im_info'][0][:2]).data

        if self.do_reid:
            for t in self.inactive_tracks:
                t.pos = warp_pos(t.pos, warp_matrix)

        if self.motion_model_cfg['enabled']:
            for t in self.tracks:
                for i in range(len(t.last_pos)):
                    t.last_pos[i] = warp_pos(t.last_pos[i], warp_matrix)
Example #5
Source File: vo.py From Monocular-Visual-Inertial-Odometry with MIT License
def feature_tracking(img1, img2, points1, points2, status):  # track matching features
    err = np.array([])
    winSize = (15, 15)
    maxLevel = 3
    termcriteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01)
    cv2.calcOpticalFlowPyrLK(img1, img2, points1, points2, status, err,
                             winSize, maxLevel, termcriteria, 0, 0.001)

    # remove bad points
    indexcorrection = 0
    for i in range(len(status)):
        pt = points2[i - indexcorrection]
        if (status[i] == 0 or pt[0, 0] < 0 or pt[0, 1] < 0):
            if pt[0, 0] < 0 or pt[0, 1] < 0:
                status[i] = 0
            # note: np.delete returns a copy, so these two calls have no
            # effect on points1/points2 as written in the original project
            np.delete(points1, i - indexcorrection)
            np.delete(points2, i - indexcorrection)
            indexcorrection += 1
Example #6
Source File: models.py From rainymotion with MIT License
def __init__(self):
    self.of_params = {'st_pars': dict(maxCorners=200,
                                      qualityLevel=0.2,
                                      minDistance=7,
                                      blockSize=21),
                      'lk_pars': dict(winSize=(20, 20),
                                      maxLevel=2,
                                      criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0))}
    self.extrapolation = "simple_delta"
    self.warper = "affine"
    self.input_data = None
    self.scaler = RYScaler
    self.inverse_scaler = inv_RYScaler
    self.lead_steps = 12
Example #7
Source File: camshift.py From PyCV-time with MIT License
def run(self):
    while True:
        ret, self.frame = self.cam.read()
        vis = self.frame.copy()
        hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))

        if self.selection:
            x0, y0, x1, y1 = self.selection
            self.track_window = (x0, y0, x1 - x0, y1 - y0)
            hsv_roi = hsv[y0:y1, x0:x1]
            mask_roi = mask[y0:y1, x0:x1]
            hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.hist = hist.reshape(-1)
            self.show_hist()

            vis_roi = vis[y0:y1, x0:x1]
            cv2.bitwise_not(vis_roi, vis_roi)
            vis[mask == 0] = 0

        if self.tracking_state == 1:
            self.selection = None
            prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
            prob &= mask
            term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
            track_box, self.track_window = cv2.CamShift(prob, self.track_window,
                                                        term_crit)

            if self.show_backproj:
                vis[:] = prob[..., np.newaxis]
            try:
                cv2.ellipse(vis, track_box, (0, 0, 255), 2)
            except:
                print track_box

        cv2.imshow('camshift', vis)

        ch = 0xFF & cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord('b'):
            self.show_backproj = not self.show_backproj
    cv2.destroyAllWindows()
Example #8
Source File: letter_recog.py From PyCV-time with MIT License
def train(self, samples, responses):
    sample_n, var_n = samples.shape
    new_responses = self.unroll_responses(responses).reshape(-1, self.class_n)
    layer_sizes = np.int32([var_n, 100, 100, self.class_n])
    self.model.create(layer_sizes)

    # CvANN_MLP_TrainParams::BACKPROP,0.001
    # note: this is the pre-OpenCV-3 ML API (params dict); later examples
    # on this page use the cv2.ml setter-based API instead
    params = dict(term_crit=(cv2.TERM_CRITERIA_COUNT, 300, 0.01),
                  train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
                  bp_dw_scale=0.001,
                  bp_moment_scale=0.0)
    self.model.train(samples, np.float32(new_responses), None, params=params)
Example #9
Source File: synthetic_util.py From SCCvSD with BSD 2-Clause "Simplified" License
def find_transform(im_src, im_dst):
    # note: in this project cv2 is imported as cv
    warp = np.eye(3, dtype=np.float32)
    criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 50, 0.001)
    try:
        _, warp = cv.findTransformECC(im_src, im_dst, warp,
                                      cv.MOTION_HOMOGRAPHY, criteria)
    except:
        print('Warning: find transform failed. Set warp as identity')
    return warp
Example #10
Source File: optical_flow.py From UE4PyServer with MIT License
def __init__(self):
    # Parameters for lucas kanade optical flow
    self.lk_params = dict(winSize=(15, 15),
                          maxLevel=1,
                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.08))

    # Create some random colors
    self.color = np.random.randint(0, 255, (2000, 3))

    self.old_gray = None
    self.old_frame = None
    self.p0 = self.p1 = None
    self.initial_state = None
Example #11
Source File: model.py From AutoRCCar with BSD 2-Clause "Simplified" License
def create(self, layer_sizes):
    # create neural network
    self.model = cv2.ml.ANN_MLP_create()
    self.model.setLayerSizes(np.int32(layer_sizes))
    self.model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
    self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
    self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 100, 0.01))
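For reference, the same modern cv2.ml API can be exercised end to end on toy data. A minimal sketch under hypothetical data and layer sizes (not part of AutoRCCar); note that with a plain cv2.TERM_CRITERIA_COUNT type, the epsilon value in the tuple is ignored and only the iteration cap applies:

import cv2
import numpy as np

# Hypothetical toy problem: 2 inputs, 2 outputs.
samples = np.random.rand(20, 2).astype(np.float32)
s = samples.sum(axis=1, keepdims=True)
targets = np.hstack([s > 1.0, s <= 1.0]).astype(np.float32)

model = cv2.ml.ANN_MLP_create()
model.setLayerSizes(np.int32([2, 8, 2]))
model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 100, 0.01))  # epsilon unused here
model.train(samples, cv2.ml.ROW_SAMPLE, targets)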
Example #12
Source File: coregistration.py From eo-learn with MIT License
def register(self, src, trg, trg_mask=None, src_mask=None):
    """ Implementation of pair-wise registration and warping using Enhanced Correlation Coefficient

    This function estimates a Euclidean transformation (x,y translation + rotation)
    using the intensities of the pair of images to be registered. The similarity
    metric is a modification of the cross-correlation metric, which is invariant to
    distortions in contrast and brightness.

    :param src: 2D single channel source moving image
    :param trg: 2D single channel target reference image
    :param trg_mask: Mask of target image. Not used in this method.
    :param src_mask: Mask of source image. Not used in this method.
    :return: Estimated 2D transformation matrix of shape 2x3
    """
    # Parameters of registration
    warp_mode = cv2.MOTION_EUCLIDEAN

    # Specify the threshold of the increment
    # in the correlation coefficient between two iterations
    termination_eps = 1e-10

    # Define termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                self.params['MaxIters'], termination_eps)

    # Initialise warp matrix
    warp_matrix = np.eye(2, 3, dtype=np.float32)

    # Run the ECC algorithm. The results are stored in warp_matrix.
    _, warp_matrix = cv2.findTransformECC(src.astype(np.float32),
                                          trg.astype(np.float32),
                                          warp_matrix, warp_mode, criteria,
                                          None, self.params['gaussFiltSize'])
    return warp_matrix
Example #13
Source File: camshift_object_tracker.py From automl-video-ondevice with Apache License 2.0
def __init__(self):
    self.term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    self.tracks = []
    self.current_track = 0
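The (EPS | COUNT, 10, 1) tuple here caps the CamShift mode-seeking loop at 10 iterations or a window shift below 1 pixel. A self-contained sketch of the same criteria driving the closely related cv2.meanShift on a synthetic back-projection image (illustrative only, not from automl-video-ondevice):

import cv2
import numpy as np

# Synthetic back-projection: a bright blob on a dark field.
prob = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(prob, (120, 80), 20, 255, -1)

# Start the window off-target; meanShift slides it onto the mode.
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
ret, track_window = cv2.meanShift(prob, (50, 50, 60, 60), term_crit)
print(track_window)  # the window has moved toward the blob at (120, 80)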
Example #14
Source File: test_monkey.py From ATX with Apache License 2.0
def test_features():
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    # r, h, c, w = 200, 100, 200, 100
    # track_window = (c, r, w, h)
    # oldimg = cv2.imread('base1.png')
    # roi = oldimg[r:r+h, c:c+w]
    # hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv_roi, 0, 255)
    # roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
    # cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    # term_cirt = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        try:
            w, h = d._screen.shape[:2]
            img = cv2.resize(d._screen, (h/2, w/2))
            cv2.imshow('preview', img)

            hist = cv2.calcHist([img], [0], None, [256], [0,256])
            plt.plot(plt.hist(hist.ravel(), 256))
            plt.show()

            # if img.shape == oldimg.shape:
            #     hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            #     ret, track_window = cv2.meanShift(hsv, track_window, term_cirt)
            #     x, y, w, h = track_window
            #     cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            #     cv2.imshow('preview', img)

            cv2.waitKey(1)
        except KeyboardInterrupt:
            break

    cv2.destroyWindow('preview')
Example #15
Source File: feature_tracker.py From pyslam with GNU General Public License v3.0
def __init__(self, num_features=kMinNumFeatureDefault,
             num_levels=3,     # number of pyramid levels for detector
             scale_factor=1.2, # detection scale factor (if it can be set, otherwise it is automatically computed)
             detector_type=FeatureDetectorTypes.FAST,
             descriptor_type=FeatureDescriptorTypes.NONE,
             match_ratio_test=kRatioTest,
             tracker_type=FeatureTrackerTypes.LK):
    super().__init__(num_features=num_features,
                     num_levels=num_levels,
                     scale_factor=scale_factor,
                     detector_type=detector_type,
                     descriptor_type=descriptor_type,
                     tracker_type=tracker_type)
    self.feature_manager = feature_manager_factory(num_features=num_features,
                                                   num_levels=num_levels,
                                                   scale_factor=scale_factor,
                                                   detector_type=detector_type,
                                                   descriptor_type=descriptor_type)
    #if num_levels < 3:
    #    Printer.green('LkFeatureTracker: forcing at least 3 levels on LK pyr optic flow')
    #    num_levels = 3
    optic_flow_num_levels = max(kLkPyrOpticFlowNumLevelsMin, num_levels)
    Printer.green('LkFeatureTracker: num levels on LK pyr optic flow: ', optic_flow_num_levels)
    # we use LK pyr optic flow for matching
    self.lk_params = dict(winSize=(21, 21),
                          maxLevel=optic_flow_num_levels,
                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))

# out: keypoints and empty descriptors
Example #16
Source File: baseline_ridi.py From ronin with GNU General Public License v3.0
def __init__(self, chn, c, e, **kwargs):
    self.chn = chn
    self.m = cv2.ml.SVM_create()
    self.m.setType(cv2.ml.SVM_EPS_SVR)
    self.m.setC(c)
    self.m.setDegree(1)
    self.m.setP(e)
    max_iter = kwargs.get('max_iter', 10000)
    self.m.setTermCriteria((cv2.TERM_CRITERIA_COUNT + cv2.TERM_CRITERIA_EPS,
                            max_iter, 1e-09))
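This example adds the two flags instead of OR-ing them; the result is the same because cv2.TERM_CRITERIA_COUNT and cv2.TERM_CRITERIA_EPS are distinct bits (1 and 2). A self-contained sketch of a similar SVM regressor on hypothetical toy data (illustrative only, not part of ronin):

import cv2
import numpy as np

# Hypothetical toy regression: y = 2*x plus a little noise.
x = np.random.rand(50, 1).astype(np.float32)
y = (2.0 * x[:, 0] + 0.01 * np.random.randn(50)).astype(np.float32)

svm = cv2.ml.SVM_create()
svm.setType(cv2.ml.SVM_EPS_SVR)
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setC(1.0)
svm.setP(0.01)
# '+' and '|' are interchangeable here: the flags occupy different bits.
svm.setTermCriteria((cv2.TERM_CRITERIA_COUNT + cv2.TERM_CRITERIA_EPS,
                     10000, 1e-09))
svm.train(x, cv2.ml.ROW_SAMPLE, y)
print(svm.predict(np.float32([[0.5]]))[1])  # roughly [[1.0]]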
Example #17
Source File: letter_recog.py From OpenCV-Python-Tutorial with MIT License
def train(self, samples, responses):
    sample_n, var_n = samples.shape
    new_responses = self.unroll_responses(responses).reshape(-1, self.class_n)
    layer_sizes = np.int32([var_n, 100, 100, self.class_n])

    self.model.setLayerSizes(layer_sizes)
    self.model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
    self.model.setBackpropMomentumScale(0.0)
    self.model.setBackpropWeightScale(0.001)
    self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 20, 0.01))
    self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2, 1)

    self.model.train(samples, cv2.ml.ROW_SAMPLE, np.float32(new_responses))
Example #18
Source File: optical_flow_tracker.py From face_landmark_dnn with MIT License
def __init__(self):
    self.track_len = 5
    self.tracks = []
    self.lk_params = dict(winSize=(15, 15),
                          maxLevel=2,
                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    self.feature_params = dict(maxCorners=500,
                               qualityLevel=0.3,
                               minDistance=7,
                               blockSize=7)
Example #19
Source File: optical_flow_tracker.py From head-pose-estimation with MIT License
def __init__(self):
    self.track_len = 5
    self.tracks = []
    self.lk_params = dict(winSize=(15, 15),
                          maxLevel=2,
                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    self.feature_params = dict(maxCorners=500,
                               qualityLevel=0.3,
                               minDistance=7,
                               blockSize=7)
Example #20
Source File: controller.py From HalloPy with MIT License
def __init__(self, flags_handler, points_to_track, input_image):
    self.logger = logging.getLogger('tracker_handler')
    self.flags_handler = flags_handler

    self.points_to_track = points_to_track
    self._input_image = input_image
    self._old_gray = None
    self._p0 = None
    self.lk_params = dict(winSize=(15, 15),
                          maxLevel=2,
                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    self.track(self.points_to_track, self._input_image)
Example #21
Source File: optical_flow.py From self-driving with MIT License
def __init__(self, videoSource, featurePtMask=None, verbosity=0):
    # cap the length of optical flow tracks
    self.maxTrackLength = 10

    # detect feature points in intervals of frames; adds robustness for
    # when feature points disappear.
    self.detectionInterval = 5

    # Params for Shi-Tomasi corner (feature point) detection
    self.featureParams = dict(maxCorners=500,
                              qualityLevel=0.3,
                              minDistance=7,
                              blockSize=7)

    # Params for Lucas-Kanade optical flow
    self.lkParams = dict(winSize=(15, 15),
                         maxLevel=2,
                         criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    # # Alternatively use a fast feature detector
    # self.fast = cv2.FastFeatureDetector_create(500)

    self.verbosity = verbosity

    (self.videoStream,
     self.width,
     self.height,
     self.featurePtMask) = self._initializeCamera(videoSource)
Example #22
Source File: OptFlowEst.py From Gesture-Recognition with MIT License
def estimate_loop(self):
    opt_flow_params = dict(winSize=(15, 15),
                           maxLevel=2,
                           criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    while not self.stopped:
        frame = self.video_feed.read()
        frame_grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Pull data from each human and bodypart -> put into np array with
        # shape (num_humans, 18, 2) and reshape to (num_humans, 18, 1, 2)
        # for use by optical flow
        with self.lock:
            all_human_points = np.asarray(
                [np.asarray([[[body_part.x * self.frame_shape[1],
                               body_part.y * self.frame_shape[0]]]
                             for key, body_part in human.body_parts.iteritems()],
                            dtype=np.float32)
                 for human in self.humans])

            for idx, human_points in enumerate(all_human_points):
                p1, st, err = cv2.calcOpticalFlowPyrLK(self.old_frame_grey, frame_grey,
                                                       human_points, None,
                                                       **opt_flow_params)
                self.repack_humans(p1, idx)

                # Grab the points that have gone out of frame
                oof_points = p1[st != 1]
                if oof_points.shape != 0:  # note: .shape is a tuple, so this test is always true
                    # Get all the matches
                    tmp = np.isin(human_points, oof_points)
                    # Get the indexes of those matches
                    msng_idxz = [msng for msng in range(len(human_points)) if tmp[msng].all()]
                    # print "msng_idxz %s" % str(msng_idxz)
                    cur_part_exist = self.humans[idx].body_parts.keys()
                    for foo_idx in range(len(msng_idxz)):
                        del self.humans[idx].body_parts[cur_part_exist[msng_idxz[foo_idx]]]
                    if len(self.humans[idx].body_parts.keys()) == 0:
                        del self.humans[idx]

        self.old_frame = frame
        self.old_frame_grey = frame_grey.copy()
Example #23
Source File: train_test_mlp.py From opencv-python-blueprints with GNU General Public License v3.0
def main():
    # load training data
    # training data can be recorded using chapter7.py in training mode
    (X_train, y_train), (X_test, y_test), _, _ = homebrew.load_data(
        "datasets/faces_training.pkl",
        num_components=50,
        test_split=0.2,
        save_to_file="datasets/faces_preprocessed.pkl",
        seed=42)
    if len(X_train) == 0 or len(X_test) == 0:
        print "Empty data"
        raise SystemExit

    # convert to numpy
    X_train = np.squeeze(np.array(X_train)).astype(np.float32)
    y_train = np.array(y_train)
    X_test = np.squeeze(np.array(X_test)).astype(np.float32)
    y_test = np.array(y_test)

    # find all class labels
    labels = np.unique(np.hstack((y_train, y_test)))

    # prepare training
    num_features = len(X_train[0])
    num_classes = len(labels)
    params = dict(term_crit=(cv2.TERM_CRITERIA_COUNT, 300, 0.01),
                  train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
                  bp_dw_scale=0.001,
                  bp_moment_scale=0.9)
    saveFile = 'params/mlp.xml'

    # find best MLP configuration
    print "---"
    print "1-hidden layer networks"
    best_acc = 0.0  # keep track of best accuracy
    for l1 in xrange(10):
        # gradually increase the hidden-layer size
        layerSizes = np.int32([num_features,
                               (l1 + 1) * num_features / 5,
                               num_classes])
        MLP = MultiLayerPerceptron(layerSizes, labels)
        print layerSizes
        MLP.fit(X_train, y_train, params=params)
        (acc, _, _) = MLP.evaluate(X_train, y_train)
        print " - train acc = ", acc
        (acc, _, _) = MLP.evaluate(X_test, y_test)
        print " - test acc = ", acc
        if acc > best_acc:
            # save best MLP configuration to file
            MLP.save(saveFile)
            best_acc = acc
Example #24
Source File: camshift.py From OpenCV-Python-Tutorial with MIT License
def run(self):
    while True:
        ret, self.frame = self.cam.read()
        vis = self.frame.copy()
        hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))

        if self.selection:
            x0, y0, x1, y1 = self.selection
            hsv_roi = hsv[y0:y1, x0:x1]
            mask_roi = mask[y0:y1, x0:x1]
            hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.hist = hist.reshape(-1)
            self.show_hist()

            vis_roi = vis[y0:y1, x0:x1]
            cv2.bitwise_not(vis_roi, vis_roi)
            vis[mask == 0] = 0

        if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
            self.selection = None
            prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
            prob &= mask
            term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
            track_box, self.track_window = cv2.CamShift(prob, self.track_window,
                                                        term_crit)

            if self.show_backproj:
                vis[:] = prob[..., np.newaxis]
            try:
                cv2.ellipse(vis, track_box, (0, 0, 255), 2)
            except:
                print(track_box)

        cv2.imshow('camshift', vis)

        ch = cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord('b'):
            self.show_backproj = not self.show_backproj
    cv2.destroyAllWindows()
Example #25
Source File: models.py From rainymotion with MIT License
def _sparse_sd(data_instance,
               of_params={'st_pars': dict(maxCorners=200,
                                          qualityLevel=0.2,
                                          minDistance=7,
                                          blockSize=21),
                          'lk_pars': dict(winSize=(20, 20),
                                          maxLevel=2,
                                          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0))},
               lead_steps=12):
    # define penult and last frames
    penult_frame = data_instance[-2]
    last_frame = data_instance[-1]

    # find features to track
    old_corners = cv2.goodFeaturesToTrack(data_instance[0], mask=None,
                                          **of_params['st_pars'])

    # track corners by optical flow algorithm
    new_corners, st, err = cv2.calcOpticalFlowPyrLK(prevImg=penult_frame,
                                                    nextImg=last_frame,
                                                    prevPts=old_corners,
                                                    nextPts=None,
                                                    **of_params['lk_pars'])

    # select only good attempts for corner tracking
    success = st.ravel() == 1
    new_corners = new_corners[success].copy()
    old_corners = old_corners[success].copy()

    # calculate Simple Delta
    delta = new_corners.reshape(-1, 2) - old_corners.reshape(-1, 2)

    # simplify further transformations
    pts_source = new_corners.reshape(-1, 2)

    # propagate our corners through time
    pts_target_container = []
    for lead_step in range(lead_steps):
        pts_target_container.append(pts_source + delta * (lead_step + 1))

    return pts_source, pts_target_container
Example #26
Source File: opticalFlow.py From Mask-RCNN-Pedestrian-Detection with MIT License
def sparseOpticalFlow():
    # use 0 for webcam capturing
    # cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture('test/Pedestrian overpass.mp4')

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (100, 3))

    # Take first frame and find corners in it
    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)

    while(1):
        ret, frame = cap.read()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                               **lk_params)

        # Select good points
        good_new = p1[st == 1]
        good_old = p0[st == 1]

        # draw the tracks
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
            frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
        img = cv2.add(frame, mask)

        cv2.imshow('frame', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break

        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)

    cv2.destroyAllWindows()
    cap.release()

# DENSE OPTICAL FLOW
Example #27
Source File: MFTracker.py From SimpleCV2 with BSD 3-Clause "New" or "Revised" License
def lktrack(img1, img2, ptsI, nPtsI, winsize_ncc=10, win_size_lk=4,
            method=cv2.cv.CV_TM_CCOEFF_NORMED):
    """
    **SUMMARY**
    (Dev Zone)
    Lucas-Kanade Tracker with pyramids

    **PARAMETERS**
    img1 - Previous image or image containing the known bounding box (Numpy array)
    img2 - Current image
    ptsI - Points to track from the first image
           Format ptsI[0] - x1, ptsI[1] - y1, ptsI[2] - x2, ..
    nPtsI - Number of points to track from the first image
    winsize_ncc - size of the search window at each pyramid level in LK tracker (in int)
    method - Parameter specifying the comparison method for normalized cross correlation
             (see http://opencv.itseez.com/modules/imgproc/doc/object_detection.html?highlight=matchtemplate#cv2.matchTemplate)

    **RETURNS**
    fb - forward-backward confidence value (corresponds to the euclidean distance
         between the original points and the points tracked forward and then backward)
    ncc - normCrossCorrelation values
    status - Indicates positive tracks. 1 = PosTrack 0 = NegTrack
    ptsJ - Calculated Points of second image
    """
    template_pt = []
    target_pt = []
    fb_pt = []
    ptsJ = [-1] * len(ptsI)

    for i in range(nPtsI):
        template_pt.append((ptsI[2*i], ptsI[2*i+1]))
        target_pt.append((ptsI[2*i], ptsI[2*i+1]))
        fb_pt.append((ptsI[2*i], ptsI[2*i+1]))

    template_pt = np.asarray(template_pt, dtype="float32")
    target_pt = np.asarray(target_pt, dtype="float32")
    fb_pt = np.asarray(fb_pt, dtype="float32")

    target_pt, status, track_error = cv2.calcOpticalFlowPyrLK(
        img1, img2, template_pt, target_pt,
        winSize=(win_size_lk, win_size_lk),
        flags=cv2.OPTFLOW_USE_INITIAL_FLOW,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    fb_pt, status_bt, track_error_bt = cv2.calcOpticalFlowPyrLK(
        img2, img1, target_pt, fb_pt,
        winSize=(win_size_lk, win_size_lk),
        flags=cv2.OPTFLOW_USE_INITIAL_FLOW,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    status = status & status_bt
    ncc = normCrossCorrelation(img1, img2, template_pt, target_pt, status,
                               winsize_ncc, method)
    fb = euclideanDistance(template_pt, target_pt)

    newfb = -1 * np.ones(len(fb))
    newncc = -1 * np.ones(len(ncc))
    for i in np.argwhere(status):
        i = i[0]
        ptsJ[2 * i] = target_pt[i][0]
        ptsJ[2 * i + 1] = target_pt[i][1]
        newfb[i] = fb[i]
        newncc[i] = ncc[i]

    return newfb, newncc, status, ptsJ
Example #28
Source File: alignment.py From neural-road-inspector with MIT License
def ecc_align(im1, im2, number_of_iterations=2000):
    """
    Find the translation motion matrix that aligns im2 to im1.

    The Enhanced Correlation Coefficient image alignment algorithm is based on a
    2008 paper titled Parametric Image Alignment using Enhanced Correlation
    Coefficient Maximization by Georgios D. Evangelidis and Emmanouil Z. Psarakis.
    They propose using a new similarity measure called Enhanced Correlation
    Coefficient (ECC) for estimating the parameters of the motion model.

    Parameters:
        im1: image 1
        im2: image 2

    Returns:
        the cv2.MOTION_TRANSLATION motion model 2x3 matrix that maps im2 to im1.
    """
    # Convert images to grayscale
    im1_gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2_gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Find size of image1
    sz = im1.shape

    # Define the motion model
    warp_mode = cv2.MOTION_TRANSLATION

    # Define 2x3 matrix and initialize it to identity
    warp_matrix = np.eye(2, 3, dtype=np.float32)

    # Specify the threshold of the increment
    # in the correlation coefficient between two iterations
    termination_eps = 1e-10

    # Define termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                number_of_iterations, termination_eps)

    # Run the ECC algorithm. The results are stored in warp_matrix.
    (cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix,
                                             warp_mode, criteria)

    # Use warpAffine for Translation, Euclidean and Affine
    im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1], sz[0]),
                                 flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)

    # Show final results
    cv2.imshow("Image 1", im1)
    cv2.imshow("Image 2", im2)
    cv2.imshow("Aligned Image 2", im2_aligned)
    cv2.waitKey(0)

    return cc, warp_matrix
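Because the sketch below synthesizes its own image pair, the effect of the criteria is easy to verify: a translation of (5, 3) pixels should be recovered almost exactly, well before the iteration cap. Illustrative only, not part of neural-road-inspector; depending on your OpenCV version, cv2.findTransformECC may also require the inputMask and gaussFiltSize arguments shown in the eo-learn example above:

import cv2
import numpy as np

# Synthetic pair: im2 is im1 shifted right 5 px and down 3 px.
im1 = np.zeros((100, 100), dtype=np.float32)
cv2.rectangle(im1, (30, 30), (60, 60), 1.0, -1)
im1 = cv2.GaussianBlur(im1, (9, 9), 2)
im2 = cv2.warpAffine(im1, np.float32([[1, 0, 5], [0, 1, 3]]), (100, 100))

warp_matrix = np.eye(2, 3, dtype=np.float32)
# At most 50 iterations, or stop once the ECC increment drops below 1e-6.
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 50, 1e-6)
cc, warp_matrix = cv2.findTransformECC(im1, im2, warp_matrix,
                                       cv2.MOTION_TRANSLATION, criteria)
print(warp_matrix)  # translation column should be close to (5, 3)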