Python cv2.CAP_PROP_FRAME_COUNT Examples
The following are 30 code examples of cv2.CAP_PROP_FRAME_COUNT. Note that CAP_PROP_FRAME_COUNT is a VideoCapture property identifier, not a function: you pass it to VideoCapture.get(), which returns the number of frames in the video file as recorded in the container metadata. You can go to the original project or source file by following the link above each example, and you may also want to check out the other available functions and classes of the cv2 module.
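All of the examples below share the same basic pattern: open a cv2.VideoCapture, query the property with get(), and release the capture. As a quick orientation, here is a minimal sketch of that pattern; the file name is a placeholder, and keep in mind that the count is read from container metadata, so it can be approximate or even 0 for some codecs and streams:

import cv2

# Minimal usage sketch; 'example.mp4' is a placeholder path.
cap = cv2.VideoCapture('example.mp4')
if not cap.isOpened():
    raise IOError('Could not open video')

# The property is returned as a float read from container metadata;
# it may be approximate (or 0) for some codecs and streams.
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
print('frames:', frame_count)

cap.release()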
Example #1
Source File: rivagan.py From RivaGAN with MIT License
def encode(self, video_in, data, video_out):
    assert len(data) == self.data_dim

    video_in = cv2.VideoCapture(video_in)
    width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
    length = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))

    data = torch.FloatTensor([data]).cuda()
    video_out = cv2.VideoWriter(
        video_out, cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (width, height))

    for i in tqdm(range(length)):
        ok, frame = video_in.read()
        frame = torch.FloatTensor([frame]) / 127.5 - 1.0       # (L, H, W, 3)
        frame = frame.permute(3, 0, 1, 2).unsqueeze(0).cuda()  # (1, 3, L, H, W)
        wm_frame = self.encoder(frame, data)                   # (1, 3, L, H, W)
        wm_frame = torch.clamp(wm_frame, min=-1.0, max=1.0)
        wm_frame = (
            (wm_frame[0, :, 0, :, :].permute(1, 2, 0) + 1.0) * 127.5
        ).detach().cpu().numpy().astype("uint8")
        video_out.write(wm_frame)

    video_out.release()
Example #2
Source File: util.py From SpaceXtract with MIT License
def find_anchor(self, cap, start=0, end=1, maxiter=10):
    if not isinstance(self.extractor, RelativeExtract):
        return False

    original_location = cap.get(cv2.CAP_PROP_POS_FRAMES)

    for i in range(maxiter):
        pos = random.uniform(start, end)
        cap.set(cv2.CAP_PROP_POS_FRAMES, pos * cap.get(cv2.CAP_PROP_FRAME_COUNT))
        _, frame = cap.read()

        if self.extractor.prepare_image_dict(frame):
            return True

    cap.set(cv2.CAP_PROP_POS_FRAMES, original_location)
    return False
Example #3
Source File: video2tfrecord.py From video2tfrecord with MIT License
def get_video_capture_and_frame_count(path):
    assert os.path.isfile(
        path), "Couldn't find video file:" + path + ". Skipping video."

    cap = None
    if path:
        cap = cv2.VideoCapture(path)

    assert cap is not None, "Couldn't load video capture:" + path + ". Skipping video."

    # compute meta data of video
    if hasattr(cv2, 'cv'):
        frame_count = int(cap.get(cv2.cv.CAP_PROP_FRAME_COUNT))
    else:
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    return cap, frame_count
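A related caveat, not part of the video2tfrecord project above: because the metadata-based count can be missing or wrong for some containers, a slower but exact fallback is to decode the whole stream and count frames yourself. A minimal sketch:

import cv2

def count_frames_exact(path):
    # Fallback for when CAP_PROP_FRAME_COUNT returns 0 or an unreliable
    # value: grab frames until the stream ends. grab() skips the decode
    # step, so it is faster than read(), but still O(video length).
    cap = cv2.VideoCapture(path)
    count = 0
    while cap.grab():
        count += 1
    cap.release()
    return count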
Example #4
Source File: preprocess.py From filmstrip with MIT License
def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info

#
# Extracts one frame for every second of video.
# Effectively compresses a video down into much less data.
#
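One note on the dictionary above: the "codec" entry is the FOURCC code packed into a single integer. A small helper (written for this page, not part of filmstrip) can unpack it into the familiar four-character string:

def fourcc_to_str(fourcc):
    # Unpack the integer returned by CAP_PROP_FOURCC into four characters,
    # e.g. 828601953 -> 'avc1'. Illustrative helper, not from filmstrip.
    fourcc = int(fourcc)
    return ''.join(chr((fourcc >> (8 * i)) & 0xFF) for i in range(4))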
Example #5
Source File: test_video.py From deepstar with BSD 3-Clause Clear License
def test_create_one_video_file_from_many_image_files(self):
    image_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa

    with tempdir() as tempdir_:
        video_path = os.path.join(tempdir_, 'video.mp4')

        def image_paths():
            for _ in range(0, 5):
                yield image_0001

        ret = create_one_video_file_from_many_image_files(image_paths, video_path)  # noqa
        self.assertTrue(ret)

        vc = cv2.VideoCapture(video_path)
        try:
            self.assertTrue(vc.isOpened())
            self.assertEqual(vc.get(cv2.CAP_PROP_FRAME_COUNT), 5)
        finally:
            vc.release()
Example #6
Source File: test_video.py From deepstar with BSD 3-Clause Clear License
def test_create_one_video_file_from_one_image_file(self):
    image_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa

    with tempdir() as tempdir_:
        video_path = os.path.join(tempdir_, 'video.mp4')

        ret = create_one_video_file_from_one_image_file(image_0001, video_path)
        self.assertTrue(ret)

        vc = cv2.VideoCapture(video_path)
        try:
            self.assertTrue(vc.isOpened())
            self.assertEqual(vc.get(cv2.CAP_PROP_FRAME_COUNT), 1)
        finally:
            vc.release()
Example #7
Source File: util.py From SpaceXtract with MIT License
def search_switch(self, cap, key, thresh=0.5):
    left = 0
    right = cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1

    cap.set(cv2.CAP_PROP_POS_FRAMES, int((right + left) / 2))

    while right > left + 1:
        _, frame = cap.read()
        image = self.extractor.prepare_frame(frame, self.extractor.image_dict[key][0])

        if not self.extractor.exists(image, self.extractor.image_dict[key][1][0], thresh):
            left = int((right + left) / 2)
        else:
            right = int((right + left) / 2)

        cap.set(cv2.CAP_PROP_POS_FRAMES, int((right + left) / 2))

    cap.set(cv2.CAP_PROP_POS_FRAMES, left)
    return left
Example #8
Source File: evaluator.py From SlowFast-Network-pytorch with MIT License
def imshow(self, bbox, cls, probs):
    # print("bbox ", bbox)
    cap = cv2.VideoCapture("/media/aiuser/78C2F86DC2F830CC1/ava_v2.2/preproc/train_clips/clips/b5pRYl_djbs/986.mp4")
    # frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # key_frame_start = int(frame_count * 0.3)
    # key_frame_end = int(frame_count * 0.9)
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            # draw the detections on the frame before displaying it
            self.draw_bboxes_and_show(frame, bbox, cls, probs=probs)
            # self.draw_bboxes_and_show(frame, frame_num, real_bboxes, real_lables, key_frame_start, key_frame_end, color=(255, 0, 255))
            # show the frame
            cv2.imshow('Frame', frame)
            # advance frame by frame on a key press
            cv2.waitKey(0)
            # press q to quit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        else:
            break
Example #9
Source File: rtsp_threaded_tracker.py From deep_sort_pytorch with MIT License
def __init__(self, cfg, args):
    # Create a VideoCapture object
    self.cfg = cfg
    self.args = args
    use_cuda = self.args.use_cuda and torch.cuda.is_available()

    if not use_cuda:
        warnings.warn(UserWarning("Running in cpu mode!"))

    self.detector = build_detector(cfg, use_cuda=use_cuda)
    self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
    self.class_names = self.detector.class_names

    self.vdo = cv2.VideoCapture(self.args.input)
    self.status, self.frame = None, None
    # unbound-method form, equivalent to self.vdo.get(cv2.CAP_PROP_FRAME_COUNT)
    self.total_frames = int(cv2.VideoCapture.get(self.vdo, cv2.CAP_PROP_FRAME_COUNT))
    self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
    self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))

    self.output_frame = None

    self.thread = ThreadPoolExecutor(max_workers=1)
    self.thread.submit(self.update)
Example #10
Source File: ped_det_server.py From deep_sort_pytorch with MIT License
def __enter__(self):
    self.vdo.open(self.args.VIDEO_PATH)
    self.total_frames = int(cv2.VideoCapture.get(self.vdo, cv2.CAP_PROP_FRAME_COUNT))
    self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
    self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))

    video_details = {'frame_width': self.im_width,
                     'frame_height': self.im_height,
                     'frame_rate': self.args.write_fps,
                     'video_name': self.args.VIDEO_PATH}
    codec = cv2.VideoWriter_fourcc(*'XVID')
    self.writer = cv2.VideoWriter(self.output_file, codec, self.args.write_fps,
                                  (self.im_width, self.im_height))
    self.logger.add_video_details(**video_details)

    assert self.vdo.isOpened()
    return self
Example #11
Source File: util.py From SpaceXtract with MIT License
def skip_from_launch(self, cap, key, time, thresh=None):
    """
    Move the capture to T+time (time can be negative) and return the frame index.

    :param cap: OpenCV capture
    :param key: key into the extractor's image dict
    :param time: delta time from launch to skip to
    :param thresh: optional match threshold (defaults to the key's stored threshold)
    :return: index of requested frame
    """
    if thresh is None:
        thresh = self.extractor.image_dict[key][2]

    number_of_frames = int(cap.get(cv2.CAP_PROP_FPS) * time) + self.search_switch(cap, key, thresh)

    number_of_frames = max(number_of_frames, 0)
    number_of_frames = min(number_of_frames, cap.get(cv2.CAP_PROP_FRAME_COUNT))

    cap.set(cv2.CAP_PROP_POS_FRAMES, number_of_frames)

    return number_of_frames
Example #12
Source File: test_video_command_line_route_handler.py From deepstar with BSD 3-Clause Clear License
def test_insert_image_frame_count(self):
    with deepstar_path():
        image_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa

        args = ['main.py', 'insert', 'videos', 'image', image_0001]
        opts = {'frame-count': '5'}

        with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
            VideoCommandLineRouteHandler().handle(args, opts)

        # files
        video_path = VideoFile.path(VideoModel().select(1)[2])

        vc = cv2.VideoCapture(video_path)
        try:
            self.assertTrue(vc.isOpened())
            self.assertEqual(vc.get(cv2.CAP_PROP_FRAME_COUNT), 5)
        finally:
            vc.release()
Example #13
Source File: stage_detection.py From smashscan with MIT License
def __init__(self, capture, tfnet, show_flag=False, save_flag=False):
    self.capture = capture
    self.tfnet = tfnet
    self.save_flag = save_flag
    self.show_flag = show_flag

    # Predetermined parameters that have been tested to work best.
    self.end_fnum = int(self.capture.get(cv2.CAP_PROP_FRAME_COUNT))
    self.max_num_match_frames = 30
    self.min_match_length_s = 30
    self.num_match_frames = 5
    self.step_size = 60
    self.timeline_empty_thresh = 4


#### STAGE DETECTOR TESTS ##################################################

# Run the standard stage detector test over the entire video.
Example #14
Source File: sequence_run.py From FeatureFlow with MIT License
def VideoToSequence(path, time):
    video = cv2.VideoCapture(path)
    dir_path = 'frames_tmp'
    os.system("rm -rf %s" % dir_path)
    os.mkdir(dir_path)
    fps = int(video.get(cv2.CAP_PROP_FPS))
    length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    print('making ' + str(length) + ' frame sequence in ' + dir_path)
    i = -1
    while True:
        (grabbed, frame) = video.read()
        if not grabbed:
            break
        i = i + 1
        index = IndexHelper(i * time, len(str(time * length)))
        cv2.imwrite(dir_path + '/' + index + '.png', frame)
        # print(index)
    return [dir_path, length, fps]
Example #15
Source File: CameraDevice.py From vidpipe with GNU General Public License v3.0
def __init__(self, fl=None, cameraId=0, mirrored=False, parent=None):
    super(CameraDevice, self).__init__(parent)

    self.mirrored = mirrored
    self._cameraDevice = cv2.VideoCapture(fl if fl else cameraId)

    self._timer = QtCore.QTimer(self)
    self._timer.timeout.connect(self._queryFrame)
    self._timer.setInterval(1000 / self.fps)

    self._frameCount = 0
    self.paused = False
    self._maxFrameCount = -1  # -1 means a live camera with no fixed frame count
    if fl:
        # a file was given, so the frame count is known
        self._maxFrameCount = self._cameraDevice.get(cv2.CAP_PROP_FRAME_COUNT)

# from: https://stackoverflow.com/questions/9710520/opencv-createimage-function-isnt-working
Example #16
Source File: train_featurizer.py From HardRLWithYoutube with MIT License
def generate_dataset(videos_path, framerate, width, height):
    """Converts videos from specified path to ndarrays of shape [numberOfVideos, -1, width, height, 1]

    Args:
        videos_path: Inside the 'videos/' directory, the name of the subdirectory for videos.
        framerate: The desired framerate of the dataset.
        width: The width we will resize the videos to.
        height: The height we will resize the videos to.

    Returns:
        The dataset with the new size and framerate, and converted to monochromatic.
    """
    dataset = []
    video_index = 0
    for playlist in os.listdir('videos/' + videos_path):
        for video_name in os.listdir('videos/{}/{}'.format(videos_path, playlist)):
            dataset.append([])
            print('Video: {}'.format(video_name))
            video = cv2.VideoCapture('videos/{}/{}/{}'.format(videos_path, playlist, video_name))

            while video.isOpened():
                success, frame = video.read()
                if success:
                    frame = preprocess_image(frame, width, height)
                    dataset[video_index].append(frame)

                    frame_index = video.get(cv2.CAP_PROP_POS_FRAMES)
                    video_framerate = video.get(cv2.CAP_PROP_FPS)
                    video.set(cv2.CAP_PROP_POS_FRAMES, frame_index + video_framerate // framerate)

                    last_frame_index = video.get(cv2.CAP_PROP_FRAME_COUNT)
                    if frame_index >= last_frame_index:
                        # Video is over
                        break
                else:
                    break

            dataset[video_index] = np.reshape(dataset[video_index], (-1, width, height, 1))
            video_index += 1

    return dataset
Example #17
Source File: FVS.py From zmMagik with GNU General Public License v2.0
def get_wh(self):
    w = self.stream.get(3)  # 3 is cv2.CAP_PROP_FRAME_WIDTH
    h = self.stream.get(4)  # 4 is cv2.CAP_PROP_FRAME_HEIGHT
    print("W={}, H={}".format(w, h))
    print('TOTAL FRAMES={}'.format(self.stream.get(cv2.CAP_PROP_FRAME_COUNT)))
Example #18
Source File: test.py From Object_Detection_Tracking with Apache License 2.0
def speed_test_opencv(video_dir, video_list=VIDEO_LIST):
    import cv2
    for video_name in video_list:
        print('\t', video_name, flush=True)
        bar = ProgressBar().start()
        cap = cv2.VideoCapture(osp.join(video_dir, video_name))
        for _ in bar(range(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))):
            cap.read()
Example #19
Source File: dataset.py From R2Plus1D-PyTorch with MIT License
def loadvideo(self, fname):
    # initialize a VideoCapture object to read video data into a numpy array
    capture = cv2.VideoCapture(fname)
    frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # create a buffer. Must have dtype float, so it gets converted to a FloatTensor by Pytorch later
    buffer = np.empty((frame_count, self.resize_height, self.resize_width, 3), np.dtype('float32'))

    count = 0
    retaining = True

    # read in each frame, one at a time into the numpy buffer array
    while (count < frame_count and retaining):
        retaining, frame = capture.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # will resize frames if not already final size
        # NOTE: strongly recommended to resize them during the download process. This script
        # will process videos of any size, but will take longer the larger the video file.
        if (frame_height != self.resize_height) or (frame_width != self.resize_width):
            frame = cv2.resize(frame, (self.resize_width, self.resize_height))
        buffer[count] = frame
        count += 1

    # release the VideoCapture once it is no longer needed
    capture.release()

    # convert from [D, H, W, C] format to [C, D, H, W] (what PyTorch uses)
    # D = Depth (in this case, time), H = Height, W = Width, C = Channels
    buffer = buffer.transpose((3, 0, 1, 2))
    return buffer
Example #20
Source File: extract_feature.py From end2end_AU_speech with MIT License
def get_fps(videofile):
    cap = cv2.VideoCapture(videofile)
    fps = cap.get(cv2.CAP_PROP_FPS)
    nFrame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.release()
    return (nFrame, fps)
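The two values returned here are enough to estimate a video's duration. A quick sketch of that calculation, guarding against the zero FPS that some streams report ('example.mp4' is a placeholder path):

n_frames, fps = get_fps('example.mp4')
duration_s = n_frames / fps if fps > 0 else 0.0
print('duration: {:.2f} s'.format(duration_s))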
Example #21
Source File: eval.py From FeatureFlow with MIT License
def convert_video(source, dest, factor, batch_size=10, output_format='mp4v', output_fps=30):
    vin = cv2.VideoCapture(source)
    count = vin.get(cv2.CAP_PROP_FRAME_COUNT)
    w0, h0 = int(vin.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vin.get(cv2.CAP_PROP_FRAME_HEIGHT))

    codec = cv2.VideoWriter_fourcc(*output_format)
    vout = cv2.VideoWriter(dest, codec, float(output_fps), (w0, h0))

    w, h = (w0 // 32) * 32, (h0 // 32) * 32

    network.module.setup('custom', h, w)
    network.module.setup_t(factor)
    network.cuda()

    done = 0
    batch = []
    while True:
        batch = load_batch(vin, batch_size, batch, w, h)
        if len(batch) == 1:
            break
        done += len(batch) - 1

        intermediate_frames = interpolate_batch(batch, factor)
        intermediate_frames = list(zip(*intermediate_frames))

        for fid, iframe in enumerate(intermediate_frames):
            vout.write(denorm_frame(batch[fid], w0, h0))
            for frm in iframe:
                vout.write(denorm_frame(frm, w0, h0))

        try:
            yield len(batch), done, count
        except StopIteration:
            break

    vout.write(denorm_frame(batch[0], w0, h0))

    vin.release()
    vout.release()
Example #22
Source File: video.py From EDSR-PyTorch with MIT License
def __init__(self, args, name='Video', train=False, benchmark=False):
    self.args = args
    self.name = name
    self.scale = args.scale
    self.idx_scale = 0
    self.train = False
    self.do_eval = False
    self.benchmark = benchmark

    self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))

    self.vidcap = cv2.VideoCapture(args.dir_demo)
    self.n_frames = 0
    self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
Example #23
Source File: video_avi_flow_saliency.py From self-supervision with BSD 3-Clause "New" or "Revised" License
def extract_optical_flow(fn, times, frames=8, scale_factor=1.0):
    cap = cv2.VideoCapture(fn)
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    outputs = []
    if n_frames < frames * 2:
        return outputs

    def resize(im):
        if scale_factor != 1.0:
            new_size = (int(im.shape[1] * scale_factor), int(im.shape[0] * scale_factor))
            return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
        else:
            return im

    for t in times:
        cap.set(cv2.CAP_PROP_POS_FRAMES, min(t * n_frames, n_frames - 1 - frames))
        ret, frame0 = cap.read()
        im0 = resize(cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY))
        mags = []
        middle_frame = frame0
        for f in range(frames - 1):
            ret, frame1 = cap.read()
            if f == frames // 2:
                middle_frame = frame1
            im1 = resize(cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
            flow = cv2.calcOpticalFlowFarneback(im0, im1, None, 0.5, 3, 15, 3, 5, 1.2, 0)
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            mags.append(mag)
            im0 = im1
        mag = np.sum(mags, 0)
        mag = mag.clip(min=0)
        norm_mag = (mag - mag.min()) / (mag.max() - mag.min() + 1e-5)
        x = middle_frame[..., ::-1].astype(np.float32) / 255
        outputs.append((x, norm_mag))
    return outputs
Example #24
Source File: eval.py From Super-SloMo with MIT License
def convert_video(source, dest, factor, batch_size=10, output_format='mp4v', output_fps=30):
    vin = cv2.VideoCapture(source)
    count = vin.get(cv2.CAP_PROP_FRAME_COUNT)
    w0, h0 = int(vin.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vin.get(cv2.CAP_PROP_FRAME_HEIGHT))

    codec = cv2.VideoWriter_fourcc(*output_format)
    vout = cv2.VideoWriter(dest, codec, float(output_fps), (w0, h0))

    w, h = (w0 // 32) * 32, (h0 // 32) * 32
    setup_back_warp(w, h)

    done = 0
    batch = []
    while True:
        batch = load_batch(vin, batch_size, batch, w, h)
        if len(batch) == 1:
            break
        done += len(batch) - 1

        intermediate_frames = interpolate_batch(batch, factor)
        intermediate_frames = list(zip(*intermediate_frames))

        for fid, iframe in enumerate(intermediate_frames):
            vout.write(denorm_frame(batch[fid], w0, h0))
            for frm in iframe:
                vout.write(denorm_frame(frm, w0, h0))

        try:
            yield len(batch), done, count
        except StopIteration:
            break

    vout.write(denorm_frame(batch[0], w0, h0))

    vin.release()
    vout.release()
Example #25
Source File: rivagan.py From RivaGAN with MIT License
def decode(self, video_in):
    video_in = cv2.VideoCapture(video_in)
    # width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))
    # height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
    length = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))

    for i in tqdm(range(length)):
        ok, frame = video_in.read()
        frame = torch.FloatTensor([frame]) / 127.5 - 1.0       # (L, H, W, 3)
        frame = frame.permute(3, 0, 1, 2).unsqueeze(0).cuda()  # (1, 3, L, H, W)
        data = self.decoder(frame)[0].detach().cpu().numpy()
        yield data
Example #26
Source File: dataloader.py From RivaGAN with MIT License
def __init__(self, root_dir, crop_size, seq_len, max_crop_size=(360, 480)):
    self.seq_len = seq_len
    self.crop_size = crop_size
    self.max_crop_size = max_crop_size

    self.videos = []
    for ext in ["avi", "mp4"]:
        for path in glob(os.path.join(root_dir, "**/*.%s" % ext), recursive=True):
            cap = cv2.VideoCapture(path)
            nb_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            self.videos.append((path, nb_frames))
Example #27
Source File: scene_detection.py From filmstrip with MIT License
def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info
Example #28
Source File: test_video_command_line_route_handler.py From deepstar with BSD 3-Clause Clear License
def test_insert_image(self):
    with deepstar_path():
        image_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa

        args = ['main.py', 'insert', 'videos', 'image', image_0001]
        opts = {}

        class TestVideoCommandLineRouteHandler_(VideoCommandLineRouteHandler):  # noqa
            def uuid(self):
                return '12345678-1234-1234-1234-123456789012'

        route_handler = TestVideoCommandLineRouteHandler_()

        try:
            sys.stdout = StringIO()
            route_handler.handle(args, opts)
            actual = sys.stdout.getvalue().strip()
        finally:
            sys.stdout = sys.__stdout__

        # stdout
        self.assertIsNotNone(re.match('^video_id=1, uri=.*video.mp4, filename=12345678-1234-1234-1234-123456789012.mp4, description=None$', actual))  # noqa

        # db
        result = VideoModel().select(1)
        self.assertEqual(len(result), 4)
        self.assertEqual(result[0], 1)
        self.assertIsNotNone(re.match('^.*video.mp4$', result[1]))
        self.assertEqual(result[2], '12345678-1234-1234-1234-123456789012.mp4')  # noqa
        self.assertEqual(result[3], None)

        # files
        video_path = VideoFile.path(result[2])

        vc = cv2.VideoCapture(video_path)
        try:
            self.assertTrue(vc.isOpened())
            self.assertEqual(vc.get(cv2.CAP_PROP_FRAME_COUNT), 1)
        finally:
            vc.release()
Example #29
Source File: datasets.py From pruning_yolov3 with GNU General Public License v3.0
def new_video(self, path):
    self.frame = 0
    self.cap = cv2.VideoCapture(path)
    self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
Example #30
Source File: test.py From smashscan with MIT License
def run_all_pm_tests(test_type_str, video_location, start_fnum, stop_fnum,
                     save_flag, show_flag, wait_flag):

    # Create a capture object and set the stop frame number if none was given.
    capture = cv2.VideoCapture(video_location)
    if stop_fnum == 0:
        stop_fnum = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))

    # Run the PM test with grayscale and non-grayscale parameters.
    for gray_flag in [True, False]:

        # Display the flags used for the current PM test.
        print("==== Percent Matching Test ====")
        print("\tgray_flag={}".format(gray_flag))
        print("\tshow_flag={}".format(show_flag))
        pm = percent_matching.PercentMatcher(capture, [start_fnum, stop_fnum],
            gray_flag, save_flag, show_flag, wait_flag)

        # Run the PM test according to the input test_type_str.
        if test_type_str == "pms":
            pm.sweep_test()
        elif test_type_str == "pmc":
            pm.calibrate_test()
        elif test_type_str == "pmi":
            pm.initialize_test()
        elif test_type_str == "pmt":
            pm.timeline_test()

    # Release the OpenCV capture object.
    capture.release()


# Run the VA test over the entire video folder.