Python cv2.CAP_PROP_FPS Examples
The following are 30 code examples of cv2.CAP_PROP_FPS, the OpenCV VideoCapture property for frames per second. Each example is drawn from the open-source project and source file named above it. You may also want to check out all available functions and classes of the cv2 module.
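Before the examples, a note on usage: cv2.CAP_PROP_FPS is a property identifier passed to VideoCapture.get() to query a source's frame rate and to VideoCapture.set() to request one. Many backends return 0.0 when the rate is unknown, a case several examples below guard against, so a fallback is prudent. A minimal sketch, assuming a hypothetical input.mp4 and an arbitrary 30.0 fallback:

import cv2

cap = cv2.VideoCapture('input.mp4')  # hypothetical path; a camera index such as 0 also works
fps = cap.get(cv2.CAP_PROP_FPS)      # float; 0.0 when the backend cannot report a rate
if not fps or fps != fps:            # catches 0.0 and NaN (NaN != NaN)
    fps = 30.0                       # assumed fallback, not an OpenCV default
print('frames per second:', fps)
cap.release()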
Example #1
Source File: tkinter_functions.py From simba with GNU Lesser General Public License v3.0

def clahe(filename):
    os.chdir(os.path.dirname(filename))
    print('Applying CLAHE, this might take awhile...')
    currentVideo = os.path.basename(filename)
    fileName, fileEnding = currentVideo.split('.', 2)
    saveName = str('CLAHE_') + str(fileName) + str('.avi')
    cap = cv2.VideoCapture(currentVideo)
    imageWidth = int(cap.get(3))
    imageHeight = int(cap.get(4))
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(saveName, fourcc, fps, (imageWidth, imageHeight), 0)
    try:
        while True:
            ret, image = cap.read()
            if ret == True:
                im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                claheFilter = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16))
                claheCorrecttedFrame = claheFilter.apply(im)
                out.write(claheCorrecttedFrame)
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
            else:
                print(str('Completed video ') + str(saveName))
                break
    except:
        print('clahe not applied')
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    return saveName
Example #2
Source File: yolo.py From keras-yolov3-KF-objectTracking with MIT License

def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #3
Source File: tkinter_functions.py From simba with GNU Lesser General Public License v3.0

def extract_allframescommand(filename):
    if filename:
        pathDir = str(filename[:-4])
        if not os.path.exists(pathDir):
            os.makedirs(pathDir)
        picFname = '%d.png'
        saveDirFilenames = os.path.join(pathDir, picFname)
        print(saveDirFilenames)
        fname = str(filename)
        cap = cv2.VideoCapture(fname)
        fps = cap.get(cv2.CAP_PROP_FPS)
        amount_of_frames = cap.get(7)
        print('The number of frames in this video = ', amount_of_frames)
        print('Extracting frames... (Might take awhile)')
        command = str('ffmpeg -i ' + '"' + str(fname) + '"' + ' ' + '-q:v 1' + ' ' + '-start_number 0' + ' ' + '"' + str(saveDirFilenames) + '"')
        print(command)
        subprocess.call(command, shell=True)
        print('All frames are extracted!')
    else:
        print('Please select a video to convert')
Example #4
Source File: test_compression_mode.py From vidgear with Apache License 2.0

def test_input_framerate(c_ffmpeg):
    """
    Testing "-input_framerate" parameter provided by WriteGear(in Compression Mode)
    """
    stream = cv2.VideoCapture(return_testvideo_path())  # Open stream
    test_video_framerate = stream.get(cv2.CAP_PROP_FPS)
    output_params = {"-input_framerate": test_video_framerate}
    writer = WriteGear(
        output_filename="Output_tif.mp4",
        custom_ffmpeg=c_ffmpeg,
        logging=True,
        **output_params
    )  # Define writer
    while True:
        (grabbed, frame) = stream.read()
        if not grabbed:
            break
        writer.write(frame)
    stream.release()
    writer.close()
    output_video_framerate = getFrameRate(os.path.abspath("Output_tif.mp4"))
    assert test_video_framerate == output_video_framerate
    os.remove(os.path.abspath("Output_tif.mp4"))
Example #5
Source File: video.py From Advanced-Deep-Learning-with-Keras with MIT License

def initialize(self):
    self.capture = cv2.VideoCapture(self.camera)
    if not self.capture.isOpened():
        print("Error opening video camera")
        return
    # cap.set(cv2.CAP_PROP_FPS, 5)
    self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
    self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
    if self.record:
        self.videowriter = cv2.VideoWriter(self.filename,
                                           cv2.VideoWriter_fourcc('m', 'p', '4', 'v'),
                                           10,
                                           (self.width, self.height),
                                           isColor=True)
Example #6
Source File: video.py From visual_dynamics with MIT License

def __init__(self, device=None, size=None, fps=None, sync=False):
    self.device = device or 0
    self.size = size or (480, 640)
    fps = fps or 30
    self.cap = cv2.VideoCapture(self.device)
    cap_height, cap_width = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT), self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    if cap_height != self.size[0]:
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.size[0])
    if cap_width != self.size[1]:
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.size[1])
    cap_fps = self.cap.get(cv2.CAP_PROP_FPS)
    if cap_fps != fps:
        self.cap.set(cv2.CAP_PROP_FPS, fps)
    if sync:
        raise ValueError("sync not supported")
Example #7
Source File: get_telemetry_rocketlab.py From SpaceXtract with MIT License

def main():
    args = set_args()
    dest = args.destination_path + '.json'
    if os.path.isfile(dest) and not args.force:
        if input("'%s' already exists. Do you want to override it? [y/n]: " % args.destination_path) != 'y':
            print('exiting')
            exit(4)
    file = open(dest, 'w')
    cap = extract_video.get_capture(args.capture_path)
    if cap is None or cap.get(cv2.CAP_PROP_FPS) == 0:
        if extract_video.youtube_url_validation(args.capture_path):
            print("Cannot access video in URL. Please check the URL is a valid YouTube video")
            exit(2)
        print("Cannot access video in file. Please make sure the path to the file is valid")
        exit(3)
    get_data(cap, file, to_float(args.launch_time), args.out, args.destination_path, args.live, args.from_launch)
Example #8
Source File: get_telemetry_spacex.py From SpaceXtract with MIT License

def main():
    args = set_args()
    dest = args.destination_path + '.json'
    if os.path.isfile(dest) and not args.force:
        if input("'%s' already exists. Do you want to override it? [y/n]: " % args.destination_path) != 'y':
            print('exiting')
            exit(4)
    file = open(dest, 'w')
    cap = extract_video.get_capture(args.capture_path)
    if cap is None or cap.get(cv2.CAP_PROP_FPS) == 0:
        if extract_video.youtube_url_validation(args.capture_path):
            print("Cannot access video in URL. Please check the URL is a valid YouTube video")
            exit(2)
        print("Cannot access video in file. Please make sure the path to the file is valid")
        exit(3)
    get_data(cap, file, to_float(args.launch_time), args.out, args.destination_path, args.live)
Example #9
Source File: util.py From SpaceXtract with MIT License

def skip_from_launch(self, cap, key, time, thresh=None):
    """
    Move the capture to T+time (time can be negative) and return the frame index.
    :param cap: OpenCV capture
    :param time: delta time from launch to skip to
    :return: index of requested frame
    """
    if thresh is None:
        thresh = self.extractor.image_dict[key][2]
    number_of_frames = int(cap.get(cv2.CAP_PROP_FPS) * time) + self.search_switch(cap, key, thresh)
    number_of_frames = max(number_of_frames, 0)
    number_of_frames = min(number_of_frames, cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.set(cv2.CAP_PROP_POS_FRAMES, number_of_frames)
    return number_of_frames
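The conversion at the heart of this helper is frame_index = fps × Δt, clamped to the valid frame range. A minimal standalone sketch of the same seek, with illustrative numbers and a hypothetical file name:

import cv2

cap = cv2.VideoCapture('launch.mp4')   # hypothetical recording
fps = cap.get(cv2.CAP_PROP_FPS)        # e.g. 30.0
target = int(fps * 120)                # T+120 s maps to frame 3600 at 30 fps
target = max(0, min(target, int(cap.get(cv2.CAP_PROP_FRAME_COUNT))))
cap.set(cv2.CAP_PROP_POS_FRAMES, target)  # jump straight to that frame
cap.release()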
Example #10
Source File: demo.py From blueoil with Apache License 2.0

def __init__(self, video_source, video_width, video_height, video_fps, queue_size=1):
    self.video_fps = video_fps
    vc = cv2.VideoCapture(video_source)
    if hasattr(cv2, 'cv'):
        vc.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, video_width)
        vc.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, video_height)
        vc.set(cv2.cv.CV_CAP_PROP_FPS, video_fps)
    else:
        vc.set(cv2.CAP_PROP_FRAME_WIDTH, video_width)
        vc.set(cv2.CAP_PROP_FRAME_HEIGHT, video_height)
        vc.set(cv2.CAP_PROP_FPS, video_fps)
    self.stream = vc
    self.stopped = False
    self.queue = Queue(maxsize=queue_size)
    self.thread = Thread(target=self.update, args=())
    self.thread.daemon = True
    self.thread.start()
Example #11
Source File: sequence_run.py From FeatureFlow with MIT License

def VideoToSequence(path, time):
    video = cv2.VideoCapture(path)
    dir_path = 'frames_tmp'
    os.system("rm -rf %s" % dir_path)
    os.mkdir(dir_path)
    fps = int(video.get(cv2.CAP_PROP_FPS))
    length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    print('making ' + str(length) + ' frame sequence in ' + dir_path)
    i = -1
    while True:
        (grabbed, frame) = video.read()
        if not grabbed:
            break
        i = i + 1
        index = IndexHelper(i * time, len(str(time * length)))
        cv2.imwrite(dir_path + '/' + index + '.png', frame)
        # print(index)
    return [dir_path, length, fps]
Example #12
Source File: preprocess.py From filmstrip with MIT License

def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info

#
# Extracts one frame for every second of video.
# Effectively compresses a video down into much less data.
#
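The trailing comment belongs to the next function in filmstrip's preprocess.py, which is not shown in this example. A minimal sketch of that one-frame-per-second idea, assuming a hypothetical helper name and a 30.0 fallback (neither is filmstrip's code):

import cv2

def extract_one_frame_per_second(sourcePath):
    # Seek ahead by one second's worth of frames per iteration,
    # using the video's own FPS as the stride.
    cap = cv2.VideoCapture(sourcePath)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # assumed fallback when FPS is unreported
    frames = []
    index = 0
    while True:
        cap.set(cv2.CAP_PROP_POS_FRAMES, index)
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
        index += int(round(fps))  # advance one second of footage
    cap.release()
    return frames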
Example #13
Source File: train_featurizer.py From HardRLWithYoutube with MIT License

def generate_dataset(videos_path, framerate, width, height):
    """Converts videos from specified path to ndarrays of shape [numberOfVideos, -1, width, height, 1]

    Args:
        videos_path: Inside the 'videos/' directory, the name of the subdirectory for videos.
        framerate: The desired framerate of the dataset.
        width: The width we will resize the videos to.
        height: The height we will resize the videos to.

    Returns:
        The dataset with the new size and framerate, and converted to monochromatic.
    """
    dataset = []
    video_index = 0
    for playlist in os.listdir('videos/' + videos_path):
        for video_name in os.listdir('videos/{}/{}'.format(videos_path, playlist)):
            dataset.append([])
            print('Video: {}'.format(video_name))
            video = cv2.VideoCapture('videos/{}/{}/{}'.format(videos_path, playlist, video_name))
            while video.isOpened():
                success, frame = video.read()
                if success:
                    frame = preprocess_image(frame, width, height)
                    dataset[video_index].append(frame)
                    frame_index = video.get(cv2.CAP_PROP_POS_FRAMES)
                    video_framerate = video.get(cv2.CAP_PROP_FPS)
                    video.set(cv2.CAP_PROP_POS_FRAMES, frame_index + video_framerate // framerate)
                    last_frame_index = video.get(cv2.CAP_PROP_FRAME_COUNT)
                    if frame_index >= last_frame_index:
                        # Video is over
                        break
                else:
                    break
            dataset[video_index] = np.reshape(dataset[video_index], (-1, width, height, 1))
            video_index += 1
    return dataset
Example #14
Source File: __init__.py From visil with Apache License 2.0

def load_video(video, all_frames=False):
    cap = cv2.VideoCapture(video)
    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps > 144 or fps is None:
        fps = 25
    frames = []
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if isinstance(frame, np.ndarray):
            if int(count % round(fps)) == 0 or all_frames:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frames.append(center_crop(resize_frame(frame, 256), 256))
        else:
            break
        count += 1
    cap.release()
    return np.array(frames)
Example #15
Source File: opencv.py From stytra with GNU General Public License v3.0

def set(self, param, val):
    if param == "exposure":
        self.cam.set(cv2.CAP_PROP_EXPOSURE, val)
    #
    if param == "framerate":
        self.cam.set(cv2.CAP_PROP_FPS, val)
Example #16
Source File: webcam_video_capture.py From detectron2-pipeline with MIT License

def __init__(self, src=0, fourcc=None, width=None, height=None, fps=None,
             transform=None, queue_size=128, name="WebcamVideoCapture"):
    self.cap = cv2.VideoCapture(src)
    if not self.cap.isOpened():
        raise IOError(f"Cannot open video {src}")
    # Set capture properties
    if fourcc:
        self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fourcc))
    if width:
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    if height:
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    if fps:
        self.cap.set(cv2.CAP_PROP_FPS, fps)
    self.transform = transform
    # initialize the queue used to store frames read from the webcam
    self.queue = Queue(maxsize=queue_size)
    # initialize the variable used to indicate if the thread should be stopped
    self.stopped = False
    self.thread = Thread(target=self.update, args=(), name=name)
    self.thread.daemon = True
Example #17
Source File: video_transformer.py From FeatureFlow with MIT License

def _lower_fps(self, p_args):
    video_name, q = p_args
    # pbar.set_description("Processing %s" % video_name)
    # read a video and create video_writer for lower fps video output
    video = cv2.VideoCapture(os.path.join(self.videos_folder, video_name))
    fps = video.get(cv2.CAP_PROP_FPS)
    size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    video_writer = [cv2.VideoWriter(self.tmp + video_name[:-4] + '_%s' % str(i) + '.mp4',
                                    fourcc, fps / self.lower_rate, size)
                    for i in range(self.lower_rate)]
    count = 0
    while video.isOpened():
        ret, frame = video.read()
        if ret:
            video_writer[count % self.lower_rate].write(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                raise KeyboardInterrupt
        else:
            break
        count += 1
    for i in range(self.lower_rate):
        video_writer[i].release()
    q.put(1)
Example #18
Source File: read_camera.py From AnimalRecognitionDemo with Apache License 2.0

def __init__(self, infile=0, fps=15.0):
    self.cam = cv2.VideoCapture(infile)
    self.cam.set(cv2.CAP_PROP_FPS, fps)
    self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, IMAGE_WIDTH)
    self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, IMAGE_HEIGHT)
Example #19
Source File: yolo.py From keras-YOLOv3-mobilenet with MIT License

def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #20
Source File: yolo_Mobilenet.py From keras-YOLOv3-mobilenet with MIT License

def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #21
Source File: CameraDevice.py From vidpipe with GNU General Public License v3.0

def fps(self):
    fps = int(self._cameraDevice.get(cv2.CAP_PROP_FPS))
    if not fps > 0:
        fps = self._DEFAULT_FPS
    return fps
Example #22
Source File: testing_webcam.py From libfaceid with MIT License

def cam_init(cam_index, width, height):
    cap = cv2.VideoCapture(cam_index)
    if sys.version_info < (3, 0):
        cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    else:
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap
Example #23
Source File: opencv_file.py From IkaLog with Apache License 2.0

def get_start_time(self):
    """Returns the timestamp of the beginning of this video in sec."""
    if (not self._source_file) or (not self.video_capture):
        return None
    last_modified_time = os.stat(self._source_file).st_mtime
    frames = self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = self.video_capture.get(cv2.CAP_PROP_FPS)
    duration = frames / fps
    return last_modified_time - duration

# override
Example #24
Source File: frame_queue.py From python_video_stab with MIT License

def set_frame_source(self, source):
    if isinstance(source, cv2.VideoCapture):
        self.source = source
        self.source_frame_count = int(source.get(cv2.CAP_PROP_FRAME_COUNT))
        self.source_fps = int(source.get(cv2.CAP_PROP_FPS))
        has_max_frames = self.max_frames is not None and not np.isinf(self.max_frames)
        if self.source_frame_count > 0 and not has_max_frames:
            self._max_frames = self.source_frame_count
        elif has_max_frames and self.source_frame_count < self.max_frames:
            self._max_frames = self.source_frame_count
    else:
        raise TypeError('Not yet support for non cv2.VideoCapture frame source.')
Example #25
Source File: agegenderemotion_webcam.py From libfaceid with MIT License

def cam_init(cam_index, width, height):
    cap = cv2.VideoCapture(cam_index)
    if sys.version_info < (3, 0):
        cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    else:
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap
Example #26
Source File: testing_webcam_voiceenabled_voiceactivated.py From libfaceid with MIT License

def cam_init(cam_index, width, height):
    cap = cv2.VideoCapture(cam_index)
    if sys.version_info < (3, 0):
        cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    else:
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap
Example #27
Source File: enrollment.py From libfaceid with MIT License

def cam_init(cam_index, width, height):
    cap = cv2.VideoCapture(cam_index)
    if sys.version_info < (3, 0):
        cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    else:
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap
Example #28
Source File: testing_webcam_voiceenabled.py From libfaceid with MIT License

def cam_init(cam_index, width, height):
    cap = cv2.VideoCapture(cam_index)
    if sys.version_info < (3, 0):
        cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    else:
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap
Example #29
Source File: facial_recognition.py From libfaceid with MIT License

def cam_init(width, height):
    cap = cv2.VideoCapture(0)
    if sys.version_info < (3, 0):
        cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    else:
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap
Example #30
Source File: testing_webcam_flask.py From libfaceid with MIT License

def cam_init(cam_index, width, height):
    cap = cv2.VideoCapture(cam_index)
    if sys.version_info < (3, 0):
        cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    else:
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap