Python cv2.CAP_PROP_BUFFERSIZE Examples
The following are 8 code examples of cv2.CAP_PROP_BUFFERSIZE, collected from open-source projects. The original project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the cv2 module.
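CAP_PROP_BUFFERSIZE is a VideoCapture property constant (not a function) that limits how many frames the capture backend may queue internally; keeping the buffer small helps live streams stay close to real time at the cost of possibly dropping frames, and support varies by backend. A minimal sketch of the basic usage (the device index 0 is an assumption; any stream URL or file path works the same way):

import cv2

# Open a local camera (device index 0 is an assumption)
cap = cv2.VideoCapture(0)
# Ask the backend to keep at most one frame queued, so reads stay near real time
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

ret, frame = cap.read()
if ret:
    print('Captured frame with shape', frame.shape)
cap.release()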
Example #1
Source File: datasets.py From pruning_yolov3 with GNU General Public License v3.0
def __init__(self, pipe=0, img_size=416, half=False):
    self.img_size = img_size
    self.half = half  # half precision fp16 images

    if pipe == '0':
        pipe = 0  # local camera
    # pipe = 'rtsp://192.168.1.64/1'  # IP camera
    # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
    # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
    # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

    # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
    # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer

    # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
    # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
    # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

    self.pipe = pipe
    self.cap = cv2.VideoCapture(pipe)  # video capture object
    self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size
Example #2
Source File: datasets.py From yolov3-channel-and-layer-pruning with Apache License 2.0
def __init__(self, pipe=0, img_size=416, half=False):
    self.img_size = img_size
    self.half = half  # half precision fp16 images

    if pipe == '0':
        pipe = 0  # local camera
    # pipe = 'rtsp://192.168.1.64/1'  # IP camera
    # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
    # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
    # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

    # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
    # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer

    # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
    # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
    # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

    self.pipe = pipe
    self.cap = cv2.VideoCapture(pipe)  # video capture object
    self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size
Example #3
Source File: wifi_camera.py From 1ZLAB_PyEspCar with GNU General Public License v3.0
def __init__(self, phone_ip):
    ip_camera_url = 'http://admin:admin@{}:8081/'.format(phone_ip)
    self.cap = cv2.VideoCapture(ip_camera_url)
    # Set the size of the capture buffer
    self.cap.set(cv2.CAP_PROP_BUFFERSIZE, self.CAP_BUFFER_SIZE)
    # Skip the first few frames so reads start from a fresh image
    for i in range(self.INIT_JUMP_FRAME_NUM):
        ret, img = self.cap.read()
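The constants self.CAP_BUFFER_SIZE and self.INIT_JUMP_FRAME_NUM are class attributes defined elsewhere in the project. A self-contained sketch of the same pattern, with assumed values for both constants:

import cv2

class WifiCamera:
    CAP_BUFFER_SIZE = 1      # assumed value; a small buffer keeps latency low
    INIT_JUMP_FRAME_NUM = 5  # assumed value; frames to discard on startup

    def __init__(self, phone_ip):
        ip_camera_url = 'http://admin:admin@{}:8081/'.format(phone_ip)
        self.cap = cv2.VideoCapture(ip_camera_url)
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, self.CAP_BUFFER_SIZE)
        # Discard the first few frames so the next read is current
        for _ in range(self.INIT_JUMP_FRAME_NUM):
            self.cap.read()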
Example #4
Source File: camera_publisher_and_services.py From niryo_one_ros with GNU General Public License v3.0
def __setup_stream_settings(self):
    # Set compression format
    self.__video_stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    # Set buffer size
    # -- Not available in opencv 3.4 --
    # self.__video_stream.set(cv2.CAP_PROP_BUFFERSIZE, rospy.get_param("~buffer_size"))
    # Set image size
    w, h = rospy.get_param("~frame_size")
    self.__video_stream.set(cv2.CAP_PROP_FRAME_WIDTH, w)
    self.__video_stream.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    # Set frame rate
    self.__video_stream.set(cv2.CAP_PROP_FPS, self.__frame_rate)
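The buffer-size call above is commented out because the property was not available in the project's OpenCV 3.4 setup. Since VideoCapture.set() returns False when the backend ignores a property, support can also be probed at runtime rather than hard-coded; a minimal sketch (the device index and the fallback behavior are assumptions):

import cv2

cap = cv2.VideoCapture(0)  # device index 0 is an assumption
# set() returns False when the active backend does not support the property,
# so unsupported builds can fall back gracefully instead of failing.
if not cap.set(cv2.CAP_PROP_BUFFERSIZE, 2):
    print('CAP_PROP_BUFFERSIZE not supported by this backend; using default buffering')
cap.release()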
Example #5
Source File: main.py From speed-detector with MIT License
def main():
    # I think KNN works better than MOG2, specifically with trucks/large vehicles
    # TODO: Block out snowbank where shadows are strongly reflected!
    bg_subtractor = cv2.createBackgroundSubtractorKNN(detectShadows=True)
    car_counter = None

    load_cropped()

    cap = cv2.VideoCapture(road['stream_url'])
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 2)

    cv2.namedWindow('Source Image')
    cv2.setMouseCallback('Source Image', click_and_crop)

    frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

    frame_number = -1

    while True:
        frame_number += 1
        ret, frame = cap.read()

        if not ret:
            print('Frame capture failed, stopping...')
            break

        if car_counter is None:
            car_counter = VehicleCounter(frame.shape[:2], road, cap.get(cv2.CAP_PROP_FPS), samples=10)

        processed = process_frame(frame_number, frame, bg_subtractor, car_counter)

        cv2.imshow('Source Image', frame)
        cv2.imshow('Processed Image', processed)

        key = cv2.waitKey(WAIT_TIME)
        if key == ord('s'):
            # save rects!
            save_cropped()
        elif key == ord('q') or key == 27:
            break

        # Keep video's speed stable
        # I think that this causes the abrupt jumps in the video
        time.sleep(1.0 / cap.get(cv2.CAP_PROP_FPS))

    print('Closing video capture...')
    cap.release()
    cv2.destroyAllWindows()
    print('Done.')
Example #6
Source File: threadedcam.py From speed-detector with MIT License
def __init__(self, src=0):
    self.capture = cv2.VideoCapture(src)
    self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)

    # FPS = 1/X
    # X = desired FPS
    self.FPS = 1 / 30
    self.FPS_MS = int(self.FPS * 1000)

    # Start frame retrieval thread
    self.thread = Thread(target=self.update, args=())
    self.thread.daemon = True
    self.thread.start()
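The constructor starts a daemon thread targeting self.update, which is not shown here. A hypothetical sketch of what such a reader loop commonly looks like (the update body and the frame attribute are assumptions based on the usual threaded-capture pattern, not this project's code):

import time
from threading import Thread

import cv2

class ThreadedCamera:
    def __init__(self, src=0):
        self.capture = cv2.VideoCapture(src)
        self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)
        self.FPS = 1 / 30   # target delay between reads
        self.frame = None   # latest frame, shared with the main thread
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        # Hypothetical reader loop: keep draining the stream so the
        # internal buffer never falls behind the live feed.
        while True:
            if self.capture.isOpened():
                ret, frame = self.capture.read()
                if ret:
                    self.frame = frame
            time.sleep(self.FPS)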
Example #7
Source File: video_stream.py From open_model_zoo with Apache License 2.0
def _frame_generator(input_source, out_frame, frame_shape, finish_flag):
    """Produces live frames from the input stream"""

    cap = cv2.VideoCapture(input_source)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

    source_fps = cap.get(cv2.CAP_PROP_FPS)
    trg_time_step = 1.0 / float(source_fps)

    while True:
        start_time = time.perf_counter()

        _, frame = cap.read()
        if frame is None:
            break

        with out_frame.get_lock():
            buffer = np.frombuffer(out_frame.get_obj(), dtype=np.uint8)
            np.copyto(buffer.reshape(frame_shape), frame)

        end_time = time.perf_counter()
        elapsed_time = end_time - start_time
        rest_time = trg_time_step - elapsed_time
        if rest_time > 0.0:
            time.sleep(rest_time)

    finish_flag.value = True
    cap.release()
Example #8
Source File: video_library.py From open_model_zoo with Apache License 2.0
def _play(visualizer_queue, cur_source_id, source_paths, max_image_size, trg_time_step):
    """Produces live frame from the active video source"""

    cap = None
    last_source_id = cur_source_id.value

    while True:
        start_time = time.perf_counter()

        if cur_source_id.value != last_source_id:
            last_source_id = cur_source_id.value
            cap.release()
            cap = None

        source_name, source_path = source_paths[cur_source_id.value]

        if cap is None:
            cap = cv2.VideoCapture(source_path)
            cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

        _, frame = cap.read()
        if frame is None:
            # Loop the video: rewind to the first frame and read again
            cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
            _, frame = cap.read()
            assert frame is not None

        # Downscale the frame if its longer side exceeds max_image_size,
        # preserving the aspect ratio
        trg_frame_size = list(frame.shape[:2])
        if np.max(trg_frame_size) > max_image_size:
            if trg_frame_size[0] == np.max(trg_frame_size):
                trg_frame_size[1] = int(float(max_image_size) / float(trg_frame_size[0]) * float(trg_frame_size[1]))
                trg_frame_size[0] = max_image_size
            else:
                trg_frame_size[0] = int(float(max_image_size) * float(trg_frame_size[0]) / float(trg_frame_size[1]))
                trg_frame_size[1] = max_image_size
        frame = cv2.resize(frame, (trg_frame_size[1], trg_frame_size[0]))

        cv2.putText(frame, 'GT Gesture: {}'.format(source_name), (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        visualizer_queue.put(np.copy(frame), True)

        end_time = time.perf_counter()
        elapsed_time = end_time - start_time
        rest_time = trg_time_step - elapsed_time
        if rest_time > 0.0:
            time.sleep(rest_time)