Python cv2.CAP_PROP_FOURCC Examples
The following are 26 code examples of cv2.CAP_PROP_FOURCC(). The source file, project, and license for each example are noted above it.
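As a quick orientation before the examples: cap.get(cv2.CAP_PROP_FOURCC) returns a float that packs four ASCII characters into a 32-bit integer. A minimal sketch of decoding it into a human-readable codec tag (the input path is a placeholder):

import cv2

cap = cv2.VideoCapture("input.mp4")  # placeholder path
fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
# Unpack the 32-bit code byte by byte, e.g. 875967048 -> 'H264'
codec = "".join(chr((fourcc >> 8 * i) & 0xFF) for i in range(4))
print(codec)
cap.release()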
Example #1
Source File: yolo.py From keras-yolov3-KF-objectTracking with MIT License | 7 votes |
def detect_video(yolo, video_path, output_path=""):
    # Assumes module-level imports: from timeit import default_timer as timer;
    # from PIL import Image; import numpy as np
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #2
Source File: preprocess.py From filmstrip with MIT License | 6 votes |
def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info

#
# Extracts one frame for every second of video.
# Effectively compresses a video down into much less data.
#
Example #3
Source File: utils.py From ActionAI with GNU General Public License v3.0 | 6 votes |
def source_capture(source):
    source = int(source) if source.isdigit() else source
    cap = cv2.VideoCapture(source)
    fourcc_cap = cv2.VideoWriter_fourcc(*'MJPG')
    cap.set(cv2.CAP_PROP_FOURCC, fourcc_cap)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, cfg.w)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cfg.h)
    return cap
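A caveat worth keeping in mind with examples like the one above: cap.set() is only a request to the capture backend, which may silently keep its own defaults. A small sketch (the camera index and target size are assumptions) that reads the properties back to see what was actually applied:

import cv2

cap = cv2.VideoCapture(0)  # assumed camera index
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)   # assumed target size
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
# Read back what the backend actually accepted; it may differ from the request
fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
print("FOURCC:", "".join(chr((fourcc >> 8 * i) & 0xFF) for i in range(4)))
print("Size:", int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), "x",
      int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
cap.release()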
Example #4
Source File: yolo.py From yolo3_keras_Flag_Detection with MIT License | 5 votes |
def detect_video(yolo, video_path=0, output_path=""):
    import cv2
    vid = cv2.VideoCapture(0)  # note: opens camera 0 regardless of video_path
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #5
Source File: estimate.py From openpose-pytorch with GNU Lesser General Public License v3.0 | 5 votes |
def create_writer(self, height, width):
    fps = self.cap.get(cv2.CAP_PROP_FPS)
    logging.info('cap fps=%f' % fps)
    path = os.path.expanduser(os.path.expandvars(self.args.output))
    if self.args.fourcc:
        fourcc = cv2.VideoWriter_fourcc(*self.args.fourcc.upper())
    else:
        fourcc = int(self.cap.get(cv2.CAP_PROP_FOURCC))
    os.makedirs(os.path.dirname(path), exist_ok=True)
    return cv2.VideoWriter(path, fourcc, fps, (width, height))
Example #6
Source File: video.py From open_model_zoo with Apache License 2.0 | 5 votes |
def __init__(self, sources):
    assert sources
    self.captures = []
    self.transforms = []
    try:
        sources = [int(src) for src in sources]
        mode = 'cam'
    except ValueError:
        mode = 'video'

    if mode == 'cam':
        for id in sources:
            log.info('Connection cam {}'.format(id))
            cap = cv.VideoCapture(id)
            cap.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
            cap.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
            cap.set(cv.CAP_PROP_FPS, 30)
            cap.set(cv.CAP_PROP_FOURCC, cv.VideoWriter_fourcc(*'MJPG'))
            assert cap.isOpened()
            self.captures.append(cap)
    else:
        for video_path in sources:
            log.info('Opening file {}'.format(video_path))
            cap = cv.VideoCapture(video_path)
            assert cap.isOpened()
            self.captures.append(cap)
Example #7
Source File: webcam.py From GazeML with MIT License | 5 votes |
def __init__(self, camera_id=0, fps=60, **kwargs):
    """Create queues and threads to read and preprocess data."""
    self._short_name = 'Webcam'

    self._capture = cv.VideoCapture(camera_id)
    self._capture.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
    self._capture.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
    self._capture.set(cv.CAP_PROP_FOURCC, cv.VideoWriter_fourcc(*'MJPG'))
    self._capture.set(cv.CAP_PROP_FPS, fps)

    # Call parent class constructor
    super().__init__(**kwargs)
Example #8
Source File: predict.py From YOLO-V3-IOU with MIT License | 5 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
    pass
Example #9
Source File: dataloader.py From video-to-pose3D with MIT License | 5 votes |
def videoinfo(self):
    # indicate the video info
    fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
    fps = self.stream.get(cv2.CAP_PROP_FPS)
    frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
                 int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    return (fourcc, fps, frameSize)
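The tuple returned by videoinfo() is shaped to feed cv2.VideoWriter directly; a hedged usage sketch (the loader instance and output name are hypothetical):

fourcc, fps, frameSize = loader.videoinfo()  # 'loader' is a hypothetical instance of this class
out = cv2.VideoWriter("output.avi", fourcc, fps, frameSize)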
Example #10
Source File: dataloader_webcam.py From video-to-pose3D with MIT License | 5 votes |
def videoinfo(self):
    # indicate the video info
    fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
    fps = self.stream.get(cv2.CAP_PROP_FPS)
    frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
                 int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    return (fourcc, fps, frameSize)
Example #11
Source File: people_flow.py From human_counter with MIT License | 5 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #12
Source File: yolo.py From human_counter with MIT License | 5 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #13
Source File: yolo.py From keras-yolo3 with MIT License | 5 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #14
Source File: yolo.py From keras-YOLOv3-mobilenet with MIT License | 5 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #15
Source File: yolo_Mobilenet.py From keras-YOLOv3-mobilenet with MIT License | 5 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #16
Source File: webcam_video_capture.py From detectron2-pipeline with MIT License | 5 votes |
def __init__(self, src=0, fourcc=None, width=None, height=None, fps=None,
             transform=None, queue_size=128, name="WebcamVideoCapture"):
    self.cap = cv2.VideoCapture(src)
    if not self.cap.isOpened():
        raise IOError(f"Cannot open video {src}")

    # Set capture properties
    if fourcc:
        self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fourcc))
    if width:
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    if height:
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    if fps:
        self.cap.set(cv2.CAP_PROP_FPS, fps)

    self.transform = transform

    # initialize the queue used to store frames read from the webcam
    self.queue = Queue(maxsize=queue_size)

    # initialize the variable used to indicate if the thread should be stopped
    self.stopped = False
    self.thread = Thread(target=self.update, args=(), name=name)
    self.thread.daemon = True
Example #17
Source File: yolo_matt.py From keras-yolov3-KF-objectTracking with MIT License | 5 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #18
Source File: scene_detection.py From filmstrip with MIT License | 5 votes |
def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info
Example #19
Source File: yolo.py From keras-yolo3-master with MIT License | 5 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(0)  # note: opens camera 0 regardless of video_path
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #20
Source File: detect.py From Tensorflow-YOLOv3 with MIT License | 4 votes |
def main(mode, tiny, iou_threshold, confidence_threshold, path):
    # Assumes module-level imports: tensorflow as tf, cv2, and the project's
    # helpers (load_class_names, load_image, draw_boxes, draw_boxes_frame,
    # YOLOv3, YOLOv3_tiny)
    class_names, n_classes = load_class_names()
    if tiny:
        model = YOLOv3_tiny(n_classes=n_classes,
                            iou_threshold=iou_threshold,
                            confidence_threshold=confidence_threshold)
    else:
        model = YOLOv3(n_classes=n_classes,
                       iou_threshold=iou_threshold,
                       confidence_threshold=confidence_threshold)
    inputs = tf.placeholder(tf.float32, [1, *model.input_size, 3])
    detections = model(inputs)
    saver = tf.train.Saver(tf.global_variables(scope=model.scope))
    with tf.Session() as sess:
        saver.restore(sess, './weights/model-tiny.ckpt' if tiny else './weights/model.ckpt')
        if mode == 'image':
            image = load_image(path, input_size=model.input_size)
            result = sess.run(detections, feed_dict={inputs: image})
            draw_boxes(path, boxes_dict=result[0], class_names=class_names,
                       input_size=model.input_size)
            return
        cv2.namedWindow("Detections")
        video = cv2.VideoCapture(path)
        fourcc = int(video.get(cv2.CAP_PROP_FOURCC))
        fps = video.get(cv2.CAP_PROP_FPS)
        frame_size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)),
                      int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        out = cv2.VideoWriter('./detections/video_output.mp4', fourcc, fps, frame_size)
        print("Video being saved at \"" + './detections/video_output.mp4' + "\"")
        print("Press 'q' to quit")
        while True:
            retval, frame = video.read()
            if not retval:
                break
            resized_frame = cv2.resize(frame, dsize=tuple((x) for x in model.input_size[::-1]),
                                       interpolation=cv2.INTER_NEAREST)
            result = sess.run(detections, feed_dict={inputs: [resized_frame]})
            draw_boxes_frame(frame, frame_size, result, class_names, model.input_size)
            cv2.imshow("Detections", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            out.write(frame)
        cv2.destroyAllWindows()
        video.release()
Example #21
Source File: yolo.py From keras-YOLOv3-model-set with MIT License | 4 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(0 if video_path == '0' else video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")

    # here we encode the video to MPEG-4 for better compatibility, you can use ffmpeg later
    # to convert it to x264 to reduce file size:
    # ffmpeg -i test.mp4 -vcodec libx264 -f mp4 test_264.mp4
    #
    #video_FourCC = cv2.VideoWriter_fourcc(*'XVID') if video_path == '0' else int(vid.get(cv2.CAP_PROP_FOURCC))
    video_FourCC = cv2.VideoWriter_fourcc(*'XVID') if video_path == '0' else cv2.VideoWriter_fourcc(*"mp4v")
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, (5. if video_path == '0' else video_fps), video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release everything if job is finished
    vid.release()
    if isOutput:
        out.release()
    cv2.destroyAllWindows()
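If the chosen FOURCC is not supported for the output container, cv2.VideoWriter can fail silently and produce an unusable file. A minimal guard in the same spirit as the example above (the file names and fallback codec are assumptions, not part of that project):

out = cv2.VideoWriter("test.mp4", cv2.VideoWriter_fourcc(*"mp4v"), video_fps, video_size)
if not out.isOpened():
    # Fall back to a widely supported AVI/XVID combination
    out = cv2.VideoWriter("test.avi", cv2.VideoWriter_fourcc(*"XVID"), video_fps, video_size)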
Example #22
Source File: demo.py From deeplab-pytorch with MIT License | 4 votes |
def live(config_path, model_path, cuda, crf, camera_id):
    """
    Inference from camera stream
    """
    # Setup
    CONFIG = OmegaConf.load(config_path)
    device = get_device(cuda)
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True

    classes = get_classtable(CONFIG)
    postprocessor = setup_postprocessor(CONFIG) if crf else None

    model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
    state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model.eval()
    model.to(device)
    print("Model:", CONFIG.MODEL.NAME)

    # UVC camera stream
    cap = cv2.VideoCapture(camera_id)
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))

    def colorize(labelmap):
        # Assign a unique color to each label
        labelmap = labelmap.astype(np.float32) / CONFIG.DATASET.N_CLASSES
        colormap = cm.jet_r(labelmap)[..., :-1] * 255.0
        return np.uint8(colormap)

    def mouse_event(event, x, y, flags, labelmap):
        # Show a class name of a mouse-overed pixel
        label = labelmap[y, x]
        name = classes[label]
        print(name)

    window_name = "{} + {}".format(CONFIG.MODEL.NAME, CONFIG.DATASET.NAME)
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    while True:
        _, frame = cap.read()
        image, raw_image = preprocessing(frame, device, CONFIG)
        labelmap = inference(model, image, raw_image, postprocessor)
        colormap = colorize(labelmap)

        # Register mouse callback function
        cv2.setMouseCallback(window_name, mouse_event, labelmap)

        # Overlay prediction
        cv2.addWeighted(colormap, 0.5, raw_image, 0.5, 0.0, raw_image)

        # Quit by pressing "q" key
        cv2.imshow(window_name, raw_image)
        if cv2.waitKey(10) == ord("q"):
            break
Example #23
Source File: camera_publisher_and_services.py From niryo_one_ros with GNU General Public License v3.0 | 4 votes |
def __setup_stream_settings(self):
    # Set compression format
    self.__video_stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    # Set Buffer size
    # -- Not available in opencv 3.4 --
    # self.__video_stream.set(cv2.CAP_PROP_BUFFERSIZE, rospy.get_param("~buffer_size"))
    # Set image size
    w, h = rospy.get_param("~frame_size")
    self.__video_stream.set(cv2.CAP_PROP_FRAME_WIDTH, w)
    self.__video_stream.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    # Set frame rate
    self.__video_stream.set(cv2.CAP_PROP_FPS, self.__frame_rate)
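The four-separate-characters form used here is interchangeable with unpacking a string; both calls below should produce the same packed code:

import cv2

assert cv2.VideoWriter_fourcc('M', 'J', 'P', 'G') == cv2.VideoWriter_fourcc(*'MJPG')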
Example #24
Source File: yolo.py From WorkControl with Apache License 2.0 | 4 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        print("ss")  # debug print left in the original source
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #25
Source File: __init__.py From vision-web-service with MIT License | 4 votes |
def detect_video(video_path, output_path=""):
    '''
    detect target in video frame by frame
    show results in window
    '''
    # Assumes module-level imports (timer, Image, np) and a module-level `yolo` instance
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        result = yolo.detect_image(image)
        result = np.asarray(result[0])
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #26
Source File: detect.py From yolo2-pytorch with GNU Lesser General Public License v3.0 | 4 votes |
def create_writer(self, height, width):
    fps = self.cap.get(cv2.CAP_PROP_FPS)
    logging.info('cap fps=%f' % fps)
    path = os.path.expanduser(os.path.expandvars(self.args.output))
    if self.args.fourcc:
        fourcc = cv2.VideoWriter_fourcc(*self.args.fourcc.upper())
    else:
        fourcc = int(self.cap.get(cv2.CAP_PROP_FOURCC))
    os.makedirs(os.path.dirname(path), exist_ok=True)
    return cv2.VideoWriter(path, fourcc, fps, (width, height))