Python cv2.CAP_PROP_FRAME_WIDTH Examples
The following are 30 code examples of cv2.CAP_PROP_FRAME_WIDTH (an integer property ID, not a callable).
You can go to the original project or source file by following the link above each example, or browse the other functions and classes available in the cv2 module.
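Before the examples, a minimal sketch of the property in use (the device index 0 and the 1280x720 request are arbitrary placeholder values). Note that VideoCapture.set() is only a request that the backend may ignore, which is why many of the examples below read the property back to learn the actual frame size:

import cv2

# Open the default camera (index 0) and request a frame size.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)   # request only; the backend may ignore it
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

# Read back the size the driver actually granted.
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print("capture size: {}x{}".format(width, height))
cap.release()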
Example #1
Source File: cv2Iterator.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 10 votes |
def __init__(self, capture=cv2.VideoCapture(0), frame_resize=None):
    self._capture = capture
    self._frame_resize = None
    if frame_resize:
        if isinstance(frame_resize, (tuple, list)) and (len(frame_resize) == 2):
            self._frame_resize = tuple(map(int, frame_resize))
            self._frame_shape = (1, 3, self._frame_resize[0], self._frame_resize[1])
        elif isinstance(frame_resize, float):
            width = int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH) * frame_resize)
            height = int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT) * frame_resize)
            self._frame_shape = (1, 3, width, height)
            self._frame_resize = (width, height)
        else:
            assert False, "frame_resize should be a tuple of (x,y) pixels " \
                          "or a float setting the scaling factor"
    else:
        self._frame_shape = (1, 3,
                             int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                             int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
Example #2
Source File: rivagan.py From RivaGAN with MIT License | 9 votes |
def encode(self, video_in, data, video_out):
    assert len(data) == self.data_dim

    video_in = cv2.VideoCapture(video_in)
    width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
    length = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))

    data = torch.FloatTensor([data]).cuda()
    video_out = cv2.VideoWriter(
        video_out, cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (width, height))

    for i in tqdm(range(length)):
        ok, frame = video_in.read()
        frame = torch.FloatTensor([frame]) / 127.5 - 1.0       # (L, H, W, 3)
        frame = frame.permute(3, 0, 1, 2).unsqueeze(0).cuda()  # (1, 3, L, H, W)
        wm_frame = self.encoder(frame, data)                   # (1, 3, L, H, W)
        wm_frame = torch.clamp(wm_frame, min=-1.0, max=1.0)
        wm_frame = (
            (wm_frame[0, :, 0, :, :].permute(1, 2, 0) + 1.0) * 127.5
        ).detach().cpu().numpy().astype("uint8")
        video_out.write(wm_frame)

    video_out.release()
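A note on this pattern: the capture's reported width, height, and frame count size both the output writer and the loop. CAP_PROP_FRAME_COUNT is a container-level estimate that can be inaccurate for some codecs, so more defensive code would also check the ok flag that read() returns (it is captured but unused here).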
Example #3
Source File: chapter2.py From OpenCV-Computer-Vision-Projects-with-Python with MIT License | 7 votes |
def main():
    device = cv2.CAP_OPENNI
    capture = cv2.VideoCapture(device)
    if not capture.isOpened():
        capture.open(device)

    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    app = wx.App()
    frame = MyFrame(None, -1, 'chapter2.py', capture)
    frame.Show(True)
    # self.SetTopWindow(frame)
    app.MainLoop()

    # When everything done, release the capture
    capture.release()
    cv2.destroyAllWindows()
Example #4
Source File: yolo.py From keras-yolov3-KF-objectTracking with MIT License | 7 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #5
Source File: videocapturethreading.py From video-capture-async with Apache License 2.0 | 6 votes |
def _run(self, n_frames=500, width=1280, height=720, with_threading=False):
    if with_threading:
        cap = VideoCaptureTreading(0)
    else:
        cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    if with_threading:
        cap.start()
    t0 = time.time()
    i = 0
    while i < n_frames:
        _, frame = cap.read()
        cv2.imshow('Frame', frame)
        cv2.waitKey(1) & 0xFF
        i += 1
    print('[i] Frames per second: {:.2f}, with_threading={}'.format(
        n_frames / (time.time() - t0), with_threading))
    if with_threading:
        cap.stop()
    cv2.destroyAllWindows()
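The point of this benchmark is that a background capture thread keeps grabbing frames while the main loop displays them, so cap.read() no longer blocks on the camera; the printed frames-per-second figures compare the threaded and unthreaded modes directly.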
Example #6
Source File: preprocess.py From filmstrip with MIT License | 6 votes |
def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info

#
# Extracts one frame for every second of video.
# Effectively compresses a video down into much less data.
#
Example #7
Source File: demo.py From blueoil with Apache License 2.0 | 6 votes |
def __init__(self, video_source, video_width, video_height, video_fps, queue_size=1):
    self.video_fps = video_fps
    vc = cv2.VideoCapture(video_source)
    if hasattr(cv2, 'cv'):
        vc.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, video_width)
        vc.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, video_height)
        vc.set(cv2.cv.CV_CAP_PROP_FPS, video_fps)
    else:
        vc.set(cv2.CAP_PROP_FRAME_WIDTH, video_width)
        vc.set(cv2.CAP_PROP_FRAME_HEIGHT, video_height)
        vc.set(cv2.CAP_PROP_FPS, video_fps)
    self.stream = vc
    self.stopped = False
    self.queue = Queue(maxsize=queue_size)
    self.thread = Thread(target=self.update, args=())
    self.thread.daemon = True
    self.thread.start()
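The hasattr(cv2, 'cv') guard handles the API change between OpenCV releases: in OpenCV 2.x the capture property constants lived in the legacy cv2.cv namespace as CV_CAP_PROP_FRAME_WIDTH and friends, while OpenCV 3 and later expose them directly as cv2.CAP_PROP_FRAME_WIDTH. Example #23 below uses the same check; Examples #14 and #21 key off sys.version_info instead, which conflates the Python version with the OpenCV version.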
Example #8
Source File: cv2Iterator.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def __init__(self, capture=cv2.VideoCapture(0), frame_resize=None):
    self._capture = capture
    self._frame_resize = None
    if frame_resize:
        if isinstance(frame_resize, (tuple, list)) and (len(frame_resize) == 2):
            self._frame_resize = tuple(map(int, frame_resize))
            self._frame_shape = (1, 3, self._frame_resize[0], self._frame_resize[1])
        elif isinstance(frame_resize, float):
            width = int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH) * frame_resize)
            height = int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT) * frame_resize)
            self._frame_shape = (1, 3, width, height)
            self._frame_resize = (width, height)
        else:
            assert False, "frame_resize should be a tuple of (x,y) pixels " \
                          "or a float setting the scaling factor"
    else:
        self._frame_shape = (1, 3,
                             int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                             int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
Example #9
Source File: video.py From visual_dynamics with MIT License | 6 votes |
def __init__(self, device=None, size=None, fps=None, sync=False):
    self.device = device or 0
    self.size = size or (480, 640)
    fps = fps or 30
    self.cap = cv2.VideoCapture(self.device)
    cap_height, cap_width = (self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
                             self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    if cap_height != self.size[0]:
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.size[0])
    if cap_width != self.size[1]:
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.size[1])
    cap_fps = self.cap.get(cv2.CAP_PROP_FPS)
    if cap_fps != fps:
        self.cap.set(cv2.CAP_PROP_FPS, fps)
    if sync:
        raise ValueError("sync not supported")
Example #10
Source File: run_estimator_ps.py From VNect with Apache License 2.0 | 6 votes |
def init():
    # initialize VNect estimator
    global estimator
    estimator = VNectEstimator()

    # catch the video stream
    global camera_capture
    camera_capture = cv2.VideoCapture(video)
    assert camera_capture.isOpened(), 'Video stream not opened: %s' % str(video)
    global W_img, H_img
    W_img, H_img = (int(camera_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(camera_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))

################
### Box Loop ###
################
# use a simple HOG method to initialize bounding box
Example #11
Source File: utils.py From ActionAI with GNU General Public License v3.0 | 6 votes |
def source_capture(source):
    source = int(source) if source.isdigit() else source
    cap = cv2.VideoCapture(source)
    fourcc_cap = cv2.VideoWriter_fourcc(*'MJPG')
    cap.set(cv2.CAP_PROP_FOURCC, fourcc_cap)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, cfg.w)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cfg.h)
    return cap
Example #12
Source File: webcam_video_stream.py From AugmentedAutoencoder with MIT License | 6 votes |
def __init__(self, src, width, height):
    # initialize the video camera stream and read the first frame
    # from the stream
    self.frame_counter = 1
    self.width = width
    self.height = height
    self.stream = cv2.VideoCapture(src)
    self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
    self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
    (self.grabbed, self.frame) = self.stream.read()

    # initialize the variable used to indicate if the thread should
    # be stopped
    self.stopped = False

    # Debug stream shape
    self.real_width = int(self.stream.get(3))
    self.real_height = int(self.stream.get(4))
    print("> Start video stream with shape: {},{}".format(self.real_width, self.real_height))
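The debug lines call self.stream.get(3) and self.stream.get(4): 3 and 4 are the raw numeric IDs behind cv2.CAP_PROP_FRAME_WIDTH and cv2.CAP_PROP_FRAME_HEIGHT, so this re-reads the width and height the driver actually applied after the set() requests above. Using the named constants is clearer than the bare numbers. Examples #13 and #20 use the same idiom.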
Example #13
Source File: helper.py From AugmentedAutoencoder with MIT License | 6 votes |
def __init__(self, src, width, height):
    # initialize the video camera stream and read the first frame
    # from the stream
    self.frame_counter = 1
    self.width = width
    self.height = height
    self.stream = cv2.VideoCapture(src)
    self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
    self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
    (self.grabbed, self.frame) = self.stream.read()

    # initialize the variable used to indicate if the thread should
    # be stopped
    self.stopped = False

    # Debug stream shape
    self.real_width = int(self.stream.get(3))
    self.real_height = int(self.stream.get(4))
    print("> Start video stream with shape: {},{}".format(self.real_width, self.real_height))
Example #14
Source File: agegenderemotion_webcam.py From libfaceid with MIT License | 5 votes |
def cam_init(cam_index, width, height):
    cap = cv2.VideoCapture(cam_index)
    if sys.version_info < (3, 0):
        cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    else:
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap
Example #15
Source File: app_utils.py From face-attendance-machine with Apache License 2.0 | 5 votes |
def __init__(self, src, width, height):
    # initialize the video camera stream and read the first frame
    # from the stream
    self.stream = cv2.VideoCapture(src)
    self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    (self.grabbed, self.frame) = self.stream.read()

    # initialize the variable used to indicate if the thread should
    # be stopped
    self.stopped = False
Example #16
Source File: yolo_matt.py From keras-yolov3-KF-objectTracking with MIT License | 5 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #17
Source File: read_camera.py From AnimalRecognitionDemo with Apache License 2.0 | 5 votes |
def __init__(self, infile=0, fps=15.0):
    self.cam = cv2.VideoCapture(infile)
    self.cam.set(cv2.CAP_PROP_FPS, fps)
    self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, IMAGE_WIDTH)
    self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, IMAGE_HEIGHT)
Example #18
Source File: rivagan.py From RivaGAN with MIT License | 5 votes |
def decode(self, video_in):
    video_in = cv2.VideoCapture(video_in)
    # width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))
    # height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
    length = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))

    for i in tqdm(range(length)):
        ok, frame = video_in.read()
        frame = torch.FloatTensor([frame]) / 127.5 - 1.0       # (L, H, W, 3)
        frame = frame.permute(3, 0, 1, 2).unsqueeze(0).cuda()  # (1, 3, L, H, W)
        data = self.decoder(frame)[0].detach().cpu().numpy()
        yield data
Example #19
Source File: datasets.py From pruning_yolov3 with GNU General Public License v3.0 | 5 votes |
def __init__(self, sources='streams.txt', img_size=416, half=False):
    self.mode = 'images'
    self.img_size = img_size
    self.half = half  # half precision fp16 images

    if os.path.isfile(sources):
        with open(sources, 'r') as f:
            sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
    else:
        sources = [sources]

    n = len(sources)
    self.imgs = [None] * n
    self.sources = sources
    for i, s in enumerate(sources):
        # Start the thread to read frames from the video stream
        print('%g/%g: %s... ' % (i + 1, n, s), end='')
        cap = cv2.VideoCapture(0 if s == '0' else s)
        assert cap.isOpened(), 'Failed to open %s' % s
        w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS) % 100
        _, self.imgs[i] = cap.read()  # guarantee first frame
        thread = Thread(target=self.update, args=([i, cap]), daemon=True)
        print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
        thread.start()
    print('')  # newline
Example #20
Source File: helper.py From realtime_object_detection with MIT License | 5 votes |
def __init__(self, src, width, height):
    super(VideoStream, self).__init__()
    # initialize the video camera stream and read the first frame
    # from the stream
    self.frame_counter = 1
    self.width = width
    self.height = height
    self.stream = cv2.VideoCapture(src)
    self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
    self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
    (self.grabbed, self.frame) = self.stream.read()

    # Debug stream shape
    self.real_width = int(self.stream.get(3))
    self.real_height = int(self.stream.get(4))
Example #21
Source File: testing_webcam_flask.py From libfaceid with MIT License | 5 votes |
def cam_init(cam_index, width, height):
    cap = cv2.VideoCapture(cam_index)
    if sys.version_info < (3, 0):
        cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    else:
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap
Example #22
Source File: predict.py From image-segmentation-keras with MIT License | 5 votes |
def set_video(inp, video_name):
    cap = cv2.VideoCapture(inp)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (video_width, video_height)
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    video = cv2.VideoWriter(video_name, fourcc, fps, size)
    return cap, video, fps
Example #23
Source File: usb_camera_demo.py From blueoil with Apache License 2.0 | 5 votes |
def init_camera(camera_width, camera_height):
    if hasattr(cv2, 'cv'):
        vc = cv2.VideoCapture(0)
        vc.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, camera_width)
        vc.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, camera_height)
        vc.set(cv2.cv.CV_CAP_PROP_FPS, 60)
    else:
        vc = cv2.VideoCapture(0)
        vc.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
        vc.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
        vc.set(cv2.CAP_PROP_FPS, 60)
    return vc
Example #24
Source File: yolo.py From keras-yolo3-master with MIT License | 5 votes |
def detect_video(yolo, video_path, output_path=""):
    import cv2
    # NOTE: this variant opens the default webcam (index 0); the video_path
    # argument is accepted but unused.
    vid = cv2.VideoCapture(0)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #25
Source File: utils.py From AdaIN-TF with MIT License | 5 votes |
def __init__(self, src=0, width=None, height=None):
    # initialize the video camera stream and read the first frame
    # from the stream
    self.stream = cv2.VideoCapture(src)
    if width is not None and height is not None:
        # Both are needed to change default dims
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    (self.ret, self.frame) = self.stream.read()

    # initialize the variable used to indicate if the thread should
    # be stopped
    self.stopped = False
Example #26
Source File: capture_video.py From pytorch_mpiigaze with MIT License | 5 votes |
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--output', '-o', type=str, default='videos')
    parser.add_argument('--cap-size', type=int, nargs=2, default=(640, 480))
    args = parser.parse_args()

    cap = cv2.VideoCapture(0)
    width, height = args.cap_size
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    output_dir = pathlib.Path(args.output)
    output_dir.mkdir(exist_ok=True, parents=True)
    output_path = output_dir / f'{create_timestamp()}.mp4'
    writer = cv2.VideoWriter(output_path.as_posix(),
                             cv2.VideoWriter_fourcc(*'H264'), 30,
                             (width, height))

    while True:
        key = cv2.waitKey(1) & 0xff
        if key in QUIT_KEYS:
            break
        ok, frame = cap.read()
        if not ok:
            break
        writer.write(frame)
        cv2.imshow('frame', frame[:, ::-1])

    cap.release()
    writer.release()
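One caveat with this example: cv2.VideoWriter_fourcc(*'H264') only works when the local OpenCV build ships an H.264 encoder. If it does not, the writer fails to open and write() drops frames without raising, so checking writer.isOpened() (or falling back to a widely available codec such as 'mp4v') is advisable.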
Example #27
Source File: demo.py From pytorch_mpiigaze with MIT License | 5 votes |
def _create_capture(self) -> cv2.VideoCapture:
    if self.config.demo.use_camera:
        cap = cv2.VideoCapture(0)
    elif self.config.demo.video_path:
        cap = cv2.VideoCapture(self.config.demo.video_path)
    else:
        raise ValueError
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.gaze_estimator.camera.width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.gaze_estimator.camera.height)
    return cap
Example #28
Source File: rtsp2image.py From lightnet with MIT License | 5 votes |
def create_capture(args):
    cap = cv.VideoCapture(args.source)
    cap.set(cv.CAP_PROP_FRAME_WIDTH, args.w)
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, args.h)
    if cap is None or not cap.isOpened():
        print('Warning: unable to open video source: ', args.source)
    else:
        print('created capture')
    return cap
Example #29
Source File: video2video.py From Photomosaic-generator with MIT License | 5 votes |
def main(opt):
    cap = cv2.VideoCapture(opt.input)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    if opt.fps == 0:
        fps = int(cap.get(cv2.CAP_PROP_FPS))
    else:
        fps = opt.fps
    out = cv2.VideoWriter(opt.output, cv2.VideoWriter_fourcc(*"XVID"), fps, (width, height))
    images, avg_colors = get_component_images(opt.pool, opt.stride)
    while cap.isOpened():
        flag, frame = cap.read()
        if not flag:
            break
        blank_image = np.zeros((height, width, 3), np.uint8)
        for i, j in product(range(int(width / opt.stride)), range(int(height / opt.stride))):
            partial_input_image = frame[j * opt.stride: (j + 1) * opt.stride,
                                        i * opt.stride: (i + 1) * opt.stride, :]
            partial_avg_color = np.sum(np.sum(partial_input_image, axis=0), axis=0) / (opt.stride ** 2)
            distance_matrix = np.linalg.norm(partial_avg_color - avg_colors, axis=1)
            idx = np.argmin(distance_matrix)
            blank_image[j * opt.stride: (j + 1) * opt.stride,
                        i * opt.stride: (i + 1) * opt.stride, :] = images[idx]
        if opt.overlay_ratio:
            overlay = cv2.resize(frame, (int(width * opt.overlay_ratio), int(height * opt.overlay_ratio)))
            blank_image[height - int(height * opt.overlay_ratio):,
                        width - int(width * opt.overlay_ratio):, :] = overlay
        out.write(blank_image)
    cap.release()
    out.release()
Example #30
Source File: camera_communicator.py From SenseAct with BSD 3-Clause "New" or "Revised" License | 5 votes |
def run(self):
    """Open the video IO in the child process and invoke the parent 'run'."""
    self._cap = cv.VideoCapture(self._device_id)
    if not self._cap.isOpened():
        raise IOError("Unable to open camera on device id {}".format(self._device_id))
    self._cap.set(cv.CAP_PROP_FRAME_WIDTH, self._res[0])
    self._cap.set(cv.CAP_PROP_FRAME_HEIGHT, self._res[1])

    # main process loop
    super(CameraCommunicator, self).run()

    # try to close the IO when the process ends
    self._cap.release()