Python cv2.WINDOW_AUTOSIZE Examples
The following are 30 code examples of cv2.WINDOW_AUTOSIZE, drawn from open-source projects; the originating project and source file are listed above each example.
Note that cv2.WINDOW_AUTOSIZE is a window flag (an integer constant), not a callable: passed to cv2.namedWindow, it makes the window adopt the exact size of the displayed image and prevents the user from resizing it, whereas cv2.WINDOW_NORMAL creates a user-resizable window.
You may also want to check out the other window-related flags and functions of the cv2 module.
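
As a quick orientation before the examples, here is a minimal, self-contained sketch of the typical pattern they all follow (the file name sample.jpg is a placeholder):

import cv2

# With WINDOW_AUTOSIZE the window adopts the image's exact pixel size
# and cannot be resized by the user.
cv2.namedWindow('preview', cv2.WINDOW_AUTOSIZE)

image = cv2.imread('sample.jpg')  # placeholder path; any image file works
if image is not None:
    cv2.imshow('preview', image)
    cv2.waitKey(0)  # waitKey also services the GUI event loop
cv2.destroyAllWindows()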
Example #1
Source File: vachat.py From The-chat-room with MIT License
def run(self):
    print("VIDEO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VIDEO client successfully connected...")
    data = "".encode("utf-8")
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
    while True:
        while len(data) < payload_size:
            data += conn.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        while len(data) < msg_size:
            data += conn.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        frame = pickle.loads(frame_data)
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
Example #2
Source File: SudokuExtractor.py From SolveSudoku with MIT License
def parse_grid(path):
    original = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    processed = pre_process_image(original)

    # cv2.namedWindow('processed', cv2.WINDOW_AUTOSIZE)
    # processed_img = cv2.resize(processed, (500, 500))  # Resize image
    # cv2.imshow('processed', processed_img)

    corners = find_corners_of_largest_polygon(processed)
    cropped = crop_and_warp(original, corners)

    # cv2.namedWindow('cropped', cv2.WINDOW_AUTOSIZE)
    # cropped_img = cv2.resize(cropped, (500, 500))  # Resize image
    # cv2.imshow('cropped', cropped_img)

    squares = infer_grid(cropped)
    # print(squares)
    digits = get_digits(cropped, squares, 28)
    # print(digits)
    final_image = show_digits(digits)
    return final_image
Example #3
Source File: RtspClient.py From ReolinkCameraAPI with GNU General Public License v3.0
def preview(self):
    """Blocking function. Opens an OpenCV window to display the stream."""
    self.connect()
    win_name = 'RTSP'
    cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(win_name, 20, 20)
    while True:
        cv2.imshow(win_name, self.get_frame())
        # if self._latest is not None:
        #     cv2.imshow(win_name, self._latest)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    cv2.waitKey()
    cv2.destroyAllWindows()
    cv2.waitKey()
Example #4
Source File: emulator.py From fathom with Apache License 2.0
def __init__(self, rom_name, vis, frameskip=1, windowname='preview'):
    self.ale = ALEInterface()
    self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode")
    self.ale.setInt("random_seed", 123)
    self.ale.setInt("frame_skip", frameskip)
    romfile = str(ROM_PATH) + str(rom_name)
    if not os.path.exists(romfile):
        print('No ROM file found at "' + romfile + '".\nAdjust ROM_PATH or double-check the file exists.')
    self.ale.loadROM(romfile)
    self.legal_actions = self.ale.getMinimalActionSet()
    self.action_map = dict()
    self.windowname = windowname
    for i in range(len(self.legal_actions)):
        self.action_map[self.legal_actions[i]] = i
    # print(self.legal_actions)
    self.screen_width, self.screen_height = self.ale.getScreenDims()
    print("width/height: " + str(self.screen_width) + "/" + str(self.screen_height))
    self.vis = vis
    if vis:
        cv2.startWindowThread()
        # WINDOW_AUTOSIZE sizes the window to the frame; it is not user-resizable
        cv2.namedWindow(self.windowname, flags=cv2.WINDOW_AUTOSIZE)
Example #5
Source File: vis_utils.py From PVN3D with MIT License
def cv2_show_image(window_name, image, size_wh=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name: str window name
        image: ndarray image to display
        size_wh: window size (w, h)
        location_xy: window location (x, y)
    """
    if size_wh is not None:
        cv2.namedWindow(window_name, cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image)
Example #6
Source File: drawers.py From ethoscope with GNU General Public License v3.0
def __init__(self, video_out=None, draw_frames=True, video_out_fourcc="DIVX", video_out_fps=2):
    """
    A template class to annotate and save the processed frames. It can also save the annotated
    frames in a video file and/or display them in a new window. The
    :meth:`~ethoscope.drawers.drawers.BaseDrawer._annotate_frame` abstract method defines how
    frames are annotated.

    :param video_out: The path to the output file (.avi)
    :type video_out: str
    :param draw_frames: Whether frames should be displayed on the screen (a new window will be created).
    :type draw_frames: bool
    :param video_out_fourcc: When setting ``video_out``, this defines the codec used to save the
        output video (see `fourcc <http://www.fourcc.org/codecs.php>`_)
    :type video_out_fourcc: str
    :param video_out_fps: When setting ``video_out``, this defines the output fps. Typically,
        the same as the input fps.
    :type video_out_fps: float
    """
    self._video_out = video_out
    self._draw_frames = draw_frames
    self._video_writer = None
    self._window_name = "ethoscope_" + str(os.getpid())
    self._video_out_fourcc = video_out_fourcc
    self._video_out_fps = video_out_fps
    if draw_frames:
        cv2.namedWindow(self._window_name, cv2.WINDOW_AUTOSIZE)
    self._last_drawn_frame = None
Example #7
Source File: vis_utils.py From ip_basic with MIT License
def cv2_show_image(window_name, image, size_wh=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name: str window name
        image: ndarray image to display
        size_wh: window size (w, h)
        location_xy: window location (x, y)
    """
    if size_wh is not None:
        cv2.namedWindow(window_name, cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image)
Example #8
Source File: simple-camera.py From ai-smarthome with BSD 2-Clause "Simplified" License
def show_camera():
    # To flip the image, modify the flip_method parameter (0 and 2 are the most common)
    print(gstreamer_pipeline(flip_method=0))
    cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    if cap.isOpened():
        window_handle = cv2.namedWindow('CSI Camera', cv2.WINDOW_AUTOSIZE)
        # Window
        while cv2.getWindowProperty('CSI Camera', 0) >= 0:
            ret_val, img = cap.read()
            cv2.imshow('CSI Camera', img)
            # waitKey also services the GUI event loop
            keyCode = cv2.waitKey(30) & 0xff
            # Stop the program on the ESC key
            if keyCode == 27:
                break
        cap.release()
        cv2.destroyAllWindows()
    else:
        print('Unable to open camera')
Example #9
Source File: ffmpegstream.py From rtsp with MIT License
def preview(self):
    """Blocking function. Opens an OpenCV window to display the stream."""
    win_name = 'Camera'
    cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(win_name, 20, 20)
    self.open()
    while self.isOpened():
        cv2.imshow(win_name, self._stream.read()[1])
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    cv2.waitKey()
    cv2.destroyAllWindows()
    cv2.waitKey()
Example #10
Source File: ffmpegstream.py From rtsp with MIT License
def preview(self):
    """Blocking function. Opens an OpenCV window to display the stream."""
    win_name = 'RTSP'
    cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(win_name, 20, 20)
    while self.isOpened():
        cv2.imshow(win_name, self.read(raw=True))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    cv2.waitKey()
    cv2.destroyAllWindows()
    cv2.waitKey()
Example #11
Source File: vis_utils.py From monopsr with MIT License
def cv2_imshow(window_name, image, size_wh=None, row_col=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name (string): Window title
        image: image to display
        size_wh: resize window
            Recommended sizes for a 1920x1080 screen:
                2 col: (930, 280)
                3 col: (620, 187)
                4 col: (465, 140)
        row_col: Row and column to show images like subplots
        location_xy: location of window
    """
    if size_wh is not None:
        cv2.namedWindow(window_name, cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE | cv2.WINDOW_GUI_NORMAL)

    if row_col is not None:
        start_x_offset = 60
        start_y_offset = 25
        y_offset = 28
        subplot_row = row_col[0]
        subplot_col = row_col[1]
        location_xy = (start_x_offset + subplot_col * size_wh[0],
                       start_y_offset + subplot_row * size_wh[1] + subplot_row * y_offset)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image)
Example #12
Source File: start_face_recon_app.py From Smart-Surveillance-System-using-Raspberry-Pi with GNU General Public License v3.0
def add_person(people_folder, shape):
    """Function to add pictures of a person."""
    person_name = input('What is the name of the new person: ').lower()
    folder = people_folder + person_name
    if not os.path.exists(folder):
        input("I will now take 20 pictures. Press ENTER when ready.")
        os.mkdir(folder)
        video = VideoCamera()
        detector = FaceDetector('face_recognition_system/frontal_face.xml')
        counter = 1
        timer = 0
        cv2.namedWindow('Video Feed', cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow('Saved Face', cv2.WINDOW_NORMAL)
        while counter < 21:
            frame = video.get_frame()
            face_coord = detector.detect(frame)
            if len(face_coord):
                frame, face_img = get_images(frame, face_coord, shape)
                # Save a face every second; we start from an offset of '5' because
                # the first frames from the camera get very high intensity readings.
                if timer % 100 == 5:
                    cv2.imwrite(folder + '/' + str(counter) + '.jpg', face_img[0])
                    print('Images Saved: ' + str(counter))
                    counter += 1
                    cv2.imshow('Saved Face', face_img[0])
            cv2.imshow('Video Feed', frame)
            cv2.waitKey(50)
            timer += 5
    else:
        print("This name already exists.")
        sys.exit()
Example #13
Source File: opencv_image_viewer.py From dm2gym with MIT License
def __init__(self, *, escape_to_exit=False):
    """Construct the viewing window."""
    self._escape_to_exit = escape_to_exit
    self._window_name = str(uuid.uuid4())
    cv2.namedWindow(self._window_name, cv2.WINDOW_AUTOSIZE)
    self._isopen = True
Example #14
Source File: demo.py From YOLOv3 with MIT License
def detect_video(video, yolo, all_classes):
    """Use YOLO v3 to detect objects in a video.

    # Argument:
        video: video file.
        yolo: YOLO, yolo model.
        all_classes: all classes name.
    """
    video_path = os.path.join("videos", "test", video)
    camera = cv2.VideoCapture(video_path)
    cv2.namedWindow("detection", cv2.WINDOW_AUTOSIZE)

    # Prepare for saving the detected video
    sz = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
          int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    fourcc = cv2.VideoWriter_fourcc(*'mpeg')

    vout = cv2.VideoWriter()
    vout.open(os.path.join("videos", "res", video), fourcc, 20, sz, True)

    while True:
        res, frame = camera.read()
        if not res:
            break
        image = detect_image(frame, yolo, all_classes)
        cv2.imshow("detection", image)
        # Save the video frame by frame
        vout.write(image)
        if cv2.waitKey(110) & 0xff == 27:
            break

    vout.release()
    camera.release()
Example #15
Source File: ColorFilter.py From ImageProcessingProjects with MIT License
def showImage(title, image):
    cv.namedWindow(title, cv.WINDOW_AUTOSIZE)
    cv.imshow(title, image)
    cv.waitKey(0)
Example #16
Source File: ImageManipulation.py From ImageProcessingProjects with MIT License
def thresholdGrayImage():
    cv.namedWindow('Thresholding', cv.WINDOW_AUTOSIZE)
    cv.createTrackbar('Threshold', 'Thresholding', 128, 255, sliderCallBack)
    cv.waitKey(0)
Example #17
Source File: ImageManipulation.py From ImageProcessingProjects with MIT License
def showImage(title, image):
    cv.namedWindow(title, cv.WINDOW_AUTOSIZE)
    cv.imshow(title, image)
    cv.waitKey(0)

# works with gray images
Example #18
Source File: jetson_live_object_detection.py From jetson_nano_detection_and_tracking with GNU Lesser General Public License v2.1
def start(self):
    print("Starting Live object detection, may take a few minutes to initialize...")
    self.camera.startStreaming()
    self.detector.initializeSession()

    if not self.camera.isOpened():
        print("Camera has failed to open")
        exit(-1)
    elif self.debug:
        cv2.namedWindow("Jetson Live Detection", cv2.WINDOW_AUTOSIZE)

    while True:
        curr_time = time.time()

        img = self.camera.getFrame()
        scores, boxes, classes, num_detections = self.detector.detect(img)

        if self.debug:
            self._visualizeDetections(img, scores, boxes, classes, num_detections)
            print("Debug: Running at: " + str(1.0 / (time.time() - curr_time)) + " Hz.")

        if cv2.waitKey(1) == ord('q'):
            break

        # throttle to rate
        capture_duration = time.time() - curr_time
        sleep_time = self.rate - capture_duration
        if sleep_time > 0:
            time.sleep(sleep_time)

    cv2.destroyAllWindows()
    self.camera.__del__()
    self.detector.__del__()
    print("Exiting...")
    return
Example #19
Source File: display_video.py From image-processing-pipeline with MIT License
def __init__(self, src, window_name=None, org=None):
    self.src = src
    self.window_name = window_name if window_name else src

    cv2.startWindowThread()
    cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE)
    if org:
        # Set the window position
        x, y = org
        cv2.moveWindow(self.window_name, x, y)

    super(DisplayVideo, self).__init__()
Example #20
Source File: display_video.py From detectron2-pipeline with MIT License
def __init__(self, src, window_name=None, org=None):
    self.src = src
    self.window_name = window_name if window_name else src

    cv2.startWindowThread()
    cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE)
    if org:
        # Set the window position
        x, y = org
        cv2.moveWindow(self.window_name, x, y)

    super().__init__()
Example #21
Source File: ocr.py From pokr with MIT License
def __init__(self, debug=False):
    self.debug = debug
    if self.debug:
        cv2.namedWindow("Stream", cv2.WINDOW_AUTOSIZE)
    self.tile_map = self.make_tilemap('firered_tiles.png')
    self.tile_text = self.make_tile_text('firered_tiles.txt')
    self.ocr_engine = video.OCREngine(self.tile_map, self.tile_text)
Example #22
Source File: drawing.py From PyIntroduction with MIT License
def drawingDemo():
    img = emptyImage()

    # Draw a line with thickness 2
    drawLine(img, (10, 10), (200, 200), (0, 0, 255), 2)

    # A thickness of -1 fills the shape
    drawCircle(img, (300, 100), 80, (0, 255, 0), -1)

    # Draw both the fill and the outline
    drawRectangle(img, (10, 210), (210, 350), (100, 100, 0), -1)
    drawRectangle(img, (10, 210), (210, 350), (255, 0, 0), 3)

    # Draw an ellipse
    drawElipse(img, (450, 100), (30, 80), 0, 0, 360, (0, 100, 100), -1)

    # Draw a polygon
    pts = np.array([[(250, 240), (270, 280), (350, 320),
                     (500, 300), (450, 230), (350, 210)]], dtype=np.int32)
    drawPolylines(img, pts, True, (255, 100, 100), 5)

    # Draw text
    drawText(img, 'OpenCV', (20, 450), font_types[0], 4, (200, 200, 200), 2)

    cv2.namedWindow('DrawingDemo', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('DrawingDemo', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #23
Source File: fer_demo.py From Efficient-Facial-Feature-Learning-with-Wide-Ensemble-based-Convolutional-Neural-Networks with MIT License
def __init__(self,
             window_name=_DEFAULT_WINDOW_NAME,
             screen_size=_DEFAULT_SCREEN_SIZE_ID,
             display_individual_classification=_DEFAULT_DISPLAY_INDIVIDUAL_CLASSIFICATION,
             display_graph_ensemble=_DEFAULT_DISPLAY_GRAPH_ENSEMBLE):
    """
    Initialize the GUI of the FER demo.

    :param window_name: (string) The name of the window.
    :param screen_size: ((int, int)) Tuple of int values for width and height, respectively.
    """

    # Screen components
    self._fer = None
    self._input_image = None
    self._background = None
    self._plot_arousal = []
    self._plot_valence = []

    # Screen
    self._window_name = window_name
    self._screen_size = screen_size - 1
    self._width, self._height = FERDemo._SCREEN_SIZE[self._screen_size]
    self._display_individual_classification = display_individual_classification
    self._display_graph_ensemble = display_graph_ensemble

    # Container parameters
    self._container_width, self._container_height = (int(self._width // 2), int(self._height))
    # np.int was removed from recent NumPy releases; the builtin int is equivalent here
    self._container_center_position = np.array([self._container_width // 2,
                                                self._container_height // 2], dtype=int)
    self._input_container = None
    self._output_container = None
    self._input_container_initial_position = np.array([0, 0], dtype=int)
    self._output_container_initial_position = np.array([0, self._width // 2], dtype=int)

    # Output blocks
    self._output_block_height = self._container_height // FERDemo._BLOCK_NUM_BLOCKS
    self._output_block_height_ensemble = self._container_height
    self._output_block_width = self._container_width

    # Screen initialization
    self._draw_background()
    self._screen = self._get_container(0, 0, self._height, self._width)
    self._blank_screen()

    cv2.namedWindow(self._window_name, cv2.WINDOW_AUTOSIZE)
Example #24
Source File: Utils.py From pix2code with Apache License 2.0
def show(image):
    import cv2
    cv2.namedWindow("view", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("view", image)
    cv2.waitKey(0)
    cv2.destroyWindow("view")
Example #25
Source File: opencv_windows_management.py From OpenCV-Python-Tutorial with MIT License
def add(self, name, image, weight=1):
    '''
    The higher the weight, the larger the image is displayed.
    :return:
    '''
    cv2.namedWindow(name, flags=cv2.WINDOW_AUTOSIZE)
    window = Window(name, image, weight)
    self.windows[name] = window
    # self.windows[name] = image
Example #26
Source File: match_template.py From OpenCV-Python-Tutorial with MIT License
def main(argv):
    # The original used Python 2 print statements; converted to print() for Python 3
    if len(sys.argv) < 3:
        print('Not enough parameters')
        print('Usage:\nmatch_template_demo.py <image_name> <template_name> [<mask_name>]')
        return -1

    ## [load_image]
    global img
    global templ
    img = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)
    templ = cv2.imread(sys.argv[2], cv2.IMREAD_COLOR)

    if len(sys.argv) > 3:
        global use_mask
        use_mask = True
        global mask
        mask = cv2.imread(sys.argv[3], cv2.IMREAD_COLOR)

    if (img is None) or (templ is None) or (use_mask and (mask is None)):
        print('Can\'t read one of the images')
        return -1
    ## [load_image]

    ## [create_windows]
    cv2.namedWindow(image_window, cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow(result_window, cv2.WINDOW_AUTOSIZE)
    ## [create_windows]

    ## [create_trackbar]
    trackbar_label = 'Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED'
    cv2.createTrackbar(trackbar_label, image_window, match_method, max_Trackbar, MatchingMethod)
    ## [create_trackbar]

    MatchingMethod(match_method)

    ## [wait_key]
    cv2.waitKey(0)
    return 0
    ## [wait_key]
Example #27
Source File: ball_tracker.py From SunFounder_PiCar-V with GNU General Public License v2.0
def find_blob():
    radius = 0
    # Load input image
    _, bgr_image = img.read()
    orig_image = bgr_image
    bgr_image = cv2.medianBlur(bgr_image, 3)

    # Convert input image to HSV
    hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

    # Threshold the HSV image, keep only the red pixels
    lower_red_hue_range = cv2.inRange(hsv_image, (0, 100, 100), (10, 255, 255))
    upper_red_hue_range = cv2.inRange(hsv_image, (160, 100, 100), (179, 255, 255))

    # Combine the above two images
    red_hue_image = cv2.addWeighted(lower_red_hue_range, 1.0, upper_red_hue_range, 1.0, 0.0)
    red_hue_image = cv2.GaussianBlur(red_hue_image, (9, 9), 2, 2)

    # Use the Hough transform to detect circles in the combined threshold image
    circles = cv2.HoughCircles(red_hue_image, cv2.HOUGH_GRADIENT, 1, 120, 100, 20, 10, 0)

    # Loop over all detected circles and outline them on the original image
    all_r = np.array([])
    # print("circles: %s" % circles)
    if circles is not None:
        # Moved inside the None check: np.around(None) raises a TypeError when nothing is detected
        circles = np.uint16(np.around(circles))
        try:
            for i in circles[0, :]:
                # print("i: %s" % i)
                all_r = np.append(all_r, int(round(i[2])))
            closest_ball = all_r.argmax()
            center = (int(round(circles[0][closest_ball][0])),
                      int(round(circles[0][closest_ball][1])))
            radius = int(round(circles[0][closest_ball][2]))
            if draw_circle_enable:
                cv2.circle(orig_image, center, radius, (0, 255, 0), 5)
        except IndexError:
            pass
    # print("circles: %s" % circles)

    # Show images
    if show_image_enable:
        cv2.namedWindow("Threshold lower image", cv2.WINDOW_AUTOSIZE)
        cv2.imshow("Threshold lower image", lower_red_hue_range)
        cv2.namedWindow("Threshold upper image", cv2.WINDOW_AUTOSIZE)
        cv2.imshow("Threshold upper image", upper_red_hue_range)
        cv2.namedWindow("Combined threshold images", cv2.WINDOW_AUTOSIZE)
        cv2.imshow("Combined threshold images", red_hue_image)
        cv2.namedWindow("Detected red circles on the input image", cv2.WINDOW_AUTOSIZE)
        cv2.imshow("Detected red circles on the input image", orig_image)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        return (0, 0), 0

    if radius > 3:
        return center, radius
    else:
        return (0, 0), 0
Example #28
Source File: vis.py From PINTO_model_zoo with MIT License
def vis_tflite(model):
    interpreter = tf.lite.Interpreter(model_path=model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    input_size = 160
    image_width = 640
    image_height = 480

    cam = cv2.VideoCapture(0)
    cam.set(cv2.CAP_PROP_FPS, 30)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, image_width)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, image_height)
    window_name = "USB Camera"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    scalex = image_width / input_size
    scaley = image_height / input_size

    while True:
        start_time = time.perf_counter()
        ret, image = cam.read()
        if not ret:
            continue
        img_show = np.array(image)
        frame = cv2.resize(image, (input_size, input_size))
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = np.expand_dims(frame, axis=0)
        frame = frame.astype(np.float32)

        start = time.time()
        interpreter.set_tensor(input_details[0]['index'], frame)
        interpreter.invoke()
        tflite_res = interpreter.get_tensor(output_details[2]['index'])
        print('inference time:', time.time() - start)

        img_show = img_show.astype(np.uint8)
        landmark = np.array(tflite_res).reshape([-1, 2])
        for _index in range(landmark.shape[0]):
            x_y = landmark[_index]
            cv2.circle(img_show,
                       center=(int(x_y[0] * input_size * scalex),
                               int(x_y[1] * input_size * scaley)),
                       color=(255, 122, 122), radius=1, thickness=2)

        # Reuse window_name; the original's 'USB camera' differed in case and opened a second window
        cv2.imshow(window_name, img_show)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #29
Source File: mobilenetv2ssd-async-usbcam.py From TensorflowLite-bin with Apache License 2.0
def camThread(results, frameBuffer, camera_width, camera_height, vidfps, usbcamno):
    global fps
    global detectfps
    global framecount
    global detectframecount
    global time1
    global time2
    global lastresults
    global cam
    global window_name

    cam = cv2.VideoCapture(usbcamno)
    cam.set(cv2.CAP_PROP_FPS, vidfps)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
    window_name = "USB Camera"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    while True:
        t1 = time.perf_counter()

        ret, color_image = cam.read()
        if not ret:
            continue
        if frameBuffer.full():
            frameBuffer.get()
        frames = color_image
        frameBuffer.put(color_image.copy())
        res = None

        if not results.empty():
            res = results.get(False)
            detectframecount += 1
            imdraw = overlay_on_image(frames, res, camera_width, camera_height)
            lastresults = res
        else:
            imdraw = overlay_on_image(frames, lastresults, camera_width, camera_height)

        cv2.imshow('USB Camera', imdraw)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # FPS calculation
        framecount += 1
        if framecount >= 15:
            fps = "(Playback) {:.1f} FPS".format(time1 / 15)
            detectfps = "(Detection) {:.1f} FPS".format(detectframecount / time2)
            framecount = 0
            detectframecount = 0
            time1 = 0
            time2 = 0
        t2 = time.perf_counter()
        elapsedTime = t2 - t1
        time1 += 1 / elapsedTime
        time2 += elapsedTime
Example #30
Source File: MobileNet-SSD-TPU-async.py From TPU-MobilenetSSD with MIT License
def camThread(label, results, frameBuffer, camera_width, camera_height, vidfps, usbcamno):
    global fps
    global detectfps
    global framecount
    global detectframecount
    global time1
    global time2
    global lastresults
    global cam
    global window_name

    cam = cv2.VideoCapture(usbcamno)
    cam.set(cv2.CAP_PROP_FPS, vidfps)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
    window_name = "USB Camera"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    while True:
        t1 = time.perf_counter()

        ret, color_image = cam.read()
        if not ret:
            continue
        if frameBuffer.full():
            frameBuffer.get()
        frames = color_image
        frameBuffer.put(color_image.copy())
        res = None

        if not results.empty():
            res = results.get(False)
            detectframecount += 1
            imdraw = overlay_on_image(frames, res, label, camera_width, camera_height)
            lastresults = res
        else:
            imdraw = overlay_on_image(frames, lastresults, label, camera_width, camera_height)

        cv2.imshow('USB Camera', imdraw)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # FPS calculation
        framecount += 1
        if framecount >= 15:
            fps = "(Playback) {:.1f} FPS".format(time1 / 15)
            detectfps = "(Detection) {:.1f} FPS".format(detectframecount / time2)
            framecount = 0
            detectframecount = 0
            time1 = 0
            time2 = 0
        t2 = time.perf_counter()
        elapsedTime = t2 - t1
        time1 += 1 / elapsedTime
        time2 += elapsedTime