Python cv2.namedWindow() Examples
The following are 30 code examples of cv2.namedWindow(), collected from open-source projects. The source file, project, and license are noted above each example.
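Before the examples, a minimal sketch of the usual namedWindow lifecycle may help: create a named window (optionally with flags), bind images to it by name via imshow, pump the GUI event loop with waitKey, and destroy the window when done. All calls below are standard OpenCV API; the window name and image are placeholders.

import cv2
import numpy as np

# WINDOW_NORMAL makes the window user-resizable; the default
# WINDOW_AUTOSIZE locks it to the image's pixel size instead.
cv2.namedWindow("demo", cv2.WINDOW_NORMAL)
cv2.resizeWindow("demo", 640, 480)

img = np.zeros((480, 640, 3), np.uint8)  # placeholder black image
cv2.imshow("demo", img)  # imshow targets the window by its name
cv2.waitKey(0)           # pump the event loop until a key is pressed
cv2.destroyWindow("demo")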
Example #1
Source File: vachat.py From The-chat-room with MIT License
def run(self):
    print("VIDEO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VIDEO client successfully connected...")
    data = "".encode("utf-8")
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
    while True:
        while len(data) < payload_size:
            data += conn.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        while len(data) < msg_size:
            data += conn.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        frame = pickle.loads(frame_data)
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
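For context, the receiver above reads length-prefixed, zlib-compressed, pickled frames from a socket. The matching sender is not part of this example; the sketch below is only a plausible counterpart (socket setup, capture source, and error handling are assumptions).

import pickle
import socket
import struct
import zlib

import cv2

def send_frames(host, port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        payload = zlib.compress(pickle.dumps(frame))
        # The length prefix must use the same "L" struct format as the
        # receiver; note that struct.calcsize("L") is platform-dependent.
        sock.sendall(struct.pack("L", len(payload)) + payload)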
Example #2
Source File: object_tracker.py From OpenCV-3-x-with-Python-By-Example with MIT License
def __init__(self):
    # Initialize the video capture object
    # 0 -> indicates that frame should be captured from webcam
    self.cap = cv2.VideoCapture(0)

    # Capture the frame from the webcam
    ret, self.frame = self.cap.read()

    # Downsampling factor for the input frame
    self.scaling_factor = 0.8
    self.frame = cv2.resize(self.frame, None,
                            fx=self.scaling_factor, fy=self.scaling_factor,
                            interpolation=cv2.INTER_AREA)

    cv2.namedWindow('Object Tracker')
    cv2.setMouseCallback('Object Tracker', self.mouse_event)

    self.selection = None
    self.drag_start = None
    self.tracking_state = 0

# Method to track mouse events
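The mouse_event handler registered above is not shown in this excerpt, but OpenCV always invokes mouse callbacks with the same five-argument signature; a minimal hedged sketch:

import cv2

def mouse_event(event, x, y, flags, param):
    # OpenCV passes the event type, cursor position, modifier flags,
    # and the optional user data given to setMouseCallback
    if event == cv2.EVENT_LBUTTONDOWN:
        print("drag start at", (x, y))
    elif event == cv2.EVENT_LBUTTONUP:
        print("drag end at", (x, y))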
Example #3
Source File: streaming.py From olympe with BSD 3-Clause "New" or "Revised" License
def run(self):
    window_name = "Olympe Streaming Example"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    main_thread = next(
        filter(lambda t: t.name == "MainThread", threading.enumerate())
    )
    while main_thread.is_alive():
        with self.flush_queue_lock:
            try:
                yuv_frame = self.frame_queue.get(timeout=0.01)
            except queue.Empty:
                continue
            try:
                self.show_yuv_frame(window_name, yuv_frame)
            except Exception:
                # We have to continue popping frames from the queue even if
                # we fail to show one frame
                traceback.print_exc()
            finally:
                # Don't forget to unref the yuv frame. We don't want to
                # starve the video buffer pool
                yuv_frame.unref()
    cv2.destroyWindow(window_name)
Example #4
Source File: SudokuExtractor.py From SolveSudoku with MIT License
def parse_grid(path):
    original = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    processed = pre_process_image(original)

    # cv2.namedWindow('processed', cv2.WINDOW_AUTOSIZE)
    # processed_img = cv2.resize(processed, (500, 500))  # Resize image
    # cv2.imshow('processed', processed_img)

    corners = find_corners_of_largest_polygon(processed)
    cropped = crop_and_warp(original, corners)

    # cv2.namedWindow('cropped', cv2.WINDOW_AUTOSIZE)
    # cropped_img = cv2.resize(cropped, (500, 500))  # Resize image
    # cv2.imshow('cropped', cropped_img)

    squares = infer_grid(cropped)
    # print(squares)
    digits = get_digits(cropped, squares, 28)
    # print(digits)
    final_image = show_digits(digits)
    return final_image
Example #5
Source File: RtspClient.py From ReolinkCameraAPI with GNU General Public License v3.0
def preview(self):
    """ Blocking function. Opens OpenCV window to display stream. """
    self.connect()
    win_name = 'RTSP'
    cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(win_name, 20, 20)
    while True:
        cv2.imshow(win_name, self.get_frame())
        # if self._latest is not None:
        #     cv2.imshow(win_name, self._latest)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    cv2.waitKey()
    cv2.destroyAllWindows()
    cv2.waitKey()
Example #6
Source File: train.py From keras-image-segmentation with MIT License
def train_generator(self, image_generator, mask_generator):
    # cv2.namedWindow('show', 0)
    # cv2.resizeWindow('show', 1280, 640)
    while True:
        image = next(image_generator)
        mask = next(mask_generator)
        label = self.make_regressor_label(mask).astype(np.float32)
        # print (image.dtype, label.dtype)
        # print (image.shape, label.shape)
        # exit()
        # cv2.imshow('show', image[0].astype(np.uint8))
        # cv2.imshow('label', label[0].astype(np.uint8))
        # mask = self.select_labels(mask)
        # print (image.shape)
        # print (mask.shape)
        # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        # mask = (mask.astype(np.float32)*255/33).astype(np.uint8)
        # mask_color = cv2.applyColorMap(mask, cv2.COLORMAP_JET)
        # print (mask_color.shape)
        # show = cv2.addWeighted(image, 0.5, mask_color, 0.5, 0.0)
        # cv2.imshow("show", show)
        # key = cv2.waitKey()
        # if key == 27:
        #     exit()
        yield (image, label)
Example #7
Source File: check_add_to_hue_and_saturation.py From imgaug with MIT License
def main():
    image = data.astronaut()

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    # for value in cycle(np.arange(-255, 255, VAL_PER_STEP)):
    for value in np.arange(-255, 255, VAL_PER_STEP):
        aug = iaa.AddToHueAndSaturation(value=value)
        img_aug = aug.augment_image(image)
        img_aug = iaa.pad(img_aug, bottom=40)
        img_aug = ia.draw_text(img_aug, x=0, y=img_aug.shape[0]-38,
                               text="value=%d" % (value,), size=30)
        cv2.imshow("aug", img_aug)
        cv2.waitKey(TIME_PER_STEP)

    images_aug = iaa.AddToHueAndSaturation(value=(-255, 255), per_channel=True).augment_images([image] * 64)
    ia.imshow(ia.draw_grid(images_aug))

    image = ia.quokka_square((128, 128))
    images_aug = []
    images_aug.extend(iaa.AddToHue().augment_images([image] * 10))
    images_aug.extend(iaa.AddToSaturation().augment_images([image] * 10))
    ia.imshow(ia.draw_grid(images_aug, rows=2))
Example #8
Source File: kalman_filter.py From face_landmark_dnn with MIT License
def main():
    """Test code"""
    global mp
    mp = np.array((2, 1), np.float32)  # measurement

    def onmouse(k, x, y, s, p):
        global mp
        mp = np.array([[np.float32(x)], [np.float32(y)]])

    cv2.namedWindow("kalman")
    cv2.setMouseCallback("kalman", onmouse)
    kalman = Stabilizer(4, 2)
    frame = np.zeros((480, 640, 3), np.uint8)  # drawing canvas
    while True:
        kalman.update(mp)
        point = kalman.prediction
        state = kalman.filter.statePost
        # cv2.circle expects integer pixel coordinates
        cv2.circle(frame, (int(state[0]), int(state[1])), 2, (255, 0, 0), -1)
        cv2.circle(frame, (int(point[0]), int(point[1])), 2, (0, 255, 0), -1)
        cv2.imshow("kalman", frame)
        k = cv2.waitKey(30) & 0xFF
        if k == 27:
            break
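The Stabilizer above wraps a cv2.KalmanFilter (visible through the kalman.filter.statePost access). For reference, a bare 4-state/2-measurement filter can be configured roughly as below; the constant-velocity matrices are standard textbook values, not taken from this project.

import cv2
import numpy as np

kalman = cv2.KalmanFilter(4, 2)  # state: x, y, dx, dy; measurement: x, y
kalman.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0],
                                    [0, 1, 0, 1],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03

prediction = kalman.predict()  # predicted (x, y, dx, dy)
kalman.correct(np.array([[320.0], [240.0]], np.float32))  # feed a measurement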
Example #9
Source File: videoExtenso.py From crappy with GNU General Public License v2.0
def prepare(self):
    if self.save_folder and not os.path.exists(self.save_folder):
        try:
            os.makedirs(self.save_folder)
        except OSError:
            assert os.path.exists(self.save_folder), \
                "Error creating " + self.save_folder
    self.cam = Camera.classes[self.camera]()
    self.cam.open(**self.cam_kwargs)
    self.ve = VE(**self.ve_kwargs)
    config = VE_config(self.cam, self.ve)
    config.main()
    self.ve.start_tracking()
    if self.show_image:
        try:
            flags = cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO
        except AttributeError:
            flags = cv2.WINDOW_NORMAL
        cv2.namedWindow("Videoextenso", flags)
    self.loops = 0
    self.last_fps_print = 0
    self.last_fps_loops = 0
Example #10
Source File: discorrel.py From crappy with GNU General Public License v2.0
def prepare(self):
    if self.save_folder and not os.path.exists(self.save_folder):
        try:
            os.makedirs(self.save_folder)
        except OSError:
            assert os.path.exists(self.save_folder), \
                "Error creating " + self.save_folder
    self.cam = Camera.classes[self.camera]()
    self.cam.open(**self.cam_kwargs)
    config = DISConfig(self.cam)
    config.main()
    self.bbox = config.box
    t, img0 = self.cam.get_image()
    self.correl = DIS(img0, bbox=self.bbox)
    if self.show_image:
        try:
            flags = cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO
        except AttributeError:
            flags = cv2.WINDOW_NORMAL
        cv2.namedWindow("DISCorrel", flags)
    self.loops = 0
    self.last_fps_print = 0
    self.last_fps_loops = 0
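Examples #9 and #10 share the same defensive idiom: cv2.WINDOW_KEEPRATIO is missing from some older OpenCV builds, so touching the attribute can raise AttributeError, and the code falls back to WINDOW_NORMAL alone. A hedged, general-purpose version of that probe (the helper name is ours, not from crappy):

import cv2

def safe_window_flags(*flag_names, fallback=0):
    # OR together only the window flags this OpenCV build actually
    # provides; missing flags contribute nothing.
    flags = 0
    for name in flag_names:
        flags |= getattr(cv2, name, 0)
    return flags or fallback

flags = safe_window_flags("WINDOW_NORMAL", "WINDOW_KEEPRATIO",
                          fallback=cv2.WINDOW_NORMAL)
cv2.namedWindow("preview", flags)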
Example #11
Source File: vis_utils.py From ip_basic with MIT License
def cv2_show_image(window_name, image, size_wh=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name: str window name
        image: ndarray image to display
        size_wh: window size (w, h)
        location_xy: window location (x, y)
    """
    if size_wh is not None:
        cv2.namedWindow(window_name,
                        cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image)
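A quick usage sketch for this helper; the image here is a stand-in array, not data from ip_basic:

import cv2
import numpy as np

image = np.zeros((480, 640, 3), np.uint8)  # stand-in for a real image
# Resizable 800x600 window placed at the screen's top-left corner
cv2_show_image("depth_map", image, size_wh=(800, 600), location_xy=(0, 0))
cv2.waitKey(0)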
Example #12
Source File: check_superpixels.py From imgaug with MIT License
def main():
    image = data.astronaut()[..., ::-1]  # rgb2bgr
    print(image.shape)

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    for n_segments in cycle(reversed(np.arange(1, 200, SEGMENTS_PER_STEP))):
        aug = iaa.Superpixels(p_replace=0.75, n_segments=n_segments)
        time_start = time.time()
        img_aug = aug.augment_image(image)
        print("augmented %d in %.4fs" % (n_segments, time.time() - time_start))
        img_aug = ia.draw_text(img_aug, x=5, y=5, text="%d" % (n_segments,))

        cv2.imshow("aug", img_aug)
        cv2.waitKey(TIME_PER_STEP)
Example #13
Source File: check_directed_edge_detect.py From imgaug with MIT License
def main():
    image = data.astronaut()

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    height, width = image.shape[0], image.shape[1]
    center_x = width // 2
    center_y = height // 2
    r = int(min(image.shape[0], image.shape[1]) / 3)

    for deg in cycle(np.arange(0, 360, DEG_PER_STEP)):
        rad = np.deg2rad(deg - 90)
        point_x = int(center_x + r * np.cos(rad))
        point_y = int(center_y + r * np.sin(rad))
        direction = deg / 360

        aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=direction)
        img_aug = aug.augment_image(image)
        img_aug[point_y-POINT_SIZE:point_y+POINT_SIZE+1,
                point_x-POINT_SIZE:point_x+POINT_SIZE+1, :] = np.array([0, 255, 0])

        cv2.imshow("aug", img_aug)
        cv2.waitKey(TIME_PER_STEP)
Example #14
Source File: video_capture.py From PyIntroduction with MIT License
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)

    if capture.isOpened() is False:
        # raising a bare string is invalid in Python 3; raise an exception
        raise IOError("IO Error")

    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)

    while True:
        ret, image = capture.read()
        if ret == False:
            continue
        cv2.imshow("Capture", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()

# Webcam capture and display with Matplotlib
Example #15
Source File: captures.py From holodeck with MIT License
def display_multiple(images: List[Tuple[List, Optional[str]]]):
    """Displays one or more captures in a CV2 window. Useful for debugging

    Args:
        images: List of tuples containing MxNx3 pixel arrays and optional
            titles, OR a list of image data
    """
    for image in images:
        if isinstance(image, tuple):
            image_data = image[0]
        else:
            image_data = image

        if isinstance(image, tuple) and len(image) > 1:
            title = image[1]
        else:
            title = "Camera Output"

        cv2.namedWindow(title)
        cv2.moveWindow(title, 500, 500)
        cv2.imshow(title, image_data)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
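Usage is straightforward; for instance, with stand-in image data:

import numpy as np

rgb = np.zeros((240, 320, 3), np.uint8)
depth = np.full((240, 320, 3), 128, np.uint8)
# One titled and one untitled capture; the latter falls back to "Camera Output"
display_multiple([(rgb, "RGB Camera"), depth])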
Example #16
Source File: enjoy_latent.py From srl-zoo with MIT License
def createFigureAndSlider(name, state_dim):
    """
    Creates a window for the latent space visualization, and another for
    the sliders that control it
    :param name: name of model (str)
    :param state_dim: (int)
    :return:
    """
    # opencv gui setup
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, 500, 500)
    cv2.namedWindow('slider for ' + name)
    # add a slider for each component of the latent space
    for i in range(state_dim):
        # the sliders MUST be between 0 and max, so we place max at 100 and
        # start at 50, so that subtracting 50 and dividing by 10 gives
        # [-5, 5] for each component
        cv2.createTrackbar(str(i), 'slider for ' + name, 50, 100, (lambda a: None))
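The sliders created here are presumably read back elsewhere with cv2.getTrackbarPos; a minimal sketch of the decode step implied by the comment (subtract 50, divide by 10 — the helper name is an assumption):

import cv2
import numpy as np

def read_latent(name, state_dim):
    # Map each slider from [0, 100] back to roughly [-5, 5]
    raw = [cv2.getTrackbarPos(str(i), 'slider for ' + name)
           for i in range(state_dim)]
    return (np.array(raw, dtype=np.float32) - 50.0) / 10.0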
Example #17
Source File: check_color.py From DL.EyeSight with GNU General Public License v3.0
def main_WithColorspace():
    image = data.astronaut()
    print("image shape:", image.shape)

    aug = WithColorspace(
        from_colorspace="RGB",
        to_colorspace="HSV",
        children=WithChannels(0, Add(50))
    )

    aug_no_colorspace = WithChannels(0, Add(50))

    img_show = np.hstack([
        image,
        aug.augment_image(image),
        aug_no_colorspace.augment_image(image)
    ])
    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", img_show[..., ::-1])
    cv2.waitKey(TIME_PER_STEP)
Example #18
Source File: color_range_detector.py From Color-Tracker with MIT License
def _init_trackbars(self):
    trackbars_window_name = "hsv settings"
    cv2.namedWindow(trackbars_window_name, cv2.WINDOW_NORMAL)

    # HSV Lower Bound
    h_min_trackbar = _Trackbar("H min", trackbars_window_name, 0, 255)
    s_min_trackbar = _Trackbar("S min", trackbars_window_name, 0, 255)
    v_min_trackbar = _Trackbar("V min", trackbars_window_name, 0, 255)

    # HSV Upper Bound
    h_max_trackbar = _Trackbar("H max", trackbars_window_name, 255, 255)
    s_max_trackbar = _Trackbar("S max", trackbars_window_name, 255, 255)
    v_max_trackbar = _Trackbar("V max", trackbars_window_name, 255, 255)

    # Kernel for morphology
    kernel_x = _Trackbar("kernel x", trackbars_window_name, 0, 30)
    kernel_y = _Trackbar("kernel y", trackbars_window_name, 0, 30)

    self._trackbars = [h_min_trackbar, s_min_trackbar, v_min_trackbar,
                       h_max_trackbar, s_max_trackbar, v_max_trackbar,
                       kernel_x, kernel_y]
Example #19
Source File: DisplayThread.py From Gesture-Recognition with MIT License
def display_loop(self, display_queue):
    # global display_queue
    cv2.namedWindow(self.window_name)
    while not self.stopped:
        fps = 1.0 / (time.time() - self.fps_time)
        self.fps_time = time.time()
        with self.lock:
            img = display_queue.get()
        cv2.putText(img, "FPS: %f" % fps, (10, 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.imshow(self.window_name, img)
        display_queue.task_done()
        cv2.waitKey(self.delay)
        if cv2.waitKey(1) == 27:
            self.stop()
Example #20
Source File: drawing.py From PyIntroduction with MIT License
def drawingDemo():
    img = emptyImage()

    # Draw a line with thickness 2
    drawLine(img, (10, 10), (200, 200), (0, 0, 255), 2)

    # A thickness of -1 fills the shape
    drawCircle(img, (300, 100), 80, (0, 255, 0), -1)

    # Draw both the filled interior and the outline
    drawRectangle(img, (10, 210), (210, 350), (100, 100, 0), -1)
    drawRectangle(img, (10, 210), (210, 350), (255, 0, 0), 3)

    # Draw an ellipse
    drawElipse(img, (450, 100), (30, 80), 0, 0, 360, (0, 100, 100), -1)

    # Draw a polygon
    pts = np.array([[(250, 240), (270, 280), (350, 320),
                     (500, 300), (450, 230), (350, 210)]], dtype=np.int32)
    drawPolylines(img, pts, True, (255, 100, 100), 5)

    # Draw text
    drawText(img, 'OpenCV', (20, 450), font_types[0], 4, (200, 200, 200), 2)

    cv2.namedWindow('DrawingDemo', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('DrawingDemo', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #21
Source File: color_range_detector.py From Color-Tracker with MIT License
def __init__(self, camera: Camera):
    self._camera = camera
    self._trackbars = []
    self._main_window_name = "HSV color range detector"
    cv2.namedWindow(self._main_window_name)
    self._init_trackbars()
Example #22
Source File: display_image.py From PyIntroduction with MIT License
def cvShowImageColor(image_file):
    image_bgr = cv2.imread(image_file)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', image_bgr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Displaying a grayscale image with OpenCV
Example #23
Source File: mouse_painting.py From PyIntroduction with MIT License
def simplePaint():
    img = emptyImage()
    colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]
    color = colors[0]

    # Define the function that paints while dragging
    def brushPaint(x, y):
        cv2.circle(img, (x, y), 20, color, -1)

    win_name = 'PaintDemo'
    cv2.namedWindow(win_name)

    # Register the drag-painting function via the CVMouseEvent class
    mouse_event = CVMouseEvent(drag_func=brushPaint)
    mouse_event.setCallBack(win_name)

    while(True):
        cv2.imshow(win_name, img)
        key = cv2.waitKey(30) & 0xFF

        # Color switching
        if key == ord('1'):
            color = colors[0]
        elif key == ord('2'):
            color = colors[1]
        elif key == ord('3'):
            color = colors[2]
        # Reset the image
        elif key == ord('r'):
            img = emptyImage()
        elif key == ord('q'):
            break

    cv2.destroyAllWindows()
Example #24
Source File: detect_crazyflie.py From ROS-Robotics-By-Example with MIT License
def __init__(self):
    # initialize ROS node and transform publisher
    rospy.init_node('crazyflie_detector', anonymous=True)
    self.pub_tf = tf.TransformBroadcaster()

    self.rate = rospy.Rate(50.0)  # publish transform at 50 Hz

    # initialize values for crazyflie location on Kinect v2 image
    self.cf_u = 0    # u is pixels left(0) to right(+)
    self.cf_v = 0    # v is pixels top(0) to bottom(+)
    self.cf_d = 0    # d is distance camera(0) to crazyflie(+) from depth image
    self.last_d = 0  # last non-zero depth measurement

    # crazyflie orientation to Kinect v2 image (Euler)
    self.r = -1.5708
    self.p = 0
    self.y = -3.1415

    # Convert image from a ROS image message to a CV image
    self.bridge = CvBridge()
    cv2.namedWindow("KinectV2", 1)

    # Wait for the camera_info topic to become available
    rospy.wait_for_message('/kinect2/qhd/camera_info', CameraInfo)

    # Subscribe to Kinect v2 sd camera_info to get image frame height and width
    rospy.Subscriber('/kinect2/qhd/camera_info', CameraInfo, self.camera_data, queue_size=1)

    # Subscribe to registered color and depth images
    rospy.Subscriber('/kinect2/qhd/image_color_rect', Image, self.image_callback, queue_size=1)
    rospy.Subscriber('/kinect2/qhd/image_depth_rect', Image, self.depth_callback, queue_size=1)

    self.rate.sleep()  # suspend until next cycle

# This callback function sets parameters regarding the camera.
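The subscribed callbacks are not shown in this excerpt; each converts its ROS message with the CvBridge created above. A hedged sketch of what image_callback might minimally do (the display logic is an assumption, not code from this project):

import cv2

def image_callback(self, msg):
    # Convert the ROS Image message to a BGR ndarray and show it
    frame = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    cv2.imshow("KinectV2", frame)
    cv2.waitKey(1)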
Example #25
Source File: exposure_correction.py From exposure_correction with MIT License
def main():
    # img = cv2.imread("test_img.JPG", 0)
    img = np.ones((600, 600))
    adj_matrix = get_unweighted_adjacency(img)
    # print adj_matrix[0, 1]
    # cv2.namedWindow("output")
    # dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    # ret, sure_fg = cv2.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)
Example #26
Source File: webcam.py From AdaIN-TF with MIT License
def __init__(self, style_path, img_size=512, scale=1, alpha=1, interpolate=False):
    self.style_imgs = get_files(style_path)

    # Create room for two styles for interpolation
    self.style_rgbs = [None, None]

    self.img_size = img_size
    self.crop_size = 256
    self.scale = scale
    self.alpha = alpha

    cv2.namedWindow('Style Controls')
    if len(self.style_imgs) > 1:
        # Select style image by index
        cv2.createTrackbar('index', 'Style Controls', 0, len(self.style_imgs)-1, self.set_idx)

    # Blend param for AdaIN transform
    cv2.createTrackbar('alpha', 'Style Controls', 100, 100, self.set_alpha)

    # Resize style to this size before cropping
    cv2.createTrackbar('size', 'Style Controls', img_size, 2048, self.set_size)

    # Size of square crop box for style
    cv2.createTrackbar('crop size', 'Style Controls', 256, 2048, self.set_crop_size)

    # Scale the content before processing
    cv2.createTrackbar('scale', 'Style Controls', int(scale*100), 200, self.set_scale)

    self.set_style(random=True, window='Style Controls', style_idx=0)

    if interpolate:
        # Create a window to show second style image for interpolation
        cv2.namedWindow('style2')
        self.interp_weight = 1.
        cv2.createTrackbar('interpolation', 'Style Controls', 100, 100, self.set_interp)
        self.set_style(random=True, style_idx=1, window='style2')
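Each createTrackbar callback above (set_alpha, set_size, and so on) receives the new slider position as its only argument; one such setter might look like this sketch (the rescaling is an assumption, not AdaIN-TF's code):

def set_alpha(self, pos):
    # OpenCV passes the trackbar position, 0-100 here; rescale to [0, 1]
    self.alpha = pos / 100.0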
Example #27
Source File: benchmark.py From VNect with Apache License 2.0
def BB_init(self):
    # use HOG method to initialize bounding box
    self.hog = cv2.HOGDescriptor()
    self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    self._box_init_window_name = 'Bounding Box Initialization'
    cv2.namedWindow(self._box_init_window_name)
    cv2.setMouseCallback(self._box_init_window_name, self._on_mouse)
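BB_init only wires up the detector and the window; producing an initial person bounding box would go through detectMultiScale, roughly as below (the helper name and parameter values are common defaults, not taken from VNect):

import cv2

def detect_person(hog, frame):
    # Returns the first detected person rect as (x, y, w, h), or None
    rects, weights = hog.detectMultiScale(frame, winStride=(8, 8),
                                          padding=(8, 8), scale=1.05)
    return tuple(rects[0]) if len(rects) > 0 else None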
Example #28
Source File: debug.py From DeepForest with MIT License
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # create the generator
    generator = create_generator(args)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # optionally load anchor parameters
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)

    # create the display window if necessary
    if not args.no_gui:
        cv2.namedWindow('Image', cv2.WINDOW_NORMAL)

    run(generator, args, anchor_params=anchor_params)
Example #29
Source File: pose_estimation.py From OpenCV-3-x-with-Python-By-Example with MIT License
def __init__(self, win_name, init_frame, callback_func):
    self.callback_func = callback_func
    self.selected_rect = None
    self.drag_start = None
    self.tracking_state = 0
    event_params = {"frame": init_frame}
    cv2.namedWindow(win_name)
    cv2.setMouseCallback(win_name, self.mouse_event, event_params)
Example #30
Source File: check_gaussian_blur.py From DL.EyeSight with GNU General Public License v3.0
def main():
    image = data.astronaut()
    image = eu.imresize_single_image(image, (128, 128))
    print("image shape:", image.shape)
    print("Press any key or wait %d ms to proceed to the next image." % (TIME_PER_STEP,))

    k = [1, 3, 5, 7, (3, 3), (1, 11)]

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("aug", 128*NB_AUGS_PER_IMAGE, 128)
    # cv2.imshow("aug", image[..., ::-1])
    # cv2.waitKey(TIME_PER_STEP)

    for ki in k:
        aug = MedianBlur(k=ki)
        img_aug = [aug.augment_image(image) for _ in range(NB_AUGS_PER_IMAGE)]
        img_aug = np.hstack(img_aug)
        print("dtype", img_aug.dtype, "averages",
              np.average(img_aug, axis=tuple(range(0, img_aug.ndim-1))))
        # print("dtype", img_aug.dtype, "averages", img_aug.mean(axis=range(1, img_aug.ndim)))

        # title = "k=%s" % (str(ki),)
        # img_aug = ia.draw_text(img_aug, x=5, y=5, text=title)

        cv2.imshow("aug", img_aug[..., ::-1])  # here with rgb2bgr
        cv2.waitKey(TIME_PER_STEP)