Python cv2.WINDOW_NORMAL Examples

The following are 28 code examples of cv2.WINDOW_NORMAL(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
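
A quick orientation before the examples: cv2.WINDOW_NORMAL creates a window that the user (or a call to cv2.resizeWindow()) can resize, whereas the default cv2.WINDOW_AUTOSIZE locks the window to the displayed image's size. A minimal, self-contained sketch (the window name "demo" and the blank test image are illustrative only, not taken from any project below):

import cv2
import numpy as np

# WINDOW_NORMAL makes the window resizable; the image is scaled to fit it.
cv2.namedWindow("demo", cv2.WINDOW_NORMAL)
cv2.resizeWindow("demo", 640, 480)  # only windows created without WINDOW_AUTOSIZE can be resized
cv2.imshow("demo", np.zeros((240, 320, 3), dtype=np.uint8))
cv2.waitKey(0)
cv2.destroyAllWindows()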
Example #1
Source File: streaming.py    From olympe with BSD 3-Clause "New" or "Revised" License
def run(self):
        window_name = "Olympe Streaming Example"
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        main_thread = next(
            filter(lambda t: t.name == "MainThread", threading.enumerate())
        )
        while main_thread.is_alive():
            with self.flush_queue_lock:
                try:
                    yuv_frame = self.frame_queue.get(timeout=0.01)
                except queue.Empty:
                    continue
                try:
                    self.show_yuv_frame(window_name, yuv_frame)
                except Exception:
                    # We have to continue popping frame from the queue even if
                    # we fail to show one frame
                    traceback.print_exc()
                finally:
                    # Don't forget to unref the yuv frame. We don't want to
                    # starve the video buffer pool
                    yuv_frame.unref()
        cv2.destroyWindow(window_name) 
Example #2
Source File: yolo.py    From keras-yolov3-KF-objectTracking with MIT License
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC    = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps       = vid.get(cv2.CAP_PROP_FPS)
    video_size      = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                        int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = output_path != ""
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:
            # Stop when the stream ends or a frame fails to decode; otherwise
            # Image.fromarray would be called on a None frame.
            break
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
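        # FPS bookkeeping: accumulate elapsed time and publish the frame count
        # roughly once per second.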
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session() 
Example #3
Source File: cvutils.py    From 1ZLAB_PyEspCar with GNU General Public License v3.0
def select_roi(target):
    # Create a window
    cv2.namedWindow("image", flags=cv2.WINDOW_NORMAL | cv2.WINDOW_FREERATIO)
    cv2.imshow("image", target)
    # Whether to draw the crosshair grid
    showCrosshair = True

    # If True, the initial mouse position is taken as the center of the ROI;
    # if False, the region is selected from top-left to bottom-right
    fromCenter = False
    # Select ROI
    rect = cv2.selectROI("image", target, showCrosshair, fromCenter)

    print("Selected rectangle region")
    (x, y, w, h) = rect

    # Crop image
    roi = target[y:y+h, x:x+w]
    
    return rect, roi 
Example #4
Source File: test.py    From pytorch_pose_proposal_networks with MIT License
def draw_limb(img, persons):
    overlay = img[0].copy()
    for p in persons:
        for j in range(1, 16):
            if p[j][0] == -1 or p[j][1] == -1:
                continue
            cv2.circle(overlay, (int(p[j][0]), int(p[j][1])), 3, colors[j-1],
                       -1, cv2.LINE_AA)

    for p in persons:
        for j in range(14):
            j1, j2 = p[limbs1[j]], p[limbs2[j]]
            if (j1 == -1).any() or (j2 == -1).any():
                continue
            cv2.line(overlay, (int(j1[0]), int(j1[1])), (int(j2[0]), int(j2[1])),
                     colors[j], 2, cv2.LINE_AA)
    img_dst = cv2.addWeighted(overlay, alpha, img[0], 1-alpha, 0)[:, :, ::-1]
    cv2.namedWindow('persons', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('persons', 600, 600)
    cv2.imshow('persons', img_dst)
    key = cv2.waitKey(0)
    if key == ord('s'):
        cv2.imwrite('persons.png', img_dst * 255) 
Example #5
Source File: videoExtenso.py    From crappy with GNU General Public License v2.0
def prepare(self):
    if self.save_folder and not os.path.exists(self.save_folder):
      try:
        os.makedirs(self.save_folder)
      except OSError:
        assert os.path.exists(self.save_folder),\
            "Error creating "+self.save_folder
    self.cam = Camera.classes[self.camera]()
    self.cam.open(**self.cam_kwargs)
    self.ve = VE(**self.ve_kwargs)
    config = VE_config(self.cam,self.ve)
    config.main()
    self.ve.start_tracking()
    if self.show_image:
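      # WINDOW_KEEPRATIO is not available in some older OpenCV builds,
      # hence the AttributeError fallback below.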
      try:
        flags = cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO
      except AttributeError:
        flags = cv2.WINDOW_NORMAL
      cv2.namedWindow("Videoextenso",flags)
    self.loops = 0
    self.last_fps_print = 0
    self.last_fps_loops = 0 
Example #6
Source File: discorrel.py    From crappy with GNU General Public License v2.0
def prepare(self):
    if self.save_folder and not os.path.exists(self.save_folder):
      try:
        os.makedirs(self.save_folder)
      except OSError:
        assert os.path.exists(self.save_folder),\
            "Error creating "+self.save_folder
    self.cam = Camera.classes[self.camera]()
    self.cam.open(**self.cam_kwargs)
    config = DISConfig(self.cam)
    config.main()
    self.bbox = config.box
    t,img0 = self.cam.get_image()
    self.correl = DIS(img0,bbox=self.bbox)
    if self.show_image:
      try:
        flags = cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO
      except AttributeError:
        flags = cv2.WINDOW_NORMAL
      cv2.namedWindow("DISCorrel",flags)
    self.loops = 0
    self.last_fps_print = 0
    self.last_fps_loops = 0 
Example #7
Source File: vchat.py    From lan-ichat with ISC License
def run(self):
        print("VEDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VEDIO client success connected...")
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
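        # Each incoming message is a struct-packed length header ("L"; its size
        # is platform-dependent) followed by a zlib-compressed, pickled frame.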
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break 
Example #8
Source File: color_range_detector.py    From Color-Tracker with MIT License
def _init_trackbars(self):
        trackbars_window_name = "hsv settings"
        cv2.namedWindow(trackbars_window_name, cv2.WINDOW_NORMAL)

        # HSV Lower Bound
        h_min_trackbar = _Trackbar("H min", trackbars_window_name, 0, 255)
        s_min_trackbar = _Trackbar("S min", trackbars_window_name, 0, 255)
        v_min_trackbar = _Trackbar("V min", trackbars_window_name, 0, 255)

        # HSV Upper Bound
        h_max_trackbar = _Trackbar("H max", trackbars_window_name, 255, 255)
        s_max_trackbar = _Trackbar("S max", trackbars_window_name, 255, 255)
        v_max_trackbar = _Trackbar("V max", trackbars_window_name, 255, 255)

        # Kernel for morphology
        kernel_x = _Trackbar("kernel x", trackbars_window_name, 0, 30)
        kernel_y = _Trackbar("kernel y", trackbars_window_name, 0, 30)

        self._trackbars = [h_min_trackbar, s_min_trackbar, v_min_trackbar, h_max_trackbar, s_max_trackbar,
                           v_max_trackbar, kernel_x, kernel_y] 
Example #9
Source File: enjoy_latent.py    From srl-zoo with MIT License
def createFigureAndSlider(name, state_dim):
    """
    Create a window for the latent space visualization and another one for the sliders that control it
    :param name: name of model (str)
    :param state_dim: (int)
    :return:
    """
    # opencv gui setup
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, 500, 500)
    cv2.namedWindow('slider for ' + name)
    # add a slider for each component of the latent space
    for i in range(state_dim):
        # the sliders MUST be between 0 and max, so we place max at 100 and start at 50,
        # so that subtracting 50 and dividing by 10 maps each component to [-5, 5]
        cv2.createTrackbar(str(i), 'slider for ' + name, 50, 100, (lambda a: None)) 
Example #10
Source File: video_capture.py    From PyIntroduction with MIT License
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)

    if not capture.isOpened():
        raise IOError("IO Error")

    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)

    while True:
        ret, image = capture.read()

        if not ret:
            continue

        cv2.imshow("Capture", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()


# Webcam capture and display with Matplotlib
Example #11
Source File: temp.py    From aggregation with Apache License 2.0
def read_file(fname):
    image = cv2.imread(fname,0)


    image = cv2.adaptiveThreshold(image,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
    # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    # cv2.imshow('image',image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # image = cv2.adaptiveThreshold(image,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    # cv2.imwrite("/home/ggdhines/temp.jpg",image)
    # assert False


    # _,image = cv2.threshold(image,200,255,cv2.THRESH_BINARY)

    # image = 255 - image
    # image = image > 0
    image = image.astype(np.float64)  # np.float was removed from modern NumPy

    return image 
Example #12
Source File: check_add_to_hue_and_saturation.py    From imgaug with MIT License
def main():
    image = data.astronaut()

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    # for value in cycle(np.arange(-255, 255, VAL_PER_STEP)):
    for value in np.arange(-255, 255, VAL_PER_STEP):
        aug = iaa.AddToHueAndSaturation(value=value)
        img_aug = aug.augment_image(image)
        img_aug = iaa.pad(img_aug, bottom=40)
        img_aug = ia.draw_text(img_aug, x=0, y=img_aug.shape[0]-38, text="value=%d" % (value,), size=30)

        cv2.imshow("aug", img_aug)
        cv2.waitKey(TIME_PER_STEP)

    images_aug = iaa.AddToHueAndSaturation(value=(-255, 255), per_channel=True).augment_images([image] * 64)
    ia.imshow(ia.draw_grid(images_aug))

    image = ia.quokka_square((128, 128))
    images_aug = []
    images_aug.extend(iaa.AddToHue().augment_images([image] * 10))
    images_aug.extend(iaa.AddToSaturation().augment_images([image] * 10))
    ia.imshow(ia.draw_grid(images_aug, rows=2)) 
Example #13
Source File: check_superpixels.py    From imgaug with MIT License
def main():
    image = data.astronaut()[..., ::-1]  # rgb2bgr
    print(image.shape)

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    for n_segments in cycle(reversed(np.arange(1, 200, SEGMENTS_PER_STEP))):
        aug = iaa.Superpixels(p_replace=0.75, n_segments=n_segments)
        time_start = time.time()
        img_aug = aug.augment_image(image)
        print("augmented %d in %.4fs" % (n_segments, time.time() - time_start))
        img_aug = ia.draw_text(img_aug, x=5, y=5, text="%d" % (n_segments,))

        cv2.imshow("aug", img_aug)
        cv2.waitKey(TIME_PER_STEP) 
Example #14
Source File: stream.py    From katarina with MIT License
def main():
    signal.signal(signal.SIGINT, signal_handler)
    try:
        drone.moveCamera(tilt=-50, pan=0)
        drone.videoEnable()

        cap = cv2.VideoCapture('./bebop.sdp')

        # Create the resizable window before the first imshow; otherwise imshow
        # auto-creates it with WINDOW_AUTOSIZE and the WINDOW_NORMAL flag is ignored.
        cv2.namedWindow('img', cv2.WINDOW_NORMAL)

        while True:
            ret, img = cap.read()
            if ret:
                cv2.imshow('img', img)
                cv2.waitKey(1)

            drone.update()

        sys.exit(0)
    except (TypeError) as e:
        pass 
Example #15
Source File: debug.py    From kaggle-rsna18 with MIT License
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create the generator
    generator = create_generator(args)

    # create the display window
    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)

    if args.loop:
        while run(generator, args):
            pass
    else:
        run(generator, args) 
Example #16
Source File: detector.py    From deep_sort_pytorch with MIT License
def demo():
    import os
    from vizer.draw import draw_boxes

    yolo = YOLOv3("cfg/yolo_v3.cfg", "weight/yolov3.weights", "cfg/coco.names")
    print("yolo.size =", yolo.size)
    root = "./demo"
    resdir = os.path.join(root, "results")
    os.makedirs(resdir, exist_ok=True)
    files = [os.path.join(root, file) for file in os.listdir(root) if file.endswith('.jpg')]
    files.sort()
    for filename in files:
        img = cv2.imread(filename)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        bbox, cls_conf, cls_ids = yolo(img)

        if bbox is not None:
            img = draw_boxes(img, bbox, cls_ids, cls_conf, class_name_map=yolo.class_names)
        # save results
        cv2.imwrite(os.path.join(resdir, os.path.basename(filename)), img[:, :, (2, 1, 0)])
        # imshow
        # cv2.namedWindow("yolo", cv2.WINDOW_NORMAL)
        # cv2.resizeWindow("yolo", 600,600)
        # cv2.imshow("yolo",res[:,:,(2,1,0)])
        # cv2.waitKey(0) 
Example #17
Source File: yolov3_deepsort.py    From deep_sort_pytorch with MIT License
def __init__(self, cfg, args, video_path):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in CPU mode, which may be very slow!", UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            self.vdo = cv2.VideoCapture()
        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names 
Example #18
Source File: cv_bridge_demo.py    From Learning-Robotics-using-Python-Second-Edition with MIT License
def show_img_cb(self, event):
    try:
        cv2.namedWindow("RGB_Image", cv2.WINDOW_NORMAL)
        cv2.moveWindow("RGB_Image", 25, 75)

        cv2.namedWindow("Processed_Image", cv2.WINDOW_NORMAL)
        cv2.moveWindow("Processed_Image", 500, 75)

        # And one for the depth image
        cv2.namedWindow("Depth_Image", cv2.WINDOW_NORMAL)
        cv2.moveWindow("Depth_Image", 950, 75)

        cv2.imshow("RGB_Image", self.frame)
        cv2.imshow("Processed_Image", self.display_image)
        cv2.imshow("Depth_Image", self.depth_display_image)
        cv2.waitKey(3)
    except:
        pass
Example #19
Source File: check_color.py    From DL.EyeSight with GNU General Public License v3.0
def main_WithColorspace():
    image = data.astronaut()
    print("image shape:", image.shape)

    aug = WithColorspace(
        from_colorspace="RGB",
        to_colorspace="HSV",
        children=WithChannels(0, Add(50))
    )

    aug_no_colorspace = WithChannels(0, Add(50))

    img_show = np.hstack([
        image,
        aug.augment_image(image),
        aug_no_colorspace.augment_image(image)
    ])

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", img_show[..., ::-1])
    cv2.waitKey(TIME_PER_STEP) 
Example #20
Source File: check_directed_edge_detect.py    From imgaug with MIT License
def main():
    image = data.astronaut()

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    height, width = image.shape[0], image.shape[1]
    center_x = width // 2
    center_y = height // 2
    r = int(min(image.shape[0], image.shape[1]) / 3)

    for deg in cycle(np.arange(0, 360, DEG_PER_STEP)):
        rad = np.deg2rad(deg-90)
        point_x = int(center_x + r * np.cos(rad))
        point_y = int(center_y + r * np.sin(rad))

        direction = deg / 360
        aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=direction)
        img_aug = aug.augment_image(image)
        img_aug[point_y-POINT_SIZE:point_y+POINT_SIZE+1, point_x-POINT_SIZE:point_x+POINT_SIZE+1, :] =\
            np.array([0, 255, 0])

        cv2.imshow("aug", img_aug)
        cv2.waitKey(TIME_PER_STEP) 
Example #21
Source File: test.py    From pytorch_pose_proposal_networks with MIT License
def draw_box(img, joints):
    overlay = img[0].copy()
    for i in range(1, 16):
        if joints[i]:
            for j in range(len(joints[i][0])):
                box = joints[i][0][j]
                tl_x, tl_y, br_x, br_y = int(box[2] - 0.5 * box[4]), int(box[3] - 0.5 * box[5]), \
                                         int(box[2] + 0.5 * box[4]), int(box[3] + 0.5 * box[5])
                cv2.rectangle(overlay, (tl_x, tl_y), (br_x, br_y), colors[i-1], -1)

    img_transparent = cv2.addWeighted(overlay, alpha, img[0], 1 - alpha, 0)[:, :, ::-1]
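    # Overlay the cell grid by whitening every CELL_SIZE-th column and row.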
    img_transparent[:, ::cfg.CELL_SIZE, :] = np.array([1., 1, 1])
    img_transparent[::cfg.CELL_SIZE, :, :] = np.array([1., 1, 1])
    cv2.namedWindow('box', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('box', 600, 600)
    cv2.imshow('box', img_transparent)
    key = cv2.waitKey(0)
    if key == ord('s'):
        cv2.imwrite('box.png', img_transparent * 255) 
Example #22
Source File: yolo_Mobilenet.py    From keras-YOLOv3-mobilenet with MIT License
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC    = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps       = vid.get(cv2.CAP_PROP_FPS)
    video_size      = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                        int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = output_path != ""
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:
            # Stop when the stream ends or a frame fails to decode.
            break
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session() 
Example #23
Source File: check_median_blur.py    From DL.EyeSight with GNU General Public License v3.0
def main():
    image = data.astronaut()
    image = eu.imresize_single_image(image, (64, 64))
    print("image shape:", image.shape)
    print("Press any key or wait %d ms to proceed to the next image." % (TIME_PER_STEP,))

    k = [
        1,
        3,
        5,
        7,
        (3, 3),
        (1, 11)
    ]

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("aug", 64*NB_AUGS_PER_IMAGE, 64)
    #cv2.imshow("aug", image[..., ::-1])
    #cv2.waitKey(TIME_PER_STEP)

    for ki in k:
        aug = MedianBlur(k=ki)
        img_aug = [aug.augment_image(image) for _ in range(NB_AUGS_PER_IMAGE)]
        img_aug = np.hstack(img_aug)
        print("dtype", img_aug.dtype, "averages", np.average(img_aug, axis=tuple(range(0, img_aug.ndim-1))))
        #print("dtype", img_aug.dtype, "averages", img_aug.mean(axis=range(1, img_aug.ndim)))

        # title = "k=%s" % (str(ki),)
        # img_aug = ia.draw_text(img_aug, x=5, y=5, text=title)

        cv2.imshow("aug", img_aug[..., ::-1]) # here with rgb2bgr
        cv2.waitKey(TIME_PER_STEP) 
Example #24
Source File: pdf-to-csv-cv.py    From pdf-to-csv-table-extactor with Do What The F*ck You Want To Public License
def show_wait_destroy(winname, img):
    cv2.namedWindow(winname, cv2.WINDOW_NORMAL)
    cv2.imshow(winname, img)
    cv2.resizeWindow(winname, 1000,800)
    cv2.moveWindow(winname, 500, 0)
    cv2.waitKey(0)
    cv2.destroyWindow(winname) 
Example #25
Source File: display.py    From tensorrt_demos with MIT License
def set_display(window_name, full_scrn):
    """Set disply window to either full screen or normal."""
    if full_scrn:
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    else:
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_NORMAL) 
Example #26
Source File: vchat.py    From lan-ichat with ISC License
def run(self):
        print ("VEDIO server starts...")
        while True:
            try:
                self.sock.connect(self.ADDR)
                break
            except:
                time.sleep(3)
                continue
        print ("video server <-> remote server success connected...")
        check = "F"
        check = self.sock.recv(1)
        if check.decode("utf-8") != "S":
            return
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            while len(data) < payload_size:
                data += self.sock.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            while len(data) < msg_size:
                data += self.sock.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            try:
                cv2.imshow('Remote', frame)
            except:
                pass
            if cv2.waitKey(1) & 0xFF == 27:
                break 
Example #27
Source File: vchat.py    From lan-ichat with ISC License
def run(self):
        while True:
            try:
                self.sock.connect(self.ADDR)
                break
            except:
                time.sleep(3)
                continue
        if self.showme:
            cv2.namedWindow('You', cv2.WINDOW_NORMAL)
        print("VEDIO client connected...")
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if self.showme:
                cv2.imshow('You', frame)
                if cv2.waitKey(1) & 0xFF == 27:
                    self.showme = False
                    cv2.destroyWindow('You')
            sframe = cv2.resize(frame, (0,0), fx=self.fx, fy=self.fx)
            data = pickle.dumps(sframe)
            zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
            try:
                self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
            except:
                break
            for i in range(self.interval):
                self.cap.read() 
Example #28
Source File: vchat.py    From lan-ichat with ISC License
def run(self):
        print ("VEDIO client starts...")
        while True:
            try:
                self.sock.connect(self.ADDR)
                break
            except:
                time.sleep(3)
                continue
        print ("video client <-> remote server success connected...")
        check = "F"
        check = self.sock.recv(1)
        if check.decode("utf-8") != "S":
            return
        print ("receive authend")
        #self.cap = cv2.VideoCapture(0)
        self.cap = cv2.VideoCapture("test.mp4")
        if self.showme:
            cv2.namedWindow('You', cv2.WINDOW_NORMAL)
        print ("remote VEDIO client connected...")
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if self.showme:
                cv2.imshow('You', frame)
                if cv2.waitKey(1) & 0xFF == 27:
                    self.showme = False
                    cv2.destroyWindow('You')
            if self.level > 0:
                frame = cv2.resize(frame, (0,0), fx=self.fx, fy=self.fx)
            data = pickle.dumps(frame)
            zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
            try:
                self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
                print("video send ", len(zdata))
            except:
                break
            for i in range(self.interval):
                self.cap.read()