Python cv2.getWindowProperty() Examples
The following are 21 code examples of cv2.getWindowProperty(). You can go to the original project or source file by following the Source File line above each example, or check out all available functions and classes of the cv2 module.
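Nearly every snippet below uses the same pattern: poll a window property (most often cv2.WND_PROP_VISIBLE) inside the display loop and break out when its value shows that the user closed the window. Here is a minimal, self-contained sketch of that pattern; the window name and blank test image are placeholders, not taken from any project below:

import cv2
import numpy as np

WINDOW = 'demo'                                 # placeholder window name
img = np.zeros((240, 320, 3), dtype=np.uint8)   # blank test image

cv2.imshow(WINDOW, img)
while True:
    key = cv2.waitKey(30) & 0xFF
    if key == 27:                               # ESC pressed
        break
    # WND_PROP_VISIBLE drops below 1 once the user clicks the [x] button
    # (reliable on OpenCV 3.x and later).
    if cv2.getWindowProperty(WINDOW, cv2.WND_PROP_VISIBLE) < 1:
        break
cv2.destroyAllWindows()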
Example #1
Source File: facesearch.py From FunUtils with MIT License | 7 votes |
def show(window, img):
    """Shows the image in an OpenCV window, with support for updating
    the image in real time.

    This will simply repeatedly display the image. This makes real-time
    update of the image possible and also lets us handle window close
    events reliably.

    Params:
        window: A Python string, the name of the window in which to show the image.
        img: A numpy array. Image to be shown.
    """
    while True:
        # Will repeatedly show the image in the given window.
        cv2.imshow(window, img)
        k = cv2.waitKey(1) & 0xFF  # Capture the code of the pressed key.
        # Stop the loop when the user clicks on the GUI close button [x].
        if not cv2.getWindowProperty(window, cv2.WND_PROP_VISIBLE):
            print("Operation Cancelled")
            break
        if k == 27:  # Key code for ESC
            break
Example #2
Source File: watershed.py From OpenCV-Python-Tutorial with MIT License | 6 votes |
def run(self):
    while cv2.getWindowProperty('img', 0) != -1 or cv2.getWindowProperty('watershed', 0) != -1:
        ch = cv2.waitKey(50)
        if ch == 27:
            break
        if ch >= ord('1') and ch <= ord('7'):
            self.cur_marker = ch - ord('0')
            print('marker: ', self.cur_marker)
        if ch == ord(' ') or (self.sketch.dirty and self.auto_update):
            self.watershed()
            self.sketch.dirty = False
        if ch in [ord('a'), ord('A')]:
            self.auto_update = not self.auto_update
            print('auto_update is', ['off', 'on'][self.auto_update])
        if ch in [ord('r'), ord('R')]:
            self.markers[:] = 0
            self.markers_vis[:] = self.img
            self.sketch.show()
    cv2.destroyAllWindows()
Example #3
Source File: simple-camera.py From ai-smarthome with BSD 2-Clause "Simplified" License | 6 votes |
def show_camera():
    # To flip the image, modify the flip_method parameter (0 and 2 are the most common)
    print(gstreamer_pipeline(flip_method=0))
    cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    if cap.isOpened():
        window_handle = cv2.namedWindow('CSI Camera', cv2.WINDOW_AUTOSIZE)
        # Window
        while cv2.getWindowProperty('CSI Camera', 0) >= 0:
            ret_val, img = cap.read()
            cv2.imshow('CSI Camera', img)
            # This also acts as
            keyCode = cv2.waitKey(30) & 0xff
            # Stop the program on the ESC key
            if keyCode == 27:
                break
        cap.release()
        cv2.destroyAllWindows()
    else:
        print('Unable to open camera')
Example #4
Source File: image.py From mmcv with Apache License 2.0 | 6 votes |
def imshow(img, win_name='', wait_time=0):
    """Show an image.

    Args:
        img (str or ndarray): The image to be displayed.
        win_name (str): The window name.
        wait_time (int): Value of waitKey param.
    """
    cv2.imshow(win_name, imread(img))
    if wait_time == 0:  # prevent from hanging if the window was closed
        while True:
            ret = cv2.waitKey(1)
            closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1
            # break if the user closed the window or some key was pressed
            if closed or ret != -1:
                break
    else:
        ret = cv2.waitKey(wait_time)
Example #5
Source File: fermodel_example_webcam.py From EmoPy with GNU Affero General Public License v3.0 | 6 votes |
def display_prediction(frame, frameString, fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                       fontScale=1, thickness=2):
    # Display emotion
    retval, _ = cv2.getTextSize(frameString, fontFace, fontScale, thickness)
    cv2.rectangle(frame, (0, 0), (20 + retval[0], 50), (0, 0, 0), -1)
    cv2.putText(frame, frameString, (10, 35), fontFace, fontScale,
                (255, 255, 255), thickness, cv2.LINE_AA)
    window_name = 'EmoPy Assessment'
    cv2.imshow(window_name, frame)
    while True:
        key = cv2.waitKey(1)
        # Press Esc to exit the window
        if key == 27 or cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) < 1:
            break
    # Closes all windows
    cv2.destroyAllWindows()
Example #6
Source File: mnist_calc.py From ncappzoo with MIT License | 6 votes |
def is_window_closed(self):
    """Try to determine if the user closed the window (by clicking the x).

    This may only work with OpenCV 3.x.

    All OpenCV window properties should return -1.0 for windows that are
    closed. If we read a property that has a value < 0, or an exception is
    raised, we assume the window has been closed. We use the aspect ratio
    property, but it could be any property.
    """
    try:
        prop_asp = cv2.getWindowProperty(self._window_name,
                                         cv2.WND_PROP_ASPECT_RATIO)
        if prop_asp < 0.0:
            # the property returned was < 0, so assume the window was closed by the user
            return True
    except Exception:
        return True
    return False
Example #7
Source File: display_video.py From image-processing-pipeline with MIT License | 5 votes |
def map(self, data):
    image = data[self.src]
    cv2.imshow(self.window_name, image)

    # Exit?
    key = cv2.waitKey(1) & 0xFF

    # Esc key pressed or window closed?
    if key == 27 or cv2.getWindowProperty(self.window_name, cv2.WND_PROP_VISIBLE) < 1:
        raise StopIteration

    return data
Example #8
Source File: utils.py From hermit with Apache License 2.0 | 5 votes |
def window_is_open(window_name):
    return (cv2.waitKey(1) and
            cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) == 1.0)
Example #9
Source File: jobs_manual.py From faceswap with GNU General Public License v3.0 | 5 votes |
def window_closed(self, is_windows, is_conda, key):
    """Check whether the window has been closed.

    MS Windows doesn't appear to read the window state property properly,
    so we check for a negative key press.

    Conda (tested on Windows) doesn't appear to read the window state
    property or negative key press properly, so we arbitrarily use
    another property.
    """
    # pylint: disable=no-member
    logger.trace("Commencing closed window check")
    closed = False
    prop_autosize = cv2.getWindowProperty('Frame', cv2.WND_PROP_AUTOSIZE)
    prop_visible = cv2.getWindowProperty('Frame', cv2.WND_PROP_VISIBLE)
    if self.arguments.disable_monitor:
        closed = False
    elif is_conda and prop_autosize < 1:
        closed = True
    elif is_windows and not is_conda and key == -1:
        closed = True
    elif not is_windows and not is_conda and prop_visible < 1:
        closed = True
    logger.trace("Completed closed window check. Closed is %s", closed)
    if closed:
        logger.debug("Window closed detected")
    return closed
Example #10
Source File: tx2_surveillance.py From homesecurity with MIT License | 5 votes |
def loop_and_detect(stream_handler, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
        stream_handler: the stream handler object.
        tf_sess: TensorFlow/TensorRT session to run SSD object detection.
        conf_th: confidence/score threshold for object detection.
        vis: for visualization.
        od_type: type of the object detection model.
    """
    show_fps = True
    full_scrn = True
    fps = 0.0
    tic = time.time()
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the display window.
            # If yes, terminate the while loop.
            break

        img = stream_handler.read_streams()
        box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
        cls -= 1
        img = vis.draw_bboxes(img, box, conf, cls)
        if show_fps:
            img = draw_help_and_fps(img, fps)
        cv2.imshow(WINDOW_NAME, img)
        toc = time.time()
        curr_fps = 1.0 / (toc - tic)
        # calculate an exponentially decaying average of fps number
        fps = curr_fps if fps == 0.0 else (fps*0.9 + curr_fps*0.1)
        tic = toc

        key = cv2.waitKey(1)
        if key == ord('q') or key == ord('Q'):  # q key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # Toggle help/fps
            show_fps = not show_fps
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_full_screen(full_scrn)
Example #11
Source File: trt_mtcnn.py From tensorrt_demos with MIT License | 5 votes |
def loop_and_detect(cam, mtcnn, minsize):
    """Continuously capture images from camera and do face detection."""
    full_scrn = False
    fps = 0.0
    tic = time.time()
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            break
        img = cam.read()
        if img is not None:
            dets, landmarks = mtcnn.detect(img, minsize=minsize)
            print('{} face(s) found'.format(len(dets)))
            img = show_faces(img, dets, landmarks)
            img = show_fps(img, fps)
            cv2.imshow(WINDOW_NAME, img)
            toc = time.time()
            curr_fps = 1.0 / (toc - tic)
            # calculate an exponentially decaying average of fps number
            fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)
            tic = toc
        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_display(WINDOW_NAME, full_scrn)
Example #12
Source File: trt_ssd.py From tensorrt_demos with MIT License | 5 votes |
def loop_and_detect(cam, trt_ssd, conf_th, vis):
    """Continuously capture images from camera and do object detection.

    # Arguments
        cam: the camera instance (video source).
        trt_ssd: the TRT SSD object detector instance.
        conf_th: confidence/score threshold for object detection.
        vis: for visualization.
    """
    full_scrn = False
    fps = 0.0
    tic = time.time()
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            break
        img = cam.read()
        if img is not None:
            boxes, confs, clss = trt_ssd.detect(img, conf_th)
            img = vis.draw_bboxes(img, boxes, confs, clss)
            img = show_fps(img, fps)
            cv2.imshow(WINDOW_NAME, img)
            toc = time.time()
            curr_fps = 1.0 / (toc - tic)
            # calculate an exponentially decaying average of fps number
            fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)
            tic = toc
        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_display(WINDOW_NAME, full_scrn)
Example #13
Source File: trt_googlenet_async.py From tensorrt_demos with MIT License | 5 votes |
def loop_and_display(condition):
    """Continuously capture images from camera and do classification."""
    global s_img, s_probs, s_labels

    show_help = True  # toggle for the on-screen help message
    full_scrn = False
    fps = 0.0
    tic = time.time()
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            break
        with condition:
            condition.wait()
            img, top_probs, top_labels = s_img, s_probs, s_labels
        show_top_preds(img, top_probs, top_labels)
        img = show_fps(img, fps)
        cv2.imshow(WINDOW_NAME, img)
        toc = time.time()
        curr_fps = 1.0 / (toc - tic)
        # calculate an exponentially decaying average of fps number
        fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)
        tic = toc
        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # Toggle help message
            show_help = not show_help
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_display(WINDOW_NAME, full_scrn)
Example #14
Source File: trt_yolov3.py From tensorrt_demos with MIT License | 5 votes |
def loop_and_detect(cam, trt_yolov3, conf_th, vis):
    """Continuously capture images from camera and do object detection.

    # Arguments
        cam: the camera instance (video source).
        trt_yolov3: the TRT YOLOv3 object detector instance.
        conf_th: confidence/score threshold for object detection.
        vis: for visualization.
    """
    full_scrn = False
    fps = 0.0
    tic = time.time()
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            break
        img = cam.read()
        if img is not None:
            boxes, confs, clss = trt_yolov3.detect(img, conf_th)
            img = vis.draw_bboxes(img, boxes, confs, clss)
            img = show_fps(img, fps)
            cv2.imshow(WINDOW_NAME, img)
            toc = time.time()
            curr_fps = 1.0 / (toc - tic)
            # calculate an exponentially decaying average of fps number
            fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)
            tic = toc
        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_display(WINDOW_NAME, full_scrn)
Example #15
Source File: trt_ssd_async.py From tensorrt_demos with MIT License | 5 votes |
def loop_and_display(condition, vis):
    """Take detection results from the child thread and display.

    # Arguments
        condition: the condition variable for synchronization with
                   the child thread.
        vis: for visualization.
    """
    global s_img, s_boxes, s_confs, s_clss

    full_scrn = False
    fps = 0.0
    tic = time.time()
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            break
        with condition:
            # Wait for the next frame and detection result. When
            # getting the signal from the child thread, save the
            # references to the frame and detection result for
            # display.
            condition.wait()
            img, boxes, confs, clss = s_img, s_boxes, s_confs, s_clss
        img = vis.draw_bboxes(img, boxes, confs, clss)
        img = show_fps(img, fps)
        cv2.imshow(WINDOW_NAME, img)
        toc = time.time()
        curr_fps = 1.0 / (toc - tic)
        # calculate an exponentially decaying average of fps number
        fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)
        tic = toc
        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_display(WINDOW_NAME, full_scrn)
Example #16
Source File: display_video.py From detectron2-pipeline with MIT License | 5 votes |
def map(self, data):
    image = data[self.src]
    cv2.imshow(self.window_name, image)

    # Exit?
    key = cv2.waitKey(1) & 0xFF

    # Esc key pressed or window closed?
    if key == 27 or cv2.getWindowProperty(self.window_name, cv2.WND_PROP_VISIBLE) < 1:
        raise StopIteration

    return data
Example #17
Source File: fer_demo.py From Efficient-Facial-Feature-Learning-with-Wide-Ensemble-based-Convolutional-Neural-Networks with MIT License | 5 votes |
def is_running(self):
    return ((cv2.waitKey(1) != 27) and
            (cv2.getWindowProperty(self._window_name, cv2.WND_PROP_VISIBLE) >= 1))
Example #18
Source File: xavier_surveillance.py From homesecurity with MIT License | 4 votes |
def loop_and_detect(stream_handler, conf_th):
    """Loop, grab images from camera, and do object detection.

    # Arguments
        stream_handler: the stream handler object.
        conf_th: confidence/score threshold for object detection.
    """
    show_fps = True
    full_scrn = True
    fps = 0.0
    netMain = darknet.load_net_custom(configPath.encode("ascii"),
                                      weightPath.encode("ascii"), 0, 1)  # batch size = 1
    metaMain = darknet.load_meta(metaPath.encode("ascii"))
    tic = time.time()
    img = stream_handler.read_streams()
    darknet_image = darknet.make_image(darknet.network_width(netMain),
                                       darknet.network_height(netMain), 3)
    scale = (float(img.shape[1]) / darknet.network_width(netMain),
             float(img.shape[0]) / darknet.network_height(netMain))
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the display window.
            # If yes, terminate the while loop.
            break

        frame_read = stream_handler.read_streams()
        frame_rgb = frame_read[:, :, ::-1]
        frame_resized = cv2.resize(frame_rgb,
                                   (darknet.network_width(netMain),
                                    darknet.network_height(netMain)),
                                   interpolation=cv2.INTER_LINEAR)
        darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())
        detections = darknet.detect_image(netMain, metaMain, darknet_image,
                                          thresh=conf_th)
        img = cvDrawBoxes(detections, frame_read, scale)
        if show_fps:
            img = draw_help_and_fps(img, fps)
        cv2.imshow(WINDOW_NAME, img)
        toc = time.time()
        curr_fps = 1.0 / (toc - tic)
        # calculate an exponentially decaying average of fps number
        fps = curr_fps if fps == 0.0 else (fps*0.9 + curr_fps*0.1)
        tic = toc

        key = cv2.waitKey(1)
        if key == ord('q') or key == ord('Q'):  # q key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # Toggle help/fps
            show_fps = not show_fps
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_full_screen(full_scrn)
Example #19
Source File: facenet.py From ncappzoo with MIT License | 4 votes |
def run_images(valid_output, validated_image_filename, facenet_exec_net,
               input_image_filename_list, input_blob, output_blob):
    print("------------------ " + YELLOW + "Facenet" + NOCOLOR + " ------------------\n")
    print(" - Face Match Threshold: " + YELLOW + str(FACE_MATCH_THRESHOLD) + NOCOLOR)
    print(" - Valid image: " + YELLOW + validated_image_filename + NOCOLOR)
    print(" - Test images: " + YELLOW + TEST_IMAGES_DIR + str(input_image_filename_list) + NOCOLOR)
    print("\n---------------------------------------------\n")
    cv2.namedWindow(CV_WINDOW_NAME)
    for input_image_file in input_image_filename_list:
        # read one of the images to run an inference on from the disk
        infer_image = cv2.imread(TEST_IMAGES_DIR + input_image_file)
        if infer_image is None:
            print("Cannot read image.")
            exit(1)

        # run a single inference on the image and overwrite the
        # boxes and labels
        preprocessed_image = preprocess_image(infer_image)
        test_output = run_inference(preprocessed_image, facenet_exec_net,
                                    input_blob, output_blob)

        # scale the faces so that we can display a large enough image in the window
        infer_image_h = infer_image.shape[0]
        infer_image_w = infer_image.shape[1]
        # h to w ratio of original image
        h_w_ratio = infer_image_h / infer_image_w
        # calculate new h and w
        new_infer_image_w = 300
        new_infer_image_h = int(new_infer_image_w * h_w_ratio)
        # resize for better viewing
        infer_image = cv2.resize(infer_image, (new_infer_image_w, new_infer_image_h))

        # Test the inference results of this image with the results
        # from the known valid face.
        if face_match(valid_output, test_output):
            matching = True
            text_color = (0, 255, 0)
            match_text = "MATCH"
            print(GREEN + ' PASS! File ' + input_image_file + ' matches '
                  + validated_image_filename + "\n" + NOCOLOR)
        else:
            matching = False
            match_text = "NOT A MATCH"
            text_color = (0, 0, 255)
            print(RED + ' FAIL! File ' + input_image_file + ' does not match '
                  + validated_image_filename + "\n" + NOCOLOR)

        overlay_on_image(infer_image, input_image_file, matching)
        cv2.putText(infer_image, match_text + " - Hit key for next.", (30, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 1)

        # check if the window is visible; this means the user hasn't closed
        # the window via the X button
        prop_val = cv2.getWindowProperty(CV_WINDOW_NAME, cv2.WND_PROP_ASPECT_RATIO)
        if prop_val < 0.0:
            print('window closed')
            break

        # display the results and wait for the user to hit a key
        cv2.imshow(CV_WINDOW_NAME, infer_image)
        cv2.waitKey(0)
Example #20
Source File: tiny_yolo_v1.py From ncappzoo with MIT License | 4 votes |
def display_objects_in_gui(source_image, filtered_objects, network_input_w, network_input_h):
    # copy image so we can draw on it. Could just draw directly on source
    # image if not concerned about that.
    display_image = source_image.copy()
    source_image_width = source_image.shape[1]
    source_image_height = source_image.shape[0]

    x_ratio = float(source_image_width) / network_input_w
    y_ratio = float(source_image_height) / network_input_h

    # loop through each box and draw it on the image along with a classification label
    print('\n Found this many objects in the image: ' + str(len(filtered_objects)))
    for obj_index in range(len(filtered_objects)):
        center_x = int(filtered_objects[obj_index][1] * x_ratio)
        center_y = int(filtered_objects[obj_index][2] * y_ratio)
        half_width = int(filtered_objects[obj_index][3] * x_ratio) // 2
        half_height = int(filtered_objects[obj_index][4] * y_ratio) // 2

        # calculate box (left, top) and (right, bottom) coordinates
        box_left = max(center_x - half_width, 0)
        box_top = max(center_y - half_height, 0)
        box_right = min(center_x + half_width, source_image_width)
        box_bottom = min(center_y + half_height, source_image_height)

        print(' - object: ' + YELLOW + str(filtered_objects[obj_index][0]) + NOCOLOR
              + ' is at left: ' + str(box_left) + ', top: ' + str(box_top)
              + ', right: ' + str(box_right) + ', bottom: ' + str(box_bottom))

        # draw the rectangle on the image. This is hopefully around the object
        box_color = (0, 255, 0)  # green box
        box_thickness = 2
        cv2.rectangle(display_image, (box_left, box_top), (box_right, box_bottom),
                      box_color, box_thickness)

        # draw the classification label string just above and to the left of the rectangle
        label_background_color = (70, 120, 70)  # greyish green background for text
        label_text_color = (255, 255, 255)  # white text
        cv2.rectangle(display_image, (box_left, box_top + 20), (box_right, box_top),
                      label_background_color, -1)
        cv2.putText(display_image,
                    filtered_objects[obj_index][0] + ' : %.2f' % filtered_objects[obj_index][5],
                    (box_left + 5, box_top + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    label_text_color, 1)

    window_name = 'TinyYolo (hit key to exit)'
    cv2.imshow(window_name, display_image)
    cv2.moveWindow(window_name, 10, 10)

    while True:
        raw_key = cv2.waitKey(1)
        # check if the window is visible; this means the user hasn't closed
        # the window via the X button (may only work with OpenCV 3.x)
        prop_val = cv2.getWindowProperty(window_name, cv2.WND_PROP_ASPECT_RATIO)
        if (raw_key != -1) or (prop_val < 0.0):
            # the user hit a key or closed the window (in that order)
            break
Example #21
Source File: train.py From cat-bbs with MIT License | 4 votes |
def update_window(win, inputs, outputs_gt, model):
    """Show true and generated outputs/heatmaps for example images."""
    model.eval()

    # prepare inputs and forward through network
    inputs, outputs_gt = torch.from_numpy(inputs), torch.from_numpy(outputs_gt)
    inputs, outputs_gt = Variable(inputs), Variable(outputs_gt)
    if GPU >= 0:
        inputs = inputs.cuda(GPU)
        outputs_gt = outputs_gt.cuda(GPU)
    outputs_pred = model(inputs)

    # draw rows of resulting image
    rows = []
    for i in range(inputs.size()[0]):
        # image, ground truth outputs, predicted outputs
        img_np = (inputs[i].cpu().data.numpy() * 255).astype(np.uint8).transpose(1, 2, 0)
        hm_gt_np = outputs_gt[i].cpu().data.numpy()
        hm_pred_np = outputs_pred[i].cpu().data.numpy()

        # per image:
        # first row: ground truth outputs,
        # second row: predicted outputs
        # each row starts with the input image, followed by heatmap images
        row_truth = [img_np] + [draw_heatmap(img_np, np.squeeze(hm_gt_np[hm_idx]), alpha=0.5)
                                for hm_idx in range(hm_gt_np.shape[0])]
        row_pred = [img_np] + [draw_heatmap(img_np, np.squeeze(hm_pred_np[hm_idx]), alpha=0.5)
                               for hm_idx in range(hm_pred_np.shape[0])]
        rows.append(np.hstack(row_truth))
        rows.append(np.hstack(row_pred))
    grid = np.vstack(rows)

    if SHOW_DEBUG_WINDOWS:
        # show grid in opencv window
        if cv2.getWindowProperty(win, 0) == -1:
            cv2.namedWindow(win, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(win, 1200, 600)
            time.sleep(1)
        cv2.imshow(win, grid.astype(np.uint8)[:, :, ::-1])
        cv2.waitKey(10)
    else:
        # save grid to file
        misc.imsave("window_%s.jpg" % (win,), grid.astype(np.uint8))