Python cv2.resizeWindow() Examples
The following are 30 code examples of cv2.resizeWindow(), drawn from open source projects. The project and source file for each example are listed above its code. You may also want to check out the other available functions and classes of the cv2 module.
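Before the project examples, here is a minimal sketch of the basic pattern. Note that cv2.resizeWindow() only takes effect on a window that already exists and is resizable, i.e. one created with cv2.WINDOW_NORMAL rather than the default cv2.WINDOW_AUTOSIZE. The file name below is a placeholder.

import cv2

img = cv2.imread("example.jpg")  # placeholder path
# The window must be created without WINDOW_AUTOSIZE, or resizeWindow has no effect.
cv2.namedWindow("preview", cv2.WINDOW_NORMAL)
cv2.resizeWindow("preview", 800, 600)  # width, height in pixels
cv2.imshow("preview", img)
cv2.waitKey(0)
cv2.destroyAllWindows()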
Example #1
Source File: yolov3_deepsort.py From deep_sort_pytorch with MIT License
def __init__(self, cfg, args, video_path):
    self.cfg = cfg
    self.args = args
    self.video_path = video_path
    self.logger = get_logger("root")

    use_cuda = args.use_cuda and torch.cuda.is_available()
    if not use_cuda:
        warnings.warn("Running in cpu mode which may be very slow!", UserWarning)

    if args.display:
        cv2.namedWindow("test", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("test", args.display_width, args.display_height)

    if args.cam != -1:
        print("Using webcam " + str(args.cam))
        self.vdo = cv2.VideoCapture(args.cam)
    else:
        self.vdo = cv2.VideoCapture()
    self.detector = build_detector(cfg, use_cuda=use_cuda)
    self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
    self.class_names = self.detector.class_names
Example #2
Source File: vis_utils.py From PVN3D with MIT License
def cv2_show_image(window_name, image, size_wh=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name: str window name
        image: ndarray image to display
        size_wh: window size (w, h)
        location_xy: window location (x, y)
    """
    if size_wh is not None:
        cv2.namedWindow(window_name,
                        cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image)
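A hypothetical call to this helper, placing a 640x480 view at the top-left corner of the screen; the window name and placeholder image below are illustrative, not from PVN3D.

import cv2
import numpy as np

dummy = np.zeros((480, 640, 3), np.uint8)  # placeholder image
cv2_show_image("depth_map", dummy, size_wh=(640, 480), location_xy=(0, 0))
cv2.waitKey(0)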
Example #3
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0
def showimg(img):
    cv2.namedWindow("contours", 0)
    cv2.resizeWindow("contours", 1280, 720)
    cv2.imshow("contours", img)
    cv2.waitKey()

# psm model:
#  0    Orientation and script detection (OSD) only.
#  1    Automatic page segmentation with OSD.
#  2    Automatic page segmentation, but no OSD, or OCR.
#  3    Fully automatic page segmentation, but no OSD. (Default)
#  4    Assume a single column of text of variable sizes.
#  5    Assume a single uniform block of vertically aligned text.
#  6    Assume a single uniform block of text.
#  7    Treat the image as a single text line.
#  8    Treat the image as a single word.
#  9    Treat the image as a single word in a circle.
# 10    Treat the image as a single character.
# 11    Sparse text. Find as much text as possible in no particular order.
# 12    Sparse text with OSD.
# 13    Raw line. Treat the image as a single text line,
#       bypassing hacks that are Tesseract-specific.
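The psm table above corresponds to Tesseract's --psm page-segmentation flag. As an illustration only (assuming pytesseract is installed; the file name is a placeholder), selecting mode 7 to read a single text line might look like:

import cv2
import pytesseract

img = cv2.imread("idcard.png")  # placeholder path
# --psm 7: treat the image as a single text line (see the table above).
text = pytesseract.image_to_string(img, config="--psm 7")
print(text)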
Example #4
Source File: test.py From pytorch_pose_proposal_networks with MIT License
def draw_limb(img, persons):
    overlay = img[0].copy()
    for p in persons:
        for j in range(1, 16):
            if p[j][0] == -1 or p[j][1] == -1:
                continue
            cv2.circle(overlay, (int(p[j][0]), int(p[j][1])), 3,
                       colors[j-1], -1, cv2.LINE_AA)
    for p in persons:
        for j in range(14):
            j1, j2 = p[limbs1[j]], p[limbs2[j]]
            if (j1 == -1).any() or (j2 == -1).any():
                continue
            cv2.line(overlay, (int(j1[0]), int(j1[1])),
                     (int(j2[0]), int(j2[1])), colors[j], 2, cv2.LINE_AA)

    img_dst = cv2.addWeighted(overlay, alpha, img[0], 1-alpha, 0)[:, :, ::-1]
    cv2.namedWindow('persons', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('persons', 600, 600)
    cv2.imshow('persons', img_dst)
    key = cv2.waitKey(0)
    if key == ord('s'):
        cv2.imwrite('persons.png', img_dst * 255)
Example #5
Source File: test.py From pytorch_pose_proposal_networks with MIT License
def draw_box(img, joints):
    overlay = img[0].copy()
    for i in range(1, 16):
        if joints[i]:
            for j in range(len(joints[i][0])):
                box = joints[i][0][j]
                tl_x, tl_y, br_x, br_y = int(box[2] - 0.5 * box[4]), int(box[3] - 0.5 * box[5]), \
                                         int(box[2] + 0.5 * box[4]), int(box[3] + 0.5 * box[5])
                cv2.rectangle(overlay, (tl_x, tl_y), (br_x, br_y), colors[i-1], -1)

    img_transparent = cv2.addWeighted(overlay, alpha, img[0], 1 - alpha, 0)[:, :, ::-1]
    img_transparent[:, ::cfg.CELL_SIZE, :] = np.array([1., 1, 1])
    img_transparent[::cfg.CELL_SIZE, :, :] = np.array([1., 1, 1])
    cv2.namedWindow('box', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('box', 600, 600)
    cv2.imshow('box', img_transparent)
    key = cv2.waitKey(0)
    if key == ord('s'):
        cv2.imwrite('box.png', img_transparent * 255)
Example #6
Source File: augment.py From Real-time-Text-Detection with Apache License 2.0
def show_pic(img, bboxes=None, name='pic'):
    '''
    Args:
        img: image array
        bboxes: list of all bounding boxes in the image,
                formatted as [[x_min, y_min, x_max, y_max], ...]
        names: the name corresponding to each box
    '''
    show_img = img.copy()
    if not isinstance(bboxes, np.ndarray):
        bboxes = np.array(bboxes)
    for point in bboxes.astype(int):  # np.int is deprecated; use the builtin int
        cv2.line(show_img, tuple(point[0]), tuple(point[1]), (255, 0, 0), 2)
        cv2.line(show_img, tuple(point[1]), tuple(point[2]), (255, 0, 0), 2)
        cv2.line(show_img, tuple(point[2]), tuple(point[3]), (255, 0, 0), 2)
        cv2.line(show_img, tuple(point[3]), tuple(point[0]), (255, 0, 0), 2)
    # cv2.namedWindow(name, 0)  # 1 would show the image at its original size
    # cv2.moveWindow(name, 0, 0)
    # cv2.resizeWindow(name, 1200, 800)  # size of the visualization window
    cv2.imshow(name, show_img)  # images are expected to be read with cv2
Example #7
Source File: detector.py From deep_sort_pytorch with MIT License
def demo():
    import os
    from vizer.draw import draw_boxes

    yolo = YOLOv3("cfg/yolo_v3.cfg", "weight/yolov3.weights", "cfg/coco.names")
    print("yolo.size =", yolo.size)
    root = "./demo"
    resdir = os.path.join(root, "results")
    os.makedirs(resdir, exist_ok=True)
    files = [os.path.join(root, file) for file in os.listdir(root) if file.endswith('.jpg')]
    files.sort()
    for filename in files:
        img = cv2.imread(filename)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        bbox, cls_conf, cls_ids = yolo(img)

        if bbox is not None:
            img = draw_boxes(img, bbox, cls_ids, cls_conf, class_name_map=yolo.class_names)
        # save results
        cv2.imwrite(os.path.join(resdir, os.path.basename(filename)), img[:, :, (2, 1, 0)])
        # imshow
        # cv2.namedWindow("yolo", cv2.WINDOW_NORMAL)
        # cv2.resizeWindow("yolo", 600, 600)
        # cv2.imshow("yolo", res[:, :, (2, 1, 0)])
        # cv2.waitKey(0)
Example #8
Source File: vis_utils.py From ip_basic with MIT License
def cv2_show_image(window_name, image, size_wh=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name: str window name
        image: ndarray image to display
        size_wh: window size (w, h)
        location_xy: window location (x, y)
    """
    if size_wh is not None:
        cv2.namedWindow(window_name,
                        cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image)
Example #9
Source File: facifier.py From facifier with MIT License
def analyze_picture(model_emotion, model_gender, path, window_size, window_name='static'):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    image = cv2.imread(path, 1)
    for normalized_face, (x, y, w, h) in find_faces(image):
        emotion_prediction = model_emotion.predict(normalized_face)
        gender_prediction = model_gender.predict(normalized_face)
        if (gender_prediction[0] == 0):
            cv2.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 2)
        else:
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
        cv2.putText(image, emotions[emotion_prediction[0]], (x, y-10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)

    cv2.imshow(window_name, image)
    key = cv2.waitKey(0)
    if key == ESC:
        cv2.destroyWindow(window_name)
Example #10
Source File: train.py From keras-image-segmentation with MIT License
def train_generator(self, image_generator, mask_generator):
    # cv2.namedWindow('show', 0)
    # cv2.resizeWindow('show', 1280, 640)
    while True:
        image = next(image_generator)
        mask = next(mask_generator)
        label = self.make_regressor_label(mask).astype(np.float32)
        # print(image.dtype, label.dtype)
        # print(image.shape, label.shape)
        # exit()
        # cv2.imshow('show', image[0].astype(np.uint8))
        # cv2.imshow('label', label[0].astype(np.uint8))
        # mask = self.select_labels(mask)
        # print(image.shape)
        # print(mask.shape)
        # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        # mask = (mask.astype(np.float32)*255/33).astype(np.uint8)
        # mask_color = cv2.applyColorMap(mask, cv2.COLORMAP_JET)
        # print(mask_color.shape)
        # show = cv2.addWeighted(image, 0.5, mask_color, 0.5, 0.0)
        # cv2.imshow("show", show)
        # key = cv2.waitKey()
        # if key == 27:
        #     exit()
        yield (image, label)
Example #11
Source File: enjoy_latent.py From srl-zoo with MIT License
def createFigureAndSlider(name, state_dim):
    """
    Creates a window for the latent space visualization,
    and another one for the sliders that control it.
    :param name: name of model (str)
    :param state_dim: (int)
    """
    # opencv gui setup
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, 500, 500)
    cv2.namedWindow('slider for ' + name)
    # add a slider for each component of the latent space
    for i in range(state_dim):
        # the sliders MUST be between 0 and max, so we place max at 100 and start at 50,
        # so that when we subtract 50 and divide by 10 we get [-5, 5] for each component
        cv2.createTrackbar(str(i), 'slider for ' + name, 50, 100, (lambda a: None))
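Reading the sliders back and inverting the [0, 100] to [-5, 5] mapping described in the comment might look like the following sketch; read_latent_state is a hypothetical helper, not part of srl-zoo.

import numpy as np
import cv2

def read_latent_state(name, state_dim):
    # Map each trackbar position in [0, 100] back to a latent value in [-5, 5].
    return np.array([
        (cv2.getTrackbarPos(str(i), 'slider for ' + name) - 50) / 10.0
        for i in range(state_dim)
    ])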
Example #12
Source File: getStereoImages.py From stereoDepth with Apache License 2.0
def main():
    global i
    if len(sys.argv) < 3:
        print("Usage: ./program_name directory_to_save start_index")
        sys.exit(1)
    i = int(sys.argv[2])  # Get the start number.

    while True:
        # Grab and retrieve separately to keep the two cameras in sync.
        if not (capL.grab() and capR.grab()):
            print("No more frames")
            break
        _, leftFrame = capL.retrieve()
        _, rightFrame = capR.retrieve()

        # Use these if you need high resolution. If you set the camera for
        # high res, you can pass these.
        # cv2.namedWindow('capL', cv2.WINDOW_NORMAL)
        # cv2.resizeWindow('capL', 1024, 768)
        # cv2.namedWindow('capR', cv2.WINDOW_NORMAL)
        # cv2.resizeWindow('capR', 1024, 768)

        cv2.imshow('capL', leftFrame)
        cv2.imshow('capR', rightFrame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('c'):
            cv2.imwrite(sys.argv[1] + "/left" + str(i) + ".png", leftFrame)
            cv2.imwrite(sys.argv[1] + "/right" + str(i) + ".png", rightFrame)
            i += 1

    capL.release()
    capR.release()
    cv2.destroyAllWindows()
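capL and capR are module-level globals that the original script opens elsewhere; a minimal sketch of the assumed setup follows (the device indices 0 and 1 are guesses, not taken from stereoDepth).

import sys
import cv2

capL = cv2.VideoCapture(0)  # left camera (assumed device index)
capR = cv2.VideoCapture(1)  # right camera (assumed device index)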
Example #13
Source File: GeometricAugmenters_test.py From impy with Apache License 2.0
def test_horizontal_flip(self):
    if (self.visualize):
        frame = self.augmenter.horizontalFlip(frame=self.frame)
        cv2.namedWindow("__horizontalFlip__", 0)
        # cv2.resizeWindow("__horizontalFlip__", self.windowSize)
        cv2.imshow("__horizontalFlip__", frame)
        cv2.waitKey(self.waitTime)
        cv2.destroyAllWindows()
Example #14
Source File: findidcard.py From idcardocr with GNU General Public License v3.0
def showimg(self, img):
    cv2.namedWindow("contours", 0)
    # cv2.resizeWindow("contours", 1600, 1200)
    cv2.imshow("contours", img)
    cv2.waitKey()
Example #15
Source File: facifier.py From facifier with MIT License
def start_webcam(model_emotion, model_gender, window_size, window_name='live', update_time=50):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    video_feed = cv2.VideoCapture(0)
    video_feed.set(3, width)
    video_feed.set(4, height)
    read_value, webcam_image = video_feed.read()

    delay = 0
    init = True
    while read_value:
        read_value, webcam_image = video_feed.read()
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            if init or delay == 0:
                init = False
                emotion_prediction = model_emotion.predict(normalized_face)
                gender_prediction = model_gender.predict(normalized_face)
            if (gender_prediction[0] == 0):
                cv2.rectangle(webcam_image, (x, y), (x+w, y+h), (0, 0, 255), 2)
            else:
                cv2.rectangle(webcam_image, (x, y), (x+w, y+h), (255, 0, 0), 2)
            cv2.putText(webcam_image, emotions[emotion_prediction[0]], (x, y-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)
        delay += 1
        delay %= 20
        cv2.imshow(window_name, webcam_image)
        key = cv2.waitKey(update_time)
        if key == ESC:
            break

    cv2.destroyWindow(window_name)
Example #16
Source File: check_average_blur.py From ViolenceDetection with Apache License 2.0
def main():
    image = data.astronaut()
    image = ia.imresize_single_image(image, (64, 64))
    print("image shape:", image.shape)
    print("Press any key or wait %d ms to proceed to the next image." % (TIME_PER_STEP,))

    k = [
        1,
        2,
        4,
        8,
        16,
        (8, 8),
        (1, 8),
        ((1, 1), (8, 8)),
        ((1, 16), (1, 16)),
        ((1, 16), 1)
    ]

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("aug", 64*NB_AUGS_PER_IMAGE, 64)
    # cv2.imshow("aug", image[..., ::-1])
    # cv2.waitKey(TIME_PER_STEP)

    for ki in k:
        aug = iaa.AverageBlur(k=ki)
        img_aug = [aug.augment_image(image) for _ in range(NB_AUGS_PER_IMAGE)]
        img_aug = np.hstack(img_aug)
        print("dtype", img_aug.dtype, "averages",
              np.average(img_aug, axis=tuple(range(0, img_aug.ndim-1))))
        # print("dtype", img_aug.dtype, "averages", img_aug.mean(axis=range(1, img_aug.ndim)))

        title = "k=%s" % (str(ki),)
        img_aug = ia.draw_text(img_aug, x=5, y=5, text=title)

        cv2.imshow("aug", img_aug[..., ::-1])  # here with rgb2bgr
        cv2.waitKey(TIME_PER_STEP)
Example #17
Source File: check_median_blur.py From ViolenceDetection with Apache License 2.0
def main():
    image = data.astronaut()
    image = ia.imresize_single_image(image, (64, 64))
    print("image shape:", image.shape)
    print("Press any key or wait %d ms to proceed to the next image." % (TIME_PER_STEP,))

    k = [
        1,
        3,
        5,
        7,
        (3, 3),
        (1, 11)
    ]

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("aug", 64*NB_AUGS_PER_IMAGE, 64)
    # cv2.imshow("aug", image[..., ::-1])
    # cv2.waitKey(TIME_PER_STEP)

    for ki in k:
        aug = iaa.MedianBlur(k=ki)
        img_aug = [aug.augment_image(image) for _ in range(NB_AUGS_PER_IMAGE)]
        img_aug = np.hstack(img_aug)
        print("dtype", img_aug.dtype, "averages",
              np.average(img_aug, axis=tuple(range(0, img_aug.ndim-1))))
        # print("dtype", img_aug.dtype, "averages", img_aug.mean(axis=range(1, img_aug.ndim)))

        title = "k=%s" % (str(ki),)
        img_aug = ia.draw_text(img_aug, x=5, y=5, text=title)

        cv2.imshow("aug", img_aug[..., ::-1])  # here with rgb2bgr
        cv2.waitKey(TIME_PER_STEP)
Example #18
Source File: demo.py From hrnet with MIT License
def main():
    args = parse_args()
    update_config(cfg, args)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    ########## load the human detector model
    from lib.detector.yolo.human_detector import load_model as yolo_model
    human_model = yolo_model()

    from lib.detector.yolo.human_detector import human_bbox_get as yolo_det
    bboxs, scores = yolo_det(args.img_input, human_model, confidence=0.5)  # bboxes (N, 4) [x0, y0, x1, y1]

    # bbox is coordinate location
    inputs, origin_img, center, scale = PreProcess(args.img_input, bboxs, scores, cfg)

    # load MODEL
    model = model_load(cfg)

    with torch.no_grad():
        # compute output heatmap
        # inputs = inputs[:, [2, 1, 0]]
        # inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB)
        output = model(inputs)
        # compute coordinate
        preds, maxvals = get_final_preds(
            cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))

    image = plot_keypoint(origin_img, preds, maxvals, 0.3)
    cv2.imwrite(args.img_output, image)
    if args.display:
        cv2.namedWindow("enhanced", cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow("enhanced", 960, 480)
        cv2.imshow('enhanced', image)
        cv2.waitKey(5000)
Example #19
Source File: demo_mmd.py From hrnet with MIT License
def main():
    args = parse_args()
    update_config(cfg, args)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    ########## load the human detector model
    from lib.detector.mmdetection.high_api import load_model
    human_model = load_model()

    from lib.detector.mmdetection.high_api import human_boxes_get as mmd_detector
    bboxs, scores = mmd_detector(human_model, args.img_input)  # bboxes (N, 4) [x0, y0, x1, y1]

    # bbox is coordinate location
    inputs, origin_img, center, scale = PreProcess(args.img_input, bboxs, scores, cfg)

    # load HRNET MODEL
    model = model_load(cfg)

    with torch.no_grad():
        # compute output heatmap
        # inputs = inputs[:, [2, 1, 0]]
        # inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB)
        output = model(inputs)
        # compute coordinate
        preds, maxvals = get_final_preds(
            cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))

    image = plot_keypoint(origin_img, preds, maxvals, 0.3)
    cv2.imwrite(args.img_output, image)
    if args.display:
        cv2.namedWindow("enhanced", cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow("enhanced", 960, 480)
        cv2.imshow('enhanced', image)
        cv2.waitKey(5000)
Example #20
Source File: vis_utils.py From monopsr with MIT License
def cv2_imshow(window_name, image, size_wh=None, row_col=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name (string): Window title
        image: image to display
        size_wh: resize window
            Recommended sizes for 1920x1080 screen:
                2 col: (930, 280)
                3 col: (620, 187)
                4 col: (465, 140)
        row_col: Row and column to show images like subplots
        location_xy: location of window
    """
    if size_wh is not None:
        cv2.namedWindow(window_name,
                        cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE | cv2.WINDOW_GUI_NORMAL)

    if row_col is not None:
        start_x_offset = 60
        start_y_offset = 25
        y_offset = 28

        subplot_row = row_col[0]
        subplot_col = row_col[1]
        location_xy = (start_x_offset + subplot_col * size_wh[0],
                       start_y_offset + subplot_row * size_wh[1] + subplot_row * y_offset)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image)
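A hypothetical call laying out three debug views as a 1x3 grid, using the recommended 3-column size from the docstring; the window names and placeholder images are illustrative, not from monopsr.

import cv2
import numpy as np

# Placeholder images standing in for real debug outputs.
rgb = np.zeros((187, 620, 3), np.uint8)
depth = np.zeros((187, 620, 3), np.uint8)
mask = np.zeros((187, 620, 3), np.uint8)

cv2_imshow('rgb', rgb, size_wh=(620, 187), row_col=(0, 0))
cv2_imshow('depth', depth, size_wh=(620, 187), row_col=(0, 1))
cv2_imshow('mask', mask, size_wh=(620, 187), row_col=(0, 2))
cv2.waitKey(0)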
Example #21
Source File: GeometricAugmenters_test.py From impy with Apache License 2.0
def test_rotation(self):
    if (self.visualize):
        theta = 0.0
        height, width = self.frame.shape[0], self.frame.shape[1]
        for i in range(3):
            frame, ps = self.augmenter.rotation(frame=self.frame,
                                                bndbox=[0, 0, width, height],
                                                theta=theta)
            ix, iy, x, y = ps
            cv2.namedWindow("__rotation__", 0)
            # cv2.resizeWindow("__rotation__", self.windowSize)
            cv2.imshow("__rotation__", frame[iy:y, ix:x, :])
            cv2.waitKey(2500)
            cv2.destroyAllWindows()
            theta += 0.5
Example #22
Source File: GeometricAugmenters_test.py From impy with Apache License 2.0
def test_crop(self):
    frame = self.augmenter.crop(frame=self.frame, size=None)
    cv2.namedWindow("__crop__", 0)
    # cv2.resizeWindow("__crop__", (800, 800))
    cv2.imshow("__crop__", frame)
    cv2.waitKey(self.waitTime)
    cv2.destroyAllWindows()
Example #23
Source File: GeometricAugmenters_test.py From impy with Apache License 2.0
def test_scale(self):
    if (self.visualize):
        frame = self.augmenter.scale(frame=self.frame,
                                     size=(100, 100),
                                     interpolationMethod=1)
        cv2.namedWindow("__scale__", 0)
        # cv2.resizeWindow("__scale__", self.windowSize)
        cv2.imshow("__scale__", frame)
        cv2.waitKey(self.waitTime)
        cv2.destroyAllWindows()
Example #24
Source File: BoundingBoxAugmenters_test.py From impy with Apache License 2.0
def test_crop(self):
    # Prepare data.
    boundingBoxes = [[100, 100, 150, 150]]
    # Apply transformation.
    newboundingBoxes = self.augmenter.crop(boundingBoxes=boundingBoxes, size=None)
    # print(boundingBoxes)
    # Assert values.
    for i in range(len(newboundingBoxes)):
        ix, iy, x, y = newboundingBoxes[i]
        ixo, iyo, xo, yo = boundingBoxes[i]
        self.assertLess(x-ix, xo-ixo)
        self.assertLess(y-iy, yo-iyo)
    # Visual test.
    if (self.visualize):
        localbnxboxes = self.bndboxes
        frame = self.frame.copy()
        bndboxes = self.augmenter.crop(boundingBoxes=localbnxboxes, size=(300, 300))
        for each in bndboxes:
            ix, iy, x, y = each
            frame = cv2.rectangle(frame, (ix, iy), (x, y), (0, 0, 255), 5)
        cv2.namedWindow("__crop__", 0)
        # cv2.resizeWindow("__crop__", self.windowSize)
        cv2.imshow("__crop__", frame)
        cv2.waitKey(self.waitTime)
        cv2.destroyAllWindows()
Example #25
Source File: GeometricAugmenters_test.py From impy with Apache License 2.0
def test_vertical_flip(self):
    if (self.visualize):
        frame = self.augmenter.verticalFlip(frame=self.frame)
        cv2.namedWindow("__verticalFlip__", 0)
        # cv2.resizeWindow("__verticalFlip__", self.windowSize)
        cv2.imshow("__verticalFlip__", frame)
        cv2.waitKey(self.waitTime)
        cv2.destroyAllWindows()
Example #26
Source File: BoundingBoxAugmenters_test.py From impy with Apache License 2.0
def test_pad(self):
    # Prepare data.
    boundingBoxes = [[100, 100, 150, 150]]
    size = (50, 50)
    # Apply transformation.
    boundingBoxes = self.augmenter.pad(boundingBoxes=boundingBoxes,
                                       frameHeight=200,
                                       frameWidth=200,
                                       size=size)
    # print(boundingBoxes)
    self.assertLessEqual(boundingBoxes[0][0], 100)
    self.assertLessEqual(boundingBoxes[0][1], 100)
    self.assertGreaterEqual(boundingBoxes[0][2], 150)
    self.assertGreaterEqual(boundingBoxes[0][3], 150)
    # Visual test.
    if (self.visualize):
        frame = self.frame.copy()
        bndboxes = self.augmenter.pad(frameHeight=self.frame.shape[0],
                                      frameWidth=self.frame.shape[1],
                                      boundingBoxes=self.bndboxes,
                                      size=(25, 25))
        for each in bndboxes:
            ix, iy, x, y = each
            frame = cv2.rectangle(frame, (ix, iy), (x, y), (0, 0, 255), 2)
        cv2.namedWindow("__padding__", 0)
        # cv2.resizeWindow("__padding__", self.windowSize)
        cv2.imshow("__padding__", frame)
        cv2.waitKey(self.waitTime)
        cv2.destroyAllWindows()
Example #27
Source File: BoundingBoxAugmenters_test.py From impy with Apache License 2.0
def test_jitter_boxes(self):
    if (self.visualize):
        frame = self.augmenter.jitterBoxes(frame=self.frame,
                                           boundingBoxes=self.bndboxes,
                                           size=(20, 20),
                                           quantity=20)
        cv2.namedWindow("__jitterBoxes__", 0)
        # cv2.resizeWindow("__jitterBoxes__", self.windowSize)
        cv2.imshow("__jitterBoxes__", frame)
        cv2.waitKey(self.waitTime)
        cv2.destroyAllWindows()
Example #28
Source File: PerspectiveTransformation.py From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    global countClicks, coordinates, copyimage
    cv2.resizeWindow(windowname, 700, 700)

    while (countClicks < 4):
        preseedKey = cv2.waitKey(1)
        cv2.imshow(windowname, image)
        if preseedKey & 0xFF == 27:
            break

    pointone = np.float32([[coordinates[0], coordinates[1]],
                           [coordinates[2], coordinates[3]],
                           [coordinates[4], coordinates[5]],
                           [coordinates[6], coordinates[7]]])
    pointtwo = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])

    perspective = cv2.getPerspectiveTransform(pointone, pointtwo)
    output = cv2.warpPerspective(copyimage, perspective, (310, 310))

    cv2.imshow("Output Image", output)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
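main() relies on module-level globals (windowname, image, copyimage, countClicks, coordinates) that the original script initializes elsewhere, typically together with a mouse callback that records the four clicked corner points. A minimal sketch of that assumed setup, with a hypothetical record_point callback:

import cv2
import numpy as np

# Assumed module-level state; names mirror the example above.
windowname = "Input Image"
image = cv2.imread("input.jpg")  # assumed sample path
copyimage = image.copy()
countClicks = 0
coordinates = []

def record_point(event, x, y, flags, param):
    # Hypothetical callback: store each clicked corner as a flat (x, y) pair,
    # matching how coordinates[0..7] is indexed in main().
    global countClicks, coordinates
    if event == cv2.EVENT_LBUTTONDOWN and countClicks < 4:
        coordinates.extend([x, y])
        countClicks += 1

# resizeWindow needs an existing, resizable window.
cv2.namedWindow(windowname, cv2.WINDOW_NORMAL)
cv2.setMouseCallback(windowname, record_point)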
Example #29
Source File: GeometricAugmenters_test.py From impy with Apache License 2.0
def test_translate(self):
    if (self.visualize):
        frame = self.augmenter.translate(frame=self.frame, offset=(100, 100))
        cv2.namedWindow("__translate__", 0)
        # cv2.resizeWindow("__translate__", self.windowSize)
        cv2.imshow("__translate__", frame)
        cv2.waitKey(self.waitTime)
        cv2.destroyAllWindows()
Example #30
Source File: BoundingBoxAugmenters_test.py From impy with Apache License 2.0
def test_horizontal_flip(self):
    if (self.visualize):
        # Perform horizontal flip.
        frame = self.augmenter.horizontalFlip(frame=self.frame.copy(),
                                              boundingBoxes=self.bndboxes)
        # Visualization.
        cv2.namedWindow("__horizontalFlip__", 0)
        # cv2.resizeWindow("__horizontalFlip__", self.windowSize)
        cv2.imshow("__horizontalFlip__", frame)
        cv2.waitKey(self.waitTime)
        cv2.destroyAllWindows()