Python cv2.waitKey() Examples
The following are 30 code examples of cv2.waitKey(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or check out the other available functions and classes of the cv2 module.
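All of the examples below share the same basic pattern: draw or display an image with cv2.imshow(), then call cv2.waitKey(delay) to pump the HighGUI event loop. waitKey() waits up to delay milliseconds for a key press and returns the key code, or -1 if no key was pressed; a delay of 0 (or a negative value) blocks until a key is pressed. Without a waitKey() call, imshow() windows are never repainted. Here is a minimal, self-contained sketch of that loop (the camera index and window name are placeholders):

import cv2

cap = cv2.VideoCapture(0)  # default camera; pass a file path to play a video instead
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow("preview", frame)
    # Wait up to 1 ms for a key press; waitKey() also services the GUI event loop.
    key = cv2.waitKey(1) & 0xFF  # mask to the low byte for portability
    if key in (27, ord('q')):  # Esc or 'q' to quit
        break
cap.release()
cv2.destroyAllWindows()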
Example #1
Source File: generate_coco_json.py From coco-json-converter with GNU General Public License v3.0 | 14 votes |
def __get_annotation__(self, mask, image=None):
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    segmentation = []
    for contour in contours:
        # Valid polygons have >= 6 coordinates (3 points)
        if contour.size >= 6:
            segmentation.append(contour.flatten().tolist())
    RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
    RLE = cocomask.merge(RLEs)
    # RLE = cocomask.encode(np.asfortranarray(mask))
    area = cocomask.area(RLE)
    [x, y, w, h] = cv2.boundingRect(mask)
    if image is not None:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow("", image)
        cv2.waitKey(1)
    return segmentation, [x, y, w, h], area
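Note that the three-value unpacking of cv2.findContours() above assumes OpenCV 3.x; in OpenCV 2.x and 4.x the function returns only (contours, hierarchy), so the same call raises a ValueError there. A small version-agnostic shim, as a sketch:

# findContours returns (image, contours, hierarchy) on OpenCV 3.x but
# (contours, hierarchy) on 2.x/4.x; the contours list is always second-to-last.
result = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = result[-2]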
Example #2
Source File: BlurDetection.py From python-- with GNU General Public License v3.0 | 10 votes |
def _lapulaseDetection(self, imgName):
    """
    :param imgName: name of the image file
    :return: blur score of the image
    """
    # step 1: preprocessing
    img2gray, reImg = self.preImgOps(imgName)
    # step 2: apply the Laplacian operator and use the variance as the score
    resLap = cv2.Laplacian(img2gray, cv2.CV_64F)
    score = resLap.var()
    print("Laplacian score of given image is %s" % str(score))
    # step 3: draw the score on the image and save it
    # (shared logic that should arguably be factored out of this method)
    newImg = self._drawImgFonts(reImg, str(score))
    newDir = self.strDir + "/_lapulaseDetection_/"
    if not os.path.exists(newDir):
        os.makedirs(newDir)
    newPath = newDir + imgName
    cv2.imwrite(newPath, newImg)  # save the annotated image
    cv2.imshow(imgName, newImg)
    cv2.waitKey(0)
    return score
Example #3
Source File: vachat.py From The-chat-room with MIT License | 10 votes |
def run(self):
    print("VIDEO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VIDEO client successfully connected...")
    data = "".encode("utf-8")
    payload_size = struct.calcsize("L")  # note: native "L" is 4 or 8 bytes depending on platform; sender must match
    cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
    while True:
        # read the fixed-size header that carries the payload length
        while len(data) < payload_size:
            data += conn.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        # read the compressed, pickled frame payload
        while len(data) < msg_size:
            data += conn.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        frame = pickle.loads(frame_data)
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
            break
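The cv2.waitKey(1) & 0xFF == 27 test above is the usual idiom for exiting on Esc: on some platforms waitKey() returns a key code with extra high bits set (modifier or lock-key state), so masking with 0xFF keeps only the low byte before comparing it with an ASCII code:

key = cv2.waitKey(1) & 0xFF  # keep only the low byte of the key code
if key == 27:                # 27 is the ASCII code of Esc
    cv2.destroyAllWindows()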
Example #4
Source File: calibrate_camera.py From derplearning with MIT License | 9 votes |
def live_undistort(camera, camera_matrix, distortion_coefficients):
    """Using a given calibration matrix, display the distorted, undistorted, and cropped frame."""
    scaled_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, distortion_coefficients, camera.size, 1, camera.size
    )
    while True:
        ret, frame = camera.cap.read()
        assert ret
        distorted_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        undistorted_frame = cv2.undistort(
            distorted_frame, camera_matrix, distortion_coefficients, None, scaled_camera_matrix,
        )
        roi_x, roi_y, roi_w, roi_h = roi
        cropped_frame = undistorted_frame[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w]
        cv2.imshow("distorted %s" % (distorted_frame.shape,), distorted_frame)
        cv2.imshow("undistorted %s" % (undistorted_frame.shape,), undistorted_frame)
        cv2.imshow("cropped %s" % (cropped_frame.shape,), cropped_frame)
        cv2.waitKey(10)
Example #5
Source File: tkinter_functions.py From simba with GNU Lesser General Public License v3.0 | 8 votes |
def clahe(filename):
    os.chdir(os.path.dirname(filename))
    print('Applying CLAHE, this might take awhile...')
    currentVideo = os.path.basename(filename)
    fileName, fileEnding = os.path.splitext(currentVideo)  # robust to extra dots in the name
    saveName = 'CLAHE_' + fileName + '.avi'
    cap = cv2.VideoCapture(currentVideo)
    imageWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    imageHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(saveName, fourcc, fps, (imageWidth, imageHeight), 0)
    try:
        while True:
            ret, image = cap.read()
            if ret:
                im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                claheFilter = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16))
                claheCorrecttedFrame = claheFilter.apply(im)
                out.write(claheCorrecttedFrame)
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
            else:
                print('Completed video ' + saveName)
                break
    except Exception:
        print('clahe not applied')
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    return saveName
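One caveat with this example, as an observation rather than a fix: it never calls cv2.imshow(), and cv2.waitKey() only receives key events while a HighGUI window exists and has focus, so the ord('q') early-exit check is unlikely to ever fire during this batch conversion. If interactive cancellation is wanted, one sketch (assuming showing each frame is acceptable) is to display the frame before polling:

cv2.imshow('CLAHE preview', claheCorrecttedFrame)  # a window must exist for waitKey to see keys
if cv2.waitKey(10) & 0xFF == ord('q'):
    break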
Example #6
Source File: demo_caffe.py From MobileNetv2-SSDLite with MIT License | 8 votes |
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))
    net.blobs['data'].data[...] = img
    out = net.forward()
    box, conf, cls = postprocess(origimg, out)
    for i in range(len(box)):
        p1 = (box[i][0], box[i][1])
        p2 = (box[i][2], box[i][3])
        cv2.rectangle(origimg, p1, p2, (0, 255, 0))
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (COCO_CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
    k = cv2.waitKey(0) & 0xff
    # Exit if Esc pressed
    if k == 27:
        return False
    return True
Example #7
Source File: demo_caffe_voc.py From MobileNetv2-SSDLite with MIT License | 7 votes |
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))
    net.blobs['data'].data[...] = img
    out = net.forward()
    box, conf, cls = postprocess(origimg, out)
    for i in range(len(box)):
        p1 = (box[i][0], box[i][1])
        p2 = (box[i][2], box[i][3])
        cv2.rectangle(origimg, p1, p2, (0, 255, 0))
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
    k = cv2.waitKey(0) & 0xff
    # Exit if Esc pressed
    if k == 27:
        return False
    return True
Example #8
Source File: misc.py From Automatic-Identification-and-Counting-of-Blood-Cells with GNU General Public License v3.0 | 7 votes |
def show2(im, allobj):
    for obj in allobj:
        cv2.rectangle(im, (obj[1], obj[2]), (obj[3], obj[4]), (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #9
Source File: misc.py From Automatic-Identification-and-Counting-of-Blood-Cells with GNU General Public License v3.0 | 7 votes |
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3] ** 2 * w
        hh = obj[4] ** 2 * h
        cv2.rectangle(im,
                      (int(centerx - ww / 2), int(centery - hh / 2)),
                      (int(centerx + ww / 2), int(centery + hh / 2)),
                      (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #10
Source File: misc.py From Traffic-Signs-and-Object-Detection with GNU General Public License v3.0 | 7 votes |
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3] ** 2 * w
        hh = obj[4] ** 2 * h
        cv2.rectangle(im,
                      (int(centerx - ww / 2), int(centery - hh / 2)),
                      (int(centerx + ww / 2), int(centery + hh / 2)),
                      (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #11
Source File: misc.py From Traffic_sign_detection_YOLO with MIT License | 7 votes |
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3] ** 2 * w
        hh = obj[4] ** 2 * h
        cv2.rectangle(im,
                      (int(centerx - ww / 2), int(centery - hh / 2)),
                      (int(centerx + ww / 2), int(centery + hh / 2)),
                      (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #12
Source File: detect.py From pedestrian-haar-based-detector with GNU General Public License v2.0 | 7 votes |
def main():
    # image paths
    imagePath = "test3.jpg"
    cascPath = "cascades/haarcascade_pedestrian.xml"
    pplCascade = cv2.CascadeClassifier(cascPath)
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = normalize_grayimage(gray)
    pedestrians = pplCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=10,
        minSize=(32, 96),
        flags=cv2.CASCADE_SCALE_IMAGE  # cv2.cv.CV_HAAR_SCALE_IMAGE in the legacy Python 2 API
    )
    print("Found {0} ppl!".format(len(pedestrians)))
    # Draw a rectangle around the detected objects
    for (x, y, w, h) in pedestrians:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imwrite("saida.jpg", image)
    cv2.imshow("Ppl found", image)
    cv2.waitKey(0)
    return 0
Example #13
Source File: esr_visualizer.py From Udacity-SDC-Radar-Driver-Micro-Challenge with MIT License | 6 votes |
def update(self, radarData):
    self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
    # integer division keeps the pixel coordinates ints (the original / assumed Python 2)
    cv2.line(self.img, (10, 0), (self.width // 2 - 5, self.height), (100, 255, 255))
    cv2.line(self.img, (self.width - 10, 0), (self.width // 2 + 5, self.height), (100, 255, 255))
    for track_number in range(1, 65):
        if str(track_number) + '_track_range' in radarData:
            track_range = radarData[str(track_number) + '_track_range']
            track_angle = (float(radarData[str(track_number) + '_track_angle']) + 90.0) * math.pi / 180
            x_pos = math.cos(track_angle) * track_range * 4
            y_pos = math.sin(track_angle) * track_range * 4
            cv2.circle(self.img, (self.width // 2 + int(x_pos), self.height - int(y_pos) - 10),
                       5, (255, 255, 255))
            # cv2.putText(self.img, str(track_number),
            #             (self.width // 2 + int(x_pos) - 2, self.height - int(y_pos) - 10),
            #             self.font, 1, (255, 255, 255), 2)
    cv2.imshow("Radar", self.img)
    cv2.waitKey(2)
Example #14
Source File: webcam_demo.py From mmdetection with Apache License 2.0 | 6 votes |
def main():
    args = parse_args()
    device = torch.device(args.device)
    model = init_detector(args.config, args.checkpoint, device=device)
    camera = cv2.VideoCapture(args.camera_id)
    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        ret_val, img = camera.read()
        result = inference_detector(model, img)
        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break
        model.show_result(
            img, result, score_thr=args.score_thr, wait_time=1, show=True)
Example #15
Source File: objectDetectorYOLO.py From Traffic_sign_detection_YOLO with MIT License | 6 votes |
def processFrames(self):
    try:
        for img in self.anotations_list:
            img = img.split(';')
            # print(img)
            # ret, imgcv = cap.read()
            if self.video:
                ret, imgcv = self.cap.read()
            else:
                imgcv = cv2.imread(os.path.join('../', self.config["dataset"], img[0]))
            result = self.tfnet.return_predict(imgcv)
            print(result)
            imgcv = self.drawBoundingBox(imgcv, result)
            cv2.imshow('detected objects', imgcv)
            if cv2.waitKey(10) == ord('q'):
                print('exiting loop')
                break
    except KeyboardInterrupt:
        cv2.destroyAllWindows()
        print('exiting program')
Example #16
Source File: datasets.py From pruning_yolov3 with GNU General Public License v3.0 | 6 votes |
def __next__(self):
    self.count += 1
    img0 = self.imgs.copy()
    if cv2.waitKey(1) == ord('q'):  # q to quit
        cv2.destroyAllWindows()
        raise StopIteration

    # Letterbox
    img = [letterbox(x, new_shape=self.img_size, interp=cv2.INTER_LINEAR)[0] for x in img0]

    # Stack
    img = np.stack(img, 0)

    # Normalize RGB
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB
    img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)  # uint8 to fp16/fp32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0

    return self.sources, img, img0, None
Example #17
Source File: misc.py From Traffic_sign_detection_YOLO with MIT License | 6 votes |
def show2(im, allobj):
    for obj in allobj:
        cv2.rectangle(im, (obj[1], obj[2]), (obj[3], obj[4]), (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #18
Source File: boxing.py From ICDAR-2019-SROIE with MIT License | 6 votes |
def draw():
    f = open(box_path + 'jpglist.txt')
    # read each image and its label
    line = f.readline()
    line_num = 0
    while line:
        line_num = line_num + 1
        print('Image:', line_num)
        name = line.strip('\n')
        img = cv2.imread(image_path + name)
        img_size = img.shape
        img_size = img_size[0] * img_size[1]
        # read each coordinate and draw a box
        # (the original used name.strip('.jpg'), which also eats leading/trailing j/p/g characters)
        f_txt = open(image_path + os.path.splitext(name)[0] + '.txt')
        # line_txt = f_txt.readline()  # pass the first ROI information
        line_txt = f_txt.readline()
        while line_txt:
            coor = line_txt.split(',')
            x1 = int(coor[0].strip('\''))
            y1 = int(coor[1].strip('\''))
            x3 = int(coor[4].strip('\''))
            y3 = int(coor[5].strip('\''))
            text = coor[8].strip('\n').strip('\'')
            text_show = text + '(' + str(x1) + ',' + str(y1) + ')'
            cv2.rectangle(img, (x1, y1), (x3, y3), (255, 0, 0), 1)
            # cv2.putText(img, text_show, (x1, y1 - 1),
            #             cv2.FONT_HERSHEY_TRIPLEX, 0.35, (0, 0, 255), 1)
            line_txt = f_txt.readline()
        f_txt.close()
        cv2.imwrite(box_path + name, img)
        line = f.readline()
    f.close()
    # img = cv2.imshow('image', img)
    # cv2.waitKey(0)
Example #19
Source File: BlurDetection.py From python-- with GNU General Public License v3.0 | 6 votes |
def _blurDetection(self, imgName):
    # step 1: image preprocessing
    img2gray, reImg = self.preImgOps(imgName)
    imgMat = self._imageToMatrix(img2gray) / 255.0
    x, y = imgMat.shape
    score = 0
    for i in range(x - 2):
        for j in range(y - 2):
            score += (imgMat[i + 2, j] - imgMat[i, j]) ** 2
    # step 3: draw the score on the image and save it
    # (shared logic that should arguably be factored out of this method)
    score = score / 10
    newImg = self._drawImgFonts(reImg, str(score))
    newDir = self.strDir + "/_blurDetection_/"
    if not os.path.exists(newDir):
        os.makedirs(newDir)
    newPath = newDir + imgName
    cv2.imwrite(newPath, newImg)  # save the annotated image
    cv2.imshow(imgName, newImg)
    cv2.waitKey(0)
    return score
Example #20
Source File: BlurDetection.py From python-- with GNU General Public License v3.0 | 6 votes |
def _SMD2Detection(self, imgName):
    """
    SMD2: product of neighboring gray-level differences
    :param imgName:
    :return:
    """
    # step 1: image preprocessing
    img2gray, reImg = self.preImgOps(imgName)
    f = self._imageToMatrix(img2gray) / 255.0
    x, y = f.shape
    score = 0
    for i in range(x - 1):
        for j in range(y - 1):
            score += np.abs(f[i + 1, j] - f[i, j]) * np.abs(f[i, j] - f[i, j + 1])
    # step 3: draw the score on the image and save it
    # (shared logic that should arguably be factored out of this method)
    newImg = self._drawImgFonts(reImg, str(score))
    newDir = self.strDir + "/_SMD2Detection_/"
    if not os.path.exists(newDir):
        os.makedirs(newDir)
    newPath = newDir + imgName
    cv2.imwrite(newPath, newImg)  # save the annotated image
    cv2.imshow(imgName, newImg)
    cv2.waitKey(0)
    return score
Example #21
Source File: BlurDetection.py From python-- with GNU General Public License v3.0 | 6 votes |
def _Variance(self, imgName):
    """
    Gray-level variance
    :param imgName:
    :return:
    """
    # step 1: image preprocessing
    img2gray, reImg = self.preImgOps(imgName)
    f = self._imageToMatrix(img2gray)
    # step 3: draw the score on the image and save it
    # (shared logic that should arguably be factored out of this method)
    score = np.var(f)
    newImg = self._drawImgFonts(reImg, str(score))
    newDir = self.strDir + "/_Variance_/"
    if not os.path.exists(newDir):
        os.makedirs(newDir)
    newPath = newDir + imgName
    cv2.imwrite(newPath, newImg)  # save the annotated image
    cv2.imshow(imgName, newImg)
    cv2.waitKey(0)
    return score
Example #22
Source File: streaming.py From olympe with BSD 3-Clause "New" or "Revised" License | 6 votes |
def show_yuv_frame(self, window_name, yuv_frame):
    # the VideoFrame.info() dictionary contains some useful information
    # such as the video resolution
    info = yuv_frame.info()
    height, width = info["yuv"]["height"], info["yuv"]["width"]
    # yuv_frame.vmeta() returns a dictionary that contains additional
    # metadata from the drone (GPS coordinates, battery percentage, ...)
    # convert pdraw YUV flag to OpenCV YUV flag
    cv2_cvt_color_flag = {
        olympe.PDRAW_YUV_FORMAT_I420: cv2.COLOR_YUV2BGR_I420,
        olympe.PDRAW_YUV_FORMAT_NV12: cv2.COLOR_YUV2BGR_NV12,
    }[info["yuv"]["format"]]
    # yuv_frame.as_ndarray() is a 2D numpy array with the proper "shape"
    # i.e (3 * height / 2, width) because it's a YUV I420 or NV12 frame
    # Use OpenCV to convert the yuv frame to RGB
    cv2frame = cv2.cvtColor(yuv_frame.as_ndarray(), cv2_cvt_color_flag)
    # Use OpenCV to show this frame
    cv2.imshow(window_name, cv2frame)
    cv2.waitKey(1)  # please OpenCV for 1 ms...
Example #23
Source File: esr_visualizer.py From Udacity-SDC-Radar-Driver-Micro-Challenge with MIT License | 6 votes |
def update(self, radarData):
    self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
    # integer division keeps the pixel coordinates ints (the original / assumed Python 2)
    cv2.line(self.img, (10, 0), (self.width // 2 - 5, self.height), (100, 255, 255))
    cv2.line(self.img, (self.width - 10, 0), (self.width // 2 + 5, self.height), (100, 255, 255))
    for track_number in range(1, 65):
        if str(track_number) + '_track_range' in radarData:
            track_range = radarData[str(track_number) + '_track_range']
            track_angle = (float(radarData[str(track_number) + '_track_angle']) + 90.0) * math.pi / 180
            x_pos = math.cos(track_angle) * track_range * 4
            y_pos = math.sin(track_angle) * track_range * 4
            cv2.circle(self.img, (self.width // 2 + int(x_pos), self.height - int(y_pos) - 10),
                       5, (255, 255, 255))
            # cv2.putText(self.img, str(track_number),
            #             (self.width // 2 + int(x_pos) - 2, self.height - int(y_pos) - 10),
            #             self.font, 1, (255, 255, 255), 2)
    cv2.imshow("Radar", self.img)
    cv2.waitKey(2)
Example #24
Source File: test.py From yolo_tensorflow with MIT License | 6 votes |
def camera_detector(self, cap, wait=10):
    detect_timer = Timer()
    ret, _ = cap.read()
    while ret:
        ret, frame = cap.read()
        detect_timer.tic()
        result = self.detect(frame)
        detect_timer.toc()
        print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))
        self.draw_result(frame, result)
        cv2.imshow('Camera', frame)
        cv2.waitKey(wait)
        ret, frame = cap.read()
Example #25
Source File: functions.py From 3D-HourGlass-Network with MIT License | 5 votes |
def playVideoFromArray(s):
    # Show the array frame-by-frame
    for frame in s:
        cv2.imshow('Frame', frame)
        # Press Q on keyboard to exit
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break
    # Closes all the frames
    cv2.destroyAllWindows()
Example #26
Source File: noise_model.py From n2n-watermark-remove with MIT License | 5 votes |
def main():
    args = get_args()
    image_size = args.image_size
    noise_model = get_noise_model(args.noise_model)
    while True:
        image = np.ones((image_size, image_size, 3), dtype=np.uint8) * 128
        noisy_image = noise_model(image)
        cv2.imshow("noise image", noisy_image)
        key = cv2.waitKey(-1)  # a non-positive delay blocks until a key is pressed
        # "q" (key code 113): quit
        if key == 113:
            return 0
Example #27
Source File: fatigue.py From Traffic-Signs-and-Object-Detection with GNU General Public License v3.0 | 5 votes |
def pre(img):
    global face_cascade, eye_cascade
    load()
    if 1:
        # ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            # the original tested "eyes is not ()"; an identity check against () is unreliable
            if len(eyes) > 0:
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
                openeye()
            else:
                closed()
                if closed.count == 3:
                    print("driver is sleeping")
                    playsound("alarm.mp3")
        cv2.imshow('img', img)
        k = cv2.waitKey(30) & 0xff
Example #28
Source File: demo.py From ssds.pytorch with MIT License | 5 votes |
def demo(args, image_path):
    # 1. load the configure file
    cfg_from_file(args.confg_file)

    # 2. load detector based on the configure file
    object_detector = ObjectDetector()

    # 3. load image
    image = cv2.imread(image_path)

    # 4. detect
    _labels, _scores, _coords = object_detector.predict(image)

    # 5. draw bounding box on the image
    for labels, scores, coords in zip(_labels, _scores, _coords):
        cv2.rectangle(image,
                      (int(coords[0]), int(coords[1])),
                      (int(coords[2]), int(coords[3])),
                      COLORS[labels % 3], 2)
        cv2.putText(image,
                    '{label}: {score:.3f}'.format(label=VOC_CLASSES[labels], score=scores),
                    (int(coords[0]), int(coords[1])), FONT, 0.5, COLORS[labels % 3], 2)

    # 6. visualize result
    if args.display is True:
        cv2.imshow('result', image)
        cv2.waitKey(0)

    # 7. write result
    if args.save is True:
        path, _ = os.path.splitext(image_path)
        cv2.imwrite(path + '_result.jpg', image)
Example #29
Source File: functions.py From 3D-HourGlass-Network with MIT License | 5 votes |
def playVideoFromArray(s):
    # Show the array frame-by-frame
    for frame in s:
        cv2.imshow('Frame', frame)
        # Press Q on keyboard to exit
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break
    # Closes all the frames
    cv2.destroyAllWindows()
Example #30
Source File: functions.py From 3D-HourGlass-Network with MIT License | 5 votes |
def playVideoFromAVI(s):
    cap = cv2.VideoCapture(s)
    # Check if the capture opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")
    # Read until video is completed
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            # Display the resulting frame
            cv2.imshow('Frame', frame)
            # Press Q on keyboard to exit
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # Break the loop
        else:
            break
    # When everything done, release the video capture object
    cap.release()
    # Closes all the frames
    cv2.destroyAllWindows()