Python cv2.putText() Examples
The following are 29 code examples of cv2.putText(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
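Before the project examples, here is a minimal self-contained sketch of the call itself; the canvas size, text, and output filename are illustrative, not taken from any example below. Note that the origin passed to cv2.putText() is the bottom-left corner of the text.

import cv2
import numpy as np

# A blank 200x400 BGR canvas to draw on.
img = np.zeros((200, 400, 3), dtype=np.uint8)

# putText(image, text, origin, fontFace, fontScale, color (BGR), thickness, lineType)
cv2.putText(img, "hello", (10, 100), cv2.FONT_HERSHEY_SIMPLEX,
            1.0, (255, 255, 255), 2, cv2.LINE_AA)

cv2.imwrite("hello.png", img)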
Example #1
Source File: objectDetectorYOLO.py From Traffic_sign_detection_YOLO with MIT License | 11 votes |
def drawBoundingBox(self, imgcv, result):
    for box in result:
        # print(box)
        x1, y1, x2, y2 = (box['topleft']['x'], box['topleft']['y'],
                          box['bottomright']['x'], box['bottomright']['y'])
        conf = box['confidence']
        # print(conf)
        label = box['label']
        if conf < self.predictThresh:
            continue
        # print(x1, y1, x2, y2, conf, label)
        cv2.rectangle(imgcv, (x1, y1), (x2, y2), (0, 255, 0), 6)
        labelSize = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX, 0.5, 2)
        # print('labelSize>>', labelSize)
        _x1 = x1
        _y1 = y1  # + int(labelSize[0][1] / 2)
        _x2 = _x1 + labelSize[0][0]
        _y2 = y1 - int(labelSize[0][1])
        # Filled background behind the label, then the label text on top.
        cv2.rectangle(imgcv, (_x1, _y1), (_x2, _y2), (0, 255, 0), cv2.FILLED)
        cv2.putText(imgcv, label, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)
    return imgcv
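Example #1 shows a pattern that recurs throughout this page: cv2.getTextSize() is used to size a filled background rectangle behind the label. getTextSize() returns ((text_width, text_height), baseline), which is why the examples index into labelSize[0]. A minimal standalone sketch of the pattern follows; the label, position, and colors are arbitrary.

import cv2
import numpy as np

img = np.zeros((100, 300, 3), dtype=np.uint8)
label = "car"
font = cv2.FONT_HERSHEY_COMPLEX
(text_w, text_h), baseline = cv2.getTextSize(label, font, 0.5, 1)

x, y = 20, 50  # bottom-left origin of the text
# Filled background sized to the text, then the label drawn on top of it.
cv2.rectangle(img, (x, y - text_h), (x + text_w, y + baseline), (0, 255, 0), cv2.FILLED)
cv2.putText(img, label, (x, y), font, 0.5, (0, 0, 0), 1)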
Example #2
Source File: utils.py From Tensorflow-YOLOv3 with MIT License | 9 votes |
def draw_boxes_frame(frame, frame_size, boxes_dicts, class_names, input_size):
    """Draws detected boxes in a video frame"""
    boxes_dict = boxes_dicts[0]
    resize_factor = (frame_size[0] / input_size[1], frame_size[1] / input_size[0])
    for cls in range(len(class_names)):
        boxes = boxes_dict[cls]
        color = (0, 0, 255)
        if np.size(boxes) != 0:
            for box in boxes:
                xy = box[:4]
                xy = [int(xy[i] * resize_factor[i % 2]) for i in range(4)]
                cv2.rectangle(frame, (xy[0], xy[1]), (xy[2], xy[3]), color[::-1], 2)
                (text_width, text_height), baseline = cv2.getTextSize(
                    class_names[cls], cv2.FONT_HERSHEY_SIMPLEX, 0.75, 1)
                cv2.rectangle(frame, (xy[0], xy[1]),
                              (xy[0] + text_width, xy[1] - text_height - baseline),
                              color[::-1], thickness=cv2.FILLED)
                cv2.putText(frame, class_names[cls], (xy[0], xy[1] - baseline),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1)
Example #3
Source File: demo_caffe.py From MobileNetv2-SSDLite with MIT License | 8 votes |
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward()
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
        p1 = (box[i][0], box[i][1])
        p2 = (box[i][2], box[i][3])
        cv2.rectangle(origimg, p1, p2, (0, 255, 0))
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (COCO_CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)

    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27:
        return False
    return True
Example #4
Source File: utils.py From tf2-yolo3 with Apache License 2.0 | 8 votes |
def draw_outputs(img, outputs, class_names=None):
    boxes, objectness, classes = outputs
    # boxes, objectness, classes = boxes[0], objectness[0], classes[0]
    wh = np.flip(img.shape[0:2])
    if img.ndim == 2 or img.shape[2] == 1:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    min_wh = np.amin(wh)
    if min_wh <= 100:
        font_size = 0.5
    else:
        font_size = 1
    for i in range(classes.shape[0]):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 1)
        img = cv2.putText(img, '{}'.format(int(classes[i])), x1y1,
                          cv2.FONT_HERSHEY_COMPLEX_SMALL, font_size, (0, 0, 255), 1)
    return img
Example #5
Source File: utils.py From tf2-yolo3 with Apache License 2.0 | 8 votes |
def draw_labels(x, y, class_names=None):
    img = x.numpy()
    if img.ndim == 2 or img.shape[2] == 1:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    boxes, classes = tf.split(y, (4, 1), axis=-1)
    classes = classes[..., 0]
    wh = np.flip(img.shape[0:2])
    min_wh = np.amin(wh)
    if min_wh <= 100:
        font_size = 0.5
    else:
        font_size = 1
    for i in range(len(boxes)):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 1)
        if class_names:
            img = cv2.putText(img, class_names[classes[i]], x1y1,
                              cv2.FONT_HERSHEY_COMPLEX_SMALL, font_size, (0, 0, 255), 1)
        else:
            img = cv2.putText(img, str(classes[i]), x1y1,
                              cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
    return img
Example #6
Source File: demo_caffe_voc.py From MobileNetv2-SSDLite with MIT License | 7 votes |
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward()
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
        p1 = (box[i][0], box[i][1])
        p2 = (box[i][2], box[i][3])
        cv2.rectangle(origimg, p1, p2, (0, 255, 0))
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)

    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27:
        return False
    return True
Example #7
Source File: vis.py From Parsing-R-CNN with MIT License | 7 votes |
def vis_class(img, pos, class_str, bg_color):
    """Visualizes the class."""
    font_color = cfg.VIS.SHOW_CLASS.COLOR
    font_scale = cfg.VIS.SHOW_CLASS.FONT_SCALE
    x0, y0 = int(pos[0]), int(pos[1])
    # Compute text size.
    txt = class_str
    font = cv2.FONT_HERSHEY_SIMPLEX
    ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
    # Place text background.
    back_tl = x0, y0 - int(1.3 * txt_h)
    back_br = x0 + txt_w, y0
    cv2.rectangle(img, back_tl, back_br, bg_color, -1)
    # Show text.
    txt_tl = x0, y0 - int(0.3 * txt_h)
    cv2.putText(img, txt, txt_tl, font, font_scale, font_color, lineType=cv2.LINE_AA)
    return img
Example #8
Source File: utils.py From pruning_yolov3 with GNU General Public License v3.0 | 7 votes |
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255],
                    thickness=tf, lineType=cv2.LINE_AA)
Example #9
Source File: net_utils.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def vis_detections(im, class_name, dets, thresh=0.8):
    """Visual debugging of detections."""
    for i in range(np.minimum(10, dets.shape[0])):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        score = dets[i, -1]
        if score > thresh:
            cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)
            cv2.putText(im, '%s: %.3f' % (class_name, score), (bbox[0], bbox[1] + 15),
                        cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 255), thickness=1)
    return im

# Borrow from matterport mask R-CNN implementation
Example #10
Source File: esr_visualizer.py From Udacity-SDC-Radar-Driver-Micro-Challenge with MIT License | 6 votes |
def update(self, radarData):
    self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
    # Coordinates must be integers; the original relies on Python 2
    # integer division, so // is used here for Python 3 compatibility.
    cv2.line(self.img, (10, 0), (self.width // 2 - 5, self.height), (100, 255, 255))
    cv2.line(self.img, (self.width - 10, 0), (self.width // 2 + 5, self.height), (100, 255, 255))
    for track_number in range(1, 65):
        if str(track_number) + '_track_range' in radarData:
            track_range = radarData[str(track_number) + '_track_range']
            track_angle = (float(radarData[str(track_number) + '_track_angle']) + 90.0) * math.pi / 180
            x_pos = math.cos(track_angle) * track_range * 4
            y_pos = math.sin(track_angle) * track_range * 4
            cv2.circle(self.img, (self.width // 2 + int(x_pos), self.height - int(y_pos) - 10),
                       5, (255, 255, 255))
            # cv2.putText(self.img, str(track_number),
            #             (self.width // 2 + int(x_pos) - 2, self.height - int(y_pos) - 10),
            #             self.font, 1, (255, 255, 255), 2)
    cv2.imshow("Radar", self.img)
    cv2.waitKey(2)
Example #11
Source File: image_viewer.py From deep_sort with GNU General Public License v3.0 | 6 votes |
def rectangle(self, x, y, w, h, label=None):
    """Draw a rectangle.

    Parameters
    ----------
    x : float | int
        Top left corner of the rectangle (x-axis).
    y : float | int
        Top left corner of the rectangle (y-axis).
    w : float | int
        Width of the rectangle.
    h : float | int
        Height of the rectangle.
    label : Optional[str]
        A text label that is placed at the top left corner of the rectangle.

    """
    pt1 = int(x), int(y)
    pt2 = int(x + w), int(y + h)
    cv2.rectangle(self.image, pt1, pt2, self._color, self.thickness)
    if label is not None:
        text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, self.thickness)
        center = pt1[0] + 5, pt1[1] + 5 + text_size[0][1]
        pt2 = pt1[0] + 10 + text_size[0][0], pt1[1] + 10 + text_size[0][1]
        cv2.rectangle(self.image, pt1, pt2, self._color, -1)
        cv2.putText(self.image, label, center, cv2.FONT_HERSHEY_PLAIN,
                    1, (255, 255, 255), self.thickness)
Example #12
Source File: net.py From exposure with MIT License | 6 votes |
def draw_value_reward_score(self, img, value, reward, score):
    img = img.copy()
    # Average with 0.5 for semi-transparent background
    img[:14] = img[:14] * 0.5 + 0.25
    img[50:] = img[50:] * 0.5 + 0.25
    if self.cfg.gan == 'ls':
        red = -np.tanh(float(score) / 1) * 0.5 + 0.5
    else:
        red = -np.tanh(float(score) / 10.0) * 0.5 + 0.5
    top = '%+.2f %+.2f' % (value, reward)
    cv2.putText(img, top, (3, 7), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
                (1.0, 1.0 - red, 1.0 - red))
    score = '%+.3f' % score
    cv2.putText(img, score, (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                (1.0, 1.0 - red, 1.0 - red))
    return img
Example #13
Source File: YOLOtest.py From Traffic_sign_detection_YOLO with MIT License | 6 votes |
def drawBoundingBox(self, imgcv, result):
    # find the max confidence value
    self.predictThresh = max([box['confidence'] for box in result])
    for box in result:
        # print(box)
        x1, y1, x2, y2 = (box['topleft']['x'], box['topleft']['y'],
                          box['bottomright']['x'], box['bottomright']['y'])
        conf = box['confidence']
        # print(conf)
        label = box['label']
        print("label", label, "confidence", conf)
        if conf < self.predictThresh:
            continue
        # print(x1, y1, x2, y2, conf, label)
        cv2.rectangle(imgcv, (x1, y1), (x2, y2), (0, 255, 0), 6)
        labelSize = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX, 0.5, 2)
        # print('labelSize>>', labelSize)
        _x1 = x1
        _y1 = y1  # + int(labelSize[0][1] / 2)
        _x2 = _x1 + labelSize[0][0]
        _y2 = y1 - int(labelSize[0][1])
        cv2.rectangle(imgcv, (_x1, _y1), (_x2, _y2), (0, 255, 0), cv2.FILLED)
        cv2.putText(imgcv, label, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)
    return imgcv
Example #14
Source File: main.py From ICDAR-2019-SROIE with MIT License | 6 votes |
def draw():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("for_task3/*.txt")]
    txt_files = [s + ".txt" for s in filenames]
    for txt in txt_files:
        image = cv2.imread('test_original/' + txt.split('/')[1].split('.')[0] + '.jpg',
                           cv2.IMREAD_COLOR)
        with open(txt, 'r') as txt_file:
            for line in csv.reader(txt_file):
                box = [int(string, 10) for string in line[0:8]]
                if len(line) < 9:
                    print(txt)
                cv2.rectangle(image, (box[0], box[1]), (box[4], box[5]), (0, 255, 0), 2)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(image, line[8].upper(), (box[0], box[1]), font,
                            0.5, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.imwrite('task2_result_draw/' + txt.split('/')[1].split('.')[0] + '.jpg', image)
Example #15
Source File: predictor.py From Res2Net-maskrcnn with MIT License | 6 votes |
def overlay_class_names(self, image, predictions):
    """
    Adds detected class names and scores in the positions defined by the
    top-left corner of the predicted bounding box

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `scores` and `labels`.
    """
    scores = predictions.get_field("scores").tolist()
    labels = predictions.get_field("labels").tolist()
    labels = [self.CATEGORIES[i] for i in labels]
    boxes = predictions.bbox

    template = "{}: {:.2f}"
    for box, score, label in zip(boxes, scores, labels):
        x, y = box[:2]
        s = template.format(label, score)
        cv2.putText(image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1)

    return image
Example #16
Source File: boxing.py From ICDAR-2019-SROIE with MIT License | 6 votes |
def draw():
    f = open(box_path + 'jpglist.txt')

    # read each image and its label
    line = f.readline()
    line_num = 0
    while line:
        line_num = line_num + 1
        print('Image:', line_num)
        name = line.strip('\n')
        img = cv2.imread(image_path + name)
        img_size = img.shape
        img_size = img_size[0] * img_size[1]

        # read each coordinate and draw box
        f_txt = open(image_path + name.strip('.jpg') + '.txt')
        # line_txt = f_txt.readline()  # pass the first ROI information
        line_txt = f_txt.readline()
        while line_txt:
            coor = line_txt.split(',')
            x1 = int(coor[0].strip('\''))
            y1 = int(coor[1].strip('\''))
            x3 = int(coor[4].strip('\''))
            y3 = int(coor[5].strip('\''))
            text = coor[8].strip('\n').strip('\'')
            text_show = text + '(' + str(x1) + ',' + str(y1) + ')'

            cv2.rectangle(img, (x1, y1), (x3, y3), (255, 0, 0), 1)
            # cv2.putText(img, text_show, (x1, y1 - 1),
            #             cv2.FONT_HERSHEY_TRIPLEX, 0.35, (0, 0, 255), 1)
            line_txt = f_txt.readline()

        cv2.imwrite(box_path + name, img)
        line = f.readline()

    # img = cv2.imshow('image', img)
    # cv2.waitKey(0)
Example #17
Source File: image_process.py From Advanced_Lane_Lines with MIT License | 6 votes |
def test_yellow_white_thresh_images(src, dst, y_low=(10, 50, 0), y_high=(30, 255, 255),
                                    w_low=(180, 180, 180), w_high=(255, 255, 255)):
    """
    apply the thresh to images in a src folder and output to dst folder
    """
    image_files = glob.glob(src + "*.jpg")
    for idx, file in enumerate(image_files):
        print(file)
        img = mpimg.imread(file)
        image_threshed = yellow_white_thresh(img, y_low, y_high, w_low, w_high)
        file_name = file.split("\\")[-1]
        print(file_name)
        out_image = dst + file_name
        print(out_image)
        # convert binary to RGB and scale by 255 for visibility;
        # a pixel value of 1 would not be visible once written to file
        image_threshed = cv2.cvtColor(image_threshed * 255, cv2.COLOR_GRAY2RGB)

        # HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        # V = HSV[:, :, 2]
        # brightness = np.mean(V)
        # info_str = "brightness is: {}".format(int(brightness))
        # cv2.putText(image_threshed, info_str, (50, 700),
        #             cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 2)

        cv2.imwrite(out_image, image_threshed)
Example #18
Source File: utils.py From object-detection with MIT License | 6 votes |
def draw_boxed_text(img, text, topleft, color):
    """Draw a translucent boxed text in white, overlaid on top of a
    colored patch surrounded by a black border. FONT, TEXT_SCALE,
    TEXT_THICKNESS and ALPHA values are constants (fixed) as defined on top.

    # Arguments
        img: the input image as a numpy array.
        text: the text to be drawn.
        topleft: XY coordinate of the topleft corner of the boxed text.
        color: color of the patch, i.e. background of the text.

    # Output
        img: note the original image is modified inplace.
    """
    assert img.dtype == np.uint8
    img_h, img_w, _ = img.shape
    if topleft[0] >= img_w or topleft[1] >= img_h:
        return img
    margin = 3
    size = cv2.getTextSize(text, FONT, TEXT_SCALE, TEXT_THICKNESS)
    w = size[0][0] + margin * 2
    h = size[0][1] + margin * 2
    # the patch is used to draw boxed text
    patch = np.zeros((h, w, 3), dtype=np.uint8)
    patch[...] = color
    cv2.putText(patch, text, (margin + 1, h - margin - 2), FONT, TEXT_SCALE,
                WHITE, thickness=TEXT_THICKNESS, lineType=cv2.LINE_8)
    cv2.rectangle(patch, (0, 0), (w - 1, h - 1), BLACK, thickness=1)
    w = min(w, img_w - topleft[0])  # clip overlay at image boundary
    h = min(h, img_h - topleft[1])
    # Overlay the boxed text onto region of interest (roi) in img
    roi = img[topleft[1]:topleft[1] + h, topleft[0]:topleft[0] + w, :]
    cv2.addWeighted(patch[0:h, 0:w, :], ALPHA, roi, 1 - ALPHA, 0, roi)
    return img
Example #19
Source File: util.py From exposure with MIT License | 6 votes |
def vis_images_and_indexs(images, features, dir, name):
    # indexs = np.reshape(indexs, (len(indexs),))
    # print('visualizing images and indexs: ', images.shape, indexs.shape)
    id_imgs = []
    for feature in features:
        img = np.ones((64, 64, 3))
        cv2.putText(img, str(feature), (4, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
                    (1.0, 0.0, 0.0))
        id_imgs.append(img)
    id_imgs = np.stack(id_imgs, axis=0)
    # print('id imgs: ', id_imgs.shape)

    vis_imgs = np.vstack([images, id_imgs])
    image = make_image_grid(vis_imgs, per_row=images.shape[0])
    vis_dir = dir
    try:
        os.mkdir(vis_dir)
    except:
        pass
    cv2.imwrite(os.path.join(vis_dir, name + '.png'), image[:, :, ::-1] * 255.0)
Example #20
Source File: facerec_from_webcam_faster.py From face-attendance-machine with Apache License 2.0 | 6 votes |
def face_process():
    myprint("face process start", time.time())
    # Find all the faces and face encodings in the current frame of video
    # face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
    myprint('face_locations start', time.time())
    face_locations = face_recognition.face_locations(rgb_small_frame, model="hog")
    myprint('face_locations end', time.time())
    myprint('face_encodings start', time.time())
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    myprint('face_encodings end', time.time())
    face_names = []
    for face_encoding in face_encodings:
        # optimize start: KNN-style vote (rank * weight accumulated per class,
        # then sorted to take the top-1)
        name, dis = vote_class(face_encoding)
        # optimize end
        face_names.append(name)  # collect the recognized face names
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        myprint('putText start', time.time())
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        myprint("putText end " + name, time.time())
        # say hello and save record to file
        myprint('process_face_records start', time.time())
        process_face_records(name)
        myprint('process_face_records end', time.time())
    # Display the resulting image
    cv2.imshow('Video', frame)
    myprint("face process end", time.time())
Example #21
Source File: chapter2.py From OpenCV-Computer-Vision-Projects-with-Python with MIT License | 6 votes |
def ProcessFrame(self, frame):
    # segment arm region
    segment = self.SegmentArm(frame)

    # make a copy of the segmented image to draw on
    draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)

    # draw some helpers for correctly placing hand
    # (// keeps the coordinates integral under Python 3)
    cv2.circle(draw, (self.imgWidth // 2, self.imgHeight // 2), 3, [255, 102, 0], 2)
    cv2.rectangle(draw, (self.imgWidth // 3, self.imgHeight // 3),
                  (self.imgWidth * 2 // 3, self.imgHeight * 2 // 3), [255, 102, 0], 2)

    # find the hull of the segmented area, and based on that find the
    # convexity defects
    [contours, defects] = self.FindHullDefects(segment)

    # detect the number of fingers depending on the contours and convexity defects;
    # draw defects that belong to fingers green, others red
    [nofingers, draw] = self.DetectNumberFingers(contours, defects, draw)

    # print number of fingers on image
    cv2.putText(draw, str(nofingers), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    return draw
Example #22
Source File: net_utils.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def vis_det_and_mask(im, class_name, dets, masks, thresh=0.8):
    """Visual debugging of detections."""
    num_dets = np.minimum(10, dets.shape[0])
    colors_mask = random_colors(num_dets)
    colors_bbox = np.round(np.random.rand(num_dets, 3) * 255)
    # sort rois according to the coordinates, draw upper bbox first
    draw_mask = np.zeros(im.shape[:2], dtype=np.uint8)

    for i in range(1):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        mask = masks[i, :, :]
        full_mask = unmold_mask(mask, bbox, im.shape)

        score = dets[i, -1]
        if score > thresh:
            word_width = len(class_name)
            cv2.rectangle(im, bbox[0:2], bbox[2:4], colors_bbox[i], 2)
            cv2.rectangle(im, bbox[0:2], (bbox[0] + 18 + word_width * 8, bbox[1] + 15),
                          colors_bbox[i], thickness=cv2.FILLED)
            apply_mask(im, full_mask, draw_mask, colors_mask[i], 0.5)
            draw_mask += full_mask
            cv2.putText(im, '%s' % (class_name), (bbox[0] + 5, bbox[1] + 12),
                        cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), thickness=1)
    return im
Example #23
Source File: demo.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def draw_detection(frame, det, class_names):
    (klass, score, x0, y0, x1, y1) = det
    klass_name = class_names[int(klass)]
    h = frame.shape[0]
    w = frame.shape[1]
    # denormalize detections from [0,1] to the frame size
    p0 = tuple(map(int, (x0 * w, y0 * h)))
    p1 = tuple(map(int, (x1 * w, y1 * h)))
    logging.info("detection: %s %s", klass_name, score)
    cv2.rectangle(frame, p0, p1, (0, 0, 255), 2)
    # Where to draw the text, a few pixels above the top y coordinate
    tp0 = (p0[0], p0[1] - 5)
    draw_text = "{} {}".format(klass_name, score)
    cv2.putText(frame, draw_text, tp0, cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5, (0, 0, 255))
Example #24
Source File: camera_pi.py From object-detection with MIT License | 5 votes |
def object_track(self, img, conf_th=0.3, conf_class=[]):
    output = self.detector.prediction(img)
    df = self.detector.filter_prediction(output, img, conf_th=conf_th,
                                         conf_class=conf_class)
    img = self.detector.draw_boxes(img, df)
    boxes = df[['x1', 'y1', 'x2', 'y2']].values
    objects = self.ct.update(boxes)
    if len(boxes) > 0 and (df['class_name'].str.contains('person').any()):
        for (objectID, centroid) in objects.items():
            text = "ID {}".format(objectID)
            cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
    return img
Example #25
Source File: play_video_pseudo.py From simba with GNU Lesser General Public License v3.0 | 5 votes |
def printOnFrame(currentFrame):
    currentTime = currentFrame / fps
    currentTime = round(currentTime, 2)
    cv2.putText(frame, 'F~ ' + str(currentFrame), (10, (height - 20)),
                cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 2)
    cv2.putText(frame, 'T~ ' + str(currentTime), (10, (height - 80)),
                cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 2)
Example #26
Source File: tracking.py From pedestrian-haar-based-detector with GNU General Public License v2.0 | 5 votes |
def drawLabel(self, image):
    cv2.putText(image, self.label, (self.x, self.y - 10),
                cv2.FONT_HERSHEY_PLAIN, 1, self.color, 2)
    return True
Example #27
Source File: camera_opencv.py From object-detection with MIT License | 5 votes |
def object_track(self, img, conf_th=0.3, conf_class=[]):
    output = self.detector.prediction(img)
    df = self.detector.filter_prediction(output, img, conf_th=conf_th,
                                         conf_class=conf_class)
    img = self.detector.draw_boxes(img, df)
    boxes = df[['x1', 'y1', 'x2', 'y2']].values
    objects = self.ct.update(boxes)
    if len(boxes) > 0 and (df['class_name'].str.contains('person').any()):
        for (objectID, centroid) in objects.items():
            text = "ID {}".format(objectID)
            cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
    return img
Example #28
Source File: filters.py From exposure with MIT License | 5 votes |
def visualize_filter(self, debug_info, canvas):
    exposure = debug_info['filter_parameters'][0]
    if canvas.shape[0] == 64:
        cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
        cv2.putText(canvas, 'S %+.2f' % exposure, (8, 48),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0))
    else:
        self.draw_high_res_text('Saturation %+.2f' % exposure, canvas)
Example #29
Source File: cascade.py From object-detection with MIT License | 5 votes |
def draw_boxes(self, image, df):
    for idx, box in df.iterrows():
        color = self.colors[int(box['label'])]
        cv2.rectangle(image,
                      (box['x1'], box['y1']),
                      (box['x2'], box['y2']),
                      color, 6)
        cv2.putText(image, box['label'], (box['x1'], box['y1'] - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    return image