Python cv2.FONT_HERSHEY_SIMPLEX Examples
The following are 30 code examples of cv2.FONT_HERSHEY_SIMPLEX, the OpenCV font-face constant passed to text-drawing functions such as cv2.putText and cv2.getTextSize. (It is an integer constant, not a callable, so it is written without parentheses.)
The original project and source file are noted above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
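Before the examples, a minimal standalone sketch (not taken from any of the projects below; the canvas size and text are arbitrary) showing how the constant is passed to cv2.putText:

import cv2
import numpy as np

# Draw white anti-aliased text on a blank 400x100 BGR canvas.
canvas = np.zeros((100, 400, 3), dtype=np.uint8)
cv2.putText(canvas, "Hello, OpenCV", (10, 60),   # org is the bottom-left of the text
            cv2.FONT_HERSHEY_SIMPLEX,            # font face
            1.0,                                 # font scale
            (255, 255, 255), 2, cv2.LINE_AA)     # BGR color, thickness, line type
cv2.imwrite("hello.png", canvas)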
Example #1
Source File: utils.py From Tensorflow-YOLOv3 with MIT License

def draw_boxes_frame(frame, frame_size, boxes_dicts, class_names, input_size):
    """Draws detected boxes in a video frame"""
    boxes_dict = boxes_dicts[0]
    resize_factor = (frame_size[0] / input_size[1], frame_size[1] / input_size[0])
    for cls in range(len(class_names)):
        boxes = boxes_dict[cls]
        color = (0, 0, 255)
        if np.size(boxes) != 0:
            for box in boxes:
                xy = box[:4]
                xy = [int(xy[i] * resize_factor[i % 2]) for i in range(4)]
                cv2.rectangle(frame, (xy[0], xy[1]), (xy[2], xy[3]), color[::-1], 2)
                (text_width, text_height), baseline = cv2.getTextSize(
                    class_names[cls], cv2.FONT_HERSHEY_SIMPLEX, 0.75, 1)
                cv2.rectangle(frame,
                              (xy[0], xy[1]),
                              (xy[0] + text_width, xy[1] - text_height - baseline),
                              color[::-1], thickness=cv2.FILLED)
                cv2.putText(frame, class_names[cls], (xy[0], xy[1] - baseline),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1)
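The pattern above hinges on cv2.getTextSize, whose return shape is easy to misread. A standalone sketch (the label text here is arbitrary):

# getTextSize returns ((width, height), baseline); baseline is the extra space
# below the text origin reserved for descenders such as 'g' or 'y'.
(w, h), baseline = cv2.getTextSize("person", cv2.FONT_HERSHEY_SIMPLEX, 0.75, 1)
# A filled box from (x, y - h - baseline) to (x + w, y) therefore covers text
# drawn with its origin at (x, y - baseline), as in the example above.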
Example #2
Source File: vis.py From Parsing-R-CNN with MIT License

def vis_class(img, pos, class_str, bg_color):
    """Visualizes the class."""
    font_color = cfg.VIS.SHOW_CLASS.COLOR
    font_scale = cfg.VIS.SHOW_CLASS.FONT_SCALE
    x0, y0 = int(pos[0]), int(pos[1])
    # Compute text size.
    txt = class_str
    font = cv2.FONT_HERSHEY_SIMPLEX
    ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
    # Place text background.
    back_tl = x0, y0 - int(1.3 * txt_h)
    back_br = x0 + txt_w, y0
    cv2.rectangle(img, back_tl, back_br, bg_color, -1)
    # Show text.
    txt_tl = x0, y0 - int(0.3 * txt_h)
    cv2.putText(img, txt, txt_tl, font, font_scale, font_color, lineType=cv2.LINE_AA)
    return img
Example #3
Source File: util.py From frigate with GNU Affero General Public License v3.0

def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info,
                        thickness=2, color=None, position='ul'):
    if color is None:
        color = (0, 0, 255)
    display_text = "{}: {}".format(label, info)
    cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
    font_scale = 0.5
    font = cv2.FONT_HERSHEY_SIMPLEX
    # get the width and height of the text box
    size = cv2.getTextSize(display_text, font, fontScale=font_scale, thickness=2)
    text_width = size[0][0]
    text_height = size[0][1]
    line_height = text_height + size[1]
    # set the text start position
    if position == 'ul':
        text_offset_x = x_min
        text_offset_y = 0 if y_min < line_height else y_min - (line_height + 8)
    elif position == 'ur':
        text_offset_x = x_max - (text_width + 8)
        text_offset_y = 0 if y_min < line_height else y_min - (line_height + 8)
    elif position == 'bl':
        text_offset_x = x_min
        text_offset_y = y_max
    elif position == 'br':
        text_offset_x = x_max - (text_width + 8)
        text_offset_y = y_max
    # make the coords of the box with a small padding of two pixels
    textbox_coords = ((text_offset_x, text_offset_y),
                      (text_offset_x + text_width + 2, text_offset_y + line_height))
    cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
    cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3),
                font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
Example #4
Source File: main.py From ICDAR-2019-SROIE with MIT License

def draw():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("for_task3/*.txt")]
    txt_files = [s + ".txt" for s in filenames]
    for txt in txt_files:
        image = cv2.imread('test_original/' + txt.split('/')[1].split('.')[0] + '.jpg',
                           cv2.IMREAD_COLOR)
        with open(txt, 'r') as txt_file:
            for line in csv.reader(txt_file):
                box = [int(string, 10) for string in line[0:8]]
                if len(line) < 9:
                    print(txt)
                cv2.rectangle(image, (box[0], box[1]), (box[4], box[5]), (0, 255, 0), 2)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(image, line[8].upper(), (box[0], box[1]), font, 0.5,
                            (0, 0, 255), 1, cv2.LINE_AA)
        cv2.imwrite('task2_result_draw/' + txt.split('/')[1].split('.')[0] + '.jpg', image)
Example #5
Source File: demo.py From tf_ctpn with MIT License

def draw_rpn_boxes(img, img_name, boxes, scores, im_scale, nms, save_dir):
    """
    :param boxes: [(x1, y1, x2, y2)]
    """
    boxes = recover_scale(boxes, im_scale)
    base_name = img_name.split('/')[-1]
    color = (0, 255, 0)
    out = img.copy()
    if nms:
        boxes, scores = TextDetector.pre_process(boxes, scores)
        file_name = "%s_rpn_nms.jpg" % base_name
    else:
        file_name = "%s_rpn.jpg" % base_name
    for i, box in enumerate(boxes):
        cv2.rectangle(out, (box[0], box[1]), (box[2], box[3]), color, 2)
        cx = int((box[0] + box[2]) / 2)
        cy = int((box[1] + box[3]) / 2)
        cv2.putText(out, "%.01f" % scores[i], (cx, cy),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.2, (255, 0, 0))
    cv2.imwrite(os.path.join(save_dir, file_name), out)
Example #6
Source File: net.py From exposure with MIT License

def draw_value_reward_score(self, img, value, reward, score):
    img = img.copy()
    # Average with 0.5 for semi-transparent background
    img[:14] = img[:14] * 0.5 + 0.25
    img[50:] = img[50:] * 0.5 + 0.25
    if self.cfg.gan == 'ls':
        red = -np.tanh(float(score) / 1) * 0.5 + 0.5
    else:
        red = -np.tanh(float(score) / 10.0) * 0.5 + 0.5
    top = '%+.2f %+.2f' % (value, reward)
    cv2.putText(img, top, (3, 7), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
                (1.0, 1.0 - red, 1.0 - red))
    score = '%+.3f' % score
    cv2.putText(img, score, (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                (1.0, 1.0 - red, 1.0 - red))
    return img
Example #7
Source File: utils.py From generative_adversary with GNU General Public License v3.0

def label_images(images, labels):
    font = cv.FONT_HERSHEY_SIMPLEX
    new_imgs = []
    for i, img in enumerate(images):
        new_img = ((img.copy() + 1.) * 127.5).astype(np.uint8)
        if new_img.shape[-1] == 3:
            new_img = new_img[..., ::-1]
            new_img = cv.resize(new_img, (100, 100), interpolation=cv.INTER_LINEAR)
            new_img = cv.putText(new_img, str(labels[i]), (10, 30), font, 1,
                                 (255, 255, 255), 2, cv.LINE_AA)
            new_img = cv.copyMakeBorder(new_img, top=2, bottom=2, left=2, right=2,
                                        borderType=cv.BORDER_CONSTANT,
                                        value=(255, 255, 255))
        else:
            new_img = np.squeeze(new_img)
            new_img = cv.resize(new_img, (100, 100), interpolation=cv.INTER_LINEAR)
            new_img = cv.putText(new_img, str(labels[i]), (10, 30), font, 1,
                                 (255), 2, cv.LINE_AA)
            new_img = new_img[..., None]
        new_img = (new_img / 127.5 - 1.0).astype(np.float32)
        new_imgs.append(new_img[..., ::-1])
    return np.stack(new_imgs, axis=0)
Example #8
Source File: debugger.py From centerpose with MIT License

def add_coco_bbox(self, bbox, cat, conf=1, show_txt=True, img_id='default'):
    bbox = np.array(bbox, dtype=np.int32)
    # cat = (int(cat) + 1) % 80
    cat = int(cat)
    # print('cat', cat, self.names[cat])
    c = self.colors[cat][0][0].tolist()
    if self.theme == 'white':
        c = (255 - np.array(c)).tolist()
    txt = '{}{:.1f}'.format(self.names[cat], conf)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
    cv2.rectangle(self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2)
    if show_txt:
        cv2.rectangle(self.imgs[img_id],
                      (bbox[0], bbox[1] - cat_size[1] - 2),
                      (bbox[0] + cat_size[0], bbox[1] - 2), c, -1)
        cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2),
                    font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
Example #9
Source File: util.py From exposure with MIT License

def vis_images_and_indexs(images, features, dir, name):
    # indexs = np.reshape(indexs, (len(indexs),))
    # print('visualizing images and indexs: ', images.shape, indexs.shape)
    id_imgs = []
    for feature in features:
        img = np.ones((64, 64, 3))
        cv2.putText(img, str(feature), (4, 33),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.25, (1.0, 0.0, 0.0))
        id_imgs.append(img)
    id_imgs = np.stack(id_imgs, axis=0)
    # print('id imgs: ', id_imgs.shape)
    vis_imgs = np.vstack([images, id_imgs])
    image = make_image_grid(vis_imgs, per_row=images.shape[0])
    vis_dir = dir
    try:
        os.mkdir(vis_dir)
    except:
        pass
    cv2.imwrite(os.path.join(vis_dir, name + '.png'), image[:, :, ::-1] * 255.0)
Example #10
Source File: QR_Scaner1.py From OpenCV-Python-Tutorial with MIT License

def main():
    fp = 'macbookPro.jpg'
    # image = Image.open(fp)
    # image.show()
    image = cv2.imread(fp)
    barcodes = decode(image)
    decoded = barcodes[0]
    print(decoded)

    # url
    url: bytes = decoded.data
    url = url.decode()
    print(url)

    # rect
    rect = decoded.rect
    print(rect)  # Rect(left=19, top=19, width=292, height=292)

    # loop over the detected barcodes
    for barcode in barcodes:
        # extract the bounding box location of the barcode and draw the
        # bounding box surrounding the barcode on the image
        (x, y, w, h) = barcode.rect
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # the barcode data is a bytes object so if we want to draw it on
        # our output image we need to convert it to a string first
        barcodeData = barcode.data.decode("utf-8")
        barcodeType = barcode.type

        # draw the barcode data and barcode type on the image
        text = "{} ({})".format(barcodeData, barcodeType)
        cv2.putText(image, text, (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

        # print the barcode type and data to the terminal
        print("[INFO] Found {} barcode: {}".format(barcodeType, barcodeData))

    # show the output image
    cv2.imshow("Image", image)
    # cv2.imwrite('macbook_qr_rect.jpg', image)
    cv2.waitKey(0)  # press any key to exit
Example #11
Source File: main_engine.py From TripletLossFace with MIT License

def show_who_in_image(self, path, get_face: bool = True, show: bool = True,
                      turn_rgb: bool = True):
    min_im, image, all_frames = self.index_image(path, get_face)
    for (confidance, who), frame in zip(min_im, all_frames):
        try:
            color = self.colors[str(who)]
            x1, x2, y1, y2 = frame
            cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
            cv2.putText(image,
                        f"id: {str(who)}- conf:{abs(round(float(confidance), 2))}",
                        (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3,
                        cv2.LINE_AA)  # -{round(float(confidance), 2)}
        except KeyError:
            continue
    if turn_rgb:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if show:
        cv2.imshow("a", image)
        cv2.waitKey(1)
    return image, min_im, all_frames
Example #12
Source File: predictor.py From Res2Net-maskrcnn with MIT License

def overlay_class_names(self, image, predictions):
    """
    Adds detected class names and scores in the positions defined by the
    top-left corner of the predicted bounding box

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `scores` and `labels`.
    """
    scores = predictions.get_field("scores").tolist()
    labels = predictions.get_field("labels").tolist()
    labels = [self.CATEGORIES[i] for i in labels]
    boxes = predictions.bbox

    template = "{}: {:.2f}"
    for box, score, label in zip(boxes, scores, labels):
        x, y = box[:2]
        s = template.format(label, score)
        cv2.putText(image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1)

    return image
Example #13
Source File: examples.py From buzzard with Apache License 2.0

def create_text_mask(text, font_face=cv2.FONT_HERSHEY_SIMPLEX, font_scale=2, thickness=2):
    """Build a binary image with text drawn in it"""
    color = [1]
    (w, h), _ = cv2.getTextSize(
        text, fontFace=font_face, fontScale=font_scale, thickness=thickness
    )
    border = 30
    dst = np.zeros((h + border, w + border), dtype='uint8')
    cv2.putText(
        dst, text=text, org=(border // 2, h + border // 2),
        fontFace=font_face, fontScale=font_scale, thickness=thickness, color=color
    )
    ymask = dst.any(1).cumsum()
    ymask = (ymask != 0) & (ymask != ymask[-1])
    xmask = dst.any(0).cumsum()
    xmask = (xmask != 0) & (xmask != xmask[-1])
    dst = dst[ymask][:, xmask]
    return dst.astype(bool)
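create_text_mask above is self-contained apart from the cv2 and numpy imports, so a quick usage sketch is possible (the exact cropped shape depends on the installed OpenCV build's font metrics):

import cv2
import numpy as np

mask = create_text_mask("buzzard")  # assumes the function above is in scope
print(mask.dtype, mask.shape)       # bool, tightly cropped (height, width)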
Example #14
Source File: image_process.py From Advanced_Lane_Lines with MIT License

def test_yellow_white_thresh_images(src, dst, y_low=(10, 50, 0), y_high=(30, 255, 255),
                                    w_low=(180, 180, 180), w_high=(255, 255, 255)):
    """
    apply the thresh to images in a src folder and output to dst folder
    """
    image_files = glob.glob(src + "*.jpg")
    for idx, file in enumerate(image_files):
        print(file)
        img = mpimg.imread(file)
        image_threshed = yellow_white_thresh(img, y_low, y_high, w_low, w_high)
        file_name = file.split("\\")[-1]
        print(file_name)
        out_image = dst + file_name
        print(out_image)
        # convert binary to RGB (*255) for visualization; a value of 1 is not
        # visible after writing to file
        image_threshed = cv2.cvtColor(image_threshed * 255, cv2.COLOR_GRAY2RGB)

        # HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        # V = HSV[:,:,2]
        # brightness = np.mean(V)
        # info_str = "brightness is: {}".format(int(brightness))
        # cv2.putText(image_threshed, info_str, (50,700), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,255,255), 2)

        cv2.imwrite(out_image, image_threshed)
Example #15
Source File: train.py From TFFRCNN with MIT License

def _draw_boxes_to_image(im, res):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),
              (151, 0, 255), (243, 223, 48), (0, 117, 255),
              (58, 184, 14), (86, 67, 140), (121, 82, 6),
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None:
            continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)),
                          colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            cv2.putText(image, text, (x1, y1), font, 0.6, colors[ind % len(colors)], 1)
            cnt = (cnt + 1)
    return image
Example #16
Source File: kitti2pascalvoc.py From TFFRCNN with MIT License

def _draw_on_image(img, objs, class_sets_dict):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),
              (151, 0, 255), (243, 223, 48), (0, 117, 255),
              (58, 184, 14), (86, 67, 140), (121, 82, 6),
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    for ind, obj in enumerate(objs):
        if obj['box'] is None:
            continue
        x1, y1, x2, y2 = obj['box'].astype(int)
        cls_id = class_sets_dict[obj['class']]
        if obj['class'] == 'dontcare':
            cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 1)
            continue
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)),
                      colors[cls_id % len(colors)], 1)
        text = '{:s}*|'.format(obj['class'][:3]) if obj['difficult'] == 1 \
            else '{:s}|'.format(obj['class'][:3])
        text += '{:.1f}|'.format(obj['truncation'])
        text += str(obj['occlusion'])
        cv2.putText(img, text, (x1 - 2, y2 - 2), font, 0.5, (255, 0, 255), 1)
    return img
Example #17
Source File: inference_engine.py From R2CNN.pytorch with MIT License

def overlay_class_names(self, image, predictions):
    """
    Adds detected class names and scores in the positions defined by the
    top-left corner of the predicted bounding box

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `scores` and `labels`.
    """
    scores = predictions.get_field("scores").tolist()
    labels = predictions.get_field("labels").tolist()
    labels = [self.CATEGORIES[i] for i in labels]
    boxes = predictions.bbox

    template = "{}: {:.2f}"
    for box, score, label in zip(boxes, scores, labels):
        x, y = box[:2]
        s = template.format(label, score)
        cv2.putText(image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1)

    return image
Example #18
Source File: predictor.py From R2CNN.pytorch with MIT License

def overlay_class_names(self, image, predictions):
    """
    Adds detected class names and scores in the positions defined by the
    top-left corner of the predicted bounding box

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `scores` and `labels`.
    """
    scores = predictions.get_field("scores").tolist()
    labels = predictions.get_field("labels").tolist()
    labels = [self.CATEGORIES[i] for i in labels]
    boxes = predictions.bbox

    template = "{}: {:.2f}"
    for box, score, label in zip(boxes, scores, labels):
        x, y = box[:2]
        s = template.format(label, score)
        cv2.putText(image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1)

    return image
Example #19
Source File: chapter2.py From OpenCV-Computer-Vision-Projects-with-Python with MIT License

def ProcessFrame(self, frame):
    # segment arm region
    segment = self.SegmentArm(frame)

    # make a copy of the segmented image to draw on
    draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)

    # draw some helpers for correctly placing hand
    # (integer division keeps the coordinates valid ints under Python 3)
    cv2.circle(draw, (self.imgWidth // 2, self.imgHeight // 2), 3, [255, 102, 0], 2)
    cv2.rectangle(draw, (self.imgWidth // 3, self.imgHeight // 3),
                  (self.imgWidth * 2 // 3, self.imgHeight * 2 // 3), [255, 102, 0], 2)

    # find the hull of the segmented area, and based on that find the
    # convexity defects
    [contours, defects] = self.FindHullDefects(segment)

    # detect the number of fingers depending on the contours and convexity
    # defects; draw defects that belong to fingers green, others red
    [nofingers, draw] = self.DetectNumberFingers(contours, defects, draw)

    # print number of fingers on image
    cv2.putText(draw, str(nofingers), (30, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    return draw
Example #20
Source File: face_recognition_tester.py From TripletLossFace with MIT License

def show_who_in_image(self, path, get_face: bool = True, show: bool = True,
                      turn_rgb: bool = True):
    min_im, image, all_frames = self.detect_which(path, get_face)
    for (confidance, who), frame in zip(min_im, all_frames):
        color = self.colors[who]
        x1, x2, y1, y2 = frame
        cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
        cv2.putText(image, f"{who}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    1, color, 3, cv2.LINE_AA)  # -{round(float(confidance), 2)}
    if turn_rgb:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if show:
        cv2.imshow("a", image)
        cv2.waitKey(0)
    return image
Example #21
Source File: train.py From RetinaNet with MIT License

def _draw_boxes_to_image(im, res):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),
              (151, 0, 255), (243, 223, 48), (0, 117, 255),
              (58, 184, 14), (86, 67, 140), (121, 82, 6),
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None:
            continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)),
                          colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            cv2.putText(image, text, (x1, y1), font, 0.6, colors[ind % len(colors)], 1)
            cnt = (cnt + 1)
    return image
Example #22
Source File: utils.py From automl-video-ondevice with Apache License 2.0

def render_bbox(image, annotations):
    """Renders a visualization of bounding box annotations.

    Args:
        image: numpy array of image bytes.
        annotations: the annotations to render onto the image.

    Returns:
        numpy array of image, with bounding box drawn.
    """
    height, width, _ = image.shape
    for annotation in annotations:
        x0, y0, x1, y1 = (annotation.bbox.left, annotation.bbox.top,
                          annotation.bbox.right, annotation.bbox.bottom)
        # Converts coordinates from relative [0,1] space to [width,height]
        x0, y0, x1, y1 = (int(x0 * width), int(y0 * height),
                          int(x1 * width), int(y1 * height))
        percent = int(100 * annotation.confidence_score)
        label = '%d%% %s' % (percent, annotation.class_name)
        if annotation.track_id == -1:
            color = (0, 255, 0)
        else:
            color = colors[annotation.track_id % len(colors)]
        image = cv2.rectangle(image, (x0, y0), (x1, y1), color, 2)
        image = cv2.putText(image, label, (x0, y0 + 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
    return image
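The relative-to-absolute coordinate conversion above is worth isolating. A minimal sketch with arbitrary values:

# Relative corners in [0, 1] scale to pixels by multiplying x by the image
# width and y by the image height, truncating to int for OpenCV drawing calls.
width, height = 640, 480
x0, y0, x1, y1 = 0.25, 0.10, 0.75, 0.90
px0, py0 = int(x0 * width), int(y0 * height)
px1, py1 = int(x1 * width), int(y1 * height)
# (px0, py0, px1, py1) == (160, 48, 480, 432)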
Example #23
Source File: test.py From yolo_tensorflow with MIT License

def draw_result(self, img, result):
    for i in range(len(result)):
        x = int(result[i][1])
        y = int(result[i][2])
        w = int(result[i][3] / 2)
        h = int(result[i][4] / 2)
        cv2.rectangle(img, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
        cv2.rectangle(img, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
        lineType = cv2.LINE_AA if cv2.__version__ > '3' else cv2.CV_AA
        cv2.putText(
            img, result[i][0] + ' : %.2f' % result[i][5],
            (x - w + 5, y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
            (0, 0, 0), 1, lineType)
Example #24
Source File: demo.py From EfficientDet.Pytorch with MIT License

def camera(self):
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Unable to open camera")
        exit(-1)
    count_tfps = 1
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        res, img = cap.read()
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = curr_fps
            curr_fps = 0
        if res:
            show_image = self.process(img=img)
            cv2.putText(show_image, "FPS: " + str(fps), (10, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (204, 51, 51), 2)
            cv2.imshow("Detection", show_image)
            k = cv2.waitKey(1)
            if k == 27:
                break
        else:
            print("Unable to read image")
            exit(-1)
        count_tfps += 1
    cap.release()
    cv2.destroyAllWindows()
Example #25
Source File: violation_detection.py From Traffic-Rules-Violation-Detection with GNU General Public License v3.0

def __init__(self, vid_file):
    # vid_file = 'videos/traffic.avi'
    self.cnt_up = 0
    self.cnt_down = 0

    self.zone1 = (100, 200)
    self.zone2 = (450, 100)

    self.cap = cv2.VideoCapture(vid_file)  # insane

    # Capture the properties of VideoCapture to console
    # for i in range(19):
    #     print(i, self.cap.get(i))

    self.w = self.cap.get(3)
    self.h = self.cap.get(4)
    self.frameArea = self.h * self.w
    self.areaTH = self.frameArea / 200
    print('Area Threshold', self.areaTH)

    # Input/Output Lines
    self.line_up = int(2 * (self.h / 5))
    self.line_down = int(3 * (self.h / 5))
    self.up_limit = int(1 * (self.h / 5))
    self.down_limit = int(4 * (self.h / 5))

    self.line_down_color = (255, 0, 0)
    self.line_up_color = (0, 0, 255)

    self.pt1 = [0, self.line_down]
    self.pt2 = [self.w, self.line_down]
    self.pts_L1 = np.array([self.pt1, self.pt2], np.int32)
    self.pts_L1 = self.pts_L1.reshape((-1, 1, 2))

    self.pt3 = [0, self.line_up]
    self.pt4 = [self.w, self.line_up]
    self.pts_L2 = np.array([self.pt3, self.pt4], np.int32)
    self.pts_L2 = self.pts_L2.reshape((-1, 1, 2))

    self.pt5 = [0, self.up_limit]
    self.pt6 = [self.w, self.up_limit]
    self.pts_L3 = np.array([self.pt5, self.pt6], np.int32)
    self.pts_L3 = self.pts_L3.reshape((-1, 1, 2))

    self.pt7 = [0, self.down_limit]
    self.pt8 = [self.w, self.down_limit]
    self.pts_L4 = np.array([self.pt7, self.pt8], np.int32)
    self.pts_L4 = self.pts_L4.reshape((-1, 1, 2))

    # Create the background subtractor
    self.fgbg = cv2.createBackgroundSubtractorMOG2()

    self.kernelOp = np.ones((3, 3), np.uint8)
    self.kernelOp2 = np.ones((5, 5), np.uint8)
    self.kernelCl = np.ones((11, 11), np.uint8)

    # Variables
    self.font = cv2.FONT_HERSHEY_SIMPLEX
    self.vehicles = []
    self.max_p_age = 5
    self.pid = 1
Example #26
Source File: visual_aug.py From EfficientDet.Pytorch with MIT License

def visualize_bbox(img, bbox, class_id, class_idx_to_name, color=BOX_COLOR, thickness=2):
    x_min, y_min, x_max, y_max = bbox
    x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)
    cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)
    # class_name = class_idx_to_name[class_id]
    # ((text_width, text_height), _) = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)
    # cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)), (x_min + text_width, y_min), BOX_COLOR, -1)
    # cv2.putText(img, class_name, (x_min, y_min - int(0.3 * text_height)),
    #             cv2.FONT_HERSHEY_SIMPLEX, 0.35, TEXT_COLOR, lineType=cv2.LINE_AA)
    return img
Example #27
Source File: vision2.py From RubiksCube-TwophaseSolver with GNU General Public License v3.0

def display_colorname(bgrcap, p):
    """Display the colornames on the webcam picture."""
    p = p.astype(np.uint16)
    _, col = getcolor(p)
    if col in ('blue', 'green', 'red'):
        txtcol = (255, 255, 255)
    else:
        txtcol = (0, 0, 0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    tz = cv2.getTextSize(col, font, 0.4, 1)[0]
    cv2.putText(bgrcap, col, tuple(p - (tz[0] // 2, -tz[1] // 2)), font, 0.4, txtcol, 1)
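The centering arithmetic above is compact; unrolled with an arbitrary anchor point, it reads:

# To center text on a point, shift the origin left by half the text width and
# down by half the text height (putText's origin is the baseline-left corner).
(tw, th), _ = cv2.getTextSize("green", cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
px, py = 100, 100
org = (px - tw // 2, py + th // 2)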
Example #28
Source File: utils.py From pytorch-0.4-yolov3 with MIT License

def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
    import cv2
    colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1],
                                [0, 1, 0], [1, 1, 0], [1, 0, 0]])

    def get_color(c, x, max_val):
        ratio = float(x) / max_val * 5
        i = int(math.floor(ratio))
        j = int(math.ceil(ratio))
        ratio = ratio - i
        r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
        return int(r * 255)

    width = img.shape[1]
    height = img.shape[0]
    for i in range(len(boxes)):
        box = boxes[i]
        # boxes are (cx, cy, w, h) in [0, 1]; convert to pixel corner coordinates
        x1 = int(round((box[0] - box[2] / 2.0) * width))
        y1 = int(round((box[1] - box[3] / 2.0) * height))
        x2 = int(round((box[0] + box[2] / 2.0) * width))
        y2 = int(round((box[1] + box[3] / 2.0) * height))
        if color:
            rgb = color
        else:
            rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            # print('%s: %f' % (class_names[cls_id], cls_conf))
            classes = len(class_names)
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            if color is None:
                rgb = (red, green, blue)
            img = cv2.putText(img, class_names[cls_id], (x1, y1),
                              cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
        img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 1)
    if savename:
        print("save plot results to %s" % savename)
        cv2.imwrite(savename, img)
    return img
Example #29
Source File: annotator.py From simba with GNU Lesser General Public License v3.0

def dpkAnnotator(dpkini, annotationfile):
    config = ConfigParser()
    configFile = str(dpkini)
    config.read(configFile)
    project_path = config.get('general DPK settings', 'project_folder')
    annotationsPath = annotationfile
    bodyPartsListPath = os.path.join(project_path, 'skeleton.csv')
    app = Annotator(datapath=annotationsPath, dataset='images',
                    skeleton=bodyPartsListPath, shuffle_colors=False, text_scale=1)
    im = np.zeros((300, 600, 3))
    cv2.putText(im, 'Instructions', (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
    cv2.putText(im, '+- = rescale image by +/- 10%', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(im, 'left mouse button = move active keypoint to cursor location', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(im, 'WASD = move active keypoint 1px or 10px', (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(im, 'JL = next or previous image', (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(im, '<> = jump 10 images forward or backward', (10, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(im, 'I,K or tab, shift+tab = switch active keypoint', (10, 140), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(im, 'R = mark image as unannotated ("reset")', (10, 160), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(im, 'F = mark image as annotated ("finished")', (10, 180), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(im, 'esc or Q = quit', (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(im, 'Tap tab to begin', (10, 240), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    cv2.imshow('Instructions', im)
    k = cv2.waitKey(0)
    while (1):
        cv2.imshow('Instructions', im)
        k = cv2.waitKey(0)
        app.run()
        if k == 27:  # Esc key to stop
            print('Annotations saved in: ' + str(annotationfile))
            break
Example #30
Source File: mark_detector.py From face_landmark_dnn with MIT License

def draw_all_result(self, image):
    """Draw the detection result on image"""
    for facebox, conf in self.detection_result:
        cv2.rectangle(image, (facebox[0], facebox[1]),
                      (facebox[2], facebox[3]), (0, 255, 0))
        label = "face: %.4f" % conf
        label_size, base_line = cv2.getTextSize(
            label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        cv2.rectangle(image, (facebox[0], facebox[1] - label_size[1]),
                      (facebox[0] + label_size[0], facebox[1] + base_line),
                      (0, 255, 0), cv2.FILLED)
        cv2.putText(image, label, (facebox[0], facebox[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
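Taken together, these examples repeat one idiom: measure the label with cv2.getTextSize, draw a filled background with cv2.rectangle, then render the text with cv2.putText. A distilled helper in that spirit (a sketch of the common pattern, not code from any project above):

import cv2

def draw_label(img, text, org, bg_color=(0, 255, 0),
               font=cv2.FONT_HERSHEY_SIMPLEX, scale=0.5, thickness=1):
    """Draw text on a filled background box; org is the baseline-left corner."""
    (w, h), baseline = cv2.getTextSize(text, font, scale, thickness)
    x, y = org
    cv2.rectangle(img, (x, y - h - baseline), (x + w, y + baseline),
                  bg_color, cv2.FILLED)
    cv2.putText(img, text, (x, y), font, scale, (0, 0, 0), thickness, cv2.LINE_AA)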