Python cv2.FONT_HERSHEY_TRIPLEX Examples
The following are 11 code examples of cv2.FONT_HERSHEY_TRIPLEX, one of OpenCV's built-in Hershey font-face constants, which is passed to text-drawing functions such as cv2.putText and cv2.getTextSize. Each example comes from the project and source file named above it. You may also want to check out all available functions/classes of the module cv2, or try the search function.
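Before the project examples, here is a minimal, self-contained sketch of the typical call pattern. It is illustrative only: the canvas size, sample text, and window name are placeholders, not taken from any project below.

import cv2
import numpy as np

# Blank canvas to draw on (size chosen arbitrarily for this demo).
canvas = np.zeros((200, 600, 3), np.uint8)

text = "FONT_HERSHEY_TRIPLEX"
# getTextSize returns ((width, height), baseline) for the given font settings.
(text_width, text_height), baseline = cv2.getTextSize(
    text, cv2.FONT_HERSHEY_TRIPLEX, fontScale=1.0, thickness=1)

# putText's org argument is the bottom-left corner of the text string.
org = ((canvas.shape[1] - text_width) // 2,
       (canvas.shape[0] + text_height) // 2)
cv2.putText(canvas, text, org, cv2.FONT_HERSHEY_TRIPLEX, 1.0, (255, 255, 255), 1)

cv2.imshow("demo", canvas)
cv2.waitKey(0)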
Example #1
Source File: net_utils.py From One-Shot-Object-Detection with MIT License
def vis_detections(im, class_name, dets, thresh=0.5):
    """Visual debugging of detections."""
    for i in range(np.minimum(10, dets.shape[0])):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        score = dets[i, -1]
        if score > 0.8:
            cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 110, 255), 5)
            text = '%.3f' % score
            (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_TRIPLEX,
                                                        fontScale=1.2, thickness=2)[0]
            cv2.rectangle(im, (bbox[0], bbox[1]),
                          (bbox[0] + text_width, bbox[1] + text_height), (0, 255, 251), -1)
            cv2.putText(im, text, (bbox[0], bbox[1] + text_height),
                        cv2.FONT_HERSHEY_TRIPLEX, 1.2, (0, 0, 0), thickness=2)
    return im
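A small aside on the getTextSize call above: it returns a ((width, height), baseline) pair, and the snippet keeps only the size tuple via [0]. The labels here are numeric scores, so descenders never occur, but for arbitrary text a variant that keeps the baseline lets the background box cover letters like 'g' or 'y' as well. A hedged sketch, reusing the variables from the example:

(text_width, text_height), baseline = cv2.getTextSize(
    text, cv2.FONT_HERSHEY_TRIPLEX, fontScale=1.2, thickness=2)
cv2.rectangle(im, (bbox[0], bbox[1]),
              (bbox[0] + text_width, bbox[1] + text_height + baseline),
              (0, 255, 251), -1)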
Example #2
Source File: boxing.py From ICDAR-2019-SROIE with MIT License
def draw():
    f = open(box_path + 'jpglist.txt')

    # read each image and its label
    line = f.readline()
    line_num = 0
    while line:
        line_num = line_num + 1
        print('Image:', line_num)
        name = line.strip('\n')
        img = cv2.imread(image_path + name)
        img_size = img.shape
        img_size = img_size[0] * img_size[1]

        # read each coordinate and draw box
        # NOTE: str.strip('.jpg') strips characters from both ends, not the
        # suffix; it is safe only while names never start or end with
        # 'j', 'p', 'g', or '.'.
        f_txt = open(image_path + name.strip('.jpg') + '.txt')
        #line_txt = f_txt.readline()  # pass the first ROI information
        line_txt = f_txt.readline()
        while line_txt:
            coor = line_txt.split(',')
            x1 = int(coor[0].strip('\''))
            y1 = int(coor[1].strip('\''))
            x3 = int(coor[4].strip('\''))
            y3 = int(coor[5].strip('\''))
            text = coor[8].strip('\n').strip('\'')
            text_show = text + '(' + str(x1) + ',' + str(y1) + ')'
            cv2.rectangle(img, (x1, y1), (x3, y3), (255, 0, 0), 1)
            #cv2.putText(img, text_show, (x1, y1 - 1),
            #            cv2.FONT_HERSHEY_TRIPLEX, 0.35, (0, 0, 255), 1)
            line_txt = f_txt.readline()
        cv2.imwrite(box_path + name, img)
        line = f.readline()
    # img = cv2.imshow('image', img)
    # cv2.waitKey(0)
Example #3
Source File: calibrate.py From depthai with MIT License
def show_failed_capture_frame(self):
    width = int(self.width * self.output_scale_factor)
    height = int(self.height * self.output_scale_factor)
    info_frame = np.zeros((height, width, 3), np.uint8)
    print("py: Capture failed, unable to find chessboard! Fix position and press spacebar again")

    def show(position, text):
        cv2.putText(info_frame, text, position, cv2.FONT_HERSHEY_TRIPLEX, 0.7, (0, 255, 0))

    show((50, int(height / 2 - 40)), "Capture failed, unable to find chessboard!")
    show((60, int(height / 2 + 40)), "Fix position and press spacebar again")

    # cv2.imshow("left", info_frame)
    # cv2.imshow("right", info_frame)
    cv2.imshow("left + right", info_frame)
    cv2.waitKey(2000)
Example #4
Source File: summary_utils.py From fc4 with MIT License
def put_text_on_image(image, lines):
    for i, line in enumerate(lines[::-1]):
        text_width, text_height = cv2.getTextSize(line, cv2.FONT_HERSHEY_TRIPLEX, 0.4, 1)[0]
        cv2.putText(image, line,
                    (image.shape[1] - text_width, image.shape[0] - 2 * i * text_height - 10),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.4, [0, 0, 0])
Example #5
Source File: visualization.py From mmskeleton with Apache License 2.0
def put_text(img, text, position, scale_factor=1):
    t_w, t_h = cv2.getTextSize(text, cv2.FONT_HERSHEY_TRIPLEX, scale_factor, thickness=1)[0]
    H, W, _ = img.shape
    position = (int(W * position[1] - t_w * 0.5), int(H * position[0] - t_h * 0.5))
    params = (position, cv2.FONT_HERSHEY_TRIPLEX, scale_factor, (255, 255, 255))
    cv2.putText(img, text, *params)
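Worth noting: position here is a normalized (row_fraction, col_fraction) pair — position[0] scales with the image height and position[1] with the width — and the text is centered horizontally on that point and placed just above it vertically. A minimal usage sketch with a placeholder image:

import cv2
import numpy as np

frame = np.zeros((480, 640, 3), np.uint8)  # placeholder image
put_text(frame, "hello", (0.5, 0.5))       # draw near the center
put_text(frame, "label", (0.1, 0.1))       # draw near the top-left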
Example #6
Source File: visualization.py From st-gcn with BSD 2-Clause "Simplified" License
def put_text(img, text, position, scale_factor=1):
    t_w, t_h = cv2.getTextSize(
        text, cv2.FONT_HERSHEY_TRIPLEX, scale_factor, thickness=1)[0]
    H, W, _ = img.shape
    position = (int(W * position[1] - t_w * 0.5),
                int(H * position[0] - t_h * 0.5))
    params = (position, cv2.FONT_HERSHEY_TRIPLEX, scale_factor, (255, 255, 255))
    cv2.putText(img, text, *params)
Example #7
Source File: visualizer.py From higan with MIT License
def add_text_to_image(image,
                      text='',
                      position=None,
                      font=cv2.FONT_HERSHEY_TRIPLEX,
                      font_size=1.0,
                      line_type=cv2.LINE_8,
                      line_width=1,
                      color=(255, 255, 255)):
    """Overlays text on given image.

    NOTE: The input image is assumed to be with `RGB` channel order.

    Args:
        image: The image to overlay text on.
        text: Text content to overlay on the image. (default: '')
        position: Target position (bottom-left corner) to add text. If not set,
            center of the image will be used by default. (default: None)
        font: Font of the text added. (default: cv2.FONT_HERSHEY_TRIPLEX)
        font_size: Font size of the text added. (default: 1.0)
        line_type: Line type used to depict the text. (default: cv2.LINE_8)
        line_width: Line width used to depict the text. (default: 1)
        color: Color of the text added in `RGB` channel order.
            (default: (255, 255, 255))

    Returns:
        An image with target text overlaid on.
    """
    if image is None or not text:
        return image
    cv2.putText(img=image, text=text, org=position,
                fontFace=font, fontScale=font_size, color=color,
                thickness=line_width, lineType=line_type,
                bottomLeftOrigin=False)
    return image
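One caveat: despite the docstring's claim that the image center is used when position is unset, the body passes position straight through as org, so calling it with position=None would most likely raise inside cv2.putText on current OpenCV builds. A cautious usage sketch with placeholder values:

import cv2
import numpy as np

image = np.zeros((256, 256, 3), np.uint8)  # placeholder RGB image
# Pass an explicit (x, y) bottom-left corner rather than relying on the
# documented-but-unimplemented center default.
image = add_text_to_image(image, text="sample", position=(10, 240))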
Example #8
Source File: utils.py From gender-age-classification with Mozilla Public License 2.0
def draw_boxes_v2(image, boxes, labels, listPrediction):
    # `array` is assumed to come from `from numpy import array` in the source file.
    i = 0
    for box in boxes:
        xmin = int((box.x - box.w / 2) * image.shape[1])
        xmax = int((box.x + box.w / 2) * image.shape[1])
        ymin = int((box.y - box.h / 2) * image.shape[0])
        ymax = int((box.y + box.h / 2) * image.shape[0])

        cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)

        temp = array(listPrediction[i][3])
        if temp[0] > temp[1]:
            gender = "Female"
        else:
            gender = "Male"
        # The original also built the label as a bare expression with no effect:
        # (gender + ' ' + str(listPrediction[i][1]) + ' ' + str(listPrediction[i][0]))
        a = int((xmin + xmax) / 2) - 10  # computed but unused
        cv2.putText(image,
                    gender + ' ' + str(listPrediction[i][1]),
                    (xmin, ymax + 25),
                    fontFace=cv2.FONT_HERSHEY_TRIPLEX,
                    fontScale=1e-3 * 3 * image.shape[0],
                    color=(0, 255, 0))
        i = i + 1
    # NOTE: the converted result is discarded; `image` itself stays in BGR order.
    cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    #del listPrediction
    del gender
    del box
    return image
Example #9
Source File: dataset_statistic.py From ICDAR-2019-SROIE with MIT License
def draw():
    f = open(box_path + 'jpglist.txt')
    rect_scale_pack = []
    rect_ratio_pack = []

    # read each image and its label
    line = f.readline()
    line_num = 0
    while line:
        line_num = line_num + 1
        print('Image:', line_num)
        name = line.strip('\n')
        img = cv2.imread(image_path + name)
        img_size = img.shape
        img_size = img_size[0] * img_size[1]

        # read each coordinate and draw box
        # NOTE: as in Example #2, str.strip('.jpg') strips characters, not the suffix.
        f_txt = open(image_path + name.strip('.jpg') + '.txt')
        line_txt = f_txt.readline()
        while line_txt:
            coor = line_txt.split(',')
            x1 = int(coor[0].strip('\''))
            y1 = int(coor[1].strip('\''))
            x3 = int(coor[4].strip('\''))
            y3 = int(coor[5].strip('\''))
            text = coor[8].strip('\n').strip('\'')

            # Box scale relative to the image, and aspect ratio of the box.
            rect_size = (x3 - x1) * (y3 - y1)
            rect_scale = np.sqrt(rect_size / img_size)
            rect_scale_pack.append(rect_scale)
            #print(rect_scale_pack)
            rect_ratio = (x3 - x1) / (y3 - y1)
            rect_ratio_pack.append(rect_ratio)

            #cv2.rectangle(img, (x1, y1), (x3, y3), (255, 0, 0), 1)
            #cv2.putText(img, text, (x1, y1 - 1),
            #            cv2.FONT_HERSHEY_TRIPLEX, 0.35, (0, 0, 255), 1)
            line_txt = f_txt.readline()
        #cv2.imwrite(box_path + name, img)
        line = f.readline()
    # img = cv2.imshow('image', img)
    # cv2.waitKey(0)
    return rect_scale_pack, rect_ratio_pack
Example #10
Source File: CreateGest.py From Emojinator with MIT License
def main(g_id):
    total_pics = 1200
    cap = cv2.VideoCapture(0)
    x, y, w, h = 300, 50, 350, 350

    create_folder("gestures/" + str(g_id))
    pic_no = 0
    flag_start_capturing = False
    frames = 0

    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)

        # Skin-tone mask in HSV, then smooth and clean it up morphologically.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask2 = cv2.inRange(hsv, np.array([2, 50, 60]), np.array([25, 150, 255]))
        res = cv2.bitwise_and(frame, frame, mask=mask2)
        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        median = cv2.GaussianBlur(gray, (5, 5), 0)
        kernel_square = np.ones((5, 5), np.uint8)
        dilation = cv2.dilate(median, kernel_square, iterations=2)
        opening = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel_square)  # a closing, despite the name

        ret, thresh = cv2.threshold(opening, 30, 255, cv2.THRESH_BINARY)
        thresh = thresh[y:y + h, x:x + w]
        # [1] assumes the OpenCV 3.x return order (see the note after Example #11).
        contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]

        if len(contours) > 0:
            contour = max(contours, key=cv2.contourArea)
            if cv2.contourArea(contour) > 10000 and frames > 50:
                x1, y1, w1, h1 = cv2.boundingRect(contour)
                pic_no += 1
                save_img = thresh[y1:y1 + h1, x1:x1 + w1]
                # Pad the crop to a square before resizing.
                if w1 > h1:
                    save_img = cv2.copyMakeBorder(save_img, int((w1 - h1) / 2), int((w1 - h1) / 2),
                                                  0, 0, cv2.BORDER_CONSTANT, (0, 0, 0))
                elif h1 > w1:
                    save_img = cv2.copyMakeBorder(save_img, 0, 0, int((h1 - w1) / 2), int((h1 - w1) / 2),
                                                  cv2.BORDER_CONSTANT, (0, 0, 0))
                save_img = cv2.resize(save_img, (image_x, image_y))
                cv2.putText(frame, "Capturing...", (30, 60), cv2.FONT_HERSHEY_TRIPLEX, 2, (127, 255, 255))
                cv2.imwrite("gestures/" + str(g_id) + "/" + str(pic_no) + ".jpg", save_img)

        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, str(pic_no), (30, 400), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (127, 127, 255))
        cv2.imshow("Capturing gesture", frame)
        cv2.imshow("thresh", thresh)

        keypress = cv2.waitKey(1)
        if keypress == ord('c'):
            if flag_start_capturing == False:
                flag_start_capturing = True
            else:
                flag_start_capturing = False
                frames = 0
        if flag_start_capturing == True:
            frames += 1
        if pic_no == total_pics:
            break
Example #11
Source File: create_gestures.py From Sign-Language-Interpreter-using-Deep-Learning with MIT License
def store_images(g_id):
    total_pics = 1200
    hist = get_hand_hist()
    cam = cv2.VideoCapture(1)
    if cam.read()[0] == False:
        cam = cv2.VideoCapture(0)
    x, y, w, h = 300, 100, 300, 300

    create_folder("gestures/" + str(g_id))
    pic_no = 0
    flag_start_capturing = False
    frames = 0

    while True:
        img = cam.read()[1]
        img = cv2.flip(img, 1)

        # Back-project the stored hand histogram, then smooth and threshold it.
        imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
        disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
        cv2.filter2D(dst, -1, disc, dst)
        blur = cv2.GaussianBlur(dst, (11, 11), 0)
        blur = cv2.medianBlur(blur, 15)
        thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
        thresh = cv2.merge((thresh, thresh, thresh))
        thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
        thresh = thresh[y:y + h, x:x + w]
        # [1] assumes the OpenCV 3.x return order (see the note after this example).
        contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]

        if len(contours) > 0:
            contour = max(contours, key=cv2.contourArea)
            if cv2.contourArea(contour) > 10000 and frames > 50:
                x1, y1, w1, h1 = cv2.boundingRect(contour)
                pic_no += 1
                save_img = thresh[y1:y1 + h1, x1:x1 + w1]
                # Pad the crop to a square before resizing.
                if w1 > h1:
                    save_img = cv2.copyMakeBorder(save_img, int((w1 - h1) / 2), int((w1 - h1) / 2),
                                                  0, 0, cv2.BORDER_CONSTANT, (0, 0, 0))
                elif h1 > w1:
                    save_img = cv2.copyMakeBorder(save_img, 0, 0, int((h1 - w1) / 2), int((h1 - w1) / 2),
                                                  cv2.BORDER_CONSTANT, (0, 0, 0))
                save_img = cv2.resize(save_img, (image_x, image_y))
                # Randomly mirror roughly half of the saved crops.
                rand = random.randint(0, 10)
                if rand % 2 == 0:
                    save_img = cv2.flip(save_img, 1)
                cv2.putText(img, "Capturing...", (30, 60), cv2.FONT_HERSHEY_TRIPLEX, 2, (127, 255, 255))
                cv2.imwrite("gestures/" + str(g_id) + "/" + str(pic_no) + ".jpg", save_img)

        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(img, str(pic_no), (30, 400), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (127, 127, 255))
        cv2.imshow("Capturing gesture", img)
        cv2.imshow("thresh", thresh)

        keypress = cv2.waitKey(1)
        if keypress == ord('c'):
            if flag_start_capturing == False:
                flag_start_capturing = True
            else:
                flag_start_capturing = False
                frames = 0
        if flag_start_capturing == True:
            frames += 1
        if pic_no == total_pics:
            break
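A portability note on the last two examples: both index cv2.findContours(...)[1], which matches OpenCV 3.x, where the function returns (image, contours, hierarchy). On OpenCV 4.x it returns only (contours, hierarchy), so [1] silently becomes the hierarchy array instead. A version-agnostic unpacking looks like this:

# Works on both OpenCV 3.x (3 return values) and 4.x (2 return values).
result = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours = result[0] if len(result) == 2 else result[1]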