Python cv2.FONT_HERSHEY_DUPLEX Examples

The following are 28 code examples of cv2.FONT_HERSHEY_DUPLEX, a built-in OpenCV font constant passed to text-drawing functions such as cv2.putText(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
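Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) showing the usual cv2.putText() call with this font; the canvas size and text are arbitrary placeholders:

import cv2
import numpy as np

# Blank BGR canvas; any image loaded with cv2.imread() works the same way.
img = np.zeros((100, 420, 3), dtype=np.uint8)
# putText(img, text, org, fontFace, fontScale, color, thickness, lineType)
cv2.putText(img, 'FONT_HERSHEY_DUPLEX', (10, 60),
            cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1, cv2.LINE_AA)
cv2.imshow('preview', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
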
Example #1
Source File: visualization.py    From Gather-Deployment with MIT License
def bboxes_draw_on_img(img, classes, scores, bboxes, thickness=2):
    shape = img.shape
    for i in range(bboxes.shape[0]):
        # `labels` and `accept_labels` are module-level globals in the
        # original source file.
        label = labels[classes[i] - 1]
        if label not in accept_labels:
            continue
        bbox = bboxes[i]
        #color = colors_tableau[classes[i] - 1]
        # bbox holds normalized (ymin, xmin, ymax, xmax); points are built as
        # (y, x) and reversed below into OpenCV's (x, y) order.
        p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
        p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
        cv2.rectangle(img, p1[::-1], p2[::-1], (0,0,255), 1)
        s = '%s' % (label)
        p1 = (p1[0]-5, p1[1])
        cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.7, (0,0,255), 1)


# =========================================================================== #
# Matplotlib show...
# =========================================================================== # 
Example #2
Source File: pascal_voc.py    From transforms with MIT License
def draw_bbox(self, img, bboxes, labels, relative=False):
        if len(labels) == 0:
            return img
        img = img.copy()
        h, w = img.shape[:2]

        if relative:
            bboxes = bboxes * [w, h, w, h]

        bboxes = bboxes.astype(int)  # np.int was removed in NumPy 1.24+
        labels = labels.astype(int)

        for bbox, label in zip(bboxes, labels):
            left, top, right, bot = bbox
            color = self.colors[label]
            label = self.id_to_label[label]
            cv2.rectangle(img, (left, top), (right, bot), color, 2)
            cv2.putText(img, label, (left+1, top-5), cv2.FONT_HERSHEY_DUPLEX, 
                0.4, color, 1, cv2.LINE_AA)

        return img 
Example #3
Source File: main_window.py    From smpl_viewer with MIT License
def _draw_annotations(self, img):
        self.joints2d.set(t=self.camera.t, rt=self.camera.rt, f=self.camera.f, c=self.camera.c, k=self.camera.k)

        if self.view_bones.isChecked():
            kintree = self.model.kintree_table[:, 1:]
            for k in range(kintree.shape[1]):
                cv2.line(img, (int(self.joints2d.r[kintree[0, k], 0]), int(self.joints2d.r[kintree[0, k], 1])),
                         (int(self.joints2d.r[kintree[1, k], 0]), int(self.joints2d.r[kintree[1, k], 1])),
                         (0.98, 0.98, 0.98), 3)

        if self.view_joints.isChecked():
            for j in self.joints2d.r:
                cv2.circle(img, (int(j[0]), int(j[1])), 5, (0.38, 0.68, 0.15), -1)

        if self.view_joint_ids.isChecked():
            for k, j in enumerate(self.joints2d.r):
                cv2.putText(img, str(k), (int(j[0]), int(j[1])), cv2.FONT_HERSHEY_DUPLEX, 0.6, (0.3, 0.23, 0.9), 2)

        return img 
Example #4
Source File: pascal_voc.py    From SSD-variants with MIT License
def draw_bbox(self, img, bboxes, labels, relative=False):
        if len(labels) == 0:
            return img
        img = img.copy()
        h, w = img.shape[:2]

        if relative:
            bboxes = bboxes * [w, h, w, h]

        bboxes = bboxes.astype(int)  # np.int was removed in NumPy 1.24+
        labels = labels.astype(int)

        for bbox, label in zip(bboxes, labels):
            left, top, right, bot = bbox
            color = self.colors[label]
            label = self.id_to_label[label]
            cv2.rectangle(img, (left, top), (right, bot), color, 2)
            #img[max(0,top-18):min(h+1,top+2), max(0,left-2):min(left + len(label)*7+5,w+1)] = 15
            cv2.putText(img, label, (left+1, top-5), cv2.FONT_HERSHEY_DUPLEX, 0.4, color, 1, cv2.LINE_AA)

        return img 
Example #5
Source File: video.py    From Advanced-Deep-Learning-with-Keras with MIT License
def loop(self):
        font = cv2.FONT_HERSHEY_DUPLEX
        pos = (10,30)
        font_scale = 0.9
        font_color = (0, 0, 0)
        line_type = 1

        while True:
            start_time = datetime.datetime.now()
            ret, image = self.capture.read()

            #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) #/ 255.0

            cv2.imshow('image', image)
            if self.videowriter is not None:
                if self.videowriter.isOpened():
                    self.videowriter.write(image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # When everything done, release the capture
        self.capture.release()
        cv2.destroyAllWindows() 
Example #6
Source File: annotate.py    From faceswap with GNU General Public License v3.0
def draw_extract_box(self, color_id=2, thickness=1):
        """ Draw the extracted face box """
        if not self.roi:
            return
        color = self.colors[color_id]
        for idx, roi in enumerate(self.roi):
            logger.trace("Drawing Extract Box: (idx: %s, roi: %s)", idx, roi)
            top_left = [point for point in roi.squeeze()[0]]
            top_left = (top_left[0], top_left[1] - 10)
            cv2.putText(self.image,
                        str(idx),
                        top_left,
                        cv2.FONT_HERSHEY_DUPLEX,
                        1.0,
                        color,
                        thickness)
            cv2.polylines(self.image, [roi], True, color, thickness) 
Example #7
Source File: visualization.py    From SSD_tensorflow_VOC with Apache License 2.0
def bboxes_draw_on_img(img, classes, scores, bboxes, colors, thickness=2):
    shape = img.shape
    for i in range(bboxes.shape[0]):
        bbox = bboxes[i]
        color = colors[classes[i]]
        # Draw bounding box...
        p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
        p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
        cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
        # Draw text...
        s = '%s/%.3f' % (classes[i], scores[i])
        p1 = (p1[0]-5, p1[1])
        cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.4, color, 1)


# =========================================================================== #
# Matplotlib show...
# =========================================================================== # 
Example #8
Source File: facerec_from_webcam_faster.py    From face-attendance-machine with Apache License 2.0
def face_process():
    myprint("face process start",time.time())
    # Find all the faces and face encodings in the current frame of video
    # face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
    myprint('face_locations start', time.time())
    face_locations = face_recognition.face_locations(rgb_small_frame, model="hog")
    myprint('face_locations end', time.time())
    myprint('face_encodings start', time.time())
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    myprint('face_encodings end', time.time())
    face_names = []
    for face_encoding in face_encodings:
        # optimize start: KNN-style rank * weight vote, accumulated per class,
        # then sorted to take the top-1
        name, dis = vote_class(face_encoding)
        # optimize end
        face_names.append(name)  # collect the recognized face name

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        myprint('putText start', time.time())
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        myprint("putText end " + name, time.time())
        # say hello and save record to file
        myprint('process_face_records start', time.time())
        process_face_records(name)
        myprint('process_face_records end', time.time())

    # Display the resulting image
    cv2.imshow('Video', frame)
    myprint("face process end", time.time()) 
Example #9
Source File: camera.py    From rps-cv with MIT License
def addFrameRateText(self, img, pos=(0, 25), bgr=(0,255,0), samples=21):
        """Returns an image with the frame rate added as text on the image
        passed as argument. The framerate is calculated based on the time
        between calls to this method and averaged over a number of samples.
        img: image to which the framerate is to be added,
        bgr: tuple defining the blue, green and red values of the text color,
        samples: number of samples used for averaging.
        """
        # Calculate framerate and reset timer
        self.frameRateFilter.addDataPoint(1 / self.frameRateTimer.getElapsed())
        self.frameRateTimer.reset()
        # Get averaged framerate as a string
        frString = '{}fps'.format(str(int(round(self.frameRateFilter.getMean(),
                                                0))))
        # Add text to image
        cv2.putText(img, frString, pos, cv2.FONT_HERSHEY_DUPLEX, 1, bgr) 
Example #10
Source File: camera.py    From Live-USB-Webcam-Streaming-on-ThingsBoard-IoT-Platform with MIT License
def get_frame(self):
        success, image = self.video.read()
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces_rects = haar_cascade_face.detectMultiScale(image_gray, scaleFactor=1.2, minNeighbors=5)
        # Optionally print the number of faces found
        #print('Faces found: ', len(faces_rects))

        for (x, y, w, h) in faces_rects:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

        if len(faces_rects) > 0:
            cv2.putText(img=image, text='Faces found:' + str(len(faces_rects)), org=(50, 50),
                        fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, color=(0, 0, 255))
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes() 
Example #11
Source File: capture.py    From rps-cv with MIT License
def saveImage(img, gesture):

    # Define image path and filename
    folder = utils.imgPathsRaw[gesture]
    name = utils.gestureTxt[gesture] + '-' + time.strftime('%Y%m%d-%H%M%S')
    extension = '.png'

    print("Saving " + name + extension + " - Accept ([y]/n)?")

    # Write gesture name to image and show for a few seconds
    imgTxt = img.copy()
    font = cv2.FONT_HERSHEY_DUPLEX
    cv2.putText(imgTxt, utils.gestureTxt[gesture], (10,25), font, 1, (0, 0, 255))
    cv2.imshow('Camera', imgTxt)
    key = cv2.waitKey(2000)
    if key not in [110, 120]:
        # Key is not x or n. Save image
        cv2.imwrite(folder + name + extension, img)
        print("Saved ({}x{})".format(img.shape[1], img.shape[0]))
    else:
        print("Save cancelled") 
Example #12
Source File: draw_toolbox.py    From X-Detector with Apache License 2.0
def draw_bbox(img, bbox, shape, label, color=[255, 0, 0], thickness=2):
    p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
    p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
    cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
    p1 = (p1[0]+15, p1[1])
    cv2.putText(img, str(label), p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.5, color, 1)

    return img

# def bboxes_draw_on_img(img, classes, scores, bboxes, thickness=2):
#     shape = img.shape
#     for i in range(bboxes.shape[0]):
#         bbox = bboxes[i]
#         color = colors_tableau[classes[i]]
#         # Draw bounding box...
#         p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
#         p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
#         cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
#         # Draw text...
#         s = '%s|%.3f' % (label2name_table[classes[i]], scores[i])
#         p1 = (p1[0]-6, p1[1])
#         cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.5, color, 1)

#     return img 
Example #13
Source File: dataset.py    From OpenCV-Video-Label with GNU General Public License v3.0
def draw_roi(self, add_class_label=True):
        font = cv2.FONT_HERSHEY_DUPLEX

        # draw the bounding box
        cv2.rectangle(self.image, (self.tl_x, self.tl_y), (self.br_x, self.br_y), GUI_REDD_RGB, 2)
        # draw the class label background and label
        if add_class_label:
            cv2.rectangle(self.image, (self.tl_x - 1, self.tl_y - 15),
                          (self.tl_x + 10 + len(self.image_class) * 10, self.tl_y),
                          GUI_REDD_RGB, cv2.FILLED)
            cv2.putText(self.image, self.image_class, (self.tl_x + 5, self.tl_y - 2), font, .5,
                        (255, 255, 255), 1, cv2.LINE_AA)

    # crops image to objects location 
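As an aside on Example #13 above: sizing the label background from len(self.image_class) * 10 only approximates the rendered width. cv2.getTextSize() measures it exactly; here is a minimal self-contained sketch with placeholder label and coordinates:

import cv2
import numpy as np

img = np.zeros((120, 320, 3), dtype=np.uint8)
label, tl_x, tl_y = 'person', 40, 60  # placeholder values
font, scale, thick = cv2.FONT_HERSHEY_DUPLEX, 0.5, 1
# getTextSize returns ((width, height), baseline) for the rendered string.
(text_w, text_h), baseline = cv2.getTextSize(label, font, scale, thick)
# Fit the filled background exactly to the text, then draw the label on it.
cv2.rectangle(img, (tl_x, tl_y - text_h - baseline), (tl_x + text_w, tl_y),
              (0, 0, 255), cv2.FILLED)
cv2.putText(img, label, (tl_x, tl_y - baseline), font, scale,
            (255, 255, 255), thick, cv2.LINE_AA)
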
Example #14
Source File: video_infer.py    From homesecurity with MIT License
def show_bounding_boxes(img, box, conf, cls, cls_dict):
    """Draw detected bounding boxes on the original image."""
    font = cv2.FONT_HERSHEY_DUPLEX
    for bb, cf, cl in zip(box, conf, cls):
        cl = int(cl)
        y_min, x_min, y_max, x_max = bb[0], bb[1], bb[2], bb[3]
        cv2.rectangle(img, (x_min, y_min), (x_max, y_max), BBOX_COLOR, 2)
        txt_loc = (max(x_min, 5), max(y_min-3, 20))
        cls_name = cls_dict.get(cl, 'CLASS{}'.format(cl))
        txt = '{} {:.2f}'.format(cls_name, cf)
        cv2.putText(img, txt, txt_loc, font, 0.8, BBOX_COLOR, 1)
    return img 
Example #15
Source File: tx2_surveillance.py    From homesecurity with MIT License
def show_bounding_boxes(img, box, conf, cls, cls_dict):
    """Draw detected bounding boxes on the original image."""
    font = cv2.FONT_HERSHEY_DUPLEX
    for bb, cf, cl in zip(box, conf, cls):
        cl = int(cl)
        y_min, x_min, y_max, x_max = bb[0], bb[1], bb[2], bb[3]
        cv2.rectangle(img, (x_min, y_min), (x_max, y_max), BBOX_COLOR, 2)
        txt_loc = (max(x_min, 5), max(y_min-3, 20))
        cls_name = cls_dict.get(cl, 'CLASS{}'.format(cl))
        txt = '{} {:.2f}'.format(cls_name, cf)
        cv2.putText(img, txt, txt_loc, font, 0.8, BBOX_COLOR, 1)
    return img 
Example #16
Source File: tracking_demo.py    From RFL with MIT License
def display_result(image, pred_boxes, frame_idx, seq_name=None):
    if len(image.shape) == 3:
        r, g, b = cv2.split(image)
        image = cv2.merge([b, g, r])
    pred_boxes = pred_boxes.astype(int)
    cv2.rectangle(image, tuple(pred_boxes[0:2]), tuple(pred_boxes[0:2] + pred_boxes[2:4]), (0, 0, 255), 2)

    cv2.putText(image, 'Frame: %d' % frame_idx, (20, 30), cv2.FONT_HERSHEY_DUPLEX, 0.8, (0, 255, 255))
    cv2.imshow('tracker', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        return True
    if config.is_save:
        cv2.imwrite(os.path.join(config.save_path, seq_name, '%04d.jpg' % frame_idx), image) 
Example #17
Source File: calcui.py    From ncappzoo with MIT License
def __init__(self, label, x, y, canvas, color=(0, 0, 0), thickness=1, scale=1, font=cv2.FONT_HERSHEY_DUPLEX):
        super(Label, self).__init__(x, y, canvas, color=color, thickness=thickness)
        self.label = label
        self.font = font
        self.color = color
        self.scale = scale 
Example #18
Source File: demo.py    From MemTrack with MIT License
def display_result(image, pred_boxes, frame_idx, seq_name=None):
    if len(image.shape) == 3:
        r, g, b = cv2.split(image)
        image = cv2.merge([b, g, r])
    pred_boxes = pred_boxes.astype(int)
    cv2.rectangle(image, tuple(pred_boxes[0:2]), tuple(pred_boxes[0:2] + pred_boxes[2:4]), (0, 0, 255), 2)

    cv2.putText(image, 'Frame: %d' % frame_idx, (20, 30), cv2.FONT_HERSHEY_DUPLEX, 0.8, (0, 255, 255))
    cv2.imshow('tracker', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        return True
    if config.is_save:
        cv2.imwrite(os.path.join(config.save_path, seq_name, '%04d.jpg' % frame_idx), image) 
Example #19
Source File: video_extractor.py    From rbb_core with MIT License
def write_text(cv_img, text, x, y):
        # A thick dark pass followed by a thin light pass gives the text a
        # simple outline that stays readable on any background.
        cv2.putText(cv_img, text, (x, y), cv2.FONT_HERSHEY_DUPLEX, .5, (0,0,0), thickness=2)
        cv2.putText(cv_img, text, (x, y), cv2.FONT_HERSHEY_DUPLEX, .5, (255,255,255), thickness=1) 
Example #20
Source File: visualization.py    From SSD_tensorflow_VOC with Apache License 2.0
def draw_bbox(img, bbox, shape, label, color=[255, 0, 0], thickness=2):
    p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
    p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
    cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
    p1 = (p1[0]+15, p1[1])
    cv2.putText(img, str(label), p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.5, color, 1) 
Example #21
Source File: simplevis.py    From Det3D with Apache License 2.0
def cv2_draw_text(img, locs, labels, colors, thickness, line_type=cv2.LINE_8):
    locs = locs.astype(np.int32)
    font_line_type = cv2.LINE_8
    # Successive reassignments kept from the original source; only the last
    # assignment (FONT_HERSHEY_SIMPLEX) actually takes effect.
    font = cv2.FONT_ITALIC
    font = cv2.FONT_HERSHEY_DUPLEX
    font = cv2.FONT_HERSHEY_PLAIN
    font = cv2.FONT_HERSHEY_SIMPLEX
    for loc, label, color in zip(locs, labels, colors):
        color = list(int(c) for c in color)
        cv2.putText(
            img, label, tuple(loc), font, 0.7, color, thickness, font_line_type, False
        )
    return img 
Example #22
Source File: ObjectCounter.py    From ivy with MIT License
def visualize(self):
        frame = self.frame
        font = cv2.FONT_HERSHEY_DUPLEX
        line_type = cv2.LINE_AA

        # draw and label blob bounding boxes
        for _id, blob in self.blobs.items():
            (x, y, w, h) = [int(v) for v in blob.bounding_box]
            cv2.rectangle(frame, (x, y), (x + w, y + h), self.hud_color, 2)
            object_label = 'I: ' + _id[:8] \
                            if blob.type is None \
                            else 'I: {0}, T: {1} ({2})'.format(_id[:8], blob.type, str(blob.type_confidence)[:4])
            cv2.putText(frame, object_label, (x, y - 5), font, 1, self.hud_color, 2, line_type)

        # draw counting lines
        for counting_line in self.counting_lines:
            cv2.line(frame, counting_line['line'][0], counting_line['line'][1], self.hud_color, 3)
            cl_label_origin = (counting_line['line'][0][0], counting_line['line'][0][1] + 35)
            cv2.putText(frame, counting_line['label'], cl_label_origin, font, 1, self.hud_color, 2, line_type)

        # show detection roi
        if self.show_droi:
            frame = draw_roi(frame, self.droi)

        # show counts
        if self.show_counts:
            offset = 1
            for line, objects in self.counts.items():
                cv2.putText(frame, line, (10, 40 * offset), font, 1, self.hud_color, 2, line_type)
                for label, count in objects.items():
                    offset += 1
                    cv2.putText(frame, "{}: {}".format(label, count), (10, 40 * offset), font, 1, self.hud_color, 2, line_type)
                offset += 2

        return frame 
Example #23
Source File: visualizer.py    From deep_human with GNU General Public License v3.0
def show_prections(img, predictions):
    jointsnum = predictions.shape[0]
    for coord in range(jointsnum):
        keypt = (int(predictions[coord, 0]), int(predictions[coord, 1]))
        print(keypt)
        text_loc = (keypt[0] + 5, keypt[1] + 7)
        cv2.circle(img, keypt, 3, (55, 255, 155), -1)
        cv2.putText(img, str(coord), text_loc, cv2.FONT_HERSHEY_DUPLEX, 0.5, (55, 255, 155), 1)
    cv2.imshow('img', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example #24
Source File: simplevis.py    From second.pytorch with MIT License
def cv2_draw_text(img, locs, labels, colors, thickness, line_type=cv2.LINE_8):
    locs = locs.astype(np.int32)
    font_line_type = cv2.LINE_8
    # Successive reassignments kept from the original source; only the last
    # assignment (FONT_HERSHEY_SIMPLEX) actually takes effect.
    font = cv2.FONT_ITALIC
    font = cv2.FONT_HERSHEY_DUPLEX
    font = cv2.FONT_HERSHEY_PLAIN
    font = cv2.FONT_HERSHEY_SIMPLEX
    for loc, label, color in zip(locs, labels, colors):
        color = list(int(c) for c in color)
        cv2.putText(img, label, tuple(loc), font, 0.7, color, thickness,
                    font_line_type, False)
    return img 
Example #25
Source File: visualization.py    From VTuber_Unity with MIT License
def draw_FPS(frame, FPS):
    cv2.putText(frame, "FPS: %d"%FPS, (40, 40), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1) 
Example #26
Source File: HandRecognition.py    From hand-gesture-recognition-opencv with MIT License
def find_gesture(frame_in,finger,palm):
    frame_gesture.set_palm(palm[0],palm[1])
    frame_gesture.set_finger_pos(finger)
    frame_gesture.calc_angles()
    gesture_found=DecideGesture(frame_gesture,GestureDictionary)
    gesture_text="GESTURE:"+str(gesture_found)
    cv2.putText(frame_in,gesture_text,(int(0.56*frame_in.shape[1]),int(0.97*frame_in.shape[0])),cv2.FONT_HERSHEY_DUPLEX,1,(0,255,255),1,8)
    return frame_in,gesture_found

# 7. Remove bg from image 
Example #27
Source File: image_parsing.py    From hazymaze with Apache License 2.0
def write_text(image, text):
    h, w = image.shape[0], image.shape[1]
    font = cv2.FONT_HERSHEY_DUPLEX
    cv2.putText(image, text, (w//5, h-40), font, 1, (255,255,255), 2, cv2.LINE_AA) 
Example #28
Source File: test_webcam.py    From face_landmark_dnn with MIT License
def webcam_main():
    print("Camera sensor warming up...")
    vs = cv2.VideoCapture(0)
    time.sleep(2.0)

    mark_detector = MarkDetector()
    
    # loop over the frames from the video stream
    while True:
        _, frame = vs.read()
        start = cv2.getTickCount()

        frame = imutils.resize(frame, width=750, height=750)
        frame = cv2.flip(frame, 1)
        faceboxes = mark_detector.extract_cnn_facebox(frame)

        if faceboxes is not None:
            for facebox in faceboxes:
                # Detect landmarks from image of 64X64 with grayscale.
                face_img = frame[facebox[1]: facebox[3],
                                    facebox[0]: facebox[2]]
                # cv2.rectangle(frame, (facebox[0], facebox[1]), (facebox[2], facebox[3]), (0, 255, 0), 2)
                face_img = cv2.resize(face_img, (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
                face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
                face_img0 = face_img.reshape(1, CNN_INPUT_SIZE, CNN_INPUT_SIZE, 1)

                land_start_time = time.time()
                marks = mark_detector.detect_marks_keras(face_img0)
                # marks *= 255
                marks *= facebox[2] - facebox[0]
                marks[:, 0] += facebox[0]
                marks[:, 1] += facebox[1]
                # Draw Predicted Landmarks
                mark_detector.draw_marks(frame, marks, color=(255, 255, 255), thick=2)

        fps_time = (cv2.getTickCount() - start)/cv2.getTickFrequency()
        cv2.putText(frame, '%.1ffps'%(1/fps_time) , (frame.shape[1]-65,15), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0,255,0))
        # show the frame
        cv2.imshow("Frame", frame)
        # writer.write(frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.release()  # cv2.VideoCapture has release(), not stop()