Python imutils.face_utils.rect_to_bb() Examples

The following are 7 code examples of imutils.face_utils.rect_to_bb(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module imutils.face_utils, or try the search function.
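rect_to_bb() takes the rectangle object returned by dlib's face detector and converts it into an OpenCV-style (x, y, w, h) bounding box. A minimal sketch of the typical call pattern, assuming dlib, OpenCV and imutils are installed (example.jpg is a placeholder path, not from any of the projects below):

import cv2
import dlib
from imutils import face_utils

detector = dlib.get_frontal_face_detector()
image = cv2.imread("example.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

for rect in detector(gray, 0):
    # convert dlib's rectangle into (x, y, width, height)
    (x, y, w, h) = face_utils.rect_to_bb(rect)
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)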
Example #1
Source File: rec-feat.py    From Facial-Recognition-using-Facenet with MIT License
def extract_face_info(img, img_rgb, database, ear):
    # `detector`, `thresh` and `recognize_face` are module-level names defined elsewhere in the original script
    faces = detector(img_rgb)
    x, y, w, h = 0, 0, 0, 0
    if len(faces) > 0:
        for face in faces:
            (x, y, w, h) = face_utils.rect_to_bb(face)
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
            image = img[y:y + h, x:x + w]
            name, min_dist = recognize_face(image, database)
            if ear > thresh:
                if min_dist < 0.1:
                    cv2.putText(img, "Face : " + name, (x, y - 50), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
                    cv2.putText(img, "Dist : " + str(min_dist), (x, y - 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
                else:
                    cv2.putText(img, 'No matching faces', (x, y - 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
            else:
                cv2.putText(img, 'Eyes Closed', (x, y - 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2) 
Example #2
Source File: face_utilities.py    From Heart-rate-measurement-using-camera with Apache License 2.0
def face_detection(self, frame):
        '''
        Detect faces in a frame
        
        Args:
            frame (cv2 image): a frame grabbed from a camera or video
            
        Outputs:
            rects (array): detected faces as rectangles
        '''
        if self.detector is None:
            self.detector = dlib.get_frontal_face_detector()
        
        if frame is None:
            return
            
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #get all faces in the frame
        rects = self.detector(gray, 0)
        # to get the coords from a rect, use: (x, y, w, h) = face_utils.rect_to_bb(rects[0])
        
        return rects 
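face_detection() returns raw dlib rectangles, so downstream code typically converts them exactly as the inline comment suggests, e.g. (a hedged one-liner, not part of the original file):

boxes = [face_utils.rect_to_bb(rect) for rect in rects]  # list of (x, y, w, h) tuples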
Example #3
Source File: FaceRecognizer.py    From FaceRecognition with GNU General Public License v3.0
def rect_to_bb(rect):
    # take the bounding box predicted by dlib and convert it to the
    # (x, y, w, h) format, where (x, y) is the top-left corner and
    # w, h are the width and height

    x = rect.left()
    y = rect.top()
    w = rect.right() - x
    h = rect.bottom() - y

    return (x, y, w, h) 
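This helper performs the same conversion as imutils.face_utils.rect_to_bb(). A quick sanity check, assuming dlib is installed (the coordinates are arbitrary):

import dlib

rect = dlib.rectangle(10, 20, 110, 170)  # left, top, right, bottom
print(rect_to_bb(rect))  # -> (10, 20, 100, 150)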
Example #4
Source File: eval.py    From Age-Gender-Estimate-TF with MIT License
def load_image(image_path, shape_predictor):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(shape_predictor)
    fa = FaceAligner(predictor, desiredFaceWidth=160)
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    # image = imutils.resize(image, width=256)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 2)
    rect_nums = len(rects)
    XY, aligned_images = [], []
    if rect_nums == 0:
        aligned_images.append(image)
        return aligned_images, image, rect_nums, XY
    else:
        for i in range(rect_nums):
            aligned_image = fa.align(image, gray, rects[i])
            aligned_images.append(aligned_image)
            (x, y, w, h) = rect_to_bb(rects[i])
            image = cv2.rectangle(image, (x, y), (x + w, y + h), color=(255, 0, 0), thickness=2)
            XY.append((x, y))
        return np.array(aligned_images), image, rect_nums, XY


# def draw_label(image, point, ages, genders, font=cv2.FONT_HERSHEY_COMPLEX, font_scale=1, thickness=1):
#     for i in range(len(point)):
#         label = "{}, {}".format(int(ages[i]), "F" if genders[i] == 0 else "M")
#         size = cv2.getTextSize(label, font, font_scale, thickness)[0]
#         x, y = point[i]
#         # cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)
#         cv2.putText(image, label, (x, max(y - 5, 0)), font, font_scale, (255, 255, 255), thickness)
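A hedged usage sketch for load_image(); the image path and the standard dlib 68-point predictor filename are placeholders, not taken from the original project:

aligned_faces, annotated, rect_nums, xy = load_image(
    "face.jpg", "shape_predictor_68_face_landmarks.dat")
print("faces found:", rect_nums)
cv2.imwrite("annotated.jpg", annotated)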
Example #5
Source File: face_utilities.py    From Heart-rate-measurement-using-camera with Apache License 2.0
def no_age_gender_face_process(self, frame, type):
        '''
        Full processing pipeline to extract the face and ROI, but without age and gender detection.
        
        Args:
            frame (cv2 image): input frame
            type (str): "5" or "68", the landmark model to use
            
        Outputs:
            rects (array): detected faces as rectangles
            face (cv2 image): cropped face region
            shape (array): facial landmark coordinates as (x, y) tuples
            aligned_face (cv2 image): face after alignment
            aligned_shape (array): facial landmark coordinates of the aligned face as (x, y) tuples
        '''
        if(type=="5"):
            shape, rects = self.get_landmarks(frame, "5")
            
            if shape is None:
                return None
        else:    
            shape, rects = self.get_landmarks(frame, "68")
            if shape is None:
                return None
        
        (x, y, w, h) = face_utils.rect_to_bb(rects[0])
        
        face = frame[y:y+h, x:x+w]
        aligned_face, aligned_shape = self.face_alignment(frame, shape)
        
        # if(type=="5"):
            # aligned_shape, rects_2 = self.get_landmarks(aligned_face, "5")
            # if aligned_shape is None:
                # return None
        # else:    
            # aligned_shape, rects_2 = self.get_landmarks(aligned_face, "68")
            # if aligned_shape is None:
                # return None
                
        return rects, face, shape, aligned_face, aligned_shape 
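A minimal, hypothetical driver loop for this method; the Face_utilities class name and the face_ut instance name are assumptions based on the file name and Example #7, not confirmed by the snippet itself:

import cv2

face_ut = Face_utilities()  # assumed class name from face_utilities.py
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    result = face_ut.no_age_gender_face_process(frame, "68")
    if result is None:
        continue  # no face detected in this frame
    rects, face, shape, aligned_face, aligned_shape = result
    cv2.imshow("aligned", aligned_face)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()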
Example #6
Source File: create_face.py    From Facial-Recognition-using-Facenet with MIT License
def main():
    create_folder(FACE_DIR)
    while True:
        name=input("EnterName: ")
        face_id = input("Enter id for face: ")
        try:
            face_id = int(face_id)
            face_folder = FACE_DIR + str(face_id) + "/"
            create_folder(face_folder)
            break
        except ValueError:
            print("Invalid input. id must be an integer")
            continue

    # get beginning image number
    while True:
        init_img_no = input("Starting img no.: ")
        try:
            init_img_no = int(init_img_no)
            break
        except ValueError:
            print("Starting img no should be an integer...")
            continue

    img_no = init_img_no
    cap = cv2.VideoCapture(0)
    total_imgs = 10
    while True:
        ret, img = cap.read()
        # skip frames that fail to grab from the camera
        if not ret:
            continue
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = detector(img_gray)
        if len(faces) == 1:
            face = faces[0]
            (x, y, w, h) = face_utils.rect_to_bb(face)
            face_img = img_gray[y-50:y + h+100, x-50:x + w+100]
            face_aligned = face_aligner.align(img, img_gray, face)

            face_img = face_aligned
            img_path = face_folder +name+ str(img_no) + ".jpg"
            cv2.imwrite(img_path, face_img)
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 3)
            cv2.imshow("aligned", face_img)
            img_no += 1

        cv2.imshow("Saving", img)
        cv2.waitKey(1)
        if img_no == init_img_no + total_imgs:
            break

    cap.release() 
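One caveat with the padded crop above: when the detected face sits near the top or left edge of the frame, y-50 or x-50 goes negative, and Python slicing then counts from the other end of the array, typically yielding an empty or wrong crop. A hedged guard, not part of the original script:

y1, x1 = max(0, y - 50), max(0, x - 50)
face_img = img_gray[y1:y + h + 100, x1:x + w + 100]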
Example #7
Source File: test_face_utilities.py    From Heart-rate-measurement-using-camera with Apache License 2.0
def flow_process(frame):
    # `i` (frame counter) and the cached results below are module-level state in the original test script
    global last_rects, last_age, last_gender, last_shape
    display_frame = frame.copy()
    rects = last_rects
    age = last_age
    gender = last_gender
    shape = last_shape
    
    # convert the frame to gray scale before performing face detection
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # get all faces as rectangles every 3 frames
    if(i%3==0):
        rects = face_ut.face_detection(frame)
    
    #check if there is any face in the frame, if not, show the frame and move to the next frame
    if len(rects) == 0:
        return frame, None
    
    # draw face rectangle, only grab one face in the frame
    (x, y, w, h) = face_utils.rect_to_bb(rects[0])
    cv2.rectangle(display_frame,(x,y),(x+w,y+h),(255,0,0),2)
    
    # crop the face from frame
    face = frame[y:y+h,x:x+w]
    
    if(i%6==0):
    # detect age and gender and put it into the frame every 6 frames
        age, gender = face_ut.age_gender_detection(face)
        
    overlay_text = "%s, %s" % (gender, age)
    cv2.putText(display_frame, overlay_text ,(x,y-15), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,0,0),2,cv2.LINE_AA)
    
    if(i%3==0):
        # get 68 facial landmarks and draw it into the face every 3 frames
        shape = face_ut.get_landmarks(frame, "5")
    
    for (x, y) in shape: 
        cv2.circle(face, (x, y), 1, (0, 0, 255), -1)
        
    # get the mask of the face
    remapped_landmarks = face_ut.facial_landmarks_remap(shape)
    mask = np.zeros((face.shape[0], face.shape[1]))
    cv2.fillConvexPoly(mask, remapped_landmarks[0:27], 1) 
    
    aligned_face = face_ut.face_alignment(frame, shape)
    
    aligned_shape = face_ut.get_landmarks(aligned_face, "68")
    
    cv2.rectangle(aligned_face, (aligned_shape[54][0], aligned_shape[29][1]), #draw rectangle on right and left cheeks
            (aligned_shape[12][0],aligned_shape[33][1]), (0,255,0), 0)
    cv2.rectangle(aligned_face, (aligned_shape[4][0], aligned_shape[29][1]), 
            (aligned_shape[48][0],aligned_shape[33][1]), (0,255,0), 0)
    
    
    #assign to last params
    last_rects = rects
    last_age = age
    last_gender = gender
    last_shape = shape
    
    return display_frame, aligned_face