Python face_recognition.compare_faces() Examples

The following are 13 code examples of face_recognition.compare_faces(). You can go to the original project or source file by following the links above each example. You may also want to check out the other available functions and classes of the face_recognition module.
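For orientation, compare_faces() takes a list of known face encodings plus one candidate encoding and returns a list of booleans, one per known encoding, which is True when the Euclidean distance between the two encodings is within the given tolerance (0.6 by default; lower is stricter). Below is a minimal sketch of that usage; the image file names are placeholders.

import face_recognition

# Load images from disk (placeholder file names)
known_image = face_recognition.load_image_file("known_person.jpg")
unknown_image = face_recognition.load_image_file("unknown_person.jpg")

# face_encodings() returns a list of 128-dimensional encodings, one per detected face
known_encodings = face_recognition.face_encodings(known_image)
unknown_encodings = face_recognition.face_encodings(unknown_image)

if known_encodings and unknown_encodings:
    # One boolean per known encoding; a tolerance below 0.6 makes the match stricter
    results = face_recognition.compare_faces(known_encodings, unknown_encodings[0], tolerance=0.6)
    print("Match:", results[0])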
Example #1
Source File: encoding_images.py    From face-attendance-machine with Apache License 2.0
def encoding_images(path):
    """
    对path路径下的子文件夹中的图片进行编码,
    TODO:
        对人脸数据进行历史库中的人脸向量进行欧式距离的比较,当距离小于某个阈值的时候提醒:
        如果相似的是本人,则跳过该条记录,并提醒已经存在,否则警告人脸过度相似问题,
    :param path:
    :return:
    """
    with open(name_and_encoding, 'w') as f:
        subdirs = [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]
        for subdir in subdirs:
            print('process image name :', subdir)
            person_image_encoding = []
            for y in os.listdir(subdir):
                print("image name is ", y)
                _image = face_recognition.load_image_file(os.path.join(subdir, y))
                face_encodings = face_recognition.face_encodings(_image)
                name = os.path.split(subdir)[-1]
                if face_encodings and len(face_encodings) == 1:
                    if len(person_image_encoding) == 0:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        continue
                    # compare_faces returns a list of booleans rather than distances;
                    # any False means this encoding differs enough to count as a new feature
                    for i in range(len(person_image_encoding)):
                        matches = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                        if False in matches:
                            person_image_encoding.append(face_encodings[0])
                            known_face_names.append(name)
                            print(name, " new feature")
                            f.write(name + ":" + str(face_encodings[0]) + "\n")
                            break
                    # face_encoding = face_recognition.face_encodings(_image)[0]
                    # face_recognition.compare_faces()
            known_face_encodings.extend(person_image_encoding)
            bb = np.array(known_face_encodings)
            print("--------")
    np.save(KNOWN_FACE_ENCODINGS, known_face_encodings)
    np.save(KNOWN_FACE_NANE, known_face_names) 
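The TODO in the docstring above calls for comparing each new encoding against the vectors already in the library by Euclidean distance before adding it. A minimal sketch of that idea using face_recognition.face_distance is shown below; the helper name and threshold value are illustrative and not part of the project.

import numpy as np
import face_recognition

def is_new_encoding(known_encodings, candidate, threshold=0.5):
    """Return True if candidate is far enough from every known encoding."""
    if not known_encodings:
        return True
    # face_distance returns one Euclidean distance per known encoding
    distances = face_recognition.face_distance(np.array(known_encodings), candidate)
    return bool(np.min(distances) > threshold)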
Example #2
Source File: face_recog.py    From EagleEye with Do What The F*ck You Want To Public License
def constructIndexes(self, label):
        valid_links = []
        console.section('Analyzing')
        file_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
        file_name += '.jpg'
        tmp_path = os.path.join(tempfile.gettempdir(), file_name)
        console.task("Storing Image in {0}".format(tmp_path))
        for num, i in enumerate(self.profile_img):
            console.task('Analyzing {0}...'.format(i.strip()[:90]))
            urlretrieve(i, tmp_path)
            frame = cv2.imread(tmp_path)
            # Upscale the frame 2x, then convert from BGR (OpenCV) to RGB (face_recognition)
            big_frame = cv2.resize(frame, (0, 0), fx=2.0, fy=2.0)
            rgb_frame = big_frame[:, :, ::-1]
            face_locations = face_recognition.face_locations(rgb_frame)
            face_encodings = face_recognition.face_encodings(rgb_frame, face_locations, num_jitters=self.num_jitters)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.known_face_names[first_match_index]
                face_names.append(name)

            for _, name in zip(face_locations, face_names):
                if name == label:
                    valid_links.append(num)
        if os.path.isfile(tmp_path):
            console.task("Removing {0}".format(tmp_path))
            os.remove(tmp_path)
        return valid_links 
Example #3
Source File: facerec_service.py    From face_recognition with MIT License
def detect_faces_in_image(file_stream):
    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)

    # Get face encodings for any faces in the uploaded image
    uploaded_faces = face_recognition.face_encodings(img)

    # Defaults for the result object
    faces_found = len(uploaded_faces)
    faces = []

    if faces_found:
        face_encodings = list(faces_dict.values())
        for uploaded_face in uploaded_faces:
            match_results = face_recognition.compare_faces(
                face_encodings, uploaded_face)
            for idx, match in enumerate(match_results):
                if match:
                    match = list(faces_dict.keys())[idx]
                    match_encoding = face_encodings[idx]
                    dist = face_recognition.face_distance([match_encoding],
                            uploaded_face)[0]
                    faces.append({
                        "id": match,
                        "dist": dist
                    })

    return {
        "count": faces_found,
        "faces": faces
    }

# <Picture functions> #

# <Controller> 
Example #4
Source File: dlib_compare.py    From rabbitVE with GNU General Public License v3.0
def compare(self, img_list, img2=None):
        temp = []
        for img in img_list:
            h, w, c = img.shape
            # img = cv2.resize(img, (64, 64))
            code = face_recognition.face_encodings(img, [(0, w, h, 0)])
            if len(code) > 0:
                temp.append(code[0])
        res_list = []
        for temp_ in temp:
            res = face_recognition.compare_faces(self.img_encode_code, temp_, tolerance=self.conf_threshold)  # smaller tolerance = stricter match
            # res is a list of booleans, one per known encoding
            res_list.append(res)
        return np.sum(res_list) > 0, res_list 
Example #5
Source File: FaceRecognition.py    From robot-camera-platform with GNU General Public License v3.0
def find(self, image):
        import face_recognition
        rgb_frame = image[:, :, ::-1]
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_frame, model='hog')
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
        for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
            # for each face found try to match against known faces
            matches = face_recognition.compare_faces(self.__known_face_encodings, face_encoding)
            if True not in matches:
                return None
            first_match_index = matches.index(True)
            top, right, bottom, left = face_locations[first_match_index]
            return (left, top, right, bottom)
        return None 
Example #6
Source File: find_same_person.py    From FaceDataset with Apache License 2.0
def find_same_person(person_image_path):
    # Get all of this person's images
    image_paths = os.listdir(person_image_path)
    known_face_encodings = []
    for image_path in image_paths:
        img_path = os.path.join(person_image_path, image_path)
        try:
            image = face_recognition.load_image_file(img_path)
            encodings = face_recognition.face_encodings(image, num_jitters=10)[0]
            known_face_encodings.append(encodings)
        except Exception as e:
            try:
                os.remove(img_path)
            except Exception as e:
                print(e)

    for image_path in image_paths:
        try:
            print(image_path)
            img_path = os.path.join(person_image_path, image_path)
            image = face_recognition.load_image_file(img_path)
            a_single_unknown_face_encoding = face_recognition.face_encodings(image, num_jitters=10)[0]
            results = face_recognition.compare_faces(known_face_encodings, a_single_unknown_face_encoding,
                                                     tolerance=0.5)
            results = numpy.array(results).astype(numpy.int64)
            if numpy.sum(results) > 5:
                main_path = os.path.join(person_image_path, '0.jpg')
                if os.path.exists(main_path):
                    os.remove(main_path)
                shutil.copyfile(img_path, main_path)
                break
        except:
            pass 
Example #7
Source File: encoding_images.py    From face-attendance-machine with Apache License 2.0
def encoding_images_mult_thread(path,threads=8):
    """
    对path路径下的子文件夹中的图片进行编码,
    TODO:
        对人脸数据进行历史库中的人脸向量进行欧式距离的比较,当距离小于某个阈值的时候提醒:
        如果相似的是本人,则跳过该条记录,并提醒已经存在,否则警告人脸过度相似问题,

    :param path:
    :return:
    """
    # with open("./dataset/encoded_face_names.txt", 'w') as f:
    #     lines = f.readlines()
    #     print(lines)

    with open(name_and_encoding, 'w') as f:
        subdirs = [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]
        for subdir in subdirs:
            print('---name :', subdir)
            person_image_encoding = []
            for y in os.listdir(subdir):
                print("image name is ", y)
                _image = face_recognition.load_image_file(os.path.join(subdir, y))
                face_encodings = face_recognition.face_encodings(_image)
                name = os.path.split(subdir)[-1]
                if face_encodings and len(face_encodings) == 1:
                    if len(person_image_encoding) == 0:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        continue

                    # compare_faces returns a list of booleans rather than distances;
                    # any False means this encoding differs enough to count as a new feature
                    for i in range(len(person_image_encoding)):
                        matches = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                        if False in matches:
                            person_image_encoding.append(face_encodings[0])
                            known_face_names.append(name)
                            print(name, " new feature")
                            f.write(name + ":" + str(face_encodings[0]) + "\n")
                            break

                    # face_encoding = face_recognition.face_encodings(_image)[0]
                    # face_recognition.compare_faces()
            known_face_encodings.extend(person_image_encoding)
            bb = np.array(known_face_encodings)
            print("--------")

    np.save(KNOWN_FACE_ENCODINGS, known_face_encodings)
    np.save(KNOWN_FACE_NANE, known_face_names) 
Example #8
Source File: encoding_images.py    From face-attendance-machine with Apache License 2.0
def encoding_ones_images(name):
    """
    对path路径下的子文件夹中的图片进行编码,
    TODO:
        对人脸数据进行历史库中的人脸向量进行欧式距离的比较,当距离小于某个阈值的时候提醒:
        如果相似的是本人,则跳过该条记录,并提醒已经存在,否则警告人脸过度相似问题,

    :param path:
    :return:
    """
    # with open("./dataset/encoded_face_names.txt", 'w') as f:
    #     lines = f.readlines()
    #     print(lines)

    with open(name_and_encoding, 'w') as f:
        image_dirs = os.path.join(data_path, name)
        files = [os.path.join(image_dirs, x) for x in os.listdir(image_dirs) if os.path.isfile(os.path.join(image_dirs, x))]
        print('---name :', files)
        person_image_encoding = []
        for image_path in files:
            print("image name is ", image_path)
            _image = face_recognition.load_image_file(image_path )
            face_encodings = face_recognition.face_encodings(_image)
            # name = os.path.split(image_path)[1]
            if face_encodings and len(face_encodings) == 1:
                if len(person_image_encoding) == 0:
                    person_image_encoding.append(face_encodings[0])
                    known_face_names.append(name)
                    continue

                # compare_faces returns a list of booleans rather than distances;
                # any False means this encoding differs enough to count as a new feature
                for i in range(len(person_image_encoding)):
                    matches = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                    if False in matches:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        print(name, " new feature")
                        f.write(name + ":" + str(face_encodings[0]) + "\n")
                        break

                # face_encoding = face_recognition.face_encodings(_image)[0]
                # face_recognition.compare_faces()
        known_face_encodings.extend(person_image_encoding)
        bb = np.array(known_face_encodings)
        print("--------")

    KNOWN_FACE_ENCODINGS = "./dataset/known_face_encodings_{}.npy"  # known face encodings
    KNOWN_FACE_NANE = "./dataset/known_face_name_{}.npy"  # known face names
    np.save(KNOWN_FACE_ENCODINGS.format(int(time.time())), known_face_encodings)
    np.save(KNOWN_FACE_NANE.format(int(time.time())), known_face_names) 
Example #9
Source File: face-rec-emotion.py    From Face-and-Emotion-Recognition with MIT License
def face_compare(frame,process_this_frame):
    print ("compare")
    # Resize frame of video to 1/2 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.50, fy=0.50)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Initialize here so the function can return a list even when this frame is skipped
    face_names = []

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    return face_names

    # NOTE: the code below is unreachable because of the return statement above
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/2 size
        top *= 2
        right *= 2
        bottom *= 2
        left *= 2
        #cv2.rectangle(frame, (left, bottom+36), (right, bottom), (0, 0, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom+20), font, 0.3, (255, 255, 255), 1)
        print ("text print")

# starting video streaming 
Example #10
Source File: web_service_example.py    From face_recognition with MIT License
def detect_faces_in_image(file_stream):
    # Pre-calculated face encoding of Obama generated with face_recognition.face_encodings(img)
    known_face_encoding = [-0.09634063,  0.12095481, -0.00436332, -0.07643753,  0.0080383,
                            0.01902981, -0.07184699, -0.09383309,  0.18518871, -0.09588896,
                            0.23951106,  0.0986533 , -0.22114635, -0.1363683 ,  0.04405268,
                            0.11574756, -0.19899382, -0.09597053, -0.11969153, -0.12277931,
                            0.03416885, -0.00267565,  0.09203379,  0.04713435, -0.12731361,
                           -0.35371891, -0.0503444 , -0.17841317, -0.00310897, -0.09844551,
                           -0.06910533, -0.00503746, -0.18466514, -0.09851682,  0.02903969,
                           -0.02174894,  0.02261871,  0.0032102 ,  0.20312519,  0.02999607,
                           -0.11646006,  0.09432904,  0.02774341,  0.22102901,  0.26725179,
                            0.06896867, -0.00490024, -0.09441824,  0.11115381, -0.22592428,
                            0.06230862,  0.16559327,  0.06232892,  0.03458837,  0.09459756,
                           -0.18777156,  0.00654241,  0.08582542, -0.13578284,  0.0150229 ,
                            0.00670836, -0.08195844, -0.04346499,  0.03347827,  0.20310158,
                            0.09987706, -0.12370517, -0.06683611,  0.12704916, -0.02160804,
                            0.00984683,  0.00766284, -0.18980607, -0.19641446, -0.22800779,
                            0.09010898,  0.39178532,  0.18818057, -0.20875394,  0.03097027,
                           -0.21300618,  0.02532415,  0.07938635,  0.01000703, -0.07719778,
                           -0.12651891, -0.04318593,  0.06219772,  0.09163868,  0.05039065,
                           -0.04922386,  0.21839413, -0.02394437,  0.06173781,  0.0292527 ,
                            0.06160797, -0.15553983, -0.02440624, -0.17509389, -0.0630486 ,
                            0.01428208, -0.03637431,  0.03971229,  0.13983178, -0.23006812,
                            0.04999552,  0.0108454 , -0.03970895,  0.02501768,  0.08157793,
                           -0.03224047, -0.04502571,  0.0556995 , -0.24374914,  0.25514284,
                            0.24795187,  0.04060191,  0.17597422,  0.07966681,  0.01920104,
                           -0.01194376, -0.02300822, -0.17204897, -0.0596558 ,  0.05307484,
                            0.07417042,  0.07126575,  0.00209804]

    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)

    face_found = False
    is_obama = False

    if len(unknown_face_encodings) > 0:
        face_found = True
        # See if the first face in the uploaded image matches the known face of Obama
        match_results = face_recognition.compare_faces([known_face_encoding], unknown_face_encodings[0])
        if match_results[0]:
            is_obama = True

    # Return the result as json
    result = {
        "face_found_in_image": face_found,
        "is_picture_of_obama": is_obama
    }
    return jsonify(result) 
Example #11
Source File: facerec_from_webcam_multiprocessing.py    From face_recognition with MIT License
def process(worker_id, read_frame_list, write_frame_list, Global, worker_num):
    known_face_encodings = Global.known_face_encodings
    known_face_names = Global.known_face_names
    while not Global.is_exit:

        # Wait to read
        while Global.read_num != worker_id or Global.read_num != prev_id(Global.buff_num, worker_num):
            # If the user has requested to end the app, then stop waiting for webcam frames
            if Global.is_exit:
                break

            time.sleep(0.01)

        # Delay to make the video look smoother
        time.sleep(Global.frame_delay)

        # Read a single frame from frame list
        frame_process = read_frame_list[worker_id]

        # Expect next worker to read frame
        Global.read_num = next_id(Global.read_num, worker_num)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_frame = frame_process[:, :, ::-1]

        # Find all the faces and face encodings in the frame of video, cost most time
        face_locations = face_recognition.face_locations(rgb_frame)
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

        # Loop through each face in this frame of video
        for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)

            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]

            # Draw a box around the face
            cv2.rectangle(frame_process, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame_process, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame_process, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Wait to write
        while Global.write_num != worker_id:
            time.sleep(0.01)

        # Send frame to global
        write_frame_list[worker_id] = frame_process

        # Expect next worker to write frame
        Global.write_num = next_id(Global.write_num, worker_num) 
Example #12
Source File: web_service_example_Simplified_Chinese.py    From face_recognition with MIT License
def detect_faces_in_image(file_stream):
    # Pre-calculated face encoding of Obama generated with face_recognition.face_encodings(img)
    known_face_encoding = [-0.09634063,  0.12095481, -0.00436332, -0.07643753,  0.0080383,
                            0.01902981, -0.07184699, -0.09383309,  0.18518871, -0.09588896,
                            0.23951106,  0.0986533 , -0.22114635, -0.1363683 ,  0.04405268,
                            0.11574756, -0.19899382, -0.09597053, -0.11969153, -0.12277931,
                            0.03416885, -0.00267565,  0.09203379,  0.04713435, -0.12731361,
                           -0.35371891, -0.0503444 , -0.17841317, -0.00310897, -0.09844551,
                           -0.06910533, -0.00503746, -0.18466514, -0.09851682,  0.02903969,
                           -0.02174894,  0.02261871,  0.0032102 ,  0.20312519,  0.02999607,
                           -0.11646006,  0.09432904,  0.02774341,  0.22102901,  0.26725179,
                            0.06896867, -0.00490024, -0.09441824,  0.11115381, -0.22592428,
                            0.06230862,  0.16559327,  0.06232892,  0.03458837,  0.09459756,
                           -0.18777156,  0.00654241,  0.08582542, -0.13578284,  0.0150229 ,
                            0.00670836, -0.08195844, -0.04346499,  0.03347827,  0.20310158,
                            0.09987706, -0.12370517, -0.06683611,  0.12704916, -0.02160804,
                            0.00984683,  0.00766284, -0.18980607, -0.19641446, -0.22800779,
                            0.09010898,  0.39178532,  0.18818057, -0.20875394,  0.03097027,
                           -0.21300618,  0.02532415,  0.07938635,  0.01000703, -0.07719778,
                           -0.12651891, -0.04318593,  0.06219772,  0.09163868,  0.05039065,
                           -0.04922386,  0.21839413, -0.02394437,  0.06173781,  0.0292527 ,
                            0.06160797, -0.15553983, -0.02440624, -0.17509389, -0.0630486 ,
                            0.01428208, -0.03637431,  0.03971229,  0.13983178, -0.23006812,
                            0.04999552,  0.0108454 , -0.03970895,  0.02501768,  0.08157793,
                           -0.03224047, -0.04502571,  0.0556995 , -0.24374914,  0.25514284,
                            0.24795187,  0.04060191,  0.17597422,  0.07966681,  0.01920104,
                           -0.01194376, -0.02300822, -0.17204897, -0.0596558 ,  0.05307484,
                            0.07417042,  0.07126575,  0.00209804]

    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)

    face_found = False
    is_obama = False

    if len(unknown_face_encodings) > 0:
        face_found = True
        # See if the first face in the uploaded image matches the known face of Obama
        match_results = face_recognition.compare_faces([known_face_encoding], unknown_face_encodings[0])
        if match_results[0]:
            is_obama = True

    # Return the result as JSON
    result = {
        "face_found_in_image": face_found,
        "is_picture_of_obama": is_obama
    }
    return jsonify(result) 
Example #13
Source File: view-mongo-images.py    From smart-zoneminder with MIT License
def knn_face_classifier(encoding, compare_face_tolerance, name_threshold, name_count):
    # attempt to match each face in the input image to our known encodings
    matches = face_recognition.compare_faces(data['encodings'],
        encoding, compare_face_tolerance)

    # Assume face is unknown to start with. 
    name = 'Unknown'

    # check to see if we have found a match
    if True in matches:
        # find the indexes of all matched faces then initialize a
        # dictionary to count the total number of times each face
        # was matched
        matchedIdxs = [i for (i, b) in enumerate(matches) if b]

        # init all name counts to 0
        counts = {n: 0 for n in data['names']}
        #print('initial counts {}'.format(counts))

        # loop over the matched indexes and maintain a count for
        # each recognized face
        for i in matchedIdxs:
            name = data['names'][i]
            counts[name] = counts.get(name, 0) + 1
        #print('counts {}'.format(counts))

        # Find face name with the max count value.
        max_value = max(counts.values())
        max_name = max(counts, key=counts.get)

        # Compare each recognized face against the max face name.
        # The max face name count must be greater than a certain value for
        # it to be valid. This value is set at a percentage of the number of
        # embeddings for that face name. 
        name_thresholds = [max_value > value + name_threshold * name_count[max_name]
            for name, value in counts.items() if name != max_name]

        # If max face name passes against all other faces then declare it valid.
        if all(name_thresholds):
            name = max_name
            print('knn says this is {}'.format(name))
        else:
            name = None
            print('knn cannot recognize face')

    return name