Python face_recognition.face_locations() Examples

The following are 30 code examples of face_recognition.face_locations(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module face_recognition, or try the search function.
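As a quick orientation before the examples: face_locations() takes an image as a numpy array and returns one (top, right, bottom, left) tuple per detected face. A minimal sketch of the call (the image path is a placeholder):

import face_recognition

image = face_recognition.load_image_file("my_picture.jpg")  # path is a placeholder

# model="hog" is the faster CPU default; model="cnn" is more accurate but far slower without a GPU
face_locations = face_recognition.face_locations(image, number_of_times_to_upsample=1, model="hog")

for top, right, bottom, left in face_locations:
    print("Face found at top={}, right={}, bottom={}, left={}".format(top, right, bottom, left))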
Example #1
Source File: facerec_from_webcam_faster.py    From face-attendance-machine with Apache License 2.0
def face_process():
    myprint("face process start",time.time())
    # Find all the faces and face encodings in the current frame of video
    # face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
    myprint('face_locations start', time.time())
    face_locations = face_recognition.face_locations(rgb_small_frame, model="hog")
    myprint('face_locations end', time.time())
    myprint('face_encodings start', time.time())
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    myprint('face_encodings end', time.time())
    face_names = []
    for face_encoding in face_encodings:
        # optimize start: KNN-style voting, rank * weight accumulated per class, then sort and take the top-1
        name, dis = vote_class(face_encoding)
        # optimize end
        face_names.append(name)  # store the recognized name for this face

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        myprint('putText start', time.time())
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        myprint("putText end " + name, time.time())
        # say hello and save record to file
        myprint('process_face_records start', time.time())
        process_face_records(name)
        myprint('process_face_records end', time.time())

    # Display the resulting image
    cv2.imshow('Video', frame)
    myprint("face process end", time.time()) 
Example #2
Source File: web.py    From Mosaicer with MIT License
def upload():
    print('tracker start')
    image_path = request.args.get('path').split(os.sep)[1:]
    print(image_path)
    image_path = os.sep.join(image_path)
    image_dir = os.path.dirname(image_path)
    image_name = os.path.basename(image_path)
    print(image_path)
    image = cv2.imread(image_path)
    faces = fr.face_locations(image,
                              number_of_times_to_upsample=0, model="cnn")
    index = 0

    for (top, right, bottom, left) in faces:
        imgFace = image[top:bottom, left:right]
        img_output = cv2.resize(imgFace, (299, 299),
                                interpolation=cv2.INTER_AREA)
        face_path = os.path.join(image_dir, str(index) + image_name)
        index += 1
        cv2.imwrite(face_path, img_output)
    os.remove(image_path)
    print('tracker end')
    return 'true' 
Example #3
Source File: t_find_faces_in_picture.py    From FaceRank with GNU General Public License v3.0
def find_and_save_face(web_file, face_file):
    # Load the jpg file into a numpy array
    image = face_recognition.load_image_file(web_file)
    print(image.dtype)
    # Find all the faces in the image
    face_locations = face_recognition.face_locations(image)

    print("I found {} face(s) in this photograph.".format(len(face_locations)))

    for face_location in face_locations:

        # Print the location of each face in this image
        top, right, bottom, left = face_location
        print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))

        # You can access the actual face itself like this:
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        pil_image.save(face_file) 
Example #4
Source File: find_faces_in_picture.py    From FaceRank with GNU General Public License v3.0
def find_and_save_face(web_file, face_file):
    # Load the jpg file into a numpy array
    image = face_recognition.load_image_file(web_file)
    print(image.dtype)
    # Find all the faces in the image
    face_locations = face_recognition.face_locations(image)

    print("I found {} face(s) in this photograph.".format(len(face_locations)))

    for face_location in face_locations:

        # Print the location of each face in this image
        top, right, bottom, left = face_location
        print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))

        # You can access the actual face itself like this:
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        pil_image.save(face_file) 
Example #5
Source File: face_utils.py    From GANimation with GNU General Public License v3.0
def detect_biggest_face(img):
    '''
    Detect biggest face in image
    :param img: cv::mat HxWx3 RGB
    :return: 4 <x,y,w,h>
    '''
    # detect faces
    bbs = face_recognition.face_locations(img)

    max_area = float('-inf')
    max_area_i = 0
    for i, (y, right, bottom, x) in enumerate(bbs):
        area = (right - x) * (bottom - y)
        if max_area < area:
            max_area = area
            max_area_i = i

    if max_area != float('-inf'):
        y, right, bottom, x = bbs[max_area_i]
        return x, y, (right - x), (bottom - y)

    return None 
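A hypothetical use of detect_biggest_face(), cropping the returned <x,y,w,h> box out of the image (variable names are assumptions):

bb = detect_biggest_face(img)  # img: HxWx3 RGB numpy array
if bb is not None:
    x, y, w, h = bb
    face_crop = img[y:y + h, x:x + w]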
Example #6
Source File: encode_faces.py    From edge-tpu-servers with MIT License
def dlib_face_det(image):
    # Detect and localize faces using dlib (via face_recognition).
    # Assumes only one face is in image passed.

    # Convert image from BGR (OpenCV ordering) to dlib ordering (RGB).
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input image.
    # NB: model='cnn' causes OOM.
    boxes = face_recognition.face_locations(rgb,
        number_of_times_to_upsample=2, model='hog')

    if len(boxes) == 0:
        print('*** no face found! ***')
        return None

    # Return bounding box coords as (top, right, bottom, left) tuples.
    return boxes
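A hypothetical call, matching the BGR input the function expects (the file path is a placeholder):

image = cv2.imread("person.jpg")  # OpenCV loads images in BGR order
boxes = dlib_face_det(image)
if boxes is not None:
    top, right, bottom, left = boxes[0]  # the function assumes a single face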
Example #7
Source File: extraction.py    From Mosaicer with MIT License
def extract(folder, file_name):
    """Extract faces from images

    Args:
        folder: folder
        file_name : filename
    """
    if not os.path.exists('result'):
        os.makedirs('result')
    file_names = []
    if not file_name:
        for dirpath, dirnames, filenames in os.walk(folder):
            for file in filenames:
                if(check_img(file)):
                    full_path = os.path.join(dirpath, file)
                    file_names.append(full_path)
    else:
        file_names.append(os.path.join(folder, file_name))
    for file in file_names:
        print(file)
        image = face_recognition.load_image_file(file)
        #frontal_image = run(image)
        face_locations = face_recognition.face_locations(image)
        count = 0
        for face_location in face_locations:
            top, right, bottom, left = face_location
            face_image = image[top:bottom, left:right]
            img_output = cv2.resize(face_image, (299, 299),
                                    interpolation=cv2.INTER_AREA)
            file_name, file_ext = os.path.splitext(
                os.path.basename(file))
            delimiter = ''
            if count != 0:
                delimiter = '_' + str(count)

            path = file_name + delimiter + file_ext
            path = os.path.join('result', path)
            cv2.imwrite(path, cv2.cvtColor(img_output, cv2.COLOR_RGB2BGR))
            count += 1 
Example #8
Source File: main.py    From python-examples with MIT License
def detect(args):
    arr = face_recognition.load_image_file(args.input)
    face_locations = face_recognition.face_locations(arr)
    print('found:', len(face_locations))

    img = Image.open(args.input)
    
    draw = ImageDraw.Draw(img)
    for item in face_locations:
        # face_locations returns (top, right, bottom, left); PIL's rectangle wants (left, top, right, bottom)
        item = item[3], item[0], item[1], item[2]
        draw.rectangle(item, width=3)

    img.save(args.output) 
Example #9
Source File: FaceRecognition.py    From robot-camera-platform with GNU General Public License v3.0
def find(self, image):
        import face_recognition
        rgb_frame = image[:, :, ::-1]
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_frame, model='hog')
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
        for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
            # for each face found try to match against known faces
            matches = face_recognition.compare_faces(self.__known_face_encodings, face_encoding)
            if True not in matches:
                return None
            first_match_index = matches.index(True)
            top, right, bottom, left = face_locations[first_match_index]
            return (left, top, right, bottom)
        return None 
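A hypothetical caller of find(); it returns (left, top, right, bottom) for the first known face, or None (the recognizer instance and frame name are assumptions):

box = recognizer.find(bgr_frame)  # bgr_frame: an OpenCV BGR frame
if box is not None:
    left, top, right, bottom = box
    cv2.rectangle(bgr_frame, (left, top), (right, bottom), (0, 255, 0), 2)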
Example #10
Source File: dlib_cnn.py    From rabbitVE with GNU General Public License v3.0
def detection(self, frame):
        bboxes = []
        frameDraw = frame.copy()
        frameHeight = frameDraw.shape[0]
        faces = face_recognition.face_locations(frame, number_of_times_to_upsample=0, model="cnn") #top, right, bottom, left
        for (top, right, bottom, left) in faces:
            cv2.rectangle(frameDraw, (left, top), (right, bottom), (0, 255, 0), int(frameHeight / 150), 8)
            bboxes.append([top, right, bottom, left])
        return frameDraw, bboxes 
Example #11
Source File: dlib_hog.py    From rabbitVE with GNU General Public License v3.0
def detection(self, frame):
        bboxes = []
        frameDraw = frame.copy()
        frameHeight = frameDraw.shape[0]
        faces = face_recognition.face_locations(frame, number_of_times_to_upsample=0, model="hog") #top, right, bottom, left
        for (top, right, bottom, left) in faces:
            cv2.rectangle(frameDraw, (left, top), (right, bottom), (0, 255, 0), int(frameHeight / 150), 8)
            bboxes.append([top, right, bottom, left])
        return frameDraw, bboxes 
Example #12
Source File: Person.py    From PyRecognizer with MIT License
def init_dataset_core(detection_model, jitters, encoding_models, img_path=None):
        """
        Delegated core method for parallelize work
        :detection_model
        :jitters
        :param img_path:
        :return:
        """
        try:
            image = load_image_file(img_path)
        except OSError:
            log.error(
                "init_dataset | === FATAL === | Image {} is corrupted!!".format(img_path))
            return None
        # log.debug("initDataset | Image loaded! | Searching for face ...")
        # Array of w,x,y,z coordinates
        # NOTE: batch_face_locations could be used here to parallelize the image init, but unfortunately
        # this is the only GPU I have right now and, of course, I'd rather not burn it out
        face_bounding_boxes = face_locations(image, model=detection_model)
        face_data = None
        if len(face_bounding_boxes) == 1:
            log.info(
                "initDataset | Image {0} has only 1 face, loading for future training ...".format(img_path))
            # Loading the X [data] using the configured number of jitter distortions
            face_data = face_encodings(image, known_face_locations=face_bounding_boxes, num_jitters=jitters,
                                       model=encoding_models)[0]
        else:
            log.error(
                "initDataset | Image {0} not suitable for training!".format(img_path))
            if len(face_bounding_boxes) == 0:
                log.error("initDataset | I've not found any face :/ ")
            else:
                log.error(
                    "initDataset | Found more than one face, too much for me Sir :&")
        return face_data 
Example #13
Source File: Classifier.py    From PyRecognizer with MIT License
def extract_face_from_image(X_img_path, detection_model, jitters, encoding_models):
        # Load image data in a numpy array
        try:
            log.debug("extract_face_from_image | Loading image {}".format(X_img_path))
            X_img, ratio = load_image_file(X_img_path)
        except OSError:
            log.error("extract_face_from_image | What have you uploaded ???")
            return -2, -2, -1
        log.debug("extract_face_from_image | Extracting faces locations ...")
        try:
            # TODO: Reduce size of the image at every iteration
            X_face_locations = face_recognition.face_locations(
                X_img, model=detection_model)  # model="cnn")
        except RuntimeError:
            log.error(
                "extract_face_from_image | GPU does not have enough memory: FIXME unload data and retry")
            return None, None, ratio

        log.debug("extract_face_from_image | Found {} face(s) for the given image".format(
            len(X_face_locations)))

        # If no faces are found in the image, return an empty result.
        if len(X_face_locations) == 0:
            log.warning("extract_face_from_image | Seems that no faces was found :( ")
            return -3, -3, ratio

        # Find encodings for faces in the test image
        log.debug("extract_face_from_image | Encoding faces using [{}] jitters ...".format(jitters))
        # num_jitters increases the number of distorted re-samples used when encoding
        faces_encodings = face_recognition.face_encodings(
            X_img, known_face_locations=X_face_locations, num_jitters=jitters, model=encoding_models)
        log.debug("extract_face_from_image | Face encoded! | Let's ask the neural network ...")
        return faces_encodings, X_face_locations, ratio 
Example #14
Source File: prediction_producer.py    From eye_of_sauron with MIT License
def get_processed_frame_object(frame_obj, scale=1.0):
        """Processes value produced by producer, returns prediction with png image.

        :param frame_obj: frame dictionary with frame information and frame itself
        :param scale: scale factor in (0, 1]; scales the image before face recognition, which speeds up processing but decreases accuracy
        :return: A dict updated with faces found in that frame, i.e. their location and encoding.
        """

        frame = np_from_json(frame_obj, prefix_name=ORIGINAL_PREFIX)  # frame_obj = json
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_BGR2RGB)

        if scale != 1:
            # Resize frame of video to scale size for faster face recognition processing
            rgb_small_frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)

        else:
            rgb_small_frame = frame

        with timer("PROCESS RAW FRAME {}".format(frame_obj["frame_num"])):
            # Find all the faces and face encodings in the current frame of video
            with timer("Locations in frame"):
                face_locations = np.array(face_recognition.face_locations(rgb_small_frame))
                face_locations_dict = np_to_json(face_locations, prefix_name="face_locations")

            with timer("Encodings in frame"):
                face_encodings = np.array(face_recognition.face_encodings(rgb_small_frame, face_locations))
                face_encodings_dict = np_to_json(face_encodings, prefix_name="face_encodings")

        frame_obj.update(face_locations_dict)
        frame_obj.update(face_encodings_dict)

        return frame_obj 
Example #15
Source File: face_recognition_knn.py    From face_recognition with MIT License
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
    """
    Recognizes faces in given image using a trained KNN classifier

    :param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be specified.
    :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
           of mis-classifying an unknown person as a known one.
    :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
        For faces of unrecognized persons, the name 'unknown' will be returned.
    """
    if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
        raise Exception("Invalid image path: {}".format(X_img_path))

    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")

    # Load a trained KNN model (if one was passed in)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)

    # Load image file and find face locations
    X_img = face_recognition.load_image_file(X_img_path)
    X_face_locations = face_recognition.face_locations(X_img)

    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        return []

    # Find encodings for faces in the test image
    faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)

    # Use the KNN model to find the best matches for the test face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]

    # Predict classes and remove classifications that aren't within the threshold
    return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)] 
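A hypothetical call to predict() (file names are placeholders); it returns (name, (top, right, bottom, left)) pairs:

predictions = predict("unknown.jpg", model_path="trained_knn_model.clf")
for name, (top, right, bottom, left) in predictions:
    print("- Found {} at ({}, {})".format(name, left, top))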
Example #16
Source File: facerec_ipcamera_knn.py    From face_recognition with MIT License
def predict(X_frame, knn_clf=None, model_path=None, distance_threshold=0.5):
    """
    Recognizes faces in given image using a trained KNN classifier

    :param X_frame: frame to do the prediction on.
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be specified.
    :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
           of mis-classifying an unknown person as a known one.
    :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
        For faces of unrecognized persons, the name 'unknown' will be returned.
    """
    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")

    # Load a trained KNN model (if one was passed in)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)

    X_face_locations = face_recognition.face_locations(X_frame)

    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        return []

    # Find encodings for faces in the test image
    faces_encodings = face_recognition.face_encodings(X_frame, known_face_locations=X_face_locations)

    # Use the KNN model to find the best matches for the test face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]

    # Predict classes and remove classifications that aren't within the threshold
    return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)] 
Example #17
Source File: test.py    From GANimation with GNU General Public License v3.0
def _img_morph(self, img, expression):
        bbs = face_recognition.face_locations(img)
        if len(bbs) > 0:
            y, right, bottom, x = bbs[0]
            bb = x, y, (right - x), (bottom - y)
            face = face_utils.crop_face_with_bb(img, bb)
            face = face_utils.resize_face(face)
        else:
            face = face_utils.resize_face(img)

        morphed_face = self._morph_face(face, expression)

        return morphed_face 
Example #18
Source File: face_utils.py    From GANimation with GNU General Public License v3.0
def detect_faces(img):
    '''
    Detect faces in image
    :param img: cv::mat HxWx3 RGB
    :return: yield 4 <x,y,w,h>
    '''
    # detect faces
    bbs = face_recognition.face_locations(img)

    for y, right, bottom, x in bbs:
        # Scale back up face bb
        yield x, y, (right - x), (bottom - y) 
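A hypothetical consumer of the detect_faces() generator (img assumed to be an RGB numpy array):

face_crops = [img[y:y + h, x:x + w] for x, y, w, h in detect_faces(img)]
print("Cropped {} face(s)".format(len(face_crops)))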
Example #19
Source File: web_service.py    From MMFinder with MIT License
def get_face_and_save(filename):
    img_path = f"{UPLOAD_DIR}/{filename}"
    image = face_recognition.load_image_file(img_path)
    locations = face_recognition.face_locations(image)
    if len(locations) == 1:  # save the face of mm
        top, right, bottom, left = locations[0]
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        with open(f"{UPLOAD_DIR}/face-{filename}", "wb") as f:
            pil_image.save(f)
    return len(locations) 
Example #20
Source File: filter_images.py    From MMFinder with MIT License
def get_face_and_save(path):
    image_path = f'{IMAGES_PATH}/{path}'
    image = face_recognition.load_image_file(image_path)
    locations = face_recognition.face_locations(image)
    if len(locations) == 1:  # save the face of mm
        top, right, bottom, left = locations[0]
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        with open(f'{IMAGES_PATH}/faces/face-{path}', "wb") as f:
            pil_image.save(f)
    return len(locations) 
Example #21
Source File: face_track_server.py    From tf-insightface with MIT License
def process(self, frame):
        self.reset()
        self.cam_h, self.cam_w, _ = frame.shape
        # Resize frame of video by down_scale_factor for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=self.down_scale_factor, fy=self.down_scale_factor)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        self.face_locations = face_recognition.face_locations(rgb_small_frame)
        # Display the results
        for y1_sm, x2_sm, y2_sm, x1_sm in self.face_locations:
            # Scale back up face locations since the frame we detected in was downscaled by down_scale_factor
            x1 = int(x1_sm / self.down_scale_factor)
            x2 = int(x2_sm / self.down_scale_factor)
            y1 = int(y1_sm / self.down_scale_factor)
            y2 = int(y2_sm / self.down_scale_factor)

            x1_rltv = x1 / self.cam_w
            x2_rltv = x2 / self.cam_w
            y1_rltv = y1 / self.cam_h
            y2_rltv = y2 / self.cam_h

            _face_area = frame[y1:y2, x1:x2, :]  # numpy indexing is [row (y), column (x)]
            if _face_area.size == 0:
                continue
            self.faces.append(_face_area)
            self.face_relative_locations.append([x1_rltv, y1_rltv, x2_rltv, y2_rltv])
            # cv2.imshow('faces', frame[y1:y2, x1:x2, :])
            # cv2.waitKey(0)
        print('[FaceTracker Server] Found {} faces!'.format(len(self.faces)))
        return self.faces 
Example #22
Source File: face_track_server.py    From tf-insightface with MIT License
def reset(self):
        self.face_relative_locations = []
        self.face_locations = []
        self.faces = [] 
Example #23
Source File: face_extractor.py    From youtube-video-face-swap with MIT License
def _raw_face_landmarks(face_image, face_locations):
    face_locations = [_css_to_rect(face_location) for face_location in face_locations]
    return [pose_predictor(face_image, face_location) for face_location in face_locations] 
Example #24
Source File: face_extractor.py    From youtube-video-face-swap with MIT License
def detect_faces(frame):
    face_locations = face_recognition.face_locations(frame)
    landmarks = _raw_face_landmarks(frame, face_locations)   

    for ((y, right, bottom, x), face_landmarks) in zip(face_locations, landmarks):
        yield DetectedFace(frame[y: bottom, x: right], x, right - x, y, bottom - y, face_landmarks)

# extract all faces in image 
Example #25
Source File: face_recog.py    From EagleEye with Do What The F*ck You Want To Public License
def constructIndexes(self, label):
        valid_links = []
        console.section('Analyzing')
        file_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
        file_name += '.jpg'
        tmp_path = os.path.join(tempfile.gettempdir(), file_name)
        console.task("Storing Image in {0}".format(tmp_path))
        for num, i in enumerate(self.profile_img):
            console.task('Analyzing {0}...'.format(i.strip()[:90]))
            urlretrieve(i, tmp_path)
            frame = cv2.imread(tmp_path)
            big_frame = cv2.resize(frame, (0, 0), fx=2.0, fy=2.0)
            rgb_big_frame = big_frame[:, :, ::-1]  # convert BGR to RGB; the frame was upscaled, not downscaled
            face_locations = face_recognition.face_locations(rgb_big_frame)
            face_encodings = face_recognition.face_encodings(rgb_big_frame, face_locations, num_jitters=self.num_jitters)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.known_face_names[first_match_index]
                face_names.append(name)

            for _, name in zip(face_locations, face_names):
                if name == label:
                    valid_links.append(num)
        if os.path.isfile(tmp_path):
            console.task("Removing {0}".format(tmp_path))
            os.remove(tmp_path)
        return valid_links 
Example #26
Source File: pipeline.py    From MesoNet with Apache License 2.0
def __init__(self, path, load_first_face = True):
        super().__init__(path)
        self.faces = {}
        self.coordinates = {}  # stores the face (locations center, rotation, length)
        self.last_frame = self.get(0)
        self.frame_shape = self.last_frame.shape[:2]
        self.last_location = (0, 200, 200, 0)
        if load_first_face:
            face_positions = face_recognition.face_locations(self.last_frame, number_of_times_to_upsample=2)
            if len(face_positions) > 0:
                self.last_location = face_positions[0] 
Example #27
Source File: facerec_from_webcam_multiprocessing.py    From face_recognition with MIT License
def process(worker_id, read_frame_list, write_frame_list, Global, worker_num):
    known_face_encodings = Global.known_face_encodings
    known_face_names = Global.known_face_names
    while not Global.is_exit:

        # Wait to read
        while Global.read_num != worker_id or Global.read_num != prev_id(Global.buff_num, worker_num):
            # If the user has requested to end the app, then stop waiting for webcam frames
            if Global.is_exit:
                break

            time.sleep(0.01)

        # Delay to make the video look smoother
        time.sleep(Global.frame_delay)

        # Read a single frame from frame list
        frame_process = read_frame_list[worker_id]

        # Expect next worker to read frame
        Global.read_num = next_id(Global.read_num, worker_num)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_frame = frame_process[:, :, ::-1]

        # Find all the faces and face encodings in the frame of video; this is the most time-consuming step
        face_locations = face_recognition.face_locations(rgb_frame)
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

        # Loop through each face in this frame of video
        for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)

            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]

            # Draw a box around the face
            cv2.rectangle(frame_process, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame_process, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame_process, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Wait to write
        while Global.write_num != worker_id:
            time.sleep(0.01)

        # Send frame to global
        write_frame_list[worker_id] = frame_process

        # Expect next worker to write frame
        Global.write_num = next_id(Global.write_num, worker_num) 
Example #28
Source File: faceblur.py    From faceblur with MIT License
def face_blur(src_img, dest_img, zoom_in=1):
    '''
    Recognize and blur all faces in the source image file, then save as destination image file.
    '''
    sys.stdout.write("%s:processing... \r" % (src_img))
    sys.stdout.flush()

    # Initialize some variables
    face_locations = []
    photo = face_recognition.load_image_file(src_img)
    # Resize image to 1/zoom_in size for faster face detection processing
    small_photo = cv2.resize(photo, (0, 0), fx=1/zoom_in, fy=1/zoom_in)

    # Find all the faces and face encodings in the current frame of video
    face_locations = face_recognition.face_locations(small_photo, model="cnn")

    if face_locations:
        print("%s:There are %s faces at " % (src_img, len(face_locations)), face_locations)
    else:
        print('%s:No faces were found.' % (src_img))
        return False

    # Blur all faces
    photo = cv2.imread(src_img)
    for top, right, bottom, left in face_locations:
        # Scale back up face locations since the frame we detected in was scaled to 1/zoom_in size
        top *= zoom_in
        right *= zoom_in
        bottom *= zoom_in
        left *= zoom_in

        # Extract the region of the image that contains the face
        face_image = photo[top:bottom, left:right]

        # Blur the face image
        face_image = cv2.GaussianBlur(face_image, (21, 21), 0)

        # Put the blurred face region back into the frame image
        photo[top:bottom, left:right] = face_image

    # Save image to file
    cv2.imwrite(dest_img, photo)

    print('Face-blurred photo has been saved as %s' % dest_img)

    return True 
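A hypothetical invocation (paths are placeholders); zoom_in=2 halves the image before detection and scales the boxes back up:

face_blur("group_photo.jpg", "group_photo_blurred.jpg", zoom_in=2)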
Example #29
Source File: face-rec-emotion.py    From Face-and-Emotion-Recognition with MIT License
def face_compare(frame, process_this_frame):
    print("compare")
    # Resize frame of video to 1/2 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.50, fy=0.50)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    face_locations = []
    face_names = []
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/2 size
        top *= 2
        right *= 2
        bottom *= 2
        left *= 2
        #cv2.rectangle(frame, (left, bottom+36), (right, bottom), (0, 0, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom + 20), font, 0.3, (255, 255, 255), 1)
        print("text print")

    return face_names

# starting video streaming 
Example #30
Source File: facerec_from_webcam_mult_thread.py    From face-attendance-machine with Apache License 2.0
def face_process(frame):
    # Resize frame of video to 1/4 size for faster face recognition processing
    myprint("face process resize start", time.time())
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    myprint("face process small_frame start", time.time())
    rgb_small_frame = small_frame[:, :, ::-1]

    # Find all the faces and face encodings in the current frame of video
    # face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
    myprint('face_locations start', time.time())
    face_locations = face_recognition.face_locations(rgb_small_frame, model="hog")
    myprint('face_locations end', time.time())
    myprint('face_encodings start', time.time())
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    myprint('face_encodings end', time.time())
    face_names = []
    for face_encoding in face_encodings:
        # optimize start: KNN-style voting, rank * weight accumulated per class, then sort and take the top-1
        name, dis = vote_class(face_encoding)
        # optimize end
        face_names.append(name)  # store the recognized name for this face

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        myprint('putText start', time.time())
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        myprint("putText end " + name, time.time())
        # say hello and save record to file
        myprint('process_face_records start', time.time())
        process_face_records(name)
        myprint('process_face_records end', time.time())

    # Display the resulting image
    # cv2.imshow('Video', frame)
    myprint("face process end", time.time())
    return frame