Python face_recognition.face_encodings() Examples
The following are 28
code examples of face_recognition.face_encodings().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module face_recognition, or try the search function.
Example #1
Source File: encoding_images.py From face-attendance-machine with Apache License 2.0 | 7 votes |
def encoding_images(path):
    """
    Encode the face images found in each sub-directory of ``path``.

    TODO (translated from the original Chinese): compare each new face
    vector against the historical library using Euclidean distance; when
    the distance drops below a threshold, warn the user: if the similar
    face belongs to the same person, skip the record and report that it
    already exists, otherwise warn about overly-similar faces.

    :param path: directory whose sub-directories each hold one person's images
    :return: None -- appends to the module-level ``known_face_encodings`` /
             ``known_face_names`` lists and persists them with ``np.save``.
    """
    with open(name_and_encoding, 'w') as f:
        # One sub-directory per person.
        subdirs = [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]
        for subdir in subdirs:
            print('process image name :', subdir)
            person_image_encoding = []
            for y in os.listdir(subdir):
                print("image name is ", y)
                _image = face_recognition.load_image_file(os.path.join(subdir, y))
                face_encodings = face_recognition.face_encodings(_image)
                # The person's name is the sub-directory name.
                name = os.path.split(subdir)[-1]
                # Only keep images with exactly one detectable face.
                if face_encodings and len(face_encodings) == 1:
                    if len(person_image_encoding) == 0:
                        # First usable image for this person.
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        continue
                    # NOTE(review): compare_faces already compares against the
                    # whole list, so this loop re-runs the same comparison --
                    # presumably intended to add a feature per mismatch; verify.
                    for i in range(len(person_image_encoding)):
                        distances = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                        if False in distances:
                            # At least one stored vector is far enough away:
                            # treat this as a new feature of the same person.
                            person_image_encoding.append(face_encodings[0])
                            known_face_names.append(name)
                            print(name, " new feature")
                            f.write(name + ":" + str(face_encodings[0]) + "\n")
                            break
            # face_encoding = face_recognition.face_encodings(_image)[0]
            # face_recognition.compare_faces()
            known_face_encodings.extend(person_image_encoding)
    bb = np.array(known_face_encodings)  # NOTE(review): unused result
    print("--------")
    np.save(KNOWN_FACE_ENCODINGS, known_face_encodings)
    np.save(KNOWN_FACE_NANE, known_face_names)
Example #2
Source File: face_recog.py From EagleEye with Do What The F*ck You Want To Public License | 6 votes |
def loadKnown(self, label):
    """Load every image under ./known and register its face encodings under *label*."""
    console.task('Loading known faces')
    known_root = Path('./known')
    extensions = ['.jpg', '.JPG', '.png', '.PNG', '.jpeg', '.JPEG', '.bmp', '.BMP']
    # Gather every candidate image path, any supported extension, any depth.
    image_paths = [p for ext in extensions for p in known_root.glob('**/*{}'.format(ext))]
    for image_path in image_paths:
        path_text = str(image_path)
        delim = '\\' if platform == "win32" else '/'
        console.subtask('Loading {0}'.format(path_text.split(delim)[1]))
        loaded = face_recognition.load_image_file(path_text)
        # Register every face found in the image under the given label.
        for face_vector in face_recognition.face_encodings(loaded, num_jitters=self.num_jitters):
            self.known_face_encodings.append(face_vector)
            self.known_face_names.append(label)
Example #3
Source File: find_same_person.py From FaceDataset with Apache License 2.0 | 5 votes |
def find_same_person(person_image_path):
    """
    Pick a representative image for one person's directory.

    Encodes every image under ``person_image_path``; images that cannot be
    loaded or contain no detectable face are deleted (best effort).  The
    first image whose encoding matches more than 5 of the collected
    encodings (tolerance 0.5) is copied to ``0.jpg`` in the same directory
    and the search stops.

    :param person_image_path: directory containing one person's face images
    """
    image_paths = os.listdir(person_image_path)
    known_face_encodings = []
    for image_path in image_paths:
        img_path = os.path.join(person_image_path, image_path)
        try:
            image = face_recognition.load_image_file(img_path)
            # IndexError here means "no face found" -- handled below.
            encodings = face_recognition.face_encodings(image, num_jitters=10)[0]
            known_face_encodings.append(encodings)
        except Exception:
            # Unreadable image or no detectable face: drop the file (best effort).
            try:
                os.remove(img_path)
            except Exception as e:
                print(e)
    for image_path in image_paths:
        try:
            print(image_path)
            img_path = os.path.join(person_image_path, image_path)
            image = face_recognition.load_image_file(img_path)
            a_single_unknown_face_encoding = face_recognition.face_encodings(image, num_jitters=10)[0]
            results = face_recognition.compare_faces(known_face_encodings, a_single_unknown_face_encoding, tolerance=0.5)
            results = numpy.array(results).astype(numpy.int64)
            if numpy.sum(results) > 5:
                main_path = os.path.join(person_image_path, '0.jpg')
                if os.path.exists(main_path):
                    os.remove(main_path)
                shutil.copyfile(img_path, main_path)
                break
        except Exception:
            # BUG FIX: was a bare ``except: pass`` which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception, keeping
            # the original best-effort "skip bad images" behavior.
            pass
Example #4
Source File: FaceRecognition.py From robot-camera-platform with GNU General Public License v3.0 | 5 votes |
def find(self, image):
    """
    Locate the first detected face that matches a known face.

    :param image: BGR frame (OpenCV channel order)
    :return: (left, top, right, bottom) of the matching face, or None
    """
    import face_recognition
    rgb_frame = image[:, :, ::-1]  # BGR -> RGB for face_recognition
    # Find all the faces and face encodings in the current frame of video
    face_locations = face_recognition.face_locations(rgb_frame, model='hog')
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # For each face found, try to match against known faces.
        matches = face_recognition.compare_faces(self.__known_face_encodings, face_encoding)
        # BUG FIX: the original returned None on the first non-matching face
        # (never examining the remaining faces) and re-derived the box via
        # face_locations[matches.index(True)] -- but that index points into
        # the *known encodings* list, not the detected faces, so the wrong
        # bounding box could be returned.  Use this face's own coordinates.
        if True in matches:
            return (left, top, right, bottom)
    return None
Example #5
Source File: FaceRecognition.py From robot-camera-platform with GNU General Public License v3.0 | 5 votes |
def configure(self):
    """Pre-compute the known face encoding from the configured image file."""
    import face_recognition
    reference_image = face_recognition.load_image_file(self.__file_path)
    # A single face is assumed in the reference image; take the first encoding.
    first_encoding = face_recognition.face_encodings(reference_image)[0]
    self.__known_face_encodings = [first_encoding]
Example #6
Source File: common.py From Intelegent_Lock with MIT License | 5 votes |
def get_users():
    """
    Load every known user from the ``people/*.jpg`` gallery.

    :return: (known_names, known_encods) -- parallel lists of user names
             (file name without directory or extension) and 128-d encodings.
    """
    import os
    known_names = []
    known_encods = []
    for image_file in glob("people/*.jpg"):
        img = face_recognition.load_image_file(image_file)
        # Assumes exactly one face per gallery image; IndexError otherwise.
        encoding = face_recognition.face_encodings(img)[0]
        known_encods.append(encoding)
        # BUG FIX: the original used the brittle slice i[7:-4], which silently
        # breaks if the folder name or extension ever changes; derive the name
        # from the path instead (identical result for "people/<name>.jpg").
        known_names.append(os.path.splitext(os.path.basename(image_file))[0])
    return known_names, known_encods
Example #7
Source File: dlib_compare.py From rabbitVE with GNU General Public License v3.0 | 5 votes |
def compare(self, img_list, img2=None):
    """
    Compare each image in *img_list* against the stored reference encodings.

    :param img_list: list of BGR images
    :param img2: unused (kept for interface compatibility)
    :return: (any_match, per_image_results) -- results are boolean vectors
    """
    encodings = []
    for candidate in img_list:
        h, w, c = candidate.shape
        # img = cv2.resize(img, (64, 64))
        # Encode the whole image as one face region (top, right, bottom, left).
        found = face_recognition.face_encodings(candidate, [(0, w, h, 0)])
        if len(found) > 0:
            encodings.append(found[0])
    res_list = []
    for encoding in encodings:
        # Smaller distance is better; res is a vector of booleans.
        res = face_recognition.compare_faces(self.img_encode_code, encoding, tolerance=self.conf_threshold)
        res_list.append(res)
    return np.sum(res_list) > 0, res_list
Example #8
Source File: dlib_compare.py From rabbitVE with GNU General Public License v3.0 | 5 votes |
def preprocess(self, imgs):
    """
    Encode reference images and cache their encodings on the instance.

    :param imgs: iterable of BGR images; ``None`` entries are skipped
    """
    for img in imgs:
        if img is None:
            continue
        h, w, c = img.shape
        # img = cv2.resize(img, (64, 64))
        # Encode the whole image as one face region (top, right, bottom, left).
        code = face_recognition.face_encodings(img, [(0, w, h, 0)])
        # BUG FIX: the original indexed [0] unconditionally and raised
        # IndexError when no face could be encoded; guard like the sibling
        # compare() method does and skip such images instead.
        if len(code) > 0:
            self.img_encode_code.append(code[0])
    # self.img_encode_code_array = np.array(self.img_encode_code)
Example #9
Source File: Person.py From PyRecognizer with MIT License | 5 votes |
def init_dataset_core(detection_model, jitters, encoding_models, img_path=None):
    """
    Delegated core method for parallelized dataset initialization.

    Loads one image, verifies that it contains exactly one face, and
    returns that face's encoding for later training.

    :param detection_model: face-detection backend passed to face_locations
    :param jitters: num_jitters passed to face_encodings (re-sampling count)
    :param encoding_models: encoder model name passed to face_encodings
    :param img_path: path of the image to encode
    :return: the face encoding, or None when the image is corrupted or does
             not contain exactly one face
    """
    try:
        image = load_image_file(img_path)
    except OSError:
        log.error(
            "init_dataset | === FATAL === | Image {} is corrupted!!".format(img_path))
        return None
    # log.debug("initDataset | Image loaded! | Searching for face ...")
    # Array of w,x,y,z coordinates
    # NOTE: Can be used batch_face_locations in order to parallelize the image init, but unfortunately
    # it's the only GPU that i have right now. And, of course, i'll try to don't burn it
    face_bounding_boxes = face_locations(image, model=detection_model)
    face_data = None
    if len(face_bounding_boxes) == 1:
        log.info(
            "initDataset | Image {0} have only 1 face, loading for future training ...".format(img_path))
        # Loading the X [data] using 300 different distortion
        face_data = face_encodings(image, known_face_locations=face_bounding_boxes,
                                   num_jitters=jitters, model=encoding_models)[0]
    else:
        # Zero faces or multiple faces: unusable for training.
        log.error(
            "initDataset | Image {0} not suitable for training!".format(img_path))
        if len(face_bounding_boxes) == 0:
            log.error("initDataset | I've not found any face :/ ")
        else:
            log.error(
                "initDataset | Found more than one face, too much for me Sir :&")
    return face_data
Example #10
Source File: Classifier.py From PyRecognizer with MIT License | 5 votes |
def extract_face_from_image(X_img_path, detection_model, jitters, encoding_models):
    """
    Load an image and encode every face it contains.

    :param X_img_path: path of the image to analyze
    :param detection_model: face-detection backend passed to face_locations
    :param jitters: num_jitters for face_encodings (distortion re-samples)
    :param encoding_models: encoder model name for face_encodings
    :return: (faces_encodings, face_locations, ratio) on success; sentinel
             values on failure: (-2, -2, -1) unreadable image,
             (None, None, ratio) GPU out of memory, (-3, -3, ratio) no faces.
    """
    # Load image data in a numpy array
    try:
        log.debug("extract_face_from_image | Loading image {}".format(X_img_path))
        # NOTE(review): this project's load_image_file returns (image, ratio),
        # unlike the upstream face_recognition helper of the same name -- verify.
        X_img, ratio = load_image_file(X_img_path)
    except OSError:
        log.error("extract_face_from_image | What have you uploaded ???")
        return -2, -2, -1
    log.debug("extract_face_from_image | Extracting faces locations ...")
    try:
        # TODO: Reduce size of the image at every iteration
        X_face_locations = face_recognition.face_locations(
            X_img, model=detection_model)  # model="cnn")
    except RuntimeError:
        log.error(
            "extract_face_from_image | GPU does not have enough memory: FIXME unload data and retry")
        return None, None, ratio
    log.debug("extract_face_from_image | Found {} face(s) for the given image".format(
        len(X_face_locations)))
    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        log.warning("extract_face_from_image | Seems that no faces was found :( ")
        return -3, -3, ratio
    # Find encodings for faces in the test image
    log.debug("extract_face_from_image | Encoding faces using [{}] jitters ...".format(jitters))
    # num_jitters increase the distortion check
    faces_encodings = face_recognition.face_encodings(
        X_img, known_face_locations=X_face_locations, num_jitters=jitters, model=encoding_models)
    log.debug("extract_face_from_image | Face encoded! | Let's ask to the neural network ...")
    return faces_encodings, X_face_locations, ratio
Example #11
Source File: facerec_service.py From face_recognition with MIT License | 5 votes |
def detect_faces_in_image(file_stream):
    """
    Encode the uploaded image and report every known face it matches.

    :param file_stream: uploaded image file object
    :return: dict with the number of faces found and, for each match
             against the known-faces registry, its id and face distance
    """
    image = face_recognition.load_image_file(file_stream)
    found_encodings = face_recognition.face_encodings(image)
    total = len(found_encodings)
    matches_payload = []
    if total:
        known_encodings = list(faces_dict.values())
        known_ids = list(faces_dict.keys())
        for candidate in found_encodings:
            results = face_recognition.compare_faces(known_encodings, candidate)
            for idx, is_match in enumerate(results):
                if not is_match:
                    continue
                matched_encoding = known_encodings[idx]
                distance = face_recognition.face_distance([matched_encoding], candidate)[0]
                matches_payload.append({
                    "id": known_ids[idx],
                    "dist": distance
                })
    return {
        "count": total,
        "faces": matches_payload
    }

# <Picture functions> #
# <Controller>
Example #12
Source File: facerec_service.py From face_recognition with MIT License | 5 votes |
def calc_face_encoding(image):
    """
    Compute the face encoding for exactly one face.

    Currently only the first face found on the loaded picture is used.

    :param image: path or stream accepted by face_recognition.load_image_file
    :return: 128-d encoding of the single detected face
    :raises Exception: when zero faces or more than one face are detected
    """
    pixels = face_recognition.load_image_file(image)
    detected = face_recognition.face_encodings(pixels)
    # Reject pictures without any face.
    if not detected:
        raise Exception("Could not find any face in the given training image.")
    # Reject ambiguous pictures with several faces.
    if len(detected) > 1:
        raise Exception(
            "Found more than one face in the given training image.")
    return detected[0]
Example #13
Source File: prediction_producer.py From eye_of_sauron with MIT License | 5 votes |
def get_processed_frame_object(frame_obj, scale=1.0):
    """Processes value produced by producer, returns prediction with png image.

    :param frame_obj: frame dictionary with frame information and frame itself
    :param scale: (0, 1] scale image before face recognition, speeds up processing, decreases accuracy
    :return: A dict updated with faces found in that frame, i.e. their location and encoding.
    """
    frame = np_from_json(frame_obj, prefix_name=ORIGINAL_PREFIX)  # frame_obj = json
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_BGR2RGB)
    if scale != 1:
        # Resize frame of video to scale size for faster face recognition processing
        rgb_small_frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
    else:
        rgb_small_frame = frame
    with timer("PROCESS RAW FRAME {}".format(frame_obj["frame_num"])):
        # Find all the faces and face encodings in the current frame of video
        with timer("Locations in frame"):
            face_locations = np.array(face_recognition.face_locations(rgb_small_frame))
            face_locations_dict = np_to_json(face_locations, prefix_name="face_locations")
        with timer("Encodings in frame"):
            # NOTE(review): locations are passed as an np.array rather than a
            # list of tuples -- presumably accepted by face_encodings; verify.
            face_encodings = np.array(face_recognition.face_encodings(rgb_small_frame, face_locations))
            face_encodings_dict = np_to_json(face_encodings, prefix_name="face_encodings")
        # Merge locations and encodings back into the frame dictionary.
        frame_obj.update(face_locations_dict)
        frame_obj.update(face_encodings_dict)
    return frame_obj
Example #14
Source File: encode_faces.py From edge-tpu-servers with MIT License | 5 votes |
def dlib_encoder(image, boxes):
    """
    Encode a face into a 128-d representation (embeddings) using dlib.

    :param image: BGR image (OpenCV channel ordering)
    :param boxes: known face locations passed through to face_encodings
    :return: the 128-d encoding of the first (assumed only) face
    """
    # dlib expects RGB ordering, OpenCV delivers BGR.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Only one face is assumed, so take the first element directly.
    return face_recognition.face_encodings(face_image=rgb,
                                           known_face_locations=boxes,
                                           num_jitters=10)[0]

# Loop over the image paths.
# NB: Its assumed that only one face is in each image.
Example #15
Source File: face_recognition.py From ravestate with BSD 3-Clause "New" or "Revised" License | 5 votes |
def recognize_face_from_image_file(image_file: str) -> Optional[ndarray]:
    """
    Return a face encoding from *image_file*, or None when no face is found.

    Prefers Pyroboy's biggest-face helper when available, otherwise falls
    back to the plain face_recognition pipeline (first face wins).
    """
    if PYROBOY_AVAILABLE:
        return FaceRec.get_biggest_face_encoding(image_file)
    logger.warning("Falling back to basic Face Recognition functions, since Pyroboy is unavailable!")
    loaded = fr.load_image_file(image_file)
    encodings = fr.face_encodings(loaded)
    return encodings[0] if encodings else None
Example #16
Source File: face_recognition_knn.py From face_recognition with MIT License | 5 votes |
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
    """
    Recognizes faces in given image using a trained KNN classifier

    :param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be given.
    :param distance_threshold: (optional) distance threshold for face classification. the larger it is,
           the more chance of mis-classifying an unknown person as a known one.
    :return: a list of names and face locations for the recognized faces in the image:
             [(name, bounding box), ...].  For faces of unrecognized persons, the name 'unknown'
             will be returned.
    """
    if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
        raise Exception("Invalid image path: {}".format(X_img_path))
    if knn_clf is None and model_path is None:
        # BUG FIX: corrected typo "thourgh" in the user-facing message.
        raise Exception("Must supply knn classifier either through knn_clf or model_path")
    # Load a trained KNN model (if one was passed in)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)
    # Load image file and find face locations
    X_img = face_recognition.load_image_file(X_img_path)
    X_face_locations = face_recognition.face_locations(X_img)
    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        return []
    # Find encodings for faces in the test image
    faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)
    # Use the KNN model to find the best matches for the test face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
    # Predict classes and remove classifications that aren't within the threshold
    return [(pred, loc) if rec else ("unknown", loc)
            for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
Example #17
Source File: facerec_ipcamera_knn.py From face_recognition with MIT License | 5 votes |
def predict(X_frame, knn_clf=None, model_path=None, distance_threshold=0.5):
    """
    Recognizes faces in given image using a trained KNN classifier

    :param X_frame: frame to do the prediction on.
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be given.
    :param distance_threshold: (optional) distance threshold for face classification. the larger it is,
           the more chance of mis-classifying an unknown person as a known one.
    :return: a list of names and face locations for the recognized faces in the image:
             [(name, bounding box), ...].  For faces of unrecognized persons, the name 'unknown'
             will be returned.
    """
    if knn_clf is None and model_path is None:
        # BUG FIX: corrected typo "thourgh" in the user-facing message.
        raise Exception("Must supply knn classifier either through knn_clf or model_path")
    # Load a trained KNN model (if one was passed in)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)
    X_face_locations = face_recognition.face_locations(X_frame)
    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        return []
    # Find encodings for faces in the test image
    faces_encodings = face_recognition.face_encodings(X_frame, known_face_locations=X_face_locations)
    # Use the KNN model to find the best matches for the test face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
    # Predict classes and remove classifications that aren't within the threshold
    return [(pred, loc) if rec else ("unknown", loc)
            for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
Example #18
Source File: face_recog.py From EagleEye with Do What The F*ck You Want To Public License | 5 votes |
def constructIndexes(self, label):
    """
    Download each profile image, run face recognition against the known
    faces, and collect the indexes of images that contain *label*'s face.

    :param label: name of the person to look for
    :return: list of indexes into self.profile_img whose image matched
    """
    valid_links = []
    console.section('Analyzing')
    # Random 6-character temp file name to avoid collisions.
    file_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
    file_name += '.jpg'
    tmp_path = os.path.join(tempfile.gettempdir(), file_name)
    console.task("Storing Image in {0}".format(tmp_path))
    for num, i in enumerate(self.profile_img):
        console.task('Analyzing {0}...'.format(i.strip()[:90]))
        urlretrieve(i, tmp_path)
        frame = cv2.imread(tmp_path)
        # Upscale 2x so small profile pictures still yield detectable faces.
        big_frame = cv2.resize(frame, (0, 0), fx=2.0, fy=2.0)
        rgb_small_frame = big_frame[:, :, ::-1]  # BGR -> RGB
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations, num_jitters=self.num_jitters)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
            name = "Unknown"
            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = self.known_face_names[first_match_index]
            face_names.append(name)
        # Record this image's index if the target person was recognized.
        for _, name in zip(face_locations, face_names):
            if name == label:
                valid_links.append(num)
        # Clean up the temp file; urlretrieve re-creates it next iteration.
        # NOTE(review): original indentation was lost -- the cleanup may have
        # been placed after the loop instead; behavior is equivalent either way.
        if os.path.isfile(tmp_path):
            console.task("Removing {0}".format(tmp_path))
            os.remove(tmp_path)
    return valid_links
Example #19
Source File: face_recognizer.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 5 votes |
def initialize_database(self):
    """
    Reads the PNG images from ./people folder and creates a list of peoples

    The names of the image files are considered as their real names.

    For example;
    /people
      - mario.png
      - jennifer.png
      - melanie.png

    Returns:
    (tuple) (people_list, name_list) (features of people, names of people)
    """
    import os
    filenames = glob.glob(cd + '/people/*.png')
    people_list = []
    name_list = []
    for f in filenames:
        im = cv2.imread(f, 1)
        im = im.astype(np.uint8)
        # Assumes exactly one face per database image.
        people_list.append(fr.face_encodings(im)[0])
        # BUG FIX: the original used f.split('/')[-1].split('.')[0], which
        # fails on Windows path separators; os.path handles both.
        name_list.append(os.path.splitext(os.path.basename(f))[0])
    return (people_list, name_list)
Example #20
Source File: web_service_example_Simplified_Chinese.py From face_recognition with MIT License | 4 votes |
def detect_faces_in_image(file_stream):
    """Check whether the uploaded image contains Obama's face; return a JSON result."""
    # Obama's face encoding, pre-computed with face_recognition.face_encodings(img).
    known_face_encoding = [
        -0.09634063, 0.12095481, -0.00436332, -0.07643753, 0.0080383,
        0.01902981, -0.07184699, -0.09383309, 0.18518871, -0.09588896,
        0.23951106, 0.0986533, -0.22114635, -0.1363683, 0.04405268,
        0.11574756, -0.19899382, -0.09597053, -0.11969153, -0.12277931,
        0.03416885, -0.00267565, 0.09203379, 0.04713435, -0.12731361,
        -0.35371891, -0.0503444, -0.17841317, -0.00310897, -0.09844551,
        -0.06910533, -0.00503746, -0.18466514, -0.09851682, 0.02903969,
        -0.02174894, 0.02261871, 0.0032102, 0.20312519, 0.02999607,
        -0.11646006, 0.09432904, 0.02774341, 0.22102901, 0.26725179,
        0.06896867, -0.00490024, -0.09441824, 0.11115381, -0.22592428,
        0.06230862, 0.16559327, 0.06232892, 0.03458837, 0.09459756,
        -0.18777156, 0.00654241, 0.08582542, -0.13578284, 0.0150229,
        0.00670836, -0.08195844, -0.04346499, 0.03347827, 0.20310158,
        0.09987706, -0.12370517, -0.06683611, 0.12704916, -0.02160804,
        0.00984683, 0.00766284, -0.18980607, -0.19641446, -0.22800779,
        0.09010898, 0.39178532, 0.18818057, -0.20875394, 0.03097027,
        -0.21300618, 0.02532415, 0.07938635, 0.01000703, -0.07719778,
        -0.12651891, -0.04318593, 0.06219772, 0.09163868, 0.05039065,
        -0.04922386, 0.21839413, -0.02394437, 0.06173781, 0.0292527,
        0.06160797, -0.15553983, -0.02440624, -0.17509389, -0.0630486,
        0.01428208, -0.03637431, 0.03971229, 0.13983178, -0.23006812,
        0.04999552, 0.0108454, -0.03970895, 0.02501768, 0.08157793,
        -0.03224047, -0.04502571, 0.0556995, -0.24374914, 0.25514284,
        0.24795187, 0.04060191, 0.17597422, 0.07966681, 0.01920104,
        -0.01194376, -0.02300822, -0.17204897, -0.0596558, 0.05307484,
        0.07417042, 0.07126575, 0.00209804]
    # Load the image uploaded by the user.
    img = face_recognition.load_image_file(file_stream)
    # Encode the faces found in the uploaded image.
    unknown_face_encodings = face_recognition.face_encodings(img)
    face_found = False
    is_obama = False
    if len(unknown_face_encodings) > 0:
        face_found = True
        # Check whether the first face in the image is Obama.
        match_results = face_recognition.compare_faces([known_face_encoding], unknown_face_encodings[0])
        if match_results[0]:
            is_obama = True
    # Return the recognition result as JSON key/value pairs.
    result = {
        "face_found_in_image": face_found,
        "is_picture_of_obama": is_obama
    }
    return jsonify(result)
Example #21
Source File: facerec_from_webcam_multiprocessing.py From face_recognition with MIT License | 4 votes |
def process(worker_id, read_frame_list, write_frame_list, Global, worker_num):
    """
    Worker loop: take frames in turn, recognize and annotate faces, pass on.

    :param worker_id: this worker's slot in the read/write rotation
    :param read_frame_list: shared dict of frames to process, keyed by worker
    :param write_frame_list: shared dict of processed frames, keyed by worker
    :param Global: shared manager namespace (known faces, turn counters, flags)
    :param worker_num: total number of workers in the rotation
    """
    known_face_encodings = Global.known_face_encodings
    known_face_names = Global.known_face_names
    while not Global.is_exit:
        # Wait to read
        while Global.read_num != worker_id or Global.read_num != prev_id(Global.buff_num, worker_num):
            # If the user has requested to end the app, then stop waiting for webcam frames
            if Global.is_exit:
                break
            time.sleep(0.01)
        # Delay to make the video look smoother
        time.sleep(Global.frame_delay)
        # Read a single frame from frame list
        frame_process = read_frame_list[worker_id]
        # Expect next worker to read frame
        Global.read_num = next_id(Global.read_num, worker_num)
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_frame = frame_process[:, :, ::-1]
        # Find all the faces and face encodings in the frame of video, cost most time
        face_locations = face_recognition.face_locations(rgb_frame)
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
        # Loop through each face in this frame of video
        for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
            # Draw a box around the face
            cv2.rectangle(frame_process, (left, top), (right, bottom), (0, 0, 255), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame_process, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame_process, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        # Wait to write
        while Global.write_num != worker_id:
            time.sleep(0.01)
        # Send frame to global
        write_frame_list[worker_id] = frame_process
        # Expect next worker to write frame
        Global.write_num = next_id(Global.write_num, worker_num)
Example #22
Source File: web_service_example.py From face_recognition with MIT License | 4 votes |
def detect_faces_in_image(file_stream):
    """Check whether the uploaded image contains Obama's face; return the result as JSON."""
    # Pre-calculated face encoding of Obama generated with face_recognition.face_encodings(img)
    known_face_encoding = [
        -0.09634063, 0.12095481, -0.00436332, -0.07643753, 0.0080383,
        0.01902981, -0.07184699, -0.09383309, 0.18518871, -0.09588896,
        0.23951106, 0.0986533, -0.22114635, -0.1363683, 0.04405268,
        0.11574756, -0.19899382, -0.09597053, -0.11969153, -0.12277931,
        0.03416885, -0.00267565, 0.09203379, 0.04713435, -0.12731361,
        -0.35371891, -0.0503444, -0.17841317, -0.00310897, -0.09844551,
        -0.06910533, -0.00503746, -0.18466514, -0.09851682, 0.02903969,
        -0.02174894, 0.02261871, 0.0032102, 0.20312519, 0.02999607,
        -0.11646006, 0.09432904, 0.02774341, 0.22102901, 0.26725179,
        0.06896867, -0.00490024, -0.09441824, 0.11115381, -0.22592428,
        0.06230862, 0.16559327, 0.06232892, 0.03458837, 0.09459756,
        -0.18777156, 0.00654241, 0.08582542, -0.13578284, 0.0150229,
        0.00670836, -0.08195844, -0.04346499, 0.03347827, 0.20310158,
        0.09987706, -0.12370517, -0.06683611, 0.12704916, -0.02160804,
        0.00984683, 0.00766284, -0.18980607, -0.19641446, -0.22800779,
        0.09010898, 0.39178532, 0.18818057, -0.20875394, 0.03097027,
        -0.21300618, 0.02532415, 0.07938635, 0.01000703, -0.07719778,
        -0.12651891, -0.04318593, 0.06219772, 0.09163868, 0.05039065,
        -0.04922386, 0.21839413, -0.02394437, 0.06173781, 0.0292527,
        0.06160797, -0.15553983, -0.02440624, -0.17509389, -0.0630486,
        0.01428208, -0.03637431, 0.03971229, 0.13983178, -0.23006812,
        0.04999552, 0.0108454, -0.03970895, 0.02501768, 0.08157793,
        -0.03224047, -0.04502571, 0.0556995, -0.24374914, 0.25514284,
        0.24795187, 0.04060191, 0.17597422, 0.07966681, 0.01920104,
        -0.01194376, -0.02300822, -0.17204897, -0.0596558, 0.05307484,
        0.07417042, 0.07126575, 0.00209804]
    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)
    face_found = False
    is_obama = False
    if len(unknown_face_encodings) > 0:
        face_found = True
        # See if the first face in the uploaded image matches the known face of Obama
        match_results = face_recognition.compare_faces([known_face_encoding], unknown_face_encodings[0])
        if match_results[0]:
            is_obama = True
    # Return the result as json
    result = {
        "face_found_in_image": face_found,
        "is_picture_of_obama": is_obama
    }
    return jsonify(result)
Example #23
Source File: face-rec-emotion.py From Face-and-Emotion-Recognition with MIT License | 4 votes |
def face_compare(frame,process_this_frame): print ("compare") # Resize frame of video to 1/4 size for faster face recognition processing small_frame = cv2.resize(frame, (0, 0), fx=0.50, fy=0.50) # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses) rgb_small_frame = small_frame[:, :, ::-1] # Only process every other frame of video to save time if process_this_frame: # Find all the faces and face encodings in the current frame of video face_locations = face_recognition.face_locations(rgb_small_frame) face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations) face_names = [] for face_encoding in face_encodings: # See if the face is a match for the known face(s) matches = face_recognition.compare_faces(known_face_encodings, face_encoding) name = "Unknown" # If a match was found in known_face_encodings, just use the first one. if True in matches: first_match_index = matches.index(True) name = known_face_names[first_match_index] face_names.append(name) process_this_frame = not process_this_frame return face_names # Display the results for (top, right, bottom, left), name in zip(face_locations, face_names): # Scale back up face locations since the frame we detected in was scaled to 1/4 size top *= 2 right *= 2 bottom *= 2 left *= 2 #cv2.rectangle(frame, (left, bottom+36), (right, bottom), (0, 0, 0), cv2.FILLED) font = cv2.FONT_HERSHEY_DUPLEX cv2.putText(frame, name, (left + 6, bottom+20), font, 0.3, (255, 255, 255), 1) print ("text print") # starting video streaming
Example #24
Source File: encoding_images.py From face-attendance-machine with Apache License 2.0 | 4 votes |
def test_load():
    """Smoke-check load_encodings() by printing the encodings and names it returns."""
    encodings, names = load_encodings()
    print("===========face_encodings================")
    print(encodings)
    print("===========================")
    print(names)
    print("===========face_names================")
Example #25
Source File: face.py From face_recognition with GNU General Public License v3.0 | 4 votes |
def get_userinfo_fromapifile(self):
    """Load user data from the local API file and prime per-user recognition state."""
    # Read the API data as UTF-8 and parse it.
    f = open("./api", encoding='UTF-8')  # read user data from the API file
    self.user_info = json.loads(f.read())
    f.close()
    # Print the API data
    # print(self.user_info['result'])
    # Copy the user data into the relevant dicts / variables.
    self.labels = []  # cleared before each use
    # Face-recognition state:
    self.labels = []
    self.person = []
    for user in self.user_info['result']:
        self.labels.append(user['name'])
        # Download the avatar photo (skipped if it is already cached).
        if os.path.exists('./avatar/'+user['avatar_name']):
            pass
        # else:
        #     self.download_avatar(user['avatar'], './avatar/'+user['avatar_name'])
        ################################################
        # Point the record at the locally cached avatar.
        user['avatar'] = './avatar/'+user['avatar_name']
        ################################################
        # Download the greeting audio file if it is not cached yet.
        #################################
        if os.path.exists('./audio/'+user['uid']+'.mp3'):
            pass
        else:
            self.init_audio(user['name'], './audio/'+user['uid']+'.mp3')
        ###############################################
        # Encode the first face in the cached avatar image.
        self.person.append(
            face_recognition.face_encodings(
                face_recognition.load_image_file(user["avatar"]))[0])
    # print(self.labels)
    # Initialise per-user sign-in interval bookkeeping.
    for i in range(len(self.labels)):
        # is_signto counts consecutive recognitions; 3 consecutive frames of
        # the same person count as a successful sign-in.
        self.names['is_signto%s'%i] = 0
        self.names['time_first%s'%i] = 0
        self.names['time_last%s'%i] = int(time.time())
Example #26
Source File: encoding_images.py From face-attendance-machine with Apache License 2.0 | 4 votes |
def encoding_ones_images(name): """ 对path路径下的子文件夹中的图片进行编码, TODO: 对人脸数据进行历史库中的人脸向量进行欧式距离的比较,当距离小于某个阈值的时候提醒: 如果相似的是本人,则跳过该条记录,并提醒已经存在,否则警告人脸过度相似问题, :param path: :return: """ # with open("./dataset/encoded_face_names.txt", 'w') as f: # lines = f.readlines() # print(lines) with open(name_and_encoding, 'w') as f: image_dirs = os.path.join(data_path, name) files = [os.path.join(image_dirs, x) for x in os.listdir(image_dirs) if os.path.isfile(os.path.join(image_dirs, x))] print('---name :', files) person_image_encoding = [] for image_path in files: print("image name is ", image_path) _image = face_recognition.load_image_file(image_path ) face_encodings = face_recognition.face_encodings(_image) # name = os.path.split(image_path)[1] if face_encodings and len(face_encodings) == 1: if len(person_image_encoding) == 0: person_image_encoding.append(face_encodings[0]) known_face_names.append(name) continue for i in range(len(person_image_encoding)): distances = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread) if False in distances: person_image_encoding.append(face_encodings[0]) known_face_names.append(name) print(name, " new feature") f.write(name + ":" + str(face_encodings[0]) + "\n") break # face_encoding = face_recognition.face_encodings(_image)[0] # face_recognition.compare_faces() known_face_encodings.extend(person_image_encoding) bb = np.array(known_face_encodings) print("--------") KNOWN_FACE_ENCODINGS = "./dataset/known_face_encodings_{}.npy" # 已知人脸向量 KNOWN_FACE_NANE = "./dataset/known_face_name_{}.npy" # 已知人脸名称 np.save(KNOWN_FACE_ENCODINGS.format(int(time.time())), known_face_encodings) np.save(KNOWN_FACE_NANE.format(int(time.time())), known_face_names)
Example #27
Source File: encoding_images.py From face-attendance-machine with Apache License 2.0 | 4 votes |
def encoding_images_mult_thread(path,threads=8): """ 对path路径下的子文件夹中的图片进行编码, TODO: 对人脸数据进行历史库中的人脸向量进行欧式距离的比较,当距离小于某个阈值的时候提醒: 如果相似的是本人,则跳过该条记录,并提醒已经存在,否则警告人脸过度相似问题, :param path: :return: """ # with open("./dataset/encoded_face_names.txt", 'w') as f: # lines = f.readlines() # print(lines) with open(name_and_encoding, 'w') as f: subdirs = [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))] # if for subdir in subdirs: print('---name :', subdir) person_image_encoding = [] for y in os.listdir(subdir): print("image name is ", y) _image = face_recognition.load_image_file(os.path.join(subdir, y)) face_encodings = face_recognition.face_encodings(_image) name = os.path.split(subdir)[-1] if face_encodings and len(face_encodings) == 1: if len(person_image_encoding) == 0: person_image_encoding.append(face_encodings[0]) known_face_names.append(name) continue for i in range(len(person_image_encoding)): distances = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread) if False in distances: person_image_encoding.append(face_encodings[0]) known_face_names.append(name) print(name, " new feature") f.write(name + ":" + str(face_encodings[0]) + "\n") break # face_encoding = face_recognition.face_encodings(_image)[0] # face_recognition.compare_faces() known_face_encodings.extend(person_image_encoding) bb = np.array(known_face_encodings) print("--------") np.save(KNOWN_FACE_ENCODINGS, known_face_encodings) np.save(KNOWN_FACE_NANE, known_face_names)
Example #28
Source File: facerec_from_webcam_mult_thread.py From face-attendance-machine with Apache License 2.0 | 4 votes |
def face_process(frame):
    """
    Recognize and annotate all faces in one BGR video frame.

    :param frame: BGR frame (OpenCV channel order); annotated in place
    :return: the annotated frame
    """
    # Resize frame of video to 1/4 size for faster face recognition processing
    myprint("face process resize start", time.time())
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    myprint("face process small_frame start", time.time())
    rgb_small_frame = small_frame[:, :, ::-1]
    # Find all the faces and face encodings in the current frame of video
    # face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
    myprint('face_locations start', time.time())
    face_locations = face_recognition.face_locations(rgb_small_frame, model="hog")
    myprint('face_locations end', time.time())
    myprint('face_encodings start', time.time())
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    myprint('face_encodings end', time.time())
    face_names = []
    for face_encoding in face_encodings:
        # optimize start (translated): KNN-style vote -- rank * weight summed
        # per class, the top-1 class wins.
        name, dis = vote_class(face_encoding)
        # optimize end
        face_names.append(name)  # record the recognized face
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        myprint('putText start', time.time())
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        myprint("putText end " + name, time.time())
        # say hello and save record to file
        myprint('process_face_records start', time.time())
        process_face_records(name)
        myprint('process_face_records end', time.time())
    # Display the resulting image
    # cv2.imshow('Video', frame)
    myprint("face process end", time.time())
    return frame