Python face_recognition.load_image_file() Examples
The following are 23 code examples of face_recognition.load_image_file(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module face_recognition, or try the search function.
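
For quick orientation before the examples: face_recognition.load_image_file() reads an image file (or a file-like object) into a NumPy array in RGB order, which the other face_recognition functions then operate on. A minimal usage sketch, with an illustrative file name:

import face_recognition

# Load the image into a NumPy array of shape (height, width, 3), in RGB order.
image = face_recognition.load_image_file("my_picture.jpg")  # illustrative path

# Each face location is a (top, right, bottom, left) tuple in pixel coordinates.
face_locations = face_recognition.face_locations(image)

# One 128-dimensional encoding per detected face.
face_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
print("Found {} face(s)".format(len(face_locations)))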
Example #1
Source File: encoding_images.py From face-attendance-machine with Apache License 2.0 | 7 votes |
def encoding_images(path):
    """
    Encode the images in the subdirectories under `path`.
    TODO: compare each face against the face vectors already stored in the history library
    using Euclidean distance; when the distance is below a certain threshold, notify:
    if the similar face is the same person, skip the record and report that it already exists,
    otherwise warn that the faces are overly similar.
    :param path:
    :return:
    """
    with open(name_and_encoding, 'w') as f:
        subdirs = [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]
        for subdir in subdirs:
            print('process image name :', subdir)
            person_image_encoding = []
            for y in os.listdir(subdir):
                print("image name is ", y)
                _image = face_recognition.load_image_file(os.path.join(subdir, y))
                face_encodings = face_recognition.face_encodings(_image)
                name = os.path.split(subdir)[-1]
                if face_encodings and len(face_encodings) == 1:
                    if len(person_image_encoding) == 0:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        continue
                    for i in range(len(person_image_encoding)):
                        distances = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                        if False in distances:
                            person_image_encoding.append(face_encodings[0])
                            known_face_names.append(name)
                            print(name, " new feature")
                            f.write(name + ":" + str(face_encodings[0]) + "\n")
                            break
                    # face_encoding = face_recognition.face_encodings(_image)[0]
                    # face_recognition.compare_faces()
            known_face_encodings.extend(person_image_encoding)
    bb = np.array(known_face_encodings)
    print("--------")
    np.save(KNOWN_FACE_ENCODINGS, known_face_encodings)
    np.save(KNOWN_FACE_NANE, known_face_names)
Example #2
Source File: face_recog.py From EagleEye with Do What The F*ck You Want To Public License | 6 votes |
def loadKnown(self, label):
    console.task('Loading known faces')
    pa_g = Path('./known')
    pathlist = []
    for ext in ['.jpg', '.JPG', '.png', '.PNG', '.jpeg', '.JPEG', '.bmp', '.BMP']:
        tmp_pl = pa_g.glob('**/*{}'.format(ext))
        for t in tmp_pl:
            pathlist.append(t)
    for path in pathlist:
        p_str = str(path)
        delim = '/'
        if platform == "win32":
            delim = '\\'
        console.subtask('Loading {0}'.format(p_str.split(delim)[1]))
        im = face_recognition.load_image_file(p_str)
        encoding = face_recognition.face_encodings(im, num_jitters=self.num_jitters)
        for e in encoding:
            self.known_face_encodings.append(e)
            self.known_face_names.append(label)
Example #3
Source File: t_find_faces_in_picture.py From FaceRank with GNU General Public License v3.0 | 6 votes |
def find_and_save_face(web_file, face_file):
    # Load the jpg file into a numpy array
    image = face_recognition.load_image_file(web_file)
    print(image.dtype)

    # Find all the faces in the image
    face_locations = face_recognition.face_locations(image)

    print("I found {} face(s) in this photograph.".format(len(face_locations)))

    for face_location in face_locations:
        # Print the location of each face in this image
        top, right, bottom, left = face_location
        print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))

        # You can access the actual face itself like this:
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        pil_image.save(face_file)
Example #4
Source File: find_faces_in_picture.py From FaceRank with GNU General Public License v3.0 | 6 votes |
def find_and_save_face(web_file, face_file):
    # Load the jpg file into a numpy array
    image = face_recognition.load_image_file(web_file)
    print(image.dtype)

    # Find all the faces in the image
    face_locations = face_recognition.face_locations(image)

    print("I found {} face(s) in this photograph.".format(len(face_locations)))

    for face_location in face_locations:
        # Print the location of each face in this image
        top, right, bottom, left = face_location
        print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))

        # You can access the actual face itself like this:
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        pil_image.save(face_file)
Example #5
Source File: facerec_service.py From face_recognition with MIT License | 5 votes |
def calc_face_encoding(image):
    # Currently only use first face found on picture
    loaded_image = face_recognition.load_image_file(image)
    faces = face_recognition.face_encodings(loaded_image)

    # If more than one face was found on the given image -> error
    if len(faces) > 1:
        raise Exception(
            "Found more than one face in the given training image.")

    # If no face was found on the given image -> error
    if not faces:
        raise Exception("Could not find any face in the given training image.")

    return faces[0]
Example #6
Source File: extraction.py From Mosaicer with MIT License | 5 votes |
def extract(folder, file_name):
    """Extract faces from images

    Args:
        folder: folder
        file_name: filename
    """
    if not os.path.exists('result'):
        os.makedirs('result')
    file_names = []
    if not file_name:
        for dirpath, dirnames, filenames in os.walk(folder):
            for file in filenames:
                if check_img(file):
                    full_path = os.path.join(dirpath, file)
                    file_names.append(full_path)
    else:
        file_names.append(os.path.join(folder, file_name))
    for file in file_names:
        print(file)
        image = face_recognition.load_image_file(file)
        # frontal_image = run(image)
        face_locations = face_recognition.face_locations(image)
        count = 0
        for face_location in face_locations:
            top, right, bottom, left = face_location
            face_image = image[top:bottom, left:right]
            img_output = cv2.resize(face_image, (299, 299), interpolation=cv2.INTER_AREA)
            file_name, file_ext = os.path.splitext(os.path.basename(file))
            delimiter = ''
            if count != 0:
                delimiter = '_' + str(count)
            path = file_name + delimiter + file_ext
            path = os.path.join('result', path)
            cv2.imwrite(path, cv2.cvtColor(img_output, cv2.COLOR_RGB2BGR))
            count += 1
Example #7
Source File: find_same_person.py From FaceDataset with Apache License 2.0 | 5 votes |
def find_same_person(person_image_path):
    # Get all of this person's images
    image_paths = os.listdir(person_image_path)
    known_face_encodings = []
    for image_path in image_paths:
        img_path = os.path.join(person_image_path, image_path)
        try:
            image = face_recognition.load_image_file(img_path)
            encodings = face_recognition.face_encodings(image, num_jitters=10)[0]
            known_face_encodings.append(encodings)
        except Exception as e:
            try:
                os.remove(img_path)
            except Exception as e:
                print(e)
    for image_path in image_paths:
        try:
            print(image_path)
            img_path = os.path.join(person_image_path, image_path)
            image = face_recognition.load_image_file(img_path)
            a_single_unknown_face_encoding = face_recognition.face_encodings(image, num_jitters=10)[0]
            results = face_recognition.compare_faces(known_face_encodings, a_single_unknown_face_encoding, tolerance=0.5)
            results = numpy.array(results).astype(numpy.int64)
            if numpy.sum(results) > 5:
                main_path = os.path.join(person_image_path, '0.jpg')
                if os.path.exists(main_path):
                    os.remove(main_path)
                shutil.copyfile(img_path, main_path)
                break
        except:
            pass
Example #8
Source File: main.py From python-examples with MIT License | 5 votes |
def detect(args):
    arr = face_recognition.load_image_file(args.input)
    face_locations = face_recognition.face_locations(arr)
    print('found:', len(face_locations))

    img = Image.open(args.input)
    draw = ImageDraw.Draw(img)
    for item in face_locations:
        # array uses (row, column) which means (y, x) but I need (x, y)
        item = item[1], item[0], item[3], item[2]
        draw.rectangle(item, width=3)
    img.save(args.output)
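
A minimal command-line driver for the detect() helper above might look like the sketch below; the --input/--output flag names are assumptions inferred from how args.input and args.output are used, not code from the original project.

import argparse

# Assumes detect() from the example above (and its PIL/face_recognition imports) is in scope.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Draw rectangles around detected faces')
    parser.add_argument('--input', required=True, help='path to the source image')  # assumed flag name
    parser.add_argument('--output', required=True, help='path for the annotated output image')  # assumed flag name
    detect(parser.parse_args())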
Example #9
Source File: FaceRecognition.py From robot-camera-platform with GNU General Public License v3.0 | 5 votes |
def configure(self):
    import face_recognition
    image = face_recognition.load_image_file(self.__file_path)
    self.__known_face_encodings = [face_recognition.face_encodings(image)[0]]
Example #10
Source File: common.py From Intelegent_Lock with MIT License | 5 votes |
def get_users():
    known_names = []
    known_encods = []
    for i in glob("people/*.jpg"):
        img = face_recognition.load_image_file(i)
        encoding = face_recognition.face_encodings(img)[0]
        known_encods.append(encoding)
        known_names.append(i[7:-4])
    return known_names, known_encods
Example #11
Source File: Person.py From PyRecognizer with MIT License | 5 votes |
def init_dataset_core(detection_model, jitters, encoding_models, img_path=None):
    """
    Delegated core method for parallelized work
    :detection_model
    :jitters
    :param img_path:
    :return:
    """
    try:
        image = load_image_file(img_path)
    except OSError:
        log.error(
            "init_dataset | === FATAL === | Image {} is corrupted!!".format(img_path))
        return None
    # log.debug("initDataset | Image loaded! | Searching for face ...")
    # Array of w,x,y,z coordinates
    # NOTE: batch_face_locations could be used in order to parallelize the image init, but unfortunately
    # this is the only GPU I have right now. And, of course, I'll try not to burn it
    face_bounding_boxes = face_locations(image, model=detection_model)
    face_data = None
    if len(face_bounding_boxes) == 1:
        log.info(
            "initDataset | Image {0} has only 1 face, loading for future training ...".format(img_path))
        # Loading the X [data] using 300 different distortions
        face_data = face_encodings(image, known_face_locations=face_bounding_boxes,
                                   num_jitters=jitters, model=encoding_models)[0]
    else:
        log.error(
            "initDataset | Image {0} not suitable for training!".format(img_path))
        if len(face_bounding_boxes) == 0:
            log.error("initDataset | I've not found any face :/ ")
        else:
            log.error(
                "initDataset | Found more than one face, too much for me Sir :&")
    return face_data
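
For reference, the detection_model and encoding_models arguments map directly onto the face_recognition API: face_locations() accepts model='hog' (CPU) or model='cnn' (more accurate, GPU-oriented), and face_encodings() accepts model='small' or model='large'. A hypothetical call, with an illustrative image path:

# Hypothetical invocation of the helper above; the path is illustrative only.
encoding = init_dataset_core(detection_model="hog",
                             jitters=1,
                             encoding_models="large",
                             img_path="dataset/person_01/img_001.jpg")
if encoding is not None:
    print("Got a 128-dimensional face encoding")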
Example #12
Source File: facerec_service.py From face_recognition with MIT License | 5 votes |
def detect_faces_in_image(file_stream):
    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    uploaded_faces = face_recognition.face_encodings(img)
    # Defaults for the result object
    faces_found = len(uploaded_faces)
    faces = []
    if faces_found:
        face_encodings = list(faces_dict.values())
        for uploaded_face in uploaded_faces:
            match_results = face_recognition.compare_faces(
                face_encodings, uploaded_face)
            for idx, match in enumerate(match_results):
                if match:
                    match = list(faces_dict.keys())[idx]
                    match_encoding = face_encodings[idx]
                    dist = face_recognition.face_distance([match_encoding],
                                                          uploaded_face)[0]
                    faces.append({
                        "id": match,
                        "dist": dist
                    })
    return {
        "count": faces_found,
        "faces": faces
    }

# <Picture functions> #

# <Controller>
Example #13
Source File: detect_facial_features.py From Python-for-Everyday-Life with MIT License | 5 votes |
def show_facial_features(image_path):
    # Load the jpg file into an array
    image = face_recognition.load_image_file(image_path)

    # these are the features that will be detected and shown
    facial_features = [
        'chin',
        'left_eyebrow',
        'right_eyebrow',
        'nose_bridge',
        'nose_tip',
        'left_eye',
        'right_eye',
        'top_lip',
        'bottom_lip']
    blue = ImageColor.getcolor('blue', 'RGB')

    # Find all facial landmarks for all the faces in the image
    face_landmarks_list = face_recognition.face_landmarks(image)
    img_obj = Image.fromarray(image)

    # draw lines upon facial features
    for face_landmarks in face_landmarks_list:
        drawing = ImageDraw.Draw(img_obj)
        for facial_feature in facial_features:
            drawing.line(face_landmarks[facial_feature], width=2, fill=blue)

    # show image
    img_obj.show()
Example #14
Source File: face_recognition.py From ravestate with BSD 3-Clause "New" or "Revised" License | 5 votes |
def recognize_face_from_image_file(image_file: str) -> Optional[ndarray]:
    if PYROBOY_AVAILABLE:
        return FaceRec.get_biggest_face_encoding(image_file)
    else:
        logger.warning("Falling back to basic Face Recognition functions, since Pyroboy is unavailable!")
        image = fr.load_image_file(image_file)
        faces = fr.face_encodings(image)
        if faces:
            return faces[0]
    return None
Example #15
Source File: face_recognition_knn.py From face_recognition with MIT License | 5 votes |
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
    """
    Recognizes faces in given image using a trained KNN classifier

    :param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be supplied.
    :param distance_threshold: (optional) distance threshold for face classification. the larger it is,
           the more chance of mis-classifying an unknown person as a known one.
    :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
        For faces of unrecognized persons, the name 'unknown' will be returned.
    """
    if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
        raise Exception("Invalid image path: {}".format(X_img_path))

    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either through knn_clf or model_path")

    # Load a trained KNN model (if one was passed in)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)

    # Load image file and find face locations
    X_img = face_recognition.load_image_file(X_img_path)
    X_face_locations = face_recognition.face_locations(X_img)

    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        return []

    # Find encodings for faces in the test image
    faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)

    # Use the KNN model to find the best matches for the test face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]

    # Predict classes and remove classifications that aren't within the threshold
    return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
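
A hypothetical call to the predict() helper above; the image and model paths are illustrative, not taken from the original project.

# Assumes predict() from the example above plus its module-level imports
# (face_recognition, pickle, os, ALLOWED_EXTENSIONS) are available.
for name, (top, right, bottom, left) in predict("examples/group_photo.jpg",
                                                model_path="trained_knn_model.clf"):
    print("Found {} at pixel box (top={}, right={}, bottom={}, left={})".format(name, top, right, bottom, left))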
Example #16
Source File: web_service.py From MMFinder with MIT License | 5 votes |
def get_face_and_save(filename):
    img_path = f"{UPLOAD_DIR}/{filename}"
    image = face_recognition.load_image_file(img_path)
    locations = face_recognition.face_locations(image)
    if len(locations) == 1:
        # save the face of mm
        top, right, bottom, left = locations[0]
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        with open(f"{UPLOAD_DIR}/face-{filename}", "wb") as f:
            pil_image.save(f)
    return len(locations)
Example #17
Source File: filter_images.py From MMFinder with MIT License | 5 votes |
def get_face_and_save(path):
    image_path = f'{IMAGES_PATH}/{path}'
    image = face_recognition.load_image_file(image_path)
    locations = face_recognition.face_locations(image)
    if len(locations) == 1:
        # save the face of mm
        top, right, bottom, left = locations[0]
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        with open(f'{IMAGES_PATH}/faces/face-{path}', "wb") as f:
            pil_image.save(f)
    return len(locations)
Example #18
Source File: web_service_example_Simplified_Chinese.py From face_recognition with MIT License | 4 votes |
def detect_faces_in_image(file_stream):
    # Obama's face encoding, entered in advance using face_recognition.face_encodings(img)
    known_face_encoding = [-0.09634063, 0.12095481, -0.00436332, -0.07643753,
                           0.0080383, 0.01902981, -0.07184699, -0.09383309,
                           0.18518871, -0.09588896, 0.23951106, 0.0986533,
                           -0.22114635, -0.1363683, 0.04405268, 0.11574756,
                           -0.19899382, -0.09597053, -0.11969153, -0.12277931,
                           0.03416885, -0.00267565, 0.09203379, 0.04713435,
                           -0.12731361, -0.35371891, -0.0503444, -0.17841317,
                           -0.00310897, -0.09844551, -0.06910533, -0.00503746,
                           -0.18466514, -0.09851682, 0.02903969, -0.02174894,
                           0.02261871, 0.0032102, 0.20312519, 0.02999607,
                           -0.11646006, 0.09432904, 0.02774341, 0.22102901,
                           0.26725179, 0.06896867, -0.00490024, -0.09441824,
                           0.11115381, -0.22592428, 0.06230862, 0.16559327,
                           0.06232892, 0.03458837, 0.09459756, -0.18777156,
                           0.00654241, 0.08582542, -0.13578284, 0.0150229,
                           0.00670836, -0.08195844, -0.04346499, 0.03347827,
                           0.20310158, 0.09987706, -0.12370517, -0.06683611,
                           0.12704916, -0.02160804, 0.00984683, 0.00766284,
                           -0.18980607, -0.19641446, -0.22800779, 0.09010898,
                           0.39178532, 0.18818057, -0.20875394, 0.03097027,
                           -0.21300618, 0.02532415, 0.07938635, 0.01000703,
                           -0.07719778, -0.12651891, -0.04318593, 0.06219772,
                           0.09163868, 0.05039065, -0.04922386, 0.21839413,
                           -0.02394437, 0.06173781, 0.0292527, 0.06160797,
                           -0.15553983, -0.02440624, -0.17509389, -0.0630486,
                           0.01428208, -0.03637431, 0.03971229, 0.13983178,
                           -0.23006812, 0.04999552, 0.0108454, -0.03970895,
                           0.02501768, 0.08157793, -0.03224047, -0.04502571,
                           0.0556995, -0.24374914, 0.25514284, 0.24795187,
                           0.04060191, 0.17597422, 0.07966681, 0.01920104,
                           -0.01194376, -0.02300822, -0.17204897, -0.0596558,
                           0.05307484, 0.07417042, 0.07126575, 0.00209804]

    # Load the image uploaded by the user
    img = face_recognition.load_image_file(file_stream)
    # Compute encodings for the faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)

    face_found = False
    is_obama = False

    if len(unknown_face_encodings) > 0:
        face_found = True
        # See whether the first face in the image is Obama
        match_results = face_recognition.compare_faces([known_face_encoding], unknown_face_encodings[0])
        if match_results[0]:
            is_obama = True

    # Return the recognition result as a JSON key-value structure
    result = {
        "face_found_in_image": face_found,
        "is_picture_of_obama": is_obama
    }
    return jsonify(result)
Example #19
Source File: face.py From face_recognition with GNU General Public License v3.0 | 4 votes |
def get_userinfo_fromapifile(self):
    # Read the API data as UTF-8 and copy it into the arrays below
    f = open("./api", encoding='UTF-8')
    # Read the user data from the API file
    self.user_info = json.loads(f.read())
    f.close()
    # Print the API data
    # print(self.user_info['result'])
    # Copy the user data into the corresponding dicts and other variables
    self.labels = []  # the stored user data must be cleared before each use
    # Face recognition parameters below
    self.labels = []
    self.person = []
    for user in self.user_info['result']:
        self.labels.append(user['name'])
        ## Download the avatar photo
        if os.path.exists('./avatar/'+user['avatar_name']):
            pass
        # else:
        #     self.download_avatar(user['avatar'], './avatar/'+user['avatar_name'])
        ################################################
        ## Set the location of the user's avatar
        user['avatar'] = './avatar/'+user['avatar_name']
        ################################################
        ## Download the audio file #################################
        if os.path.exists('./audio/'+user['uid']+'.mp3'):
            pass
        else:
            self.init_audio(user['name'], './audio/'+user['uid']+'.mp3')
        ###############################################
        self.person.append(
            face_recognition.face_encodings(
                face_recognition.load_image_file(user["avatar"]))[0])
    # Print the user name data
    # print(self.labels)
    # Store the initial timestamps for each attendee's sign-in interval
    for i in range(len(self.labels)):
        # is_signto counts the number of recognitions; if the same person appears in 3 consecutive frames,
        # the sign-in succeeds. Sign-in is only granted once the consecutive successful-recognition count
        # exceeds a threshold; self.names['is_signto%s' % i] is the accumulator variable.
        self.names['is_signto%s' % i] = 0
        self.names['time_first%s' % i] = 0
        self.names['time_last%s' % i] = int(time.time())
Example #20
Source File: faceblur.py From faceblur with MIT License | 4 votes |
def face_blur(src_img, dest_img, zoom_in=1):
    '''
    Recognize and blur all faces in the source image file, then save as destination image file.
    '''
    sys.stdout.write("%s:processing... \r" % (src_img))
    sys.stdout.flush()

    # Initialize some variables
    face_locations = []
    photo = face_recognition.load_image_file(src_img)

    # Resize image to 1/zoom_in size for faster face detection processing
    small_photo = cv2.resize(photo, (0, 0), fx=1/zoom_in, fy=1/zoom_in)

    # Find all the faces and face encodings in the current frame of video
    face_locations = face_recognition.face_locations(small_photo, model="cnn")
    if face_locations:
        print("%s:There are %s faces at " % (src_img, len(face_locations)), face_locations)
    else:
        print('%s:There is no face.' % (src_img))
        return False

    # Blur all faces
    photo = cv2.imread(src_img)
    for top, right, bottom, left in face_locations:
        # Scale back up face locations since the frame we detected in was scaled to 1/zoom_in size
        top *= zoom_in
        right *= zoom_in
        bottom *= zoom_in
        left *= zoom_in

        # Extract the region of the image that contains the face
        face_image = photo[top:bottom, left:right]

        # Blur the face image
        face_image = cv2.GaussianBlur(face_image, (21, 21), 0)

        # Put the blurred face region back into the frame image
        photo[top:bottom, left:right] = face_image

    # Save image to file
    cv2.imwrite(dest_img, photo)
    print('Face blurred photo has been saved in %s' % dest_img)
    return True
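
A small batch driver for face_blur() could look like the following sketch; the folder names are purely illustrative and not part of the original project.

import glob
import os

# Assumes face_blur() from the example above and its imports (cv2, sys, face_recognition).
os.makedirs("blurred", exist_ok=True)  # illustrative output folder
for src in glob.glob("photos/*.jpg"):  # illustrative input folder
    dest = os.path.join("blurred", os.path.basename(src))
    face_blur(src, dest, zoom_in=2)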
Example #21
Source File: web_service_example.py From face_recognition with MIT License | 4 votes |
def detect_faces_in_image(file_stream):
    # Pre-calculated face encoding of Obama generated with face_recognition.face_encodings(img)
    known_face_encoding = [-0.09634063, 0.12095481, -0.00436332, -0.07643753,
                           0.0080383, 0.01902981, -0.07184699, -0.09383309,
                           0.18518871, -0.09588896, 0.23951106, 0.0986533,
                           -0.22114635, -0.1363683, 0.04405268, 0.11574756,
                           -0.19899382, -0.09597053, -0.11969153, -0.12277931,
                           0.03416885, -0.00267565, 0.09203379, 0.04713435,
                           -0.12731361, -0.35371891, -0.0503444, -0.17841317,
                           -0.00310897, -0.09844551, -0.06910533, -0.00503746,
                           -0.18466514, -0.09851682, 0.02903969, -0.02174894,
                           0.02261871, 0.0032102, 0.20312519, 0.02999607,
                           -0.11646006, 0.09432904, 0.02774341, 0.22102901,
                           0.26725179, 0.06896867, -0.00490024, -0.09441824,
                           0.11115381, -0.22592428, 0.06230862, 0.16559327,
                           0.06232892, 0.03458837, 0.09459756, -0.18777156,
                           0.00654241, 0.08582542, -0.13578284, 0.0150229,
                           0.00670836, -0.08195844, -0.04346499, 0.03347827,
                           0.20310158, 0.09987706, -0.12370517, -0.06683611,
                           0.12704916, -0.02160804, 0.00984683, 0.00766284,
                           -0.18980607, -0.19641446, -0.22800779, 0.09010898,
                           0.39178532, 0.18818057, -0.20875394, 0.03097027,
                           -0.21300618, 0.02532415, 0.07938635, 0.01000703,
                           -0.07719778, -0.12651891, -0.04318593, 0.06219772,
                           0.09163868, 0.05039065, -0.04922386, 0.21839413,
                           -0.02394437, 0.06173781, 0.0292527, 0.06160797,
                           -0.15553983, -0.02440624, -0.17509389, -0.0630486,
                           0.01428208, -0.03637431, 0.03971229, 0.13983178,
                           -0.23006812, 0.04999552, 0.0108454, -0.03970895,
                           0.02501768, 0.08157793, -0.03224047, -0.04502571,
                           0.0556995, -0.24374914, 0.25514284, 0.24795187,
                           0.04060191, 0.17597422, 0.07966681, 0.01920104,
                           -0.01194376, -0.02300822, -0.17204897, -0.0596558,
                           0.05307484, 0.07417042, 0.07126575, 0.00209804]

    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)

    face_found = False
    is_obama = False

    if len(unknown_face_encodings) > 0:
        face_found = True
        # See if the first face in the uploaded image matches the known face of Obama
        match_results = face_recognition.compare_faces([known_face_encoding], unknown_face_encodings[0])
        if match_results[0]:
            is_obama = True

    # Return the result as json
    result = {
        "face_found_in_image": face_found,
        "is_picture_of_obama": is_obama
    }
    return jsonify(result)
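
The jsonify() call implies this helper runs inside a Flask app. A minimal upload route that could drive it is sketched below; the route path, port, and the 'file' form-field name are assumptions, not code from the original project.

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/', methods=['POST'])
def upload_image():
    # Assumes the multipart form field is named 'file'; adjust to match your client.
    if 'file' not in request.files:
        return jsonify({"error": "no file uploaded"}), 400
    # Flask's FileStorage is file-like, so load_image_file() can read it directly.
    return detect_faces_in_image(request.files['file'])

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5001)  # assumed host/port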
Example #22
Source File: encoding_images.py From face-attendance-machine with Apache License 2.0 | 4 votes |
def encoding_ones_images(name):
    """
    Encode the images in the subdirectories under `path`.
    TODO: compare each face against the face vectors already stored in the history library
    using Euclidean distance; when the distance is below a certain threshold, notify:
    if the similar face is the same person, skip the record and report that it already exists,
    otherwise warn that the faces are overly similar.
    :param path:
    :return:
    """
    # with open("./dataset/encoded_face_names.txt", 'w') as f:
    #     lines = f.readlines()
    #     print(lines)
    with open(name_and_encoding, 'w') as f:
        image_dirs = os.path.join(data_path, name)
        files = [os.path.join(image_dirs, x) for x in os.listdir(image_dirs) if os.path.isfile(os.path.join(image_dirs, x))]
        print('---name :', files)
        person_image_encoding = []
        for image_path in files:
            print("image name is ", image_path)
            _image = face_recognition.load_image_file(image_path)
            face_encodings = face_recognition.face_encodings(_image)
            # name = os.path.split(image_path)[1]
            if face_encodings and len(face_encodings) == 1:
                if len(person_image_encoding) == 0:
                    person_image_encoding.append(face_encodings[0])
                    known_face_names.append(name)
                    continue
                for i in range(len(person_image_encoding)):
                    distances = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                    if False in distances:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        print(name, " new feature")
                        f.write(name + ":" + str(face_encodings[0]) + "\n")
                        break
                # face_encoding = face_recognition.face_encodings(_image)[0]
                # face_recognition.compare_faces()
        known_face_encodings.extend(person_image_encoding)
    bb = np.array(known_face_encodings)
    print("--------")
    KNOWN_FACE_ENCODINGS = "./dataset/known_face_encodings_{}.npy"  # known face vectors
    KNOWN_FACE_NANE = "./dataset/known_face_name_{}.npy"  # known face names
    np.save(KNOWN_FACE_ENCODINGS.format(int(time.time())), known_face_encodings)
    np.save(KNOWN_FACE_NANE.format(int(time.time())), known_face_names)
Example #23
Source File: encoding_images.py From face-attendance-machine with Apache License 2.0 | 4 votes |
def encoding_images_mult_thread(path, threads=8):
    """
    Encode the images in the subdirectories under `path`.
    TODO: compare each face against the face vectors already stored in the history library
    using Euclidean distance; when the distance is below a certain threshold, notify:
    if the similar face is the same person, skip the record and report that it already exists,
    otherwise warn that the faces are overly similar.
    :param path:
    :return:
    """
    # with open("./dataset/encoded_face_names.txt", 'w') as f:
    #     lines = f.readlines()
    #     print(lines)
    with open(name_and_encoding, 'w') as f:
        subdirs = [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]
        # if
        for subdir in subdirs:
            print('---name :', subdir)
            person_image_encoding = []
            for y in os.listdir(subdir):
                print("image name is ", y)
                _image = face_recognition.load_image_file(os.path.join(subdir, y))
                face_encodings = face_recognition.face_encodings(_image)
                name = os.path.split(subdir)[-1]
                if face_encodings and len(face_encodings) == 1:
                    if len(person_image_encoding) == 0:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        continue
                    for i in range(len(person_image_encoding)):
                        distances = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                        if False in distances:
                            person_image_encoding.append(face_encodings[0])
                            known_face_names.append(name)
                            print(name, " new feature")
                            f.write(name + ":" + str(face_encodings[0]) + "\n")
                            break
                    # face_encoding = face_recognition.face_encodings(_image)[0]
                    # face_recognition.compare_faces()
            known_face_encodings.extend(person_image_encoding)
    bb = np.array(known_face_encodings)
    print("--------")
    np.save(KNOWN_FACE_ENCODINGS, known_face_encodings)
    np.save(KNOWN_FACE_NANE, known_face_names)