Python dlib.get_frontal_face_detector() Examples
The following are 28 code examples of dlib.get_frontal_face_detector(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module dlib, or try the search function.
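
All of the examples below follow the same basic pattern: construct the detector once, then call it on an image, optionally upsampling to find smaller faces. A minimal sketch of that pattern ("face.jpg" is a placeholder path, and the sketch assumes a dlib build that provides load_rgb_image; an image from cv2.imread works as well):

import dlib

# HOG + linear-SVM frontal face detector bundled with dlib
detector = dlib.get_frontal_face_detector()
img = dlib.load_rgb_image("face.jpg")  # placeholder input image
# the second argument is how many times to upsample the image before
# detection; higher values find smaller faces at the cost of speed
dets = detector(img, 1)
for d in dets:
    print(d.left(), d.top(), d.right(), d.bottom())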
Example #1
Source File: training_script.py From dlib-minified-models with Apache License 2.0 | 7 votes

def test(image_path, model_path):
    '''Test the given model by showing the detected landmarks.
        - image_path: the path of an image. Should contain a face.
        - model_path: the path of a shape predictor model.
    '''
    image = cv2.imread(image_path)
    face_detector = dlib.get_frontal_face_detector()
    dets = face_detector(image, 1)
    predictor = dlib.shape_predictor(model_path)

    for d in dets:
        cv2.rectangle(image, (d.left(), d.top()), (d.right(), d.bottom()), 255, 1)
        shape = predictor(image, d)
        for i in range(shape.num_parts):
            p = shape.part(i)
            cv2.circle(image, (p.x, p.y), 2, 255, 1)
            cv2.putText(image, str(i), (p.x + 4, p.y), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (255, 255, 255))

    cv2.imshow("window", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# uncomment to test
# test("test_image.jpg", "shape_predictor.dat")
Example #2
Source File: face_utilities.py From Heart-rate-measurement-using-camera with Apache License 2.0 | 6 votes

def face_detection(self, frame):
    '''
    Detect faces in a frame

    Args:
        frame (cv2 image): a normal frame grab from camera or video

    Outputs:
        rects (array): detected faces as rectangles
    '''
    if self.detector is None:
        self.detector = dlib.get_frontal_face_detector()

    if frame is None:
        return

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # get all faces in the frame
    rects = self.detector(gray, 0)
    # to get the coords from a rect, use: (x, y, w, h) = face_utils.rect_to_bb(rects[0])

    return rects
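
The comment in the example above refers to the rect_to_bb helper from the imutils package. The conversion it performs is simple enough to inline if you want to avoid the extra dependency; a minimal standalone sketch (the function name just mirrors the imutils helper):

def rect_to_bb(rect):
    # convert a dlib.rectangle into an OpenCV-style (x, y, w, h) tuple
    x = rect.left()
    y = rect.top()
    w = rect.right() - x
    h = rect.bottom() - y
    return (x, y, w, h)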
Example #3
Source File: align_dlib.py From Python-Tensorflow-Face-v2.0 with Apache License 2.0 | 6 votes

def __init__(self, facePredictor):
    """
    Instantiate an 'AlignDlib' object.

    :param facePredictor: The path to dlib's face predictor.
    :type facePredictor: str
    """
    assert facePredictor is not None

    self.detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor(facePredictor)
Example #4
Source File: facial_feature_detector.py From EmotionClassifier with GNU General Public License v3.0 | 6 votes

def get_landmarks(img, resource_dir, verbose=False):
    # if not automatically downloaded, get it from:
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    predictor_path = resource_dir + "/dlib_models/shape_predictor_68_face_landmarks.dat"
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    lmarks = []
    dets = detector(img, 1)
    if verbose:
        print("Number of faces detected: {}".format(len(dets)))

    shapes = []
    for k, det in enumerate(dets):
        shape = predictor(img, det)
        shapes.append(shape)
        xy = _shape_to_np(shape)
        lmarks.append(xy)

    lmarks = np.asarray(lmarks, dtype='float32')
    # display_landmarks(img, dets, shapes)
    return lmarks
Example #5
Source File: face_detection.py From Heart-rate-measurement-using-camera with Apache License 2.0 | 5 votes

def __init__(self):
    self.detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    self.fa = face_utils.FaceAligner(self.predictor, desiredFaceWidth=256)
Example #6
Source File: video_emotion_rec.py From EmotionClassifier with GNU General Public License v3.0 | 5 votes

def detect_faces(image=None, face_size=(224, 224), offset=20, method='dlib'):
    faces = []
    if method == 'cv':
        detector = cv2.CascadeClassifier(detect_model_path)
        dets = detector.detectMultiScale(image, scaleFactor=1.1, minNeighbors=5, minSize=(48, 48))
        for face_coordinates in dets:
            x, y, w, h = face_coordinates
            x1, x2, y1, y2 = x - offset, x + w + offset, y - offset, y + h + offset
            face = image[y1:y2, x1:x2]
            try:
                faces.append(cv2.resize(face, face_size))
            except:
                continue
        return faces, dets
    elif method == 'dlib':
        detector = dlib.get_frontal_face_detector()
        dets = detector(image, 1)
        rect = []
        for _, d in enumerate(dets):
            left, right = d.left() - offset, d.right() + offset
            top, bottom = d.top() - offset, d.bottom() + offset
            face = image[top:bottom, left:right]
            try:
                faces.append(cv2.resize(face, face_size))
                rect.append((d.left(), d.top(), d.right() - d.left(), d.bottom() - d.top()))
            except:
                continue
        return faces, rect
Example #7
Source File: align_dlib.py From facenet with MIT License | 5 votes

def __init__(self, facePredictor):
    """
    Instantiate an 'AlignDlib' object.

    :param facePredictor: The path to dlib's face predictor.
    :type facePredictor: str
    """
    assert facePredictor is not None

    #pylint: disable=no-member
    self.detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor(facePredictor)
Example #8
Source File: detect_from_video.py From Deepfake-Detection with Apache License 2.0 | 5 votes

def test_full_image_network(video_path, model_path, output_path,
                            start_frame=0, end_frame=None, cuda=True):
    """
    Reads a video and evaluates a subset of frames with a detection network
    that takes in a full frame. Outputs are only given if a face is present,
    and the face is highlighted using dlib.
    :param video_path: path to video file
    :param model_path: path to model file (should expect the full sized image)
    :param output_path: path where the output video is stored
    :param start_frame: first frame to evaluate
    :param end_frame: last frame to evaluate
    :param cuda: enable cuda
    :return:
    """
    print('Starting: {}'.format(video_path))

    # Read and write
    reader = cv2.VideoCapture(video_path)
    video_fn = video_path.split('/')[-1].split('.')[0] + '.avi'
    os.makedirs(output_path, exist_ok=True)
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    fps = reader.get(cv2.CAP_PROP_FPS)
    num_frames = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
    writer = None

    # Face detector
    face_detector = dlib.get_frontal_face_detector()

    # Load model
    model = model_selection(modelname='xception', num_out_classes=2, dropout=0.5)
    model.load_state_dict(torch.load(model_path))
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
Example #9
Source File: alignface.py From deepfeatinterp with GNU General Public License v3.0 | 5 votes

def compute_template(globspec='images/lfw_aegan/*/*.png', image_dims=[400, 400],
                     predictor_path='models/shape_predictor_68_face_landmarks.dat',
                     center_crop=None, subsample=1):
    # Credit: http://dlib.net/face_landmark_detection.py.html
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    template = numpy.zeros((68, 2), dtype=numpy.float64)
    count = 0
    if center_crop is not None:
        center_crop = numpy.asarray(center_crop)
        cy, cx = (numpy.asarray(image_dims) - center_crop) // 2

    # compute mean landmark locations
    S = sorted(glob.glob(globspec))
    S = S[::subsample]
    for ipath in S:
        print("Processing file: {}".format(ipath))
        img = (skimage.transform.resize(skimage.io.imread(ipath) / 255.0,
                                        tuple(image_dims) + (3,), order=2,
                                        mode='nearest') * 255).clip(0, 255).astype(numpy.ubyte)
        if center_crop is not None:
            # crop center_crop[0] rows and center_crop[1] columns
            img = img[cy:cy + center_crop[0], cx:cx + center_crop[1]]
        upsample = 0
        dets = detector(img, upsample)
        if len(dets) != 1:
            continue
        for k, d in enumerate(dets):
            shape = predictor(img, d)
            for i in range(68):
                template[i] += (shape.part(i).y, shape.part(i).x)
            count += 1
    template /= float(count)
    return template

# lfw_aegan 400x400 template map
# [[ 251.58852868  201.50275826]    # 33 where nose meets upper-lip
#  [ 172.69409809  168.66523086]    # 39 inner-corner of left eye
#  [ 171.72236076  232.09718129]]   # 42 inner-corner of right eye
Example #10
Source File: alignface.py From deepfeatinterp with GNU General Public License v3.0 | 5 votes

def load_face_detector(predictor_path='models/shape_predictor_68_face_landmarks.dat'):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    return detector, predictor
Example #11
Source File: eval.py From Age-Gender-Estimate-TF with MIT License | 5 votes

def load_image(image_path, shape_predictor):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(shape_predictor)
    fa = FaceAligner(predictor, desiredFaceWidth=160)

    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    # image = imutils.resize(image, width=256)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 2)
    rect_nums = len(rects)
    XY, aligned_images = [], []
    if rect_nums == 0:
        aligned_images.append(image)
        return aligned_images, image, rect_nums, XY
    else:
        for i in range(rect_nums):
            aligned_image = fa.align(image, gray, rects[i])
            aligned_images.append(aligned_image)
            (x, y, w, h) = rect_to_bb(rects[i])
            image = cv2.rectangle(image, (x, y), (x + w, y + h), color=(255, 0, 0), thickness=2)
            XY.append((x, y))
        return np.array(aligned_images), image, rect_nums, XY

# def draw_label(image, point, ages, genders, font=cv2.FONT_HERSHEY_COMPLEX,
#                font_scale=1, thickness=1):
#     for i in range(len(point)):
#         label = "{}, {}".format(int(ages[i]), "F" if genders[i] == 0 else "M")
#         size = cv2.getTextSize(label, font, font_scale, thickness)[0]
#         x, y = point[i]
#         # cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)
#         cv2.putText(image, label, (x, np.max(y - 5, 0)), font, font_scale, (255, 255, 255), thickness)
Example #12
Source File: landmarks.py From photo-a-day-aligner with MIT License | 5 votes

def __init__(self, predictor_path):
    self.detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor(str(predictor_path))
Example #13
Source File: align_dlib.py From MaskInsightface with Apache License 2.0 | 5 votes

def __init__(self, facePredictor):
    """
    Instantiate an 'AlignDlib' object.

    :param facePredictor: The path to dlib's face predictor.
    :type facePredictor: str
    """
    assert facePredictor is not None

    #pylint: disable=no-member
    self.detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor(facePredictor)
Example #14
Source File: AIMakeup.py From AIMakeup with Apache License 2.0 | 5 votes

def __init__(self, predictor_path="./data/shape_predictor_68_face_landmarks.dat"):
    self.photo_path = []
    self.PREDICTOR_PATH = predictor_path
    self.faces = {}
    # face detector and facial landmark predictor, from dlib
    self.detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor(self.PREDICTOR_PATH)
Example #15
Source File: detector_dlib.py From deepface with MIT License | 5 votes

def __init__(self):
    super(FaceDetectorDlib, self).__init__()
    self.detector = dlib.get_frontal_face_detector()
    predictor_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        DeepFaceConfs.get()['detector']['dlib']['landmark_detector']
    )
    self.predictor = dlib.shape_predictor(predictor_path)
    self.upsample_scale = DeepFaceConfs.get()['detector']['dlib']['scale']
Example #16
Source File: detect_features.py From visage with MIT License | 5 votes

def __init__(self):
    """
    Initiator for DetectLandmarks class.
    Downloads the predictor file if not available.
    Raises:
        `Exception`, if download of predictor fails.
    """
    if not os.path.isfile(PREDICTOR_PATH):
        try:
            print('Predictor not found. Downloading...this may take a while...')
            url = 'https://github.com/hriddhidey/visage/blob/master/visage/shape_predictor_68_face_landmarks.dat?raw=true'

            def dl_progress(count, block_size, total_size):
                """ Show download progress bar. """
                percent = int(count * block_size * 100 / total_size)
                sys.stdout.write("\r" + 'Progress:' + "...%d%%" % percent)
                sys.stdout.flush()

            urlretrieve(
                url,
                PREDICTOR_PATH,
                reporthook=dl_progress
            )
            print('Predictor downloaded.')
        except IOError:
            print('Download failed. Try again with reliable network connection.')
            raise IOError
    self.predictor = dlib.shape_predictor(PREDICTOR_PATH)
    self.cascade = cv2.CascadeClassifier(CASC_PATH)
    self.detector = dlib.get_frontal_face_detector()
Example #17
Source File: preprocessing.py From face-identification-tpe with MIT License | 5 votes

def __init__(self):
    self.detector = dlib.get_frontal_face_detector()
Example #18
Source File: align_dlib.py From Face_Recognition_Client with Apache License 2.0 | 5 votes

def __init__(self, facePredictor):
    """
    Instantiate an 'AlignDlib' object.

    :param facePredictor: The path to dlib's face predictor.
    :type facePredictor: str
    """
    assert facePredictor is not None

    self.detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor(facePredictor)
Example #19
Source File: data_loader.py From DeepFake-Detection with MIT License | 5 votes

def __init__(self, filenames, filepath, transform, output_image_size=224, recompute=False):
    self.filenames = filenames
    self.transform = transform
    self.image_size = output_image_size
    self.recompute = recompute
    self.cached_path = Path(filepath)
    self.cached_path.mkdir(exist_ok=True)
    self.face_detector = dlib.get_frontal_face_detector()
Example #20
Source File: FacePoints.py From PyQt with GNU General Public License v3.0 | 5 votes

def startCapture(self):
    self.setText("Please wait, initializing data and camera...")
    try:
        # detection resources
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(
            "Data/shape_predictor_68_face_landmarks.dat")
        cascade_fn = "Data/lbpcascades/lbpcascade_frontalface.xml"
        self.cascade = cv2.CascadeClassifier(cascade_fn)
        if not self.cascade:
            return QMessageBox.critical(self, "Error", cascade_fn + " not found")
        self.cap = cv2.VideoCapture(0)
        if not self.cap or not self.cap.isOpened():
            return QMessageBox.critical(self, "Error", "Failed to open the camera")
        # start a timer to capture frames periodically
        # (QTimer.start expects an interval in milliseconds as an int)
        self.timer = QTimer(self, timeout=self.onCapture)
        self.timer.start(int(1000 / self.fps))
    except Exception as e:
        QMessageBox.critical(self, "Error", str(e))
Example #21
Source File: dlib_detector.py From face-alignment-pytorch with BSD 3-Clause "New" or "Revised" License | 5 votes

def __init__(self, device, path_to_detector=None, verbose=False):
    super().__init__(device, verbose)

    base_path = os.path.join(appdata_dir('face_alignment'), "data")

    # Initialise the face detector
    if 'cuda' in device:
        if path_to_detector is None:
            path_to_detector = os.path.join(
                base_path, "mmod_human_face_detector.dat")

            if not os.path.isfile(path_to_detector):
                print("Downloading the face detection CNN. Please wait...")

                path_to_temp_detector = os.path.join(
                    base_path, "mmod_human_face_detector.dat.download")

                if os.path.isfile(path_to_temp_detector):
                    os.remove(os.path.join(path_to_temp_detector))

                request_file.urlretrieve(
                    "https://www.adrianbulat.com/downloads/dlib/mmod_human_face_detector.dat",
                    os.path.join(path_to_temp_detector))

                os.rename(os.path.join(path_to_temp_detector),
                          os.path.join(path_to_detector))

        self.face_detector = dlib.cnn_face_detection_model_v1(path_to_detector)
    else:
        self.face_detector = dlib.get_frontal_face_detector()
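
This example falls back from dlib's CNN face detector to get_frontal_face_detector() when CUDA is unavailable, and the two detectors return different types: the HOG detector yields plain dlib.rectangle objects, while the CNN detector yields mmod_rectangle objects that wrap a rectangle and a confidence score. A minimal sketch of normalizing both kinds of output (the helper name is illustrative):

def to_rectangles(detections):
    # CNN (mmod_rectangle) detections expose .rect and .confidence;
    # HOG detections are already plain dlib.rectangle objects
    return [d.rect if hasattr(d, 'rect') else d for d in detections]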
Example #22
Source File: dlib_detector.py From ColumbiaImageSearch with Apache License 2.0 | 5 votes

def __init__(self):
    super(DLibFaceDetector, self).__init__()
    import dlib
    self.detector = dlib.get_frontal_face_detector()
Example #23
Source File: FeatureExtractor.py From adviser with GNU General Public License v3.0 | 5 votes

def __init__(self, domain: Domain = ""):
    Service.__init__(self, domain=domain)
    self.module_dir = os.path.dirname(os.path.abspath(__file__))

    # CLAHE (Contrast Limited Adaptive Histogram Equalization)
    self.CLAHE = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

    # for detecting faces (returns coordinates of rectangle(s) of face area(s))
    self.DETECTOR = dlib.get_frontal_face_detector()

    # facial landmark predictor
    predictor_file = os.path.abspath(os.path.join(
        self.module_dir, '..', '..', '..', 'resources', 'models', 'video',
        'shape_predictor_68_face_landmarks.dat'))
    self.PREDICTOR = dlib.shape_predictor(predictor_file)
Example #24
Source File: setup.py From multimodal-vae-public with MIT License | 5 votes

def build_mask_dataset(in_dir, out_dir, model_path):
    """Generate a dataset of segmentation masks from images.

    @param in_dir: string input directory of images.
    @param out_dir: string output directory of images.
    @param model_path: string path to HOG model for facial features.
    """
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(model_path)

    image_paths = os.listdir(in_dir)
    n_images = len(image_paths)

    for i, image_path in enumerate(image_paths):
        print('Building face-mask dataset: [%d/%d] images.' % (i + 1, n_images))
        image_full_path = os.path.join(in_dir, image_path)
        image = cv2.imread(image_full_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale image
        rects = detector(gray, 1)
        try:
            rect = rects[0]  # we are only going to use the first one
            # determine the facial landmarks for the face region, then
            # convert the landmark (x, y)-coordinates to a NumPy array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            output = visualize_facial_landmarks(image, shape)
            cv2.imwrite(os.path.join(out_dir, image_path), output)
        except:
            # if for some reason no bounding box is found, send blank.
            output = np.ones_like(image) * 255
            cv2.imwrite(os.path.join(out_dir, image_path), output)
Example #25
Source File: dlib.py From thug-memes with MIT License | 5 votes

def find_thug_landmarks(self, img_path, show_result=False):
    face_det = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(self._landmarks_model)

    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_det(gray)

    thug_results = []
    for f in faces:
        predicted = predictor(gray, f)
        landmarks = predicted.parts()

        left_eye = self._center_of_two_points(
            landmarks[LEFT_EYE_CENTER_IDXS[0]],
            landmarks[LEFT_EYE_CENTER_IDXS[1]])
        right_eye = self._center_of_two_points(
            landmarks[RIGHT_EYE_CENTER_IDXS[0]],
            landmarks[RIGHT_EYE_CENTER_IDXS[1]])
        mouth = self._center_of_two_points(
            landmarks[MOUTH_CENTER_IDXS[0]],
            landmarks[MOUTH_CENTER_IDXS[1]])

        thug = ThugLandmarks(l_eye=left_eye, r_eye=right_eye, mouth=mouth)
        thug_results.append(thug)

        if show_result:
            self._draw_result(img, f, landmarks, thug)

    if show_result:
        cv2.imshow('detection_result-{}'.format(timestamp()), img)
        cv2.waitKey(1)

    return thug_results
Example #26
Source File: face_detection.py From DeepWay with MIT License | 5 votes

def __init__(self):
    self.detector = dlib.get_frontal_face_detector()
Example #27
Source File: faceswapper.py From FaceSwapper with Apache License 2.0 | 5 votes

def __init__(self, heads_list=[], predictor_path="./data/shape_predictor_68_face_landmarks.dat"):
    '''
    heads_list: list of paths to the head (background and hairstyle) source images.
        All head images in the list are loaded at instantiation time, and their
        facial landmark coordinates are stored in a dict keyed by file name.
    predictor_path: path to the dlib predictor resource.
    '''
    # facial landmark index ranges
    self.PREDICTOR_PATH = predictor_path
    self.FACE_POINTS = list(range(17, 68))
    self.MOUTH_POINTS = list(range(48, 61))
    self.RIGHT_BROW_POINTS = list(range(17, 22))
    self.LEFT_BROW_POINTS = list(range(22, 27))
    self.RIGHT_EYE_POINTS = list(range(36, 42))
    self.LEFT_EYE_POINTS = list(range(42, 48))
    self.NOSE_POINTS = list(range(27, 35))
    self.JAW_POINTS = list(range(0, 17))

    # the full set of face alignment landmarks
    self.ALIGN_POINTS = (self.LEFT_BROW_POINTS + self.RIGHT_EYE_POINTS +
                         self.LEFT_EYE_POINTS + self.RIGHT_BROW_POINTS +
                         self.NOSE_POINTS + self.MOUTH_POINTS)

    # landmarks from the second image (the face): eyes, brows, nose and mouth;
    # these will overlay the corresponding landmarks of the first image
    self.OVERLAY_POINTS = [
        self.LEFT_EYE_POINTS + self.RIGHT_EYE_POINTS +
        self.LEFT_BROW_POINTS + self.RIGHT_BROW_POINTS,
        self.NOSE_POINTS + self.MOUTH_POINTS,
    ]

    # colour-correction parameter
    self.COLOUR_CORRECT_BLUR_FRAC = 0.6

    # face detector and landmark predictor, from dlib
    self.detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor(self.PREDICTOR_PATH)

    # head image resources
    self.heads = {}
    if heads_list:
        self.load_heads(heads_list)
Example #28
Source File: face_rectify.py From Face-Sketch-Wild with MIT License | 5 votes

def detect_fiducial_points(img, predictor_path):
    """
    Detect face landmarks and return the mean points of the left and right eyes.
    If there are multiple faces in one image, only select the first one.
    """
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    dets = detector(img, 1)
    if len(dets) < 1:
        return []
    for k, d in enumerate(dets):
        shape = predictor(img, d)
        break

    landmarks = []
    for i in range(68):
        landmarks.append([shape.part(i).x, shape.part(i).y])
    landmarks = np.array(landmarks)
    left_eye = landmarks[36:42]
    right_eye = landmarks[42:48]
    mouth = landmarks[48:68]
    return np.array([np.mean(left_eye, 0), np.mean(right_eye, 0)]).astype('int')