Python dlib.net Examples

The following are 4 code examples that use the dlib module together with pre-trained model files hosted on dlib.net. The source file, originating project, and license are noted above each example.
Example #1
Source File: data_preprocessing_autoencoder.py    From AVSR-Deep-Speech with GNU General Public License v2.0
def prepare_data(video_dir, output_dir, max_video_limit=1, screen_display=False):
	"""
	Args:
		1. video_dir:			Directory storing all videos to be processed.
		2. output_dir:			Directory where all mouth region images are to be stored.
		3. max_video_limit:	 	Puts a limit on number of videos to be used for processing.
		4. screen_display:		Decides whether to use screen (to display video being processed).
	"""

	video_file_paths = sorted(glob.glob(video_dir + "*.mp4"))[:max_video_limit]

	load_trained_models()

	if not FACE_DETECTOR_MODEL:
		print "[ERROR]: Please ensure that you have dlib's landmarks predictor file " + \
			  "at data/dlib_data/. You can download it here: " + \
			  "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
		return False

	for path in video_file_paths:
		extract_mouth_regions(path, output_dir, screen_display)

	return True 
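
For context, prepare_data() relies on a load_trained_models() helper and a FACE_DETECTOR_MODEL global from the same module, neither of which is shown above. Below is a minimal sketch of how they might be implemented with dlib, assuming the 68-point predictor has been extracted to data/dlib_data/; the LANDMARKS_PREDICTOR name is illustrative, not taken from the original source.

import glob   # used by prepare_data() above
import os

import dlib

FACE_DETECTOR_MODEL = None
LANDMARKS_PREDICTOR = None   # illustrative name; the original module may differ

def load_trained_models():
	"""Load dlib's frontal face detector and 68-point landmarks predictor, if available."""
	global FACE_DETECTOR_MODEL, LANDMARKS_PREDICTOR
	predictor_path = 'data/dlib_data/shape_predictor_68_face_landmarks.dat'
	if not os.path.isfile(predictor_path):
		return   # prepare_data() reports the error and aborts
	FACE_DETECTOR_MODEL = dlib.get_frontal_face_detector()
	LANDMARKS_PREDICTOR = dlib.shape_predictor(predictor_path)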
Example #2
Source File: pre_process.py    From FaceNet with Apache License 2.0
def ensure_dlib_model():
    if not os.path.isfile(predictor_path):
        import urllib.request
        urllib.request.urlretrieve("http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2",
                                   filename="models/shape_predictor_5_face_landmarks.dat.bz2") 
Example #3
Source File: alignface.py    From deepfeatinterp with GNU General Public License v3.0
def compute_template(globspec='images/lfw_aegan/*/*.png', image_dims=[400,400],
                     predictor_path='models/shape_predictor_68_face_landmarks.dat',
                     center_crop=None, subsample=1):
  # Credit: http://dlib.net/face_landmark_detection.py.html
  detector=dlib.get_frontal_face_detector()
  predictor=dlib.shape_predictor(predictor_path)

  template=numpy.zeros((68,2),dtype=numpy.float64)
  count=0

  if center_crop is not None:
    center_crop=numpy.asarray(center_crop)
    cy,cx=(numpy.asarray(image_dims)-center_crop)//2

  # compute mean landmark locations
  S=sorted(glob.glob(globspec))
  S=S[::subsample]
  for ipath in S:
    print("Processing file: {}".format(ipath))
    img=(skimage.transform.resize(skimage.io.imread(ipath)/255.0,tuple(image_dims)+(3,),order=2,mode='nearest')*255).clip(0,255).astype(numpy.ubyte)
    if center_crop is not None:
      # crop out the central (height, width) region
      img=img[cy:cy+center_crop[0],cx:cx+center_crop[1]]

    # run dlib's frontal face detector with no upsampling and skip images
    # that do not contain exactly one detected face
    upsample=0
    dets=detector(img,upsample)
    if len(dets)!=1: continue

    for k,d in enumerate(dets):
      shape=predictor(img, d)
      for i in range(68):
        template[i]+=(shape.part(i).y,shape.part(i).x)
      count+=1
  template/=float(count)
  return template
  # lfw_aegan 400x400 template map
  # [[ 251.58852868  201.50275826]  # 33 where nose meets upper-lip
  #  [ 172.69409809  168.66523086]  # 39 inner-corner of left eye
  #  [ 171.72236076  232.09718129]] # 42 inner-corner of right eye
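
A possible way to call the function above, using values from its default signature; it assumes the module-level imports listed here, the LFW images, and the 68-point predictor file are all in place. The returned template stores each landmark in (y, x) order.

import glob

import dlib
import numpy
import skimage.io
import skimage.transform

# Average landmark positions over every second LFW image (no center crop).
template = compute_template(globspec='images/lfw_aegan/*/*.png',
                            image_dims=[400, 400],
                            center_crop=None,
                            subsample=2)
print(template[33], template[39], template[42])   # nose/upper-lip point and inner eye corners, in (y, x)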
Example #4
Source File: frames.py    From GazeML with MIT License
def _get_dlib_data_file(dat_name):
    # resolve the 3rdparty model directory relative to this source file
    dat_dir = os.path.relpath('%s/../3rdparty' % os.path.dirname(__file__))
    dat_path = '%s/%s' % (dat_dir, dat_name)
    if not os.path.isdir(dat_dir):
        os.mkdir(dat_dir)

    # Download trained shape detector
    if not os.path.isfile(dat_path):
        with urlopen('http://dlib.net/files/%s.bz2' % dat_name) as response:
            with bz2.BZ2File(response) as bzf, open(dat_path, 'wb') as f:
                shutil.copyfileobj(bzf, f)

    return dat_path
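
The snippet depends on a few standard-library imports that are not shown (os, bz2, shutil, and urlopen from urllib.request). A usage sketch with those spelled out follows; the model file name matches what dlib.net hosts, and the variable names are illustrative.

import bz2
import os
import shutil
from urllib.request import urlopen

import dlib

# Download (if needed) and load dlib's 5-point landmark model.
dat_path = _get_dlib_data_file('shape_predictor_5_face_landmarks.dat')
landmarks_predictor = dlib.shape_predictor(dat_path)
face_detector = dlib.get_frontal_face_detector()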