Python cv2.CascadeClassifier() Examples

The following are 30 code examples of cv2.CascadeClassifier(), drawn from open-source projects. The original project and source file for each example are listed above it. You may also want to check out the other available functions and classes of the cv2 module.
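Most of the examples follow the same pattern: construct a CascadeClassifier from a Haar or LBP cascade XML file, convert the input image to grayscale, call detectMultiScale, and draw the returned (x, y, w, h) rectangles. For orientation, here is a minimal, self-contained sketch of that pattern; it assumes the opencv-python package (which exposes the bundled cascade directory as cv2.data.haarcascades) and a placeholder image path.

import cv2

# Load one of the Haar cascades bundled with the opencv-python package.
cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

img = cv2.imread("input.jpg")                 # placeholder image path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # the detector expects grayscale

# detectMultiScale returns an array of (x, y, w, h) rectangles;
# scaleFactor and minNeighbors are the usual knobs to tune.
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imwrite("output.jpg", img)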
Example #1
Source File: detect.py    From pedestrian-haar-based-detector with GNU General Public License v2.0
def main():
	#IMG PATHS
	imagePath = "test3.jpg"
	cascPath = "cascades/haarcascade_pedestrian.xml"

	pplCascade = cv2.CascadeClassifier(cascPath)
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	
	gray = normalize_grayimage(gray)
	 
	pedestrians = pplCascade.detectMultiScale(
		gray,
		scaleFactor=1.2,
		minNeighbors=10,
		minSize=(32,96),
		flags=cv2.CASCADE_SCALE_IMAGE
	)

	print "Found {0} ppl!".format(len(pedestrians))

	#Draw a rectangle around the detected objects
	for (x, y, w, h) in pedestrians:
		cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)

	cv2.imwrite("saida.jpg", image)
	cv2.imshow("Ppl found", image)
	cv2.waitKey(0)
	
	return 0 
Example #2
Source File: face_detect.py    From Attendance-Management-using-Face-Recognition with GNU General Public License v3.0
def recognize(self, image_path, identity):
        face_recognizer = cv2.face.LBPHFaceRecognizer_create()
        try:
            face_recognizer.read(self.face_recognizer_file)
        except:
            messagebox.showerror('Error', "The class has no data to recognize from\nor has not been trained yet")
            return "No Training Data"
        test_img = cv2.imread(image_path)
        test_img_gray = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
        clf = cv2.CascadeClassifier(self.xml_file)
        all_detected = self.detect_faces(clf, test_img, 1.2)

        students_in_pic = set()
        for (x, y, w, h) in all_detected:
            label, conf = face_recognizer.predict(test_img_gray[y:y + h, x:x + w])
            # print(conf)
            cv2.waitKey(0)
            label_text = identity[label]
            students_in_pic.add(identity[label])
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(test_img, label_text, (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 255, 0), 2)
        cv2.imshow("Students in this picture", test_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        return list(students_in_pic) 
Example #3
Source File: face_detect.py    From face-search with MIT License
def detect_face(img_path, cc_path='../files/haarcascade_frontalface_default.xml'):
    """
    Detect the face from the image, return colored face
    """

    cc = cv2.CascadeClassifier(os.path.abspath(cc_path))
    img_path = os.path.abspath(img_path)
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = cc.detectMultiScale(gray, 1.3, 5)
    roi_color = None

    if len(faces) == 0:
        logging.exception(img_path + ': No face found')
    else:
        x,y,w,h = faces[0]
        _h, _w = compute_size(h, w)
        roi_color = img[y - _h:y + h + _h, x - _w:x + w + _w]

    return roi_color 
Example #4
Source File: face.py    From face-search with MIT License
def detect_face(img_path, cc_path='../files/haarcascade_frontalface_default.xml'):
    """
    Detect the face from the image, return colored face
    """

    cc = cv2.CascadeClassifier(os.path.abspath(cc_path))
    img_path = os.path.abspath(img_path)
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = cc.detectMultiScale(gray, 1.3, 5)
    roi_color = None

    if len(faces) == 0:
        logging.exception(img_path + ': No face found')
    else:
        x,y,w,h = faces[0]
        _h, _w = compute_size(h, w)
        roi_color = img[y - _h:y + h + _h, x - _w:x + w + _w]

    return roi_color 
Example #5
Source File: ImageMiniLab.py    From ImageMiniLab with GNU General Public License v3.0
def face_recognize(self):
        src = self.cv_read_img(self.src_file)
        if src is None:
            return

        gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
        face_cascade = cv.CascadeClassifier('haarcascade_frontalface_alt2.xml')
        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.15,
            minNeighbors=3,
            minSize=(5, 5)
        )

        for (x, y, w, h) in faces:
            cv.rectangle(src, (x, y), (x + w, y + h), (0, 255, 0), 2)
        self.decode_and_show_dst(src) 
Example #6
Source File: opencv-face.py    From streamlink with BSD 2-Clause "Simplified" License
def main(url, quality='best', fps=30.0):
    face_cascade = cv2.CascadeClassifier(os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml'))

    stream_url = stream_to_url(url, quality)
    log.info("Loading stream {0}".format(stream_url))
    cap = cv2.VideoCapture(stream_url)

    frame_time = int((1.0 / fps) * 1000.0)

    while True:
        try:
            ret, frame = cap.read()
            if ret:
                frame_f = detect_faces(face_cascade, frame, scale_factor=1.2)
                cv2.imshow('frame', frame_f)
                if cv2.waitKey(frame_time) & 0xFF == ord('q'):
                    break
            else:
                break
        except KeyboardInterrupt:
            break

    cv2.destroyAllWindows()
    cap.release() 
Example #7
Source File: videoOpencv.py    From faceai with MIT License
def discern(img):
    grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # OpenCV Haar cascade face classifier
    classifier = cv2.CascadeClassifier(
        r"C:\Python36\Lib\site-packages\opencv-master\data\haarcascades\haarcascade_frontalface_default.xml"
    )
    color = (0, 255, 0)  # rectangle color
    # run the face detector
    faceRects = classifier.detectMultiScale(
        grayImg, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
    if len(faceRects):  # at least one face was detected
        for faceRect in faceRects:  # draw a box around each face
            x, y, w, h = faceRect
            # draw the face rectangle
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)

    cv2.imshow("image", img)  # show the image
Example #8
Source File: person.py    From ActionAI with GNU General Public License v3.0
def __init__(self, expiration=5, nbs=5, scale=1.1, inset=150, min_size=10, model_file=face_mdl):
        self.count = 0
        self.eps = 1e-6
        self.activity = []
        self.expiration = expiration
        self.id = utils.id_gen()
        self.q = deque(maxlen=10)
        self.cubit_q = deque(maxlen=50)
        self.skeleton_color = tuple([random.randint(0, 255) for _ in range(3)]) #skeleton_color

        self.faces = []
        self.detector = cv2.CascadeClassifier(model_file)
        self.nbs = nbs
        self.scale = scale
        self.inset = inset
        self.min_size = (min_size, min_size)

        return 
Example #9
Source File: cascade_detector.py    From adience_align with MIT License
def __init__(self, cascade_file = './resources/haarcascade_frontalface_default.xml', 
                 min_size = (10, 10),
                 min_neighbors = 20,
                 scale_factor = 1.04,
                 angles = [0],
                 thr = 0.4, 
                 cascade_type = 'haar'):
        '''
        cascade_type - is a string defining the type of cascade
        '''
        print(expand_path('.'))
        self.cascade_file = cascade_file.rsplit('/',1)[1]
        self._cascade_classifier = cv2.CascadeClassifier(cascade_file)
        self.scale_factor = scale_factor
        self.min_neighbors = min_neighbors
        self.min_size = min_size
        self.cascade_type = cascade_type
        self.angles = angles
        self.thr = thr 
Example #10
Source File: AMS_Run.py    From Attendace_management_system with MIT License
def trainimg():
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    global detector
    detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    try:
        global faces,Id
        faces, Id = getImagesAndLabels("TrainingImage")
    except Exception as e:
        l='please make "TrainingImage" folder & put Images'
        Notification.configure(text=l, bg="SpringGreen3", width=50, font=('times', 18, 'bold'))
        Notification.place(x=350, y=400)

    recognizer.train(faces, np.array(Id))
    try:
        recognizer.save("TrainingImageLabel\\Trainner.yml")
    except Exception as e:
        q='Please make "TrainingImageLabel" folder'
        Notification.configure(text=q, bg="SpringGreen3", width=50, font=('times', 18, 'bold'))
        Notification.place(x=350, y=400)

    res = "Model Trained"  # +",".join(str(f) for f in Id)
    Notification.configure(text=res, bg="SpringGreen3", width=50, font=('times', 18, 'bold'))
    Notification.place(x=250, y=400) 
Example #11
Source File: face_detection_utilities.py    From Real-Time-Facial-Expression-Recognition-with-DeepLearning with MIT License
def getFaceCoordinates(image):
    cascade = cv2.CascadeClassifier(CASCADE_PATH)
    
    img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img_gray = cv2.equalizeHist(img_gray)
    rects = cascade.detectMultiScale(
        img_gray,
        scaleFactor=1.1,
        minNeighbors=3,
        minSize=(48, 48)
        )

    # For now, we only deal with the case that we detect one face.
    if(len(rects) != 1) :
        return None
    
    face = rects[0]
    bounding_box = [face[0], face[1], face[0] + face[2], face[1] + face[3]]

    # return map((lambda x: x), bounding_box)
    return bounding_box 
Example #12
Source File: faceApi.py    From FaceRecognition-RestApi with MIT License
def detectFaces(image_name):
    starttime=time.time()
    img = cv2.imread(image_name)
    face_cascade = cv2.CascadeClassifier(root+"model/haarcascade_frontalface_default.xml")
    if img.ndim == 3:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img  # if ndim is 3 the image is color and must be converted to grayscale; if it is 2 the image is already grayscale

    faces = face_cascade.detectMultiScale(gray, 1.2, 5)  # 1.2 is the scale factor and 5 is minNeighbors; changing them changes the detection results
    result = "["

    for (x,y,width,height) in faces:
        result+=  '{ "x":'+ str(x) +' ,"y":'+ str(y) +' ,"height":'+ str(height)+',"width":'+ str(width)+' } ' +','

    endtime=time.time()

    result = result.rstrip(',') + ']'  # drop the trailing comma (also safe when no faces were found)

    return '{"status":true, "data":'+ result +' ,"msg":"成功","runtime":'+ str(endtime-starttime)+'}'

# API response function
Example #13
Source File: facerec_train.py    From deepvisualminer with MIT License
def detect(img_file, detector_xml_path, dest_img_file):
    img = cv2.imread(img_file)
    
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    
    detector = cv2.CascadeClassifier(detector_xml_path)
    
    min_size = (min(50, gray_img.shape[0] // 10), min(50, gray_img.shape[1] // 10))
    hits = detector.detectMultiScale(gray_img, 1.1, 4, 0, min_size)
    #cv2.groupRectangles(hits, 2)
    print(hits)
    
    hits_img = np.copy(img)
    for (x,y,w,h) in hits:
        cv2.rectangle(hits_img, (x,y), (x+w, y+h), (0,0,255), 2)
    cv2.imwrite(dest_img_file, hits_img) 
Example #14
Source File: main.py    From flask_face_detection with MIT License
def detect_faces(img):
    '''Detect face in an image'''
    
    faces_list = []

    # Convert the test image to gray scale (opencv face detector expects gray images)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Load OpenCV face detector (LBP is faster)
    face_cascade = cv2.CascadeClassifier('opencv-files/lbpcascade_frontalface.xml')

    # Detect multiscale images (some images may be closer to camera than others)
    # result is a list of faces
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)

    # If no face is detected, return an empty list
    if  len(faces) == 0:
        return faces_list
    
    for i in range(0, len(faces)):
        (x, y, w, h) = faces[i]
        face_dict = {}
        face_dict['face'] = gray[y:y + h, x:x + w]
        face_dict['rect'] = faces[i]
        faces_list.append(face_dict)

    # Return the face image area and the face rectangle
    return faces_list
# ----------------------------------------------------------------------------------
# Draw rectangle on image
# according to given (x, y) coordinates and given width and height
# ---------------------------------------------------------------------------------- 
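The divider above introduces a rectangle-drawing helper that is not included in this excerpt. A minimal sketch of what such a helper might look like (the function name draw_rectangle and its signature are assumptions, not taken from the original file):

def draw_rectangle(img, rect):
    '''Draw a green box on img for an (x, y, w, h) rectangle as returned by detectMultiScale.'''
    (x, y, w, h) = rect
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)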
Example #15
Source File: face_detection.py    From deepgaze with MIT License
def __init__(self, frontalFacePath, profileFacePath):
        """Init the face detector object

        @param frontalFacePath path to the classifier config file
        @param profileFacePath path to the classifier config file
        """
        self.is_face_present = False

        #Represent the face type found
        # 1=Frontal,  
        # 2=FrontRotLeft, 3=FronRotRight,  
        # 4=ProfileLeft, 5=ProfileRight.
        self.face_type = 0

        self.face_x = 0
        self.face_y = 0
        self.face_h = 0
        self.face_w = 0

        if not os.path.isfile(frontalFacePath) and not os.path.isfile(profileFacePath):
            raise ValueError('[DEEPGAZE] haarCascade: the files specified do not exist.') 

        self._frontalFacePath = frontalFacePath
        self._profileFacePath = profileFacePath

        self._frontalCascade = cv2.CascadeClassifier(frontalFacePath)
        self._profileCascade = cv2.CascadeClassifier(profileFacePath) 
Example #16
Source File: face_detection.py    From python-image-processing with MIT License
def face_detect(file):
    image = cv2.imread(file)
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascade_path)
    facerect = cascade.detectMultiScale(image_gray, scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))

    print "face rectangle"
    print facerect

    if len(facerect) > 0:
        for rect in facerect:
            cv2.rectangle(image, tuple(rect[0:2]),tuple(rect[0:2]+rect[2:4]), color, thickness=2)

    return image 
Example #17
Source File: face.py    From pypot with GNU General Public License v3.0
def __init__(self, robot, name, cameras, freq,
                 cascade='/home/coyote/dev/opencv-3.0.0/data/haarcascades/haarcascade_frontalface_alt.xml'):
        SensorsController.__init__(self, None, [], freq)

        self.name = name

        self._robot = robot
        self._names = cameras
        self._faces = []

        self.cascade = cv2.CascadeClassifier(cascade) 
Example #18
Source File: face_dataset.py    From Comicolorization with MIT License
def __init__(self, paths, classifier_path, input_resize=None, output_resize=None, root='.', margin_ratio=0.3):
        """
        :param paths: image files :see: https://github.com/pfnet/chainer/blob/master/chainer/datasets/image_dataset.py
        :param classifier_path: XML of pre-trained face detector.
        You can find it from https://github.com/opencv/opencv/tree/master/data/haarcascades
        :param input_resize: set it if you want to resize image **before** running face detector
        :param output_resize: target size of output image
        """
        super().__init__(paths=paths, resize=input_resize, root=root)
        self.classifier = cv2.CascadeClassifier(classifier_path)
        self.margin_ratio = margin_ratio
        self.output_resize = output_resize 
Example #19
Source File: facerec_train.py    From deepvisualminer with MIT License
def detectvideo(vid_file, detector_xml_path, dest_img_dir):
    
    if not os.path.exists(dest_img_dir):
        os.makedirs(dest_img_dir)

    detector = cv2.CascadeClassifier(detector_xml_path)
    
    vid = imageio.get_reader(vid_file, 'ffmpeg')
    # If size and source_size are not equal, then device was probably
    # rotated (like a mobile) and we should compensate for the rotation.
    # Images will have 'source_size' dimensions but we need 'size'.
    metadata = vid.get_meta_data()
    rotate = False
    if metadata['source_size'] != metadata['size']:
        print('Rotating')
        rotate = True
    
    for i, img in enumerate(vid):
        if rotate:
            #img = np.transpose(img, axes=(1, 0, 2)).copy()
            img = np.rot90(img).copy()
            
        print('Frame ',i, img.shape)
        
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        
        min_size = (min(20, gray_img.shape[0] // 10), min(20, gray_img.shape[1] // 10))
        hits = detector.detectMultiScale(gray_img, 1.1, 3, 0, min_size)
        #cv2.groupRectangles(hits, 2)
        print(len(hits), ' hits')

        hits_img = np.copy(img)
        
        if len(hits) > 0:
            for (x,y,w,h) in hits:
                cv2.rectangle(hits_img, (x,y), (x+w, y+h), (0,0,255), 2)

        cv2.imwrite(os.path.join(dest_img_dir, 'frame-%d.png'%(i)), hits_img) 
Example #20
Source File: emotionclassification.py    From ConvolutionalEmotion with MIT License
def getPeakFaceFeatures():
    net = DecafNet()
    cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')

    features = numpy.zeros((number_sequences,feature_length))
    labels = numpy.zeros((number_sequences,1))
    counter = 0
    # Maybe sort them
    for participant in os.listdir(os.path.join(data_dir,image_dir)):
        for sequence in os.listdir(os.path.join(data_dir,image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(os.listdir(os.path.join(data_dir,image_dir, participant,sequence)))
                image_file = image_files[-1]
                print(counter, image_file)
                imarray = cv2.imread(os.path.join(data_dir,image_dir, participant,sequence,image_file))
                imarray = cv2.cvtColor(imarray,cv2.COLOR_BGR2GRAY)
                rects = cascade.detectMultiScale(imarray, 1.3, 3, cv2.CASCADE_SCALE_IMAGE, (150,150))
                if len(rects) > 0:
                    facerect=rects[0]
                    imarray = imarray[facerect[1]:facerect[1]+facerect[3], facerect[0]:facerect[0]+facerect[2]]
                scores = net.classify(imarray, center_only=True)
                features[counter] = net.feature(feature_level).flatten()
                label_file = open(os.path.join(data_dir,label_dir, participant,sequence,image_file[:-4]+"_emotion.txt"))
                labels[counter] = eval(label_file.read())
                label_file.close()
                counter += 1

    numpy.save("featuresPeakFace5",features)
    numpy.save("labelsPeakFace5",labels) 
Example #21
Source File: simpledetector.py    From deepvisualminer with MIT License
def __init__(self, cfg):
        BaseComponent.__init__(self, cfg)
        
        params = cfg['params']
        
        self.detector = cv2.CascadeClassifier(params['model'])
        self.scaledown_factor = params.get('scaledown_factor', 1.1)
        self.min_neighbors = params.get('min_neighbors', 3)
        self.output_label = params['outputlabel'] 
Example #22
Source File: dataManage.py    From face_recognition_py with GNU General Public License v3.0
def detectFace(self, img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if self.isEqualizeHistEnabled:
            gray = cv2.equalizeHist(gray)
        face_cascade = cv2.CascadeClassifier('./haarcascades/haarcascade_frontalface_default.xml')
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5, minSize=(90, 90))

        if (len(faces) == 0):
            return None, None
        (x, y, w, h) = faces[0]
        return gray[y:y + h, x:x + w], faces[0]

    # prepare the image data
Example #23
Source File: detect_features.py    From visage with MIT License
def __init__(self):
        """ Initiator for DetectLandmarks class.
        Downloads the predictor file if not available.
        Raises:
            `Exception`, if download of predictor fails.
        """
        if not os.path.isfile(PREDICTOR_PATH):
            try:
                print ('Predictor not found. Downloading...this may take a while...')
                url = 'https://github.com/hriddhidey/visage/blob/master/visage/shape_predictor_68_face_landmarks.dat?raw=true'
                def dl_progress(count, block_size, total_size):
                    """ Show download progress bar. """
                    percent = int(count*block_size*100/total_size)
                    sys.stdout.write("\r" + 'Progress:' + "...%d%%" % percent)
                    sys.stdout.flush()
                urlretrieve(
                    url,
                    PREDICTOR_PATH,
                    reporthook=dl_progress
                )
                print ('Predictor downloaded.')
            except IOError:
                print ('Download failed. Try again with reliable network connection.')
                raise IOError
        self.predictor = dlib.shape_predictor(PREDICTOR_PATH)
        self.cascade = cv2.CascadeClassifier(CASC_PATH)
        self.detector = dlib.get_frontal_face_detector() 
Example #24
Source File: Haarcascade.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    capture = cv2.VideoCapture(0)
    eye_path = "../classifier/haarcascade_eye.xml"
    face_path = "../classifier/haarcascade_frontalface_default.xml"

    eye_cascade = cv2.CascadeClassifier(eye_path)
    face_cascade = cv2.CascadeClassifier(face_path)

    while (True):
        _, frame = capture.read()

        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        eyes = eye_cascade.detectMultiScale(gray_frame, scaleFactor=1.05, minNeighbors=5, minSize=(10,10))
        faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.05, minNeighbors=5, minSize=(40, 40))

        print("Number of eyes : " + str(len(eyes)))
        print("Number of faces : " + str(len(faces)))

        for (x, y, w, h) in eyes:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        cv2.imshow("Live Capture", frame)

        if cv2.waitKey(1) == 27:
            break

    cv2.destroyAllWindows()
    capture.release() 
Example #25
Source File: Train Classifier and Test Video Feed.py    From Emotion-Recognition-Using-SVMs with MIT License
def detectFaces(frame):
    cascPath = "../data/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detected_faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=6,
            minSize=(50, 50),
            flags=cv2.CASCADE_SCALE_IMAGE)
    return gray, detected_faces 
Example #26
Source File: face_detector.py    From FaceRecognition with MIT License
def _create_haar_detector(self):
        """Create haar cascade classifier.

        # Returns
            face_cascade: haar cascade classifier.
        """
        path = 'data/haarcascades/haarcascade_frontalface_default.xml'
        face_cascade = cv2.CascadeClassifier(path)

        return face_cascade 
Example #27
Source File: face_trainer.py    From robovision with GNU General Public License v3.0
def __init__(self, face_cascade_xml, face_images_dataset_dir, parent=None):
        super().__init__(parent)

        self.face_cascade = cv.CascadeClassifier(face_cascade_xml)
        self.recognizer = cv.face.LBPHFaceRecognizer_create()

        self.face_images_dataset_dir = face_images_dataset_dir 
Example #28
Source File: app.py    From robovision with GNU General Public License v3.0
def detect_face_in_image_data(self, image_data):
        """
        function detects faces in image data,
        draws rectangle for faces in image data,
        and returns this updated image data with highlighted face/s
        """
        self._red = (0, 0, 255)
        self._width = 2
        self._min_size = (30, 30)

        # haarclassifiers work better in black and white
        gray_image = cv2.cvtColor(image_data, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)

        # path to Haar face classfier's xml file
        face_cascade_xml = './cascades/haarcascades_cuda/' \
                           'haarcascade_frontalface_default.xml'
        self.classifier = cv2.CascadeClassifier(face_cascade_xml)
        faces = self.classifier.detectMultiScale(gray_image,
                                                 scaleFactor=1.3,
                                                 minNeighbors=4,
                                                 flags=cv2.CASCADE_SCALE_IMAGE,
                                                 minSize=self._min_size)

        for (x, y, w, h) in faces:
            cv2.rectangle(image_data,
                          (x, y),
                          (x+w, y+h),
                          self._red,
                          self._width)

        return image_data 
Example #29
Source File: dashboard.py    From robovision with GNU General Public License v3.0
def __init__(self, haar_cascade_filepath, parent=None):
        super().__init__(parent)
        self.classifier = cv2.CascadeClassifier(haar_cascade_filepath)
        self.image = QtGui.QImage()
        self._red = (0, 0, 255)
        self._width = 2
        self._min_size = (30, 30) 
Example #30
Source File: face_crop_simple.py    From python-image-processing with MIT License
def face_detect(file):
    image = cv2.imread(file)
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade_f = cv2.CascadeClassifier(path.join(cascades_dir, 'haarcascade_frontalface_alt2.xml'))
    cascade_e = cv2.CascadeClassifier(path.join(cascades_dir, 'haarcascade_eye.xml'))

    facerect = cascade_f.detectMultiScale(image_gray, scaleFactor=1.08, minNeighbors=1, minSize=(50, 50))

    # print("face rectangle")
    # print(facerect)
    if not os.path.exists("face_images"):
        os.mkdir("face_images")

    base =  os.path.splitext(os.path.basename(sys.argv[1]))[0] + "_"

    if len(facerect) > 0:
        # filename numbering
        numb = 0
        for rect in facerect:
            x, y, w, h = rect
            # eyes in face?
            y_offset = int(h * 0.1)
            eye_area = image_gray[y + y_offset: y + h, x: x + w]
            eyes = cascade_e.detectMultiScale(eye_area, 1.05)
            eyes = list(filter(lambda e: (e[0] > w / 2 or e[0] + e[2] < w / 2) and e[1] + e[3] < h / 2, eyes))
            # print(len(eyes))
            if len(eyes) > 0:
                image_face = image[y:y+h, x:x+w]
                cv2.imwrite("face_images/" + base + str("{0:02d}".format(numb)) + ".jpg", image_face)
                numb += 1