Python cv2.hconcat() Examples

The following are 8 code examples of cv2.hconcat(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or check out all available functions/classes of the cv2 module.
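Before the project examples, here is a minimal sketch of the call itself: cv2.hconcat() joins a list of images left to right, and every input must share the same height, channel count, and dtype. The file names below are placeholders:

import cv2

# Placeholder inputs; hconcat needs equal heights, channel counts, and dtypes.
left = cv2.imread("left.png")
right = cv2.imread("right.png")

# Match heights first, preserving the right image's aspect ratio.
h = left.shape[0]
scale = h / right.shape[0]
right = cv2.resize(right, (int(right.shape[1] * scale), h))

side_by_side = cv2.hconcat([left, right])  # similar to np.hstack for arrays
cv2.imwrite("side_by_side.png", side_by_side)
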
Example #1
Source File: head_pose_solver.py    From talking-head-anime-demo with MIT License
def solve_head_pose(self, face_landmarks):
        # dlib 68-point landmark indices: eyebrow corners, eye corners, and nose-wing points
        indices = [17, 21, 22, 26, 36, 39, 42, 45, 31, 35]
        image_pts = np.zeros((len(indices), 2))
        for i in range(len(indices)):
            part = face_landmarks.part(indices[i])
            image_pts[i, 0] = part.x
            image_pts[i, 1] = part.y

        _, rotation_vec, translation_vec = cv2.solvePnP(self.face_model_points,
                                                        image_pts,
                                                        self.camera_matrix,
                                                        self.distortion_coeffs)
        projected_head_pose_box_points, _ = cv2.projectPoints(self.head_pose_box_points,
                                                              rotation_vec,
                                                              translation_vec,
                                                              self.camera_matrix,
                                                              self.distortion_coeffs)
        projected_head_pose_box_points = tuple(map(tuple, projected_head_pose_box_points.reshape(8, 2)))

        # Calculate Euler angles
        rotation_mat, _ = cv2.Rodrigues(rotation_vec)
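        # hconcat stacks the 3x3 rotation matrix and the 3x1 translation into the 3x4 [R|t] pose matrix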
        pose_mat = cv2.hconcat((rotation_mat, translation_vec))
        _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
        return projected_head_pose_box_points, euler_angles 
Example #2
Source File: headpose.py    From face_landmark with Apache License 2.0
def get_head_pose(shape,img):
    h,w,_=img.shape
    K = [w, 0.0, w//2,
         0.0, w, h//2,
         0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]

    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)

    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #                         shape[39], shape[42], shape[45], shape[31], shape[35],
    #                         shape[48], shape[54], shape[57], shape[8]])
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35]])
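    # object_pts (the 3D face model points) and reprojectsrc (the 3D box corners projected below)
    # are module-level arrays defined elsewhere in the original source file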
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # Calculate Euler angles
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle 
Example #3
Source File: pose.py    From Peppa_Pig_Face_Engine with Apache License 2.0
def get_head_pose(shape,img):
    h,w,_=img.shape
    K = [w, 0.0, w//2,
         0.0, w, h//2,
         0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]

    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)

    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #                         shape[39], shape[42], shape[45], shape[31], shape[35],
    #                         shape[48], shape[54], shape[57], shape[8]])
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35]])
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # Calculate Euler angles
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle 
Example #4
Source File: headpose.py    From PINTO_model_zoo with MIT License
def get_head_pose(shape,img):
    h,w,_=img.shape
    K = [w, 0.0, w//2,
         0.0, w, h//2,
         0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]

    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)

    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #                         shape[39], shape[42], shape[45], shape[31], shape[35],
    #                         shape[48], shape[54], shape[57], shape[8]])
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35]])
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # Calculate Euler angles
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle 
Example #5
Source File: movie_concat.py    From PINTO_model_zoo with MIT License
def image_hcombine(im_info1, im_info2):
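    # Each im_info is a (image, color_flag) pair; color_flag == 1 marks a 3-channel color image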
    img1 = im_info1[0]
    img2 = im_info2[0]
    color_flag1 = im_info1[1]
    color_flag2 = im_info2[1]
 
    if color_flag1 == 1:
        h1, w1, ch1 = img1.shape[:3]
    else:
        h1, w1 = img1.shape[:2]
 
    if color_flag2 == 1:
        h2, w2, ch2 = img2.shape[:3]
    else:
        h2, w2 = img2.shape[:2]
 
    if h1 < h2:
        # Scale img1 to img2's height before overwriting h1, preserving its aspect ratio
        w1 = int((h2 / h1) * w1)
        h1 = h2
        img1 = cv2.resize(img1, (w1, h1))
    else:
        # Scale img2 to img1's height before overwriting h2, preserving its aspect ratio
        w2 = int((h1 / h2) * w2)
        h2 = h1
        img2 = cv2.resize(img2, (w2, h2))
 
    img = cv2.hconcat([img1, img2])
    return img 
Example #6
Source File: raidnearby.py    From PGSS with GNU General Public License v3.0
def detectTime(self, time_binary):
#        img_gray = cv2.cvtColor(time_img, cv2.COLOR_BGR2GRAY)
#        ret, thresh1 = cv2.threshold(img_gray, 230, 255, cv2.THRESH_BINARY_INV)
        final_img = np.zeros((time_binary.shape[0], int(time_binary.shape[1] * 0.25)), np.uint8)
        right_img = np.zeros((time_binary.shape[0], int(time_binary.shape[1] * 0.15)), np.uint8)
        separate_img = np.zeros((time_binary.shape[0], int(time_binary.shape[1] * 0.1)), np.uint8)
        profile = []
        letter_start = []
        letter_end = []
        count = 0
        valley_threshold = 256  # column-sum threshold separating letter columns from the gaps between them
        # get letters separation pixels
        for i in range(time_binary.shape[1]):
            sum_vertical = sum(time_binary[:, i])
            profile.append(sum_vertical)
            if len(letter_start) == len(letter_end):
                if sum_vertical > valley_threshold:
                    letter_start.append(i)
            else:
                if sum_vertical <= valley_threshold:
                    letter_end.append(i)
                    count = count + 1
        # Add blank (black) space between letters
        for i in range(count):
            final_img = cv2.hconcat([final_img, time_binary[0:time_binary.shape[0], letter_start[i]:letter_end[i]]])
            final_img = cv2.hconcat([final_img, separate_img])
        final_img = cv2.hconcat([final_img, right_img])
        kernel = np.ones((2, 2), np.uint8)
        final_img = cv2.dilate(final_img, kernel, iterations=1)
        cv2.imwrite(self.timefile, final_img)
        text = pytesseract.image_to_string(Image.open(self.timefile),
                                           config='-c tessedit_char_whitelist=1234567890:~-AMP -psm 7')
        return text 
Example #7
Source File: refiner.py    From eccv18-rgb_pose_refinement with MIT License
def iterative_contour_alignment(self, refinable, max_iterations=3,
                                    min_rotation_displacement=0.5,
                                    min_translation_displacement=0.0025, display=False):
        assert refinable is not None

        last_pose = np.copy(refinable.hypo_pose)
        for i in range(max_iterations):

            refinable = self.refine(refinable=refinable)

            last_trans = last_pose[:3, 3]
            last_rot = Quaternion(matrix2quaternion(last_pose[:3, :3]))

            cur_trans = refinable.hypo_pose[:3, 3]
            cur_rot = Quaternion(matrix2quaternion(refinable.hypo_pose[:3, :3]))

            trans_diff = np.linalg.norm(cur_trans - last_trans)
            # Relative rotation taking the previous pose hypothesis to the current one
            update_q = cur_rot * last_rot.inverse
            angular_diff = np.abs(update_q.degrees)

            last_pose = np.copy(refinable.hypo_pose)

            if display:
                concat = cv2.hconcat([refinable.input_col, refinable.hypo_col])
                cv2.imshow('test', concat)
                cv2.waitKey(500)

            if angular_diff <= min_rotation_displacement and trans_diff <= min_translation_displacement:
                refinable.iterations = i+1
                return refinable

        refinable.iterations = max_iterations
        return refinable 
Example #8
Source File: utils.py    From EndoscopyDepthEstimation-Pytorch with GNU General Public License v3.0
def generate_training_output(colors_1, scaled_depth_maps_1, boundaries, intrinsic_matrices, is_hsv, epoch,
                             results_root):
    color_inputs_cpu = colors_1.data.cpu().numpy()
    pred_depths_cpu = scaled_depth_maps_1.data.cpu().numpy()
    boundaries_cpu = boundaries.data.cpu().numpy()
    intrinsics_cpu = intrinsic_matrices.data.cpu().numpy()
    color_imgs = []
    pred_depth_imgs = []

    for j in range(colors_1.shape[0]):
        color_img = color_inputs_cpu[j]
        pred_depth_img = pred_depths_cpu[j]

        color_img = np.moveaxis(color_img, source=[0, 1, 2], destination=[2, 0, 1])
        color_img = color_img * 0.5 + 0.5
        color_img[color_img < 0.0] = 0.0
        color_img[color_img > 1.0] = 1.0
        color_img = np.uint8(255 * color_img)
        if is_hsv:
            color_img = cv2.cvtColor(color_img, cv2.COLOR_HSV2BGR_FULL)

        pred_depth_img = np.moveaxis(pred_depth_img, source=[0, 1, 2], destination=[2, 0, 1])

        if j == 0:
            # Write point cloud
            boundary = boundaries_cpu[j]
            intrinsic = intrinsics_cpu[j]
            boundary = np.moveaxis(boundary, source=[0, 1, 2], destination=[2, 0, 1])
            point_cloud = point_cloud_from_depth(pred_depth_img, color_img, boundary,
                                                 intrinsic,
                                                 point_cloud_downsampling=1)
            write_point_cloud(
                str(results_root / "point_cloud_epoch_{epoch}_index_{index}.ply".format(epoch=epoch,
                                                                                        index=j)),
                point_cloud)

        color_img = cv2.resize(color_img, dsize=(300, 300))
        pred_depth_img = cv2.resize(pred_depth_img, dsize=(300, 300))
        color_imgs.append(color_img)

        if j == 0:
            histr = cv2.calcHist([pred_depth_img], [0], None, histSize=[100], ranges=[0, 1000])
            plt.plot(histr, color='b')
            plt.xlim([0, 40])
            plt.savefig(
                str(results_root / 'generated_depth_hist_{epoch}.jpg'.format(epoch=epoch)))
            plt.clf()
        display_depth_img = display_depth_map(pred_depth_img)
        pred_depth_imgs.append(display_depth_img)

    # Tile the batch into a single image: color frames on the top row, predicted depth maps on the bottom
    final_color = color_imgs[0]
    final_pred_depth = pred_depth_imgs[0]
    for j in range(colors_1.shape[0] - 1):
        final_color = cv2.hconcat((final_color, color_imgs[j + 1]))
        final_pred_depth = cv2.hconcat((final_pred_depth, pred_depth_imgs[j + 1]))

    final = cv2.vconcat((final_color, final_pred_depth))
    cv2.imwrite(str(results_root / 'generated_mask_{epoch}.jpg'.format(epoch=epoch)),
                final)