Python skimage.transform.estimate_transform() Examples

The following are 8 code examples of skimage.transform.estimate_transform(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module skimage.transform, or try the search function.
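Before diving into the project examples, here is a minimal self-contained sketch of the API (the point values are illustrative): estimate_transform() takes a transform type, an Nx2 array of source coordinates, and an Nx2 array of destination coordinates, and returns a transform object whose .params attribute holds the 3x3 homogeneous matrix.

import numpy as np
from skimage import transform

# Four corresponding points; dst is src shifted by (5, 2).
src = np.array([[0, 0], [0, 10], [10, 10], [10, 0]], dtype=float)
dst = src + np.array([5.0, 2.0])

# Estimate a similarity transform (rotation + uniform scale + translation).
tform = transform.estimate_transform('similarity', src, dst)

print(tform.params)       # 3x3 homogeneous transformation matrix
print(tform.scale)        # ~1.0
print(tform.translation)  # ~[5. 2.]
print(tform(src))         # the transform object is callable on Nx2 coords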
Example #1
Source File: lfw.py    From autoencoding_beyond_pixels with MIT License
def lfw_imgs(alignment):
    if alignment == 'landmarks':
        dataset = dp.dataset.LFW('original')
        imgs = dataset.imgs
        landmarks = dataset.landmarks('68')
        n_landmarks = 68
        landmarks_mean = np.mean(landmarks, axis=0)
        landmarks_mean = np.array([landmarks_mean[:n_landmarks],
                                   landmarks_mean[n_landmarks:]])
        aligned_imgs = []
        for img, points in zip(imgs, landmarks):
            points = np.array([points[:n_landmarks], points[n_landmarks:]])
            transf = transform.estimate_transform('similarity',
                                                  landmarks_mean.T, points.T)
            img = img / 255.
            img = transform.warp(img, transf, order=3)
            img = np.round(img*255).astype(np.uint8)
            aligned_imgs.append(img)
        imgs = np.array(aligned_imgs)
    else:
        dataset = dp.dataset.LFW(alignment)
        imgs = dataset.imgs
    return imgs 
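One subtlety in Example #1: skimage.transform.warp() treats the transform it receives as the inverse map, i.e. it maps output pixel coordinates back into the input image. That is why the transform above is estimated from the mean landmarks to each face's points: passing it to warp() moves each face onto the mean-landmark frame. A small sketch of the convention, using only a standard skimage sample image:

from skimage import data, transform

img = data.camera()

shift = transform.SimilarityTransform(translation=(50, 20))
# Passing shift.inverse moves the image content by (+50, +20);
# passing shift itself would move it by (-50, -20).
shifted = transform.warp(img, shift.inverse)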
Example #2
Source File: astroalign.py    From astroalign with MIT License
def fit(self, data):
        """
    Return the best 2D similarity transform from the points given in data.

    data: N sets of similar corresponding triangles.
        3 indices for a triangle in ref
        and the 3 indices for the corresponding triangle in target;
        arranged in a (N, 3, 2) array.
        """
        d1, d2, d3 = data.shape
        s, d = data.reshape(d1 * d2, d3).T
        approx_t = estimate_transform(
            "similarity", self.source[s], self.target[d]
        )
        return approx_t 
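astroalign calls this fit() from a RANSAC loop, which needs a per-sample error to separate inliers from outliers. skimage transform objects provide that directly via residuals(), as in this hedged sketch with synthetic points:

import numpy as np
from skimage.transform import SimilarityTransform, estimate_transform

rng = np.random.default_rng(0)
src = rng.uniform(0, 100, size=(10, 2))
dst = SimilarityTransform(scale=1.2, rotation=0.1, translation=(5, -3))(src)
dst += rng.normal(scale=0.05, size=dst.shape)  # small measurement noise

tform = estimate_transform('similarity', src, dst)
# Per-point distance between tform(src) and dst -- a natural RANSAC score.
print(tform.residuals(src, dst))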
Example #3
Source File: test_align.py    From astroalign with MIT License
def test_find_transform_givensources(self):
        from skimage.transform import estimate_transform, matrix_transform

        source = np.array(
            [
                [1.4, 2.2],
                [5.3, 1.0],
                [3.7, 1.5],
                [10.1, 9.6],
                [1.3, 10.2],
                [7.1, 2.0],
            ]
        )
        nsrc = source.shape[0]
        scale = 1.5  # scaling parameter
        alpha = np.pi / 8.0  # rotation angle
        mm = scale * np.array(
            [[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]]
        )
        tx, ty = 2.0, 1.0  # translation parameters
        transl = np.array([nsrc * [tx], nsrc * [ty]])
        dest = (mm.dot(source.T) + transl).T
        t_true = estimate_transform("similarity", source, dest)

        # disorder dest points so they don't match the order of source
        np.random.shuffle(dest)

        t, (src_pts, dst_pts) = aa.find_transform(source, dest)
        self.assertLess(abs(t_true.scale - t.scale), 1e-10)
        self.assertLess(abs(t_true.rotation - t.rotation), 1e-10)
        self.assertLess(
            np.linalg.norm(t_true.translation - t.translation), 1e-10
        )
        self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
        self.assertEqual(src_pts.shape[1], 2)
        self.assertEqual(dst_pts.shape[1], 2)
        dst_pts_test = matrix_transform(src_pts, t.params)
        self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1e-10) 
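matrix_transform() in the last two assertions is just the functional spelling of applying a transform: it multiplies the coordinates by the given 3x3 matrix. Calling the transform object produces the same result, as this short sketch (illustrative points) shows:

import numpy as np
from skimage.transform import estimate_transform, matrix_transform

src = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
dst = np.array([[1.0, 1.0], [2.0, 1.0], [1.0, 2.0]])

t = estimate_transform('similarity', src, dst)
# matrix_transform(src, t.params) and t(src) compute the same coordinates.
assert np.allclose(matrix_transform(src, t.params), t(src))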
Example #4
Source File: preprocess.py    From AutoPortraitMatting with Apache License 2.0
def gen_data(name):
    reftracker = scio.loadmat('data/images_tracker.00047.mat')['tracker']
    desttracker = scio.loadmat('data/images_tracker/'+name+'.mat')['tracker']
    refpos = np.floor(np.mean(reftracker, 0))
    xxc, yyc = np.meshgrid(np.arange(1, 1801, dtype=int), np.arange(1, 2001, dtype=int))
    #normalize x and y channels
    xxc = (xxc - 600 - refpos[0]) * 1.0 / 600
    yyc = (yyc - 600 - refpos[1]) * 1.0 / 600
    maskimg = Image.open('data/meanmask.png')
    maskc = np.array(maskimg, dtype=float)
    maskc = np.pad(maskc, (600, 600), 'minimum')
    # warp is an inverse transform, and so src and dst must be reversed here
    tform = transform.estimate_transform('affine', desttracker + 600, reftracker + 600)
    
    img_data = skio.imread('data/images_data/'+name+'.jpg')
    # save org mat
    warpedxx = transform.warp(xxc, tform, output_shape=xxc.shape)
    warpedyy = transform.warp(yyc, tform, output_shape=xxc.shape)
    warpedmask = transform.warp(maskc, tform, output_shape=xxc.shape)
    # xx, yy, and mask channels are 2-D, so crop with two indices
    warpedxx = warpedxx[600:1400, 600:1200]
    warpedyy = warpedyy[600:1400, 600:1200]
    warpedmask = warpedmask[600:1400, 600:1200]
    img_h, img_w, _ = img_data.shape
    mat = np.zeros((img_h, img_w, 3), dtype=float)
    mat[:, :, 0] = (img_data[:, :, 2] * 1.0 - 104.008) / 255
    mat[:, :, 1] = (img_data[:, :, 1] * 1.0 - 116.669) / 255
    mat[:, :, 2] = (img_data[:, :, 0] * 1.0 - 122.675) / 255
    scio.savemat('portraitFCN_data/' + name + '.mat', {'img':mat})
    mat_plus = np.zeros((img_h, img_w, 6), dtype=float)
    mat_plus[:, :, 0:3] = mat
    mat_plus[:, :, 3] = warpedxx
    mat_plus[:, :, 4] = warpedyy
    mat_plus[:, :, 5] = warpedmask 
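Two points in Example #4 generalize: the transform is estimated with destination and reference swapped because warp() applies it as the inverse map, and output_shape pins the size of the warped canvas. A minimal sketch of the same pattern on synthetic data (the landmark values are made up):

import numpy as np
from skimage import transform

ref_pts = np.array([[10.0, 10.0], [90.0, 10.0], [50.0, 80.0]])
dst_pts = np.array([[15.0, 12.0], [95.0, 15.0], [55.0, 85.0]])

img = np.random.rand(100, 100)  # stand-in for a ref-frame channel

# Estimate dst -> ref; warp() uses it as the inverse map, so the
# ref-frame image is resampled into the destination frame.
tform = transform.estimate_transform('affine', dst_pts, ref_pts)
warped = transform.warp(img, tform, output_shape=img.shape)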
Example #5
Source File: __init__.py    From sima with GNU General Public License v2.0
def estimate_coordinate_transform(source, target, method, **method_kwargs):
    """Calculates a transformation from a source list of coordinates to a
    target list of coordinates.

    Parameters
    ----------
    source : Nx2 array
        (x, y) coordinate pairs from source image.
    target : Nx2 array
        (x, y) coordinate pairs from target image. Must be same shape as
        'source'.
    method : string
        Method to use for transform estimation.
    **method_kwargs : optional
        Additional arguments can be passed in specific to the particular
        method. For example, 'order' for a polynomial transform estimation.

    Returns
    -------
    transform : skimage.transform._geometric.GeometricTransform
        A skimage transform object.

    See Also
    --------
    skimage.transform.estimate_transform

    """

    return tf.estimate_transform(method, source, target, **method_kwargs) 
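The **method_kwargs passthrough is what makes method-specific options such as the polynomial order reachable; estimate_transform() forwards them to the underlying transform's estimate(). A hedged sketch of the polynomial case:

import numpy as np
from skimage import transform as tf

rng = np.random.default_rng(1)
src = rng.uniform(0, 50, size=(12, 2))
dst = src + 0.01 * src**2  # a mildly nonlinear (exactly quadratic) warp

# 'order' is forwarded to PolynomialTransform.estimate via **method_kwargs.
t = tf.estimate_transform('polynomial', src, dst, order=2)
print(np.max(np.abs(t(src) - dst)))  # near zero for an exact order-2 fit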
Example #6
Source File: test_align.py    From astroalign with MIT License
def check_if_ok(self, numstars):
        "Helper function with common test code for 3, 4, 5, and 6 stars"
        from skimage.transform import estimate_transform, matrix_transform

        if numstars > 6:
            raise NotImplementedError

        # x and y of stars in the ref frame (ints)
        self.star_refx = np.array([100, 120, 400, 400, 200, 200])[:numstars]
        self.star_refy = np.array([150, 200, 200, 320, 210, 350])[:numstars]
        self.num_stars = numstars
        # Fluxes of stars
        self.star_f = np.array(numstars * [700.0])

        (
            self.image,
            self.image_ref,
            self.star_ref_pos,
            self.star_new_pos,
        ) = simulate_image_pair(
            shape=(self.h, self.w),
            translation=(self.x_offset, self.y_offset),
            rot_angle_deg=50.0,
            num_stars=self.num_stars,
            star_refx=self.star_refx,
            star_refy=self.star_refy,
            star_flux=self.star_f,
        )

        source = self.star_ref_pos
        dest = self.star_new_pos.copy()
        t_true = estimate_transform("similarity", source, dest)

        # disorder dest points so they don't match the order of source
        np.random.shuffle(dest)

        t, (src_pts, dst_pts) = aa.find_transform(source, dest)
        self.assertLess(abs(t_true.scale - t.scale), 1e-10)
        self.assertLess(abs(t_true.rotation - t.rotation), 1e-10)
        self.assertLess(
            np.linalg.norm(t_true.translation - t.translation), 1.0
        )
        self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
        self.assertLessEqual(src_pts.shape[0], source.shape[0])
        self.assertEqual(src_pts.shape[1], 2)
        self.assertEqual(dst_pts.shape[1], 2)
        dst_pts_test = matrix_transform(src_pts, t.params)
        self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1.0) 
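The hand-built rotation matrix in these tests can also be expressed with skimage's own constructor, which makes ground-truth comparisons compact. A sketch (not astroalign code; the points are arbitrary):

import numpy as np
from skimage.transform import SimilarityTransform, estimate_transform

src = np.array([[100.0, 150.0], [120.0, 200.0],
                [400.0, 200.0], [400.0, 320.0]])

true = SimilarityTransform(scale=1.5, rotation=np.deg2rad(50.0),
                           translation=(10.0, 20.0))
dst = true(src)

est = estimate_transform('similarity', src, dst)
# rotation is reported in radians, translation as (tx, ty).
assert np.allclose(est.rotation, true.rotation)
assert np.allclose(est.translation, true.translation)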
Example #7
Source File: prnet.py    From centerpose with MIT License
def process(self, input, image_info=None):
        ''' process image with crop operation.
        Args:
            input: (h,w,3) array or str (image path). image value range: 1~255.
            image_info: the bounding box [left, right, top, bottom] of the face, or an array of face key points from which the box is derived.

        Returns:
            pos: the 3D position map. (256, 256, 3).
        '''
        if isinstance(input, str):
            try:
                image = imread(input)
            except IOError:
                print("error opening file: ", input)
                return None
        else:
            image = input

        if image.ndim < 3:
            image = np.tile(image[:,:,np.newaxis], [1,1,3])


        if np.max(image_info.shape) > 4: # key points to get bounding box
            kpt = image_info
            if kpt.shape[0] > 3:
                kpt = kpt.T
            left = np.min(kpt[0, :])
            right = np.max(kpt[0, :])
            top = np.min(kpt[1, :])
            bottom = np.max(kpt[1, :])
        else:  # bounding box
            bbox = image_info
            left = bbox[0]
            right = bbox[1]
            top = bbox[2]
            bottom = bbox[3]
        old_size = (right - left + bottom - top)/2
        center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])
        size = int(old_size*1.6)

        # crop image
        src_pts = np.array([[center[0] - size / 2, center[1] - size / 2],
                            [center[0] - size / 2, center[1] + size / 2],
                            [center[0] + size / 2, center[1] - size / 2]])
        DST_PTS = np.array([[0, 0], [0, self.resolution_inp - 1], [self.resolution_inp - 1, 0]])
        tform = estimate_transform('similarity', src_pts, DST_PTS)
        
        image = image/255.
        cropped_image = warp(image, tform.inverse, output_shape=(self.resolution_inp, self.resolution_inp))
        # run our net
        #st = time()
        cropped_image = torch.from_numpy(cropped_image[np.newaxis, ...].transpose(0,3,1,2).astype(np.float32)).cuda()
        cropped_pos = self.net_forward(cropped_image)*self.resolution_inp*1.1
        #print 'net time:', time() - st

        # restore 
        cropped_vertices = np.reshape(cropped_pos, [-1, 3]).T
        z = cropped_vertices[2,:].copy()/tform.params[0,0]
        cropped_vertices[2,:] = 1
        vertices = np.dot(np.linalg.inv(tform.params), cropped_vertices)
        vertices = np.vstack((vertices[:2,:], z))
        pos = np.reshape(vertices.T, [self.resolution_op, self.resolution_op, 3])
        
        return pos 
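The cropping idiom here is worth isolating: three corners of the crop box, mapped to three corners of the output, fully determine a similarity transform, and warp(image, tform.inverse, output_shape=...) then crops and resizes in a single resampling step. A self-contained sketch (the crop center and size are arbitrary):

import numpy as np
from skimage import data
from skimage.transform import estimate_transform, warp

image = data.astronaut() / 255.0
res = 256  # output resolution, as in the example above

center, size = np.array([256.0, 256.0]), 300.0
src_pts = np.array([[center[0] - size / 2, center[1] - size / 2],
                    [center[0] - size / 2, center[1] + size / 2],
                    [center[0] + size / 2, center[1] - size / 2]])
dst_pts = np.array([[0, 0], [0, res - 1], [res - 1, 0]])

tform = estimate_transform('similarity', src_pts, dst_pts)
cropped = warp(image, tform.inverse, output_shape=(res, res))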
Example #8
Source File: api.py    From MaskInsightface with Apache License 2.0
def process(self, image, bbox):
        ''' process image with crop operation.
        Args:
            image: (h,w,3) array. image value range: 1~255.
            bbox: the bounding box [left, top, right, bottom] of the face.

        Returns:
            pos: the 3D position map. (256, 256, 3).
        '''
        if image.ndim < 3:
            image = np.tile(image[:, :, np.newaxis], [1, 1, 3])

        left = bbox[0]
        right = bbox[2]
        top = bbox[1]
        bottom = bbox[3]
        old_size = (right - left + bottom - top) / 2
        center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.14])
        size = int(old_size * 1.318)

        # crop image
        src_pts = np.array([[center[0] - size / 2, center[1] - size / 2], [center[0] - size / 2, center[1] + size / 2],
                            [center[0] + size / 2, center[1] - size / 2]])
        DST_PTS = np.array([[0, 0], [0, self.resolution_inp - 1], [self.resolution_inp - 1, 0]])
        tform = estimate_transform('similarity', src_pts, DST_PTS)

        image = image / 255.
        cropped_image = warp(image, tform.inverse, output_shape=(self.resolution_inp, self.resolution_inp))

        # run our net
        # st = time()
        cropped_pos = self.net_forward(cropped_image)
        # print 'net time:', time() - st
        crop_pos = cropped_pos.copy()
        # restore
        cropped_vertices = np.reshape(cropped_pos, [-1, 3]).T

        z = cropped_vertices[2, :].copy() / tform.params[0, 0]
        cropped_vertices[2, :] = 1
        vertices = np.dot(np.linalg.inv(tform.params), cropped_vertices)
        vertices = np.vstack((vertices[:2, :], z))
        pos = np.reshape(vertices.T, [self.resolution_op, self.resolution_op, 3])

        return pos
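The restore step in the last two examples inverts the crop transform by hand with np.linalg.inv on the homogeneous matrix; for the x/y coordinates this is equivalent to the transform object's built-in inverse mapping (z is handled separately only because a 2D transform has no depth axis). A sketch of the equivalence:

import numpy as np
from skimage.transform import estimate_transform

src_pts = np.array([[10.0, 10.0], [10.0, 200.0], [200.0, 10.0]])
dst_pts = np.array([[0.0, 0.0], [0.0, 255.0], [255.0, 0.0]])
tform = estimate_transform('similarity', src_pts, dst_pts)

pts = np.array([[50.0, 60.0], [100.0, 120.0]])  # points in crop space

# Explicit homogeneous inverse, as in the examples above ...
homog = np.hstack([pts, np.ones((len(pts), 1))])
back_explicit = (np.linalg.inv(tform.params) @ homog.T).T[:, :2]

# ... matches skimage's built-in inverse mapping.
assert np.allclose(back_explicit, tform.inverse(pts))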