Python skimage.exposure.rescale_intensity() Examples

The following are 23 code examples of skimage.exposure.rescale_intensity(), collected from open-source projects. The source project, file, and license are noted above each example. You may also want to check out the other available functions/classes of the module skimage.exposure.
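Before the examples, here is a minimal sketch of the call itself: values inside in_range are mapped linearly onto out_range, and values outside in_range are clipped. in_range defaults to the image's own min/max, and out_range to the full range of the image's dtype.

import numpy as np
from skimage import exposure

image = np.array([51, 102, 153], dtype=np.uint8)
# With no arguments, the image's own min/max is stretched to the full dtype range.
print(exposure.rescale_intensity(image))  # [  0 127 255]

# The recurring pattern in the examples below: clip at percentiles, then stretch.
image_f = np.random.rand(64, 64)
p2, p98 = np.percentile(image_f, (2, 98))
stretched = exposure.rescale_intensity(image_f, in_range=(p2, p98))
print(stretched.min(), stretched.max())  # 0.0 1.0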
Example #1
Source File: dataset.py    From BIRL with BSD 3-Clause "New" or "Revised" License
def project_object_edge(img, dimension):
    """ scale the image, binarise with Othu and project to one dimension

    :param ndarray img:
    :param int dimension: select dimension for projection
    :return list(float):

    >>> img = np.zeros((20, 10, 3))
    >>> img[2:6, 1:7, :] = 1
    >>> img[10:17, 4:6, :] = 1
    >>> project_object_edge(img, 0).tolist()  # doctest: +NORMALIZE_WHITESPACE
    [0.0, 0.0, 0.7, 0.7, 0.7, 0.7, 0.0, 0.0, 0.0, 0.0,
     0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.0, 0.0, 0.0]
    """
    assert dimension in (0, 1), 'not supported dimension %i' % dimension
    assert img.ndim == 3, 'unsupported image shape %r' % img.shape
    img_gray = np.mean(img, axis=-1)
    img_gray = GaussianBlur(img_gray, (5, 5), 0)
    p_low, p_high = np.percentile(img_gray, (1, 95))
    img_gray = rescale_intensity(img_gray, in_range=(p_low, p_high))
    img_bin = img_gray > threshold_otsu(img_gray)
    img_edge = np.mean(img_bin, axis=1 - dimension)
    return img_edge 
Example #2
Source File: plot.py    From earthpy with BSD 3-Clause "New" or "Revised" License
def _stretch_im(arr, str_clip):
    """Stretch an image in numpy ndarray format using a specified clip value.

    Parameters
    ----------
    arr: numpy array
        N-dimensional array in rasterio band order (bands, rows, columns)
    str_clip: int
        The % of clip to apply to the stretch. Default = 2 (2 and 98)

    Returns
    -------
    arr: numpy array with values stretched to the specified clip %

    """
    s_min = str_clip
    s_max = 100 - str_clip
    arr_rescaled = np.zeros_like(arr)
    for ii, band in enumerate(arr):
        lower, upper = np.percentile(band, (s_min, s_max))
        arr_rescaled[ii] = exposure.rescale_intensity(
            band, in_range=(lower, upper)
        )
    return arr_rescaled.copy() 
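A quick usage sketch, with a made-up three-band uint8 array standing in for a rasterio read:

import numpy as np
from skimage import exposure

arr = np.random.randint(0, 256, (3, 64, 64), dtype='uint8')
stretched = _stretch_im(arr, 2)  # clip 2% from each tail, band by band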
Example #3
Source File: helper_dataset.py    From reseg with GNU General Public License v3.0
def rgb2illumination_invariant(img, alpha, hist_eq=False):
    """
    This is an implementation of the illuminant-invariant color space
    published by Maddern et al. (2014)
    http://www.robots.ox.ac.uk/~mobile/Papers/2014ICRA_maddern.pdf

    :param img: RGB image
    :param alpha: camera parameter
    :return:
    """
    ii_img = 0.5 + np.log(img[:, :, 1] + 1e-8) - \
        alpha * np.log(img[:, :, 2] + 1e-8) - \
        (1 - alpha) * np.log(img[:, :, 0] + 1e-8)

    # ii_img = exposure.rescale_intensity(ii_img, out_range=(0, 1))
    if hist_eq:
        ii_img = exposure.equalize_hist(ii_img)

    print(np.max(ii_img))
    print(np.min(ii_img))

    return ii_img 
Example #4
Source File: spfunctions.py    From spfeas with MIT License
def scale_rgb(layers, min_max, lidx):

    layers_c = np.empty(layers.shape, dtype='float32')

    # Rescale and blur.
    for li in range(0, 3):

        layer = layers[li]

        layer = np.float32(rescale_intensity(layer,
                                             in_range=(min_max[li][0],
                                                       min_max[li][1]),
                                             out_range=(0, 1)))

        layers_c[lidx[li]] = rescale_intensity(cv2.GaussianBlur(layer,
                                                                ksize=(3, 3),
                                                                sigmaX=3),
                                               in_range=(0, 1),
                                               out_range=(-1, 1))

    return layers_c 
Example #5
Source File: data_io.py    From pyImSegm with BSD 3-Clause "New" or "Revised" License
def scale_image_intensity(img, im_range=1., quantiles=(2, 98)):
    """ scale image values with in give quntile range to filter some outlaiers

    :param ndarray img: input image
    :param im_range: range to scale image values (1. or 255)
    :param tuple(int,int) quantiles: scale image values in certain quantile range
    :return ndarray:

    >>> np.random.seed(0)
    >>> img = np.random.randint(10, 255, (25, 30))
    >>> im = scale_image_intensity(img)
    >>> im.min()
    0.0
    >>> im.max()
    1.0
    """
    p_low = np.percentile(img, quantiles[0])
    p_high = np.percentile(img, quantiles[1])
    img = exposure.rescale_intensity(img.astype(float), in_range=(p_low, p_high),
                                     out_range='float')
    if im_range == 255:
        img = np.array(img * im_range).astype(np.uint8)
    return img 
Example #6
Source File: util.py    From Attention-Gated-Networks with MIT License
def tensor2im(image_tensor, imgtype='img', datatype=np.uint8):
    image_numpy = image_tensor[0].cpu().float().numpy()
    if image_numpy.ndim == 4:  # image_numpy (C x W x H x S)
        mid_slice = image_numpy.shape[-1]//2
        image_numpy = image_numpy[:,:,:,mid_slice]
    if image_numpy.shape[0] == 1:
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    image_numpy = np.transpose(image_numpy, (1, 2, 0))
    if imgtype == 'img':
        image_numpy = (image_numpy + 8) / 16.0 * 255.0
    if np.unique(image_numpy).size == 1:
        return image_numpy.astype(datatype)
    return rescale_intensity(image_numpy.astype(datatype)) 
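A usage sketch with a random tensor whose values fall in the (-8, 8) window the scaling above assumes:

import torch

t = torch.rand(1, 1, 64, 64) * 16 - 8  # batch of one single-channel image
arr = tensor2im(t)                     # -> (64, 64, 3) uint8 array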
Example #7
Source File: LST.py    From python-urbanPlanning with MIT License
def LSTConvolue(self):
        kernel_rate = np.array([[1/8, 1/8, 1/8],
                                [1/8, -1, 1/8],
                                [1/8, 1/8, 1/8]])  # convolution kernel
        kernel_id = np.array([[-1, -1, -1],
                              [-1, 8, -1],
                              [-1, -1, -1]])  # convolution kernel
        kernel=kernel_rate
        t0=time.time()
#        print(self.LST)
        array_convolve2d=convolve2d(self.LST,kernel,mode='same')*-1
#        print(array_convolve2d.max(),array_convolve2d.min())
#        array_convolve2d=exposure.equalize_hist(array_convolve2d)
        p2, p96 = np.percentile(array_convolve2d, (2, 96))
        array_convolve2dRescale = exposure.rescale_intensity(array_convolve2d, in_range=(p2, p96))
#        print()
        array_convolve2dZero=np.copy(array_convolve2d)
        array_convolve2dZero[array_convolve2dZero>0]=1
        array_convolve2dZero[array_convolve2dZero<0]=-1
        array_convolve2dZero[array_convolve2dZero==0]=0
        
        
        t1=time.time()
        t_convolve2d=t1-t0
        print("lasting time:",t_convolve2d)
        
        self.imgShow(imges=(self.LST,array_convolve2dRescale,array_convolve2dZero),titleName=("array","array_convolve2d_rescale","0",),xyticksRange=(1,1))
      
        return array_convolve2d,array_convolve2dZero
        
## display the image
Example #8
Source File: transforms.py    From KagglePlanetPytorch with MIT License
def random_contrast(weight=lambda: np.random.rand() * 0.3 + 0.7):
    def call(x):
        w = weight()
        return x * w + (1 - w) * exposure.rescale_intensity(x)

    return call 
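random_contrast returns a closure, so a usage sketch looks like:

import numpy as np
from skimage import exposure

contrast = random_contrast()  # weight drawn from [0.7, 1.0)
x = np.random.rand(32, 32).astype('float32')
augmented = contrast(x)       # blend of x and its full-range stretch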
Example #9
Source File: dsb_utils.py    From diagnose-heart with MIT License
def segmenter_data_transform(imb, rotate=None, normalize_pctwise=False):
    if isinstance(imb, tuple) and len(imb) == 2:
        imgs,labels = imb
    else:
        imgs = imb
    # rotate image if training
    if rotate is not None:
        for i in range(imgs.shape[0]):
            degrees = float(np.random.randint(rotate[0], rotate[1])) if \
                    isinstance(rotate, tuple) else rotate
            # note: scipy.misc.imrotate was removed in SciPy 1.2
            imgs[i,0] = scipy.misc.imrotate(imgs[i,0], degrees, interp='bilinear')
            if isinstance(imb, tuple):
                labels[i,0] = scipy.misc.imrotate(labels[i,0], degrees, interp='bilinear')
    # assume they are square
    sz = c.fcn_img_size
    x,y = np.random.randint(0,imgs.shape[2]-sz,2) if imgs.shape[2] > sz else (0,0)
    imgs = nn.utils.floatX(imgs[:,:, x:x+sz, y:y+sz])/255.
    if not normalize_pctwise:
        pad = imgs.shape[2] // 5
        cut = imgs[:,0,pad:-pad,pad:-pad]
        mu = cut.mean(axis=(1,2)).reshape(imgs.shape[0],1,1,1)
        sigma = cut.std(axis=(1,2)).reshape(imgs.shape[0],1,1,1)
        imgs = (imgs - mu) / sigma
        imgs = np.minimum(3, np.maximum(-3, imgs))
    else:
        pclow, pchigh = normalize_pctwise if isinstance(normalize_pctwise, tuple) else (20,70)
        for i in range(imgs.shape[0]):
            pl, ph = np.percentile(imgs[i], (pclow, pchigh))
            imgs[i] = exposure.rescale_intensity(imgs[i], in_range=(pl, ph))
            imgs[i] = 2 * imgs[i] / imgs[i].max() - 1.
        # or other rescaling here to approximate ~ N(0,1)
    if isinstance(imb, tuple):
        labels = nn.utils.floatX(labels[:,:, x:x+sz, y:y+sz])
        return imgs, labels
    return imgs 
Example #10
Source File: spfunctions.py    From spfeas with MIT License
def segment_image(im, parameter_object):

    dims, rows, cols = im.shape

    image2segment = np.dstack((rescale_intensity(im[0],
                                                 in_range=(parameter_object.image_min,
                                                           parameter_object.image_max),
                                                 out_range=(0, 255)),
                               rescale_intensity(im[1],
                                                 in_range=(parameter_object.image_min,
                                                           parameter_object.image_max),
                                                 out_range=(0, 255)),
                               rescale_intensity(im[2],
                                                 in_range=(parameter_object.image_min,
                                                           parameter_object.image_max),
                                                 out_range=(0, 255))))

    felzer = felzenszwalb(np.uint8(image2segment),
                          scale=50,
                          sigma=.01,
                          min_size=5,
                          multichannel=True).reshape(rows, cols)

    props = regionprops(felzer)
    props = np.array([p.area for p in props], dtype='uint64')

    return fill_labels(np.uint64(felzer), props) 
Example #11
Source File: run_overlap_images_segms.py    From pyImSegm with BSD 3-Clause "New" or "Revised" License
def visualise_overlap(path_img, path_seg, path_out,
                      b_img_scale=BOOL_IMAGE_RESCALE_INTENSITY,
                      b_img_contour=BOOL_SAVE_IMAGE_CONTOUR,
                      b_relabel=BOOL_ANNOT_RELABEL,
                      segm_alpha=MIDDLE_ALPHA_OVERLAP):
    img, _ = tl_data.load_image_2d(path_img)
    seg, _ = tl_data.load_image_2d(path_seg)

    # normalise alpha in range (0, 1)
    segm_alpha = tl_visu.norm_aplha(segm_alpha)

    if b_relabel:
        seg, _, _ = segmentation.relabel_sequential(seg)

    if img.ndim == 2:  # for gray images of ovary
        img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)

    if b_img_scale:
        p_low, p_high = np.percentile(img, q=(3, 98))
        # plt.imshow(255 - img, cmap='Greys')
        img = exposure.rescale_intensity(img, in_range=(p_low, p_high),
                                         out_range='uint8')

    if b_img_contour:
        path_im_visu = os.path.splitext(path_out)[0] + '_contour.png'
        img_contour = segmentation.mark_boundaries(img[:, :, :3], seg,
                                                   color=COLOR_CONTOUR, mode='subpixel')
        plt.imsave(path_im_visu, img_contour)
    # else:  # for colour images of disc
    #     mask = (np.sum(img, axis=2) == 0)
    #     img[mask] = [255, 255, 255]

    fig = tl_visu.figure_image_segm_results(img, seg, SIZE_SUB_FIGURE,
                                            mid_labels_alpha=segm_alpha,
                                            mid_image_gray=MIDDLE_IMAGE_GRAY)
    fig.savefig(path_out)
    plt.close(fig) 
Example #12
Source File: utils.py    From brain-segmentation-pytorch with MIT License
def normalize_volume(volume):
    p10 = np.percentile(volume, 10)
    p99 = np.percentile(volume, 99)
    volume = rescale_intensity(volume, in_range=(p10, p99))
    m = np.mean(volume, axis=(0, 1, 2))
    s = np.std(volume, axis=(0, 1, 2))
    volume = (volume - m) / s
    return volume 
Example #13
Source File: core.py    From spinalcordtoolbox with MIT License
def scale_intensity(data, out_min=0, out_max=255):
    """Scale intensity of data in a range defined by [out_min, out_max], based on the 2nd and 98th percentiles."""
    p2, p98 = np.percentile(data, (2, 98))
    return rescale_intensity(data, in_range=(p2, p98), out_range=(out_min, out_max)) 
Example #14
Source File: data_generator.py    From Advanced-Deep-Learning-with-Keras with MIT License
def apply_random_intensity_rescale(self, image, percent=30):
        """Apply random intensity rescale on an image (not used)"""
        random = np.random.randint(0, 100)
        if random < percent:
            v_min, v_max = np.percentile(image, (0.2, 99.8))
            image = exposure.rescale_intensity(image, in_range=(v_min, v_max))
        return image 
Example #15
Source File: ensembleSubmissions.py    From luna16 with BSD 2-Clause "Simplified" License
def hist_stretch(im, percentiles=(1, 99)):
	# use the computed percentile values (not the percentile ranks) as in_range
	p_low, p_high = np.percentile(im, percentiles)
	return exposure.rescale_intensity(im, in_range=(p_low, p_high))
Example #16
Source File: utils.py    From zhusuan with MIT License
def save_image_collections(x, filename, shape=(10, 10), scale_each=False,
                           transpose=False):
    """
    :param shape: tuple
        The shape of final big images.
    :param x: numpy array
        Input image collections. (number_of_images, rows, columns, channels) or
        (number_of_images, channels, rows, columns)
    :param scale_each: bool
        If true, rescale intensity for each image.
    :param transpose: bool
        If true, transpose x to (number_of_images, rows, columns, channels),
        i.e., put channels behind.
    :return: `uint8` numpy array
        The output image.
    """
    from skimage import io, img_as_ubyte
    from skimage.exposure import rescale_intensity
    makedirs(filename)
    n = x.shape[0]
    if transpose:
        x = x.transpose(0, 2, 3, 1)
    if scale_each:
        for i in range(n):
            x[i] = rescale_intensity(x[i], out_range=(0, 1))
    n_channels = x.shape[3]
    x = img_as_ubyte(x)
    r, c = shape
    if r * c < n:
        print('Shape too small to contain all images')
    h, w = x.shape[1:3]
    ret = np.zeros((h * r, w * c, n_channels), dtype='uint8')
    for i in range(r):
        for j in range(c):
            if i * c + j < n:
                ret[i * h:(i + 1) * h, j * w:(j + 1) * w, :] = x[i * c + j]
    ret = ret.squeeze()
    io.imsave(filename, ret) 
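A usage sketch (the output path is hypothetical; makedirs is the module's own helper that creates the parent directory of filename):

import numpy as np

x = np.random.rand(100, 28, 28, 1).astype('float32')
save_image_collections(x, 'samples/epoch_001.png', shape=(10, 10), scale_each=True)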
Example #17
Source File: digital_display_ocr.py    From display_ocr with GNU General Public License v2.0
def process_image(orig_image_arr):
  ratio = orig_image_arr.shape[0] / 300.0

  display_image_arr = normalize_contrs(orig_image_arr,crop_display(orig_image_arr))
  #display image is now segmented.
  gry_disp_arr = cv2.cvtColor(display_image_arr, cv2.COLOR_BGR2GRAY)
  gry_disp_arr = exposure.rescale_intensity(gry_disp_arr, out_range=(0, 255))

  #thresholding
  ret, thresh = cv2.threshold(gry_disp_arr,127,255,cv2.THRESH_BINARY)
  return thresh 
Example #18
Source File: SDS_preprocess.py    From CoastSat with GNU General Public License v3.0
def rescale_image_intensity(im, cloud_mask, prob_high):
    """
    Rescales the intensity of an image (multispectral or single band) by applying
    a cloud mask and clipping at the prob_high upper percentile. This function is
    used to stretch the contrast of an image for visualisation purposes only.

    KV WRL 2018

    Arguments:
    -----------
    im: np.array
        Image to rescale, can be 3D (multispectral) or 2D (single band)
    cloud_mask: np.array
        2D cloud mask with True where cloud pixels are
    prob_high: float
        probability of exceedance used to calculate the upper percentile

    Returns:
    -----------
    im_adj: np.array
        rescaled image
    """

    # lower percentile is set to 0
    prc_low = 0

    # reshape the 2D cloud mask into a 1D vector
    vec_mask = cloud_mask.reshape(im.shape[0] * im.shape[1])

    # if image contains several bands, stretch the contrast for each band
    if len(im.shape) > 2:
        # reshape into a vector
        vec = im.reshape(im.shape[0] * im.shape[1], im.shape[2])
        # initialise with NaN values
        vec_adj = np.ones((len(vec_mask), im.shape[2])) * np.nan
        # loop through the bands
        for i in range(im.shape[2]):
            # find the higher percentile (based on prob)
            prc_high = np.percentile(vec[~vec_mask, i], prob_high)
            # clip the image around the 2 percentiles and rescale the contrast
            vec_rescaled = exposure.rescale_intensity(vec[~vec_mask, i],
                                                      in_range=(prc_low, prc_high))
            vec_adj[~vec_mask,i] = vec_rescaled
        # reshape into image
        im_adj = vec_adj.reshape(im.shape[0], im.shape[1], im.shape[2])

    # if the image only has one band (grayscale image)
    else:
        vec = im.reshape(im.shape[0] * im.shape[1])
        vec_adj = np.ones(len(vec_mask)) * np.nan
        prc_high = np.percentile(vec[~vec_mask], prob_high)
        vec_rescaled = exposure.rescale_intensity(vec[~vec_mask], in_range=(prc_low, prc_high))
        vec_adj[~vec_mask] = vec_rescaled
        im_adj = vec_adj.reshape(im.shape[0], im.shape[1])

    return im_adj 
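A usage sketch with synthetic data and a high percentile clip:

import numpy as np

im = np.random.rand(100, 100, 3)               # stand-in multispectral image
cloud_mask = np.zeros((100, 100), dtype=bool)  # no cloudy pixels
im_adj = rescale_image_intensity(im, cloud_mask, 99.9)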
Example #19
Source File: data_generator.py    From Silhouette-Guided-3D with MIT License
def __getitem__(self, idx):
        pkl_path = os.path.join(self.root_dir,self.namelist[idx])
        pkl = pickle.load(open(pkl_path, 'rb'), encoding='bytes')
        img = pkl[0].astype('float32')/255.0
        label = pkl[1][:,:3]
        
        # Re-sample the ground truth: the ShapeNet point-cloud ground truth
        # by Wang et al. does not have the same number of points across images
        if label.shape[0]<self.refine_size:
            # re-sample
            sub_iter = self.refine_size // label.shape[0]
            sub_num =  self.refine_size - label.shape[0]*sub_iter
            label_n = label.copy()
            for i in range(sub_iter-1):
                label = np.concatenate((label, label_n), axis=0)
            subidx = np.random.permutation(label_n.shape[0])
            subidx = subidx[:sub_num]
            label = np.concatenate((label, label_n[subidx]), axis=0)

        # load mask
        mask_path = self.root_dir+self.namelist[idx][:5]+'mask/'+self.namelist[idx][19:-3]+'png'
        # note: scipy.ndimage.imread was removed in SciPy 1.2
        mask = scipy.ndimage.imread(mask_path)
        mask = np.expand_dims(mask,axis=2)

        subidx = np.random.permutation(label.shape[0])
        subidx = subidx[:self.refine_size]
        label_f = label[subidx]
        label_f = np.float32(label_f)

        # data augmentation
        if self.train_type == 'train':
            # gamma
            random.seed()
            g_prob = np.random.random()*1+0.5
            img = exposure.adjust_gamma(img, g_prob)
            # intensity
            random.seed()
            g_prob = np.random.random()*127
            img = exposure.rescale_intensity(img*255.0, in_range=(g_prob, 255))
            # color channel
            random.seed()
            g_prob = np.random.random()*0.4+0.8
            img[:,:,0] = img[:,:,0]*g_prob
            random.seed()
            g_prob = np.random.random()*0.4+0.8
            img[:,:,1] = img[:,:,1]*g_prob
            random.seed()
            g_prob = np.random.random()*0.4+0.8
            img[:,:,2] = img[:,:,2]*g_prob
            np.clip(img, 0.0, 1.0, out=img)

        # permute dim
        if self.transform:
            if self.train_type == 'train':
                img = data_transforms['train'](img).float()
                mask = data_transforms['train'](mask).float() 
            else:
                img = data_transforms['val'](img).float()
                mask = data_transforms['val'](mask).float()

        return img, label_f,  mask 
Example #20
Source File: spfunctions.py    From spfeas with MIT License
def saliency(i_info, parameter_object, i_sect, j_sect, n_rows, n_cols):

    """
    References:
        Federico Perazzi, Philipp Krähenbühl, Yael Pritch, Alexander Hornung (2012).
            Saliency Filters: Contrast Based Filtering for Salient Region Detection.
            IEEE CVPR, Providence, Rhode Island, USA, June 16-21.

            https://graphics.ethz.ch/~perazzif/saliency_filters/

        Ming-Ming Cheng, Niloy J. Mitra, Xiaolei Huang, Philip H. S. Torr, Shi-Min Hu (2015).
            Global Contrast based Salient Region Detection. IEEE TPAMI.
    """

    # min_max = sputilities.get_layer_min_max(i_info)
    min_max = [(parameter_object.image_min, parameter_object.image_max)] * 3

    if parameter_object.vis_order == 'bgr':
        lidx = [2, 1, 0]
    else:
        lidx = [0, 1, 2]

    # Read the section.
    layers = i_info.read(bands2open=[1, 2, 3],
                         i=i_sect,
                         j=j_sect,
                         rows=n_rows,
                         cols=n_cols,
                         d_type='float32')

    layers = scale_rgb(layers, min_max, lidx)

    # Transpose the image to RGB
    layers = layers.transpose(1, 2, 0)

    # Perform RGB to CIE Lab color space conversion
    layers = rgb2rgbcie(layers)

    # Compute Lab average values
    # lm = layers[:, :, 0].mean(axis=0).mean()
    # am = layers[:, :, 1].mean(axis=0).mean()
    # bm = layers[:, :, 2].mean(axis=0).mean()
    lm = parameter_object.lab_means[0]
    am = parameter_object.lab_means[1]
    bm = parameter_object.lab_means[2]

    return np.uint8(rescale_intensity((layers[:, :, 0] - lm)**2. +
                                      (layers[:, :, 1] - am)**2. +
                                      (layers[:, :, 2] - bm)**2.,
                                      in_range=(-1, 1),
                                      out_range=(0, 255))) 
Example #21
Source File: spfunctions.py    From spfeas with MIT License
def get_orb_keypoints(bd, image_min, image_max):

    """
    Computes the ORB key points

    Args:
        bd (2d array)
        image_min (int or float)
        image_max (int or float)
    """

    # We want odd patch sizes.
    # if parameter_object.scales[-1] % 2 == 0:
    #     patch_size = parameter_object.scales[-1] - 1

    if bd.dtype != 'uint8':

        bd = np.uint8(rescale_intensity(bd,
                                        in_range=(image_min,
                                                  image_max),
                                        out_range=(0, 255)))

    patch_size = 31
    patch_size_d = patch_size * 3

    # Initiate ORB detector
    orb = cv2.ORB_create(nfeatures=int(.25*(bd.shape[0]*bd.shape[1])),
                         edgeThreshold=patch_size,
                         scaleFactor=1.2,
                         nlevels=8,
                         patchSize=patch_size,
                         WTA_K=4,
                         scoreType=cv2.ORB_FAST_SCORE)

    # Add padding because ORB ignores edges.
    bd = cv2.copyMakeBorder(bd, patch_size_d, patch_size_d, patch_size_d, patch_size_d, cv2.BORDER_REFLECT)

    # Compute ORB keypoints
    key_points = orb.detectAndCompute(bd, None)[0]

    # img = cv2.drawKeypoints(np.uint8(ch_bd), key_points, np.uint8(ch_bd).copy())

    return fill_key_points(np.float32(bd), key_points)[patch_size_d:-patch_size_d, patch_size_d:-patch_size_d] 
Example #22
Source File: spfunctions.py    From spfeas with MIT License
def convolve_gabor(bd, image_min, image_max, scales):

    """
    Convolves an image with a series of Gabor kernels

    Args:
        bd (2d array)
        image_min (int or float)
        image_max (int or float)
        scales (1d array like)
    """

    if bd.dtype != 'uint8':

        bd = np.uint8(rescale_intensity(bd,
                                        in_range=(image_min,
                                                  image_max),
                                        out_range=(0, 255)))

    # Each set of Gabor kernels
    #   has 8 orientations.
    out_block = np.empty((8*len(scales),
                          bd.shape[0],
                          bd.shape[1]), dtype='uint8')

    ki = 0

    for scale in scales:

        # Check for even or
        #   odd scale size.
        if scale % 2 == 0:
            ssub = 1
        else:
            ssub = 0

        gabor_kernels = prep_gabor(kernel_size=(scale-ssub, scale-ssub))

        for kernel in gabor_kernels:

            out_block[ki] = cv2.filter2D(bd, cv2.CV_8U, kernel)

            ki += 1

    return out_block 
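prep_gabor is a project-specific helper from spfeas; a rough, hypothetical stand-in built on OpenCV's getGaborKernel (eight orientations, matching the comment above) could look like:

import numpy as np
import cv2

def prep_gabor(kernel_size=(15, 15), sigma=4.0, lambd=10.0, gamma=0.5):
    # Hypothetical stand-in: one Gabor kernel per each of 8 orientations.
    thetas = np.arange(0, np.pi, np.pi / 8.0)
    return [cv2.getGaborKernel(kernel_size, sigma, theta, lambd, gamma)
            for theta in thetas]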
Example #23
Source File: dsb_utils.py    From diagnose-heart with MIT License
def segmenter_data_transform(imb, shift=0, rotate=0, scale=0, normalize_pctwise=(20,95), istest=False):
    if isinstance(imb, tuple) and len(imb) == 2:
        imgs,labels = imb
    else:
        imgs = imb

    # rotate image if training
    if rotate > 0:
        for i in range(imgs.shape[0]):
            degrees = rotate if istest else np.clip(np.random.normal(), -2, 2) * rotate
            imgs[i,0] = scipy.misc.imrotate(imgs[i,0], degrees, interp='bilinear')
            if isinstance(imb, tuple):
                labels[i,0] = scipy.misc.imrotate(labels[i,0], degrees, interp='bilinear')

    # rescale
    if scale > 0:
        assert 0 < scale <= 0.5
        for i in range(imgs.shape[0]):
            sc = 1 + (scale if istest else np.clip(np.random.normal(), -2, 2) * scale)
            imgs[i,0] = rescale(imgs[i,0], sc)
            if isinstance(imb, tuple):
                labels[i,0] = rescale(labels[i,0], sc)

    # shift
    if shift > 0 and not istest:
        for i in range(imgs.shape[0]):
            x, y = np.random.randint(-shift, shift, 2)
            imgs[i,0] = img_shift(imgs[i,0], (x, y))
            if isinstance(imb, tuple):
                labels[i,0] = img_shift(labels[i,0], (x, y))

    imgs = nn.utils.floatX(imgs) / 255.0
    for i in range(imgs.shape[0]):
        pclow, pchigh = normalize_pctwise
        if isinstance(pclow, tuple):
            pclow = np.random.randint(pclow[0], pclow[1])
            pchigh = np.random.randint(pchigh[0], pchigh[1])
        pl, ph = np.percentile(imgs[i], (pclow, pchigh))
        imgs[i] = exposure.rescale_intensity(imgs[i], in_range=(pl, ph))
        imgs[i] = 2 * imgs[i] / imgs[i].max() - 1.

    if isinstance(imb, tuple):
        labels = nn.utils.floatX(labels) / 255.0
        return imgs, labels
    else:
        return imgs