Python scipy.ndimage.filters.gaussian_filter() Examples

The following are 30 code examples of scipy.ndimage.filters.gaussian_filter(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.ndimage.filters, or try the search function.
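As a quick orientation before the project examples, here is a minimal standalone sketch (the array contents and sigma values are arbitrary): gaussian_filter smooths an N-dimensional array with a Gaussian kernel whose width is set by sigma, given either as a single value or as one value per axis. In recent SciPy versions the same function is also exposed directly as scipy.ndimage.gaussian_filter.

import numpy as np
from scipy.ndimage import gaussian_filter  # older code imports from scipy.ndimage.filters

a = np.random.rand(64, 64)
smoothed_iso = gaussian_filter(a, sigma=2)         # same sigma along both axes
smoothed_aniso = gaussian_filter(a, sigma=(4, 1))  # one sigma per axis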
Example #1
Source File: preprocess.py    From brain_segmentation with MIT License
def preprocess_img(inputfile, output_preprocessed, zooms):
    img = nib.load(inputfile)
    data = img.get_data()
    affine = img.affine
    zoom = img.header.get_zooms()[:3]
    data, affine = reslice(data, affine, zoom, zooms, 1)
    data = np.squeeze(data)
    data = np.pad(data, [(0, 256 - len_) for len_ in data.shape], "constant")

    data_sub = data - gaussian_filter(data, sigma=1)
    img = sitk.GetImageFromArray(np.copy(data_sub))
    img = sitk.AdaptiveHistogramEqualization(img)
    data_clahe = sitk.GetArrayFromImage(img)[:, :, :, None]
    data = np.concatenate((data_clahe, data[:, :, :, None]), 3)
    data = (data - np.mean(data, (0, 1, 2))) / np.std(data, (0, 1, 2))
    assert data.ndim == 4, data.ndim
    assert np.allclose(np.mean(data, (0, 1, 2)), 0.), np.mean(data, (0, 1, 2))
    assert np.allclose(np.std(data, (0, 1, 2)), 1.), np.std(data, (0, 1, 2))
    data = np.float32(data)

    img = nib.Nifti1Image(data, affine)
    nib.save(img, output_preprocessed) 
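A hypothetical invocation of preprocess_img (the file names and target voxel sizes below are illustrative assumptions, not taken from the original project):

preprocess_img("subject01_T1.nii.gz", "subject01_T1_preprocessed.nii.gz", zooms=(1.0, 1.0, 1.0))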
Example #2
Source File: baseline_utils.py    From pysaliency with MIT License
def _log_density(self, stimulus):
        shape = stimulus.shape[0], stimulus.shape[1]
        if shape not in self.shape_cache:
            ZZ = np.zeros(shape)
            height, width = shape
            if self.keep_aspect:
                max_size = max(height, width)
                y_factor = max_size
                x_factor = max_size
            else:
                y_factor = height
                x_factor = width
            _fixations = np.array([self.ys*y_factor, self.xs*x_factor]).T
            fill_fixation_map(ZZ, _fixations)
            ZZ = gaussian_filter(ZZ, [self.bandwidth*y_factor, self.bandwidth*x_factor])
            ZZ *= (1-self.eps)
            ZZ += self.eps * 1.0/(shape[0]*shape[1])
            ZZ = np.log(ZZ)

            ZZ -= logsumexp(ZZ)
            self.shape_cache[shape] = ZZ

        return self.shape_cache[shape] 
Example #3
Source File: myImageTransformations.py    From Attention-Gated-Networks with MIT License
def elastic_transform(image, alpha=1000, sigma=30, spline_order=1, mode='nearest', random_state=np.random):
    """Elastic deformation of image as described in [Simard2003]_.
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    """
    assert image.ndim == 3
    shape = image.shape[:2]

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1),
                         sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1),
                         sigma, mode="constant", cval=0) * alpha

    x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    indices = [np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))]
    result = np.empty_like(image)
    for i in range(image.shape[2]):
        result[:, :, i] = map_coordinates(
            image[:, :, i], indices, order=spline_order, mode=mode).reshape(shape)
    return result 
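A minimal usage sketch of elastic_transform as defined above (the image shape, parameter values, and random seed are illustrative assumptions):

import numpy as np

rng = np.random.RandomState(0)        # fixed seed for a reproducible deformation
image = np.random.rand(128, 128, 3)   # H x W x C, as required by the ndim == 3 assert
warped = elastic_transform(image, alpha=1000, sigma=30, spline_order=1, random_state=rng)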
Example #4
Source File: custom_transforms.py    From ECN with Apache License 2.0
def elastic_transform(image, alpha=1000, sigma=30, spline_order=1, mode='nearest', random_state=np.random):
    """Elastic deformation of image as described in [Simard2003]_.
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    """
    assert image.ndim == 3
    shape = image.shape[:2]

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1),
                         sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1),
                         sigma, mode="constant", cval=0) * alpha

    x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    indices = [np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))]
    result = np.empty_like(image)
    for i in range(image.shape[2]):
        result[:, :, i] = map_coordinates(
            image[:, :, i], indices, order=spline_order, mode=mode).reshape(shape)
    return result 
Example #5
Source File: make_diffraction_test_data.py    From pyxem with GNU General Public License v3.0
def get_diffraction_test_image(self, dtype=np.float32):
        image_x, image_y = self.image_x, self.image_y
        cx, cy = image_x / 2, image_y / 2
        image = np.zeros((image_y, image_x), dtype=np.float32)
        iterator = zip(self._x_list, self._y_list, self._intensity_list)
        for x, y, i in iterator:
            if self.diff_intensity_reduction is not False:
                dr = np.hypot(x - cx, y - cy)
                i = self._get_diff_intensity_reduction(dr, i)
            image[y, x] = i
        disk = morphology.disk(self.disk_r, dtype=dtype)
        image = convolve2d(image, disk, mode="same")
        if self.rotation != 0:
            image = rotate(image, self.rotation, reshape=False)
        if self.blur != 0:
            image = gaussian_filter(image, self.blur)
        if self._background_lorentz_width is not False:
            image += self._get_background_lorentz()
        if self.intensity_noise is not False:
            noise = np.random.random((image_y, image_x)) * self.intensity_noise
            image += noise
        return image 
Example #6
Source File: segmenter.py    From msaf with MIT License
def pick_peaks(nc, L=16, offset_denom=0.1):
    """Obtain peaks from a novelty curve using an adaptive threshold."""
    offset = nc.mean() * float(offset_denom)
    th = filters.median_filter(nc, size=L) + offset
    #th = filters.gaussian_filter(nc, sigma=L/2., mode="nearest") + offset
    #import pylab as plt
    #plt.plot(nc)
    #plt.plot(th)
    #plt.show()
    # th = np.ones(nc.shape[0]) * nc.mean() - 0.08
    peaks = []
    for i in range(1, nc.shape[0] - 1):
        # is it a peak?
        if nc[i - 1] < nc[i] and nc[i] > nc[i + 1]:
            # is it above the threshold?
            if nc[i] > th[i]:
                peaks.append(i)
    return peaks 
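An illustrative call to pick_peaks on a synthetic novelty curve (the curve itself is a made-up example, not project data); the detected indices should fall near the local maxima of the sinusoid:

import numpy as np

nc = np.abs(np.sin(np.linspace(0, 10 * np.pi, 500))) + 0.05 * np.random.rand(500)
peak_indices = pick_peaks(nc, L=16, offset_denom=0.1)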
Example #7
Source File: LucasKanade.py    From pyoptflow with GNU Affero General Public License v3.0
def lucas_kanade(stem, pat: str, kernel: int = 5, Nfilter: int = 7):
    flist = getimgfiles(stem, pat)

    # %% priming read
    im1 = imageio.imread(flist[0], as_gray=True)

    # %% evaluate the first frame's POI
    X = im1.shape[1] // 16
    Y = im1.shape[0] // 16
    poi = getPOI(X, Y, kernel)
    # % get the weights
    W = gaussianWeight(kernel)
    # %% loop over all images in directory
    for i in range(1, len(flist)):
        im2 = imageio.imread(flist[i], as_gray=True)

        im2 = gaussian_filter(im2, Nfilter)

        V = LucasKanade(im1, im2, kernel, poi, W)

        compareGraphsLK(im1, im2, poi, V)

        im1 = im2.copy() 
Example #8
Source File: segmenter.py    From msaf with MIT License
def pick_peaks(nc, L=16):
    """Obtain peaks from a novelty curve using an adaptive threshold."""
    offset = nc.mean() / 20.

    nc = filters.gaussian_filter1d(nc, sigma=4)  # Smooth out nc

    th = filters.median_filter(nc, size=L) + offset
    #th = filters.gaussian_filter(nc, sigma=L/2., mode="nearest") + offset

    peaks = []
    for i in range(1, nc.shape[0] - 1):
        # is it a peak?
        if nc[i - 1] < nc[i] and nc[i] > nc[i + 1]:
            # is it above the threshold?
            if nc[i] > th[i]:
                peaks.append(i)
    #plt.plot(nc)
    #plt.plot(th)
    #for peak in peaks:
        #plt.axvline(peak)
    #plt.show()

    return peaks 
Example #9
Source File: baseline_utils.py    From pysaliency with MIT License
def _log_density(self, stimulus):
        shape = stimulus.shape[0], stimulus.shape[1]

        stimulus_id = get_image_hash(stimulus)
        stimulus_index = self.stimuli.stimulus_ids.index(stimulus_id)

        #fixations = self.fixations[self.fixations.n == stimulus_index]
        inds = self.fixations.n != stimulus_index

        ZZ = np.zeros(shape)

        _fixations = np.array([self.ys[inds]*shape[0], self.xs[inds]*shape[1]]).T
        fill_fixation_map(ZZ, _fixations)
        ZZ = gaussian_filter(ZZ, [self.bandwidth*shape[0], self.bandwidth*shape[1]])
        ZZ *= (1-self.eps)
        ZZ += self.eps * 1.0/(shape[0]*shape[1])
        ZZ = np.log(ZZ)

        ZZ -= logsumexp(ZZ)
        #ZZ -= np.log(np.exp(ZZ).sum())

        return ZZ 
Example #10
Source File: feature_vis.py    From facies_net with GNU Lesser General Public License v3.0
def smoothing(im, mode = None):
    # utility function to smooth an image
    if mode is None:
        return im
    elif mode == 'L2':
        # L2 norm
        return im / (np.sqrt(np.mean(np.square(im))) + K.epsilon())
    elif mode == 'GaussianBlur':
        # Gaussian Blurring with width of 3
        return filters.gaussian_filter(im,1/8)
    elif mode == 'Decay':
        # Decay regularization
        decay = 0.98
        return decay * im
    elif mode == 'Clip_weak':
        # Clip weak pixel regularization
        percentile = 1
        threshold = np.percentile(np.abs(im),percentile)
        im[np.where(np.abs(im) < threshold)] = 0
        return im
    else:
        # print error message
        print('Unknown smoothing parameter. No smoothing implemented.')
        return im 
Example #11
Source File: segmentation.py    From kraken with Apache License 2.0
def denoising_hysteresis_thresh(im, low, high, sigma):
    im = gaussian_filter(im, sigma)
    return apply_hysteresis_threshold(im, low, high) 
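A usage sketch for denoising_hysteresis_thresh (the input array and threshold values are assumptions, and apply_hysteresis_threshold is assumed to come from skimage.filters in the original module): after Gaussian denoising, pixels above high are kept, together with pixels above low that are connected to them.

import numpy as np

noisy = np.random.rand(256, 256)
mask = denoising_hysteresis_thresh(noisy, low=0.4, high=0.6, sigma=1.5)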
Example #12
Source File: baselineRes18Conc.py    From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
        """Elastic deformation of images as described in [Simard2003]_ (with modifications).
        .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
             Convolutional Neural Networks applied to Visual Document Analysis", in
             Proc. of the International Conference on Document Analysis and
             Recognition, 2003.
         Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
         From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
        """
        # affine and deformation must be slice by slice and fixed for slices
        if random_state is None:
            random_state = np.random.RandomState(None)
        shape = image.shape  # image is concatenated: the first channel [:,:,:,0] is the image, the second
        # channel [:,:,:,1] is the mask. Both channels undergo the same transformation.
        shape_size = shape[:-1]  # z y x
        # Random affine
        shape_size_aff = shape[1:-1]  # y x
        center_square = np.float32(shape_size_aff) // 2
        square_size = min(shape_size_aff) // 3
        pts1 = np.float32([center_square + square_size,
                           [center_square[0] + square_size, center_square[1] - square_size],
                           center_square - square_size])
        pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
        M = cv2.getAffineTransform(pts1, pts2)
        new_img = np.zeros_like(image)
        for i in range(shape[0]):
            new_img[i, :, :, 0] = cv2.warpAffine(image[i, :, :, 0], M, shape_size_aff[::-1],
                                                 borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            for j in range(1, 10):
                new_img[i, :, :, j] = cv2.warpAffine(image[i, :, :, j], M, shape_size_aff[::-1],
                                                     flags=cv2.INTER_NEAREST,
                                                     borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)
        dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
        indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
        new_img2 = np.zeros_like(image)
        for i in range(shape[0]):
            new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1, mode='constant').reshape(shape[1:-1])
            for j in range(1, 10):
                new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0, mode='constant').reshape(shape[1:-1])
        return np.array(new_img2), new_img 
Example #13
Source File: baseline.py    From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
        """Elastic deformation of images as described in [Simard2003]_ (with modifications).
        .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
             Convolutional Neural Networks applied to Visual Document Analysis", in
             Proc. of the International Conference on Document Analysis and
             Recognition, 2003.
         Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
         From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
        """
        # affine and deformation must be slice by slice and fixed for slices
        if random_state is None:
            random_state = np.random.RandomState(None)
        shape = image.shape  # image is concatenated: the first channel [:,:,:,0] is the image, the second
        # channel [:,:,:,1] is the mask. Both channels undergo the same transformation.
        shape_size = shape[:-1]  # z y x
        # Random affine
        shape_size_aff = shape[1:-1]  # y x
        center_square = np.float32(shape_size_aff) // 2
        square_size = min(shape_size_aff) // 3
        pts1 = np.float32([center_square + square_size,
                           [center_square[0] + square_size, center_square[1] - square_size],
                           center_square - square_size])
        pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
        M = cv2.getAffineTransform(pts1, pts2)
        new_img = np.zeros_like(image)
        for i in range(shape[0]):
            new_img[i, :, :, 0] = cv2.warpAffine(image[i, :, :, 0], M, shape_size_aff[::-1],
                                                 borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            for j in range(1, 10):
                new_img[i, :, :, j] = cv2.warpAffine(image[i, :, :, j], M, shape_size_aff[::-1],
                                                     flags=cv2.INTER_NEAREST,
                                                     borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)
        dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
        indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
        new_img2 = np.zeros_like(image)
        for i in range(shape[0]):
            new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1, mode='constant').reshape(shape[1:-1])
            for j in range(1, 10):
                new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0, mode='constant').reshape(shape[1:-1])
        return np.array(new_img2), new_img 
Example #14
Source File: analysis.py    From ChainConsumer with MIT License
def _get_smoothed_histogram(self, chain, parameter):
        data = chain.get_data(parameter)
        smooth = chain.config["smooth"]
        if chain.grid:
            bins = get_grid_bins(data)
        else:
            bins = chain.config["bins"]
            bins, smooth = get_smoothed_bins(smooth, bins, data, chain.weights)

        hist, edges = np.histogram(data, bins=bins, density=True, weights=chain.weights)
        if chain.power is not None:
            hist = hist ** chain.power
        edge_centers = 0.5 * (edges[1:] + edges[:-1])
        xs = np.linspace(edge_centers[0], edge_centers[-1], 10000)

        if smooth:
            hist = gaussian_filter(hist, smooth, mode=self.parent._gauss_mode)
        kde = chain.config["kde"]
        if kde:
            kde_xs = np.linspace(edge_centers[0], edge_centers[-1], max(200, int(bins.max())))
            ys = MegKDE(data, chain.weights, factor=kde).evaluate(kde_xs)
            area = simps(ys, x=kde_xs)
            ys = ys / area
            ys = interp1d(kde_xs, ys, kind="linear")(xs)
        else:
            ys = interp1d(edge_centers, hist, kind="linear")(xs)
        cs = ys.cumsum()
        cs /= cs.max()
        return xs, ys, cs 
Example #15
Source File: dummy_data.py    From pyxem with GNU General Public License v3.0
def get_hot_pixel_signal(lazy=False):
    """Get Diffraction2D signal with a disk in the middle.

    Has 4 pixels with value equal to 50000, to simulate hot pixels.

    Example
    -------
    >>> s = ps.dummy_data.get_hot_pixel_signal()

    Lazy signal

    >>> s_lazy = ps.dummy_data.get_hot_pixel_signal(lazy=True)

    """
    data = mdtd.MakeTestData(size_x=128, size_y=128, default=False, blur=True)
    data.add_disk(64, 64, r=30, intensity=10000)
    s = data.signal
    s.change_dtype("int64")
    s.data += gaussian_filter(s.data, sigma=50)
    s.data[76, 4] = 50000
    s.data[12, 102] = 50000
    s.data[32, 10] = 50000
    s.data[120, 61] = 50000
    if lazy:
        s = LazyDiffraction2D(s)
        s.data = da.from_array(s.data, chunks=(64, 64))
    else:
        s = Diffraction2D(s)
    return s 
Example #16
Source File: lineest.py    From kraken with Apache License 2.0
def measure(self, line):
        h, w = line.shape
        # XXX: this filter is awfully slow
        smoothed = filters.gaussian_filter(line, (h*0.5, h*self.smoothness),
                                           mode='constant')
        smoothed += 0.001*filters.uniform_filter(smoothed, (h*0.5, w),
                                                 mode='constant')
        self.shape = (h, w)
        a = np.argmax(smoothed, axis=0)
        a = filters.gaussian_filter(a, h*self.extra)
        self.center = np.array(a, 'i')
        deltas = np.abs(np.arange(h)[:, np.newaxis]-self.center[np.newaxis, :])
        self.mad = np.mean(deltas[line != 0])
        self.r = int(1+self.range*self.mad) 
Example #17
Source File: sct_maths.py    From spinalcordtoolbox with MIT License
def smooth(data, sigmas):
    """
    Smooth data by convolving Gaussian kernel
    :param data: input 3D numpy array
    :param sigmas: Kernel SD in voxel
    :return:
    """
    assert len(data.shape) == len(sigmas)
    from scipy.ndimage.filters import gaussian_filter
    return gaussian_filter(data.astype(float), sigmas, order=0, truncate=4.0) 
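An illustrative call (the volume shape and sigma values are assumptions): passing one sigma per axis allows anisotropic smoothing, e.g. smoothing less along the slice axis of a 3D volume.

import numpy as np

volume = np.random.rand(64, 64, 32)
smoothed = smooth(volume, sigmas=[3.0, 3.0, 1.0])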
Example #18
Source File: dummy_data.py    From pyxem with GNU General Public License v3.0
def get_dead_pixel_signal(lazy=False):
    """Get Diffraction2D signal with a disk in the middle.

    Has 4 pixels with value equal to 0, to simulate dead pixels.

    Example
    -------
    >>> s = ps.dummy_data.get_dead_pixel_signal()

    Lazy signal

    >>> s_lazy = ps.dummy_data.get_dead_pixel_signal(lazy=True)

    """
    data = mdtd.MakeTestData(size_x=128, size_y=128, default=False, blur=True)
    data.add_disk(64, 64, r=30, intensity=10000)
    s = data.signal
    s.change_dtype("int64")
    s.data += gaussian_filter(s.data, sigma=50)
    s.data[61, 73] = 0
    s.data[46, 53] = 0
    s.data[88, 88] = 0
    s.data[112, 20] = 0
    if lazy:
        s = LazyDiffraction2D(s)
        s.data = da.from_array(s.data, chunks=(64, 64))
    else:
        s = Diffraction2D(s)
    return s 
Example #19
Source File: PredictClassifierEnsemble.py    From kaggle-rsna18 with MIT License
def data_augmentation(image):
    # Input should be ONE image with shape: (L, W, CH)
    options = ["gaussian_smooth", "vertical_flip", "rotate", "zoom", "adjust_gamma"] 
    # Probabilities for each augmentation were arbitrarily assigned 
    which_option = np.random.choice(options)
    if which_option == "vertical_flip":
        image = np.fliplr(image)
    if which_option == "horizontal_flip": 
        image = np.flipud(image) 
    elif which_option == "gaussian_smooth": 
        sigma = np.random.uniform(0.2, 1.0)
        image = gaussian_filter(image, sigma)
    elif which_option == "zoom": 
      # Assumes image is square
        min_crop = int(image.shape[0]*0.85)
        max_crop = int(image.shape[0]*0.95)
        crop_size = np.random.randint(min_crop, max_crop) 
        crop = crop_center(image, crop_size, crop_size)
        if crop.shape[-1] == 1: crop = crop[:,:,0]
        image = scipy.misc.imresize(crop, image.shape) 
    elif which_option == "rotate":
        angle = np.random.uniform(-15, 15)
        image = rotate(image, angle, reshape=False)
    elif which_option == "adjust_gamma": 
        image = image / 255. 
        image = exposure.adjust_gamma(image, np.random.uniform(0.75,1.25))
        image = image * 255. 
    if len(image.shape) == 2: image = np.expand_dims(image, axis=2)
    return image 
Example #20
Source File: baselineDiceCrossEntropy.py    From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
        """Elastic deformation of images as described in [Simard2003]_ (with modifications).
        .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
             Convolutional Neural Networks applied to Visual Document Analysis", in
             Proc. of the International Conference on Document Analysis and
             Recognition, 2003.
         Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
         From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
        """
        # affine and deformation must be slice by slice and fixed for slices
        if random_state is None:
            random_state = np.random.RandomState(None)
        shape = image.shape  # image is concatenated: the first channel [:,:,:,0] is the image, the second
        # channel [:,:,:,1] is the mask. Both channels undergo the same transformation.
        shape_size = shape[:-1]  # z y x
        # Random affine
        shape_size_aff = shape[1:-1]  # y x
        center_square = np.float32(shape_size_aff) // 2
        square_size = min(shape_size_aff) // 3
        pts1 = np.float32([center_square + square_size,
                           [center_square[0] + square_size, center_square[1] - square_size],
                           center_square - square_size])
        pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
        M = cv2.getAffineTransform(pts1, pts2)
        new_img = np.zeros_like(image)
        for i in range(shape[0]):
            new_img[i, :, :, 0] = cv2.warpAffine(image[i, :, :, 0], M, shape_size_aff[::-1],
                                                 borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            for j in range(1, 10):
                new_img[i, :, :, j] = cv2.warpAffine(image[i, :, :, j], M, shape_size_aff[::-1],
                                                     flags=cv2.INTER_NEAREST,
                                                     borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)
        dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
        indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
        new_img2 = np.zeros_like(image)
        for i in range(shape[0]):
            new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1, mode='constant').reshape(shape[1:-1])
            for j in range(1, 10):
                new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0, mode='constant').reshape(shape[1:-1])
        return np.array(new_img2), new_img 
Example #21
Source File: baselineDiceFocalLoss.py    From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
        """Elastic deformation of images as described in [Simard2003]_ (with modifications).
        .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
             Convolutional Neural Networks applied to Visual Document Analysis", in
             Proc. of the International Conference on Document Analysis and
             Recognition, 2003.
         Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
         From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
        """
        # affine and deformation must be slice by slice and fixed for slices
        if random_state is None:
            random_state = np.random.RandomState(None)
        shape = image.shape  # image is concatenated: the first channel [:,:,:,0] is the image, the second
        # channel [:,:,:,1] is the mask. Both channels undergo the same transformation.
        shape_size = shape[:-1]  # z y x
        # Random affine
        shape_size_aff = shape[1:-1]  # y x
        center_square = np.float32(shape_size_aff) // 2
        square_size = min(shape_size_aff) // 3
        pts1 = np.float32([center_square + square_size,
                           [center_square[0] + square_size, center_square[1] - square_size],
                           center_square - square_size])
        pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
        M = cv2.getAffineTransform(pts1, pts2)
        new_img = np.zeros_like(image)
        for i in range(shape[0]):
            new_img[i, :, :, 0] = cv2.warpAffine(image[i, :, :, 0], M, shape_size_aff[::-1],
                                                 borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            for j in range(1, 10):
                new_img[i, :, :, j] = cv2.warpAffine(image[i, :, :, j], M, shape_size_aff[::-1],
                                                     flags=cv2.INTER_NEAREST,
                                                     borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)
        dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
        indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
        new_img2 = np.zeros_like(image)
        for i in range(shape[0]):
            new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1, mode='constant').reshape(shape[1:-1])
            for j in range(1, 10):
                new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0, mode='constant').reshape(shape[1:-1])
        return np.array(new_img2), new_img 
Example #22
Source File: baselineSERes18Conc.py    From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
        """Elastic deformation of images as described in [Simard2003]_ (with modifications).
        .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
             Convolutional Neural Networks applied to Visual Document Analysis", in
             Proc. of the International Conference on Document Analysis and
             Recognition, 2003.
         Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
         From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
        """
        # affine and deformation must be slice by slice and fixed for slices
        if random_state is None:
            random_state = np.random.RandomState(None)
        shape = image.shape  # image is concatenated: the first channel [:,:,:,0] is the image, the second
        # channel [:,:,:,1] is the mask. Both channels undergo the same transformation.
        shape_size = shape[:-1]  # z y x
        # Random affine
        shape_size_aff = shape[1:-1]  # y x
        center_square = np.float32(shape_size_aff) // 2
        square_size = min(shape_size_aff) // 3
        pts1 = np.float32([center_square + square_size,
                           [center_square[0] + square_size, center_square[1] - square_size],
                           center_square - square_size])
        pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
        M = cv2.getAffineTransform(pts1, pts2)
        new_img = np.zeros_like(image)
        for i in range(shape[0]):
            new_img[i, :, :, 0] = cv2.warpAffine(image[i, :, :, 0], M, shape_size_aff[::-1],
                                                 borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            for j in range(1, 10):
                new_img[i, :, :, j] = cv2.warpAffine(image[i, :, :, j], M, shape_size_aff[::-1],
                                                     flags=cv2.INTER_NEAREST,
                                                     borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)
        dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
        indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
        new_img2 = np.zeros_like(image)
        for i in range(shape[0]):
            new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1, mode='constant').reshape(shape[1:-1])
            for j in range(1, 10):
                new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0, mode='constant').reshape(shape[1:-1])
        return np.array(new_img2), new_img 
Example #23
Source File: baseline3Pool.py    From AnatomyNet-for-anatomical-segmentation with Apache License 2.0
def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
        """Elastic deformation of images as described in [Simard2003]_ (with modifications).
        .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
             Convolutional Neural Networks applied to Visual Document Analysis", in
             Proc. of the International Conference on Document Analysis and
             Recognition, 2003.
         Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
         From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
        """
        # affine and deformation must be slice by slice and fixed for slices
        if random_state is None:
            random_state = np.random.RandomState(None)
        shape = image.shape  # image is concatenated: the first channel [:,:,:,0] is the image, the second
        # channel [:,:,:,1] is the mask. Both channels undergo the same transformation.
        shape_size = shape[:-1]  # z y x
        # Random affine
        shape_size_aff = shape[1:-1]  # y x
        center_square = np.float32(shape_size_aff) // 2
        square_size = min(shape_size_aff) // 3
        pts1 = np.float32([center_square + square_size,
                           [center_square[0] + square_size, center_square[1] - square_size],
                           center_square - square_size])
        pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
        M = cv2.getAffineTransform(pts1, pts2)
        new_img = np.zeros_like(image)
        for i in range(shape[0]):
            new_img[i, :, :, 0] = cv2.warpAffine(image[i, :, :, 0], M, shape_size_aff[::-1],
                                                 borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            for j in range(1, 10):
                new_img[i, :, :, j] = cv2.warpAffine(image[i, :, :, j], M, shape_size_aff[::-1],
                                                     flags=cv2.INTER_NEAREST,
                                                     borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)
        dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
        indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
        new_img2 = np.zeros_like(image)
        for i in range(shape[0]):
            new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1, mode='constant').reshape(shape[1:-1])
            for j in range(1, 10):
                new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0, mode='constant').reshape(shape[1:-1])
        return np.array(new_img2), new_img 
Example #24
Source File: gauss.py    From logo-gen with MIT License
def _gauss_blur(img_batch, sigma, truncate=2):
    return np.array([gaussian_filter(image, (sigma, sigma, 0), truncate=truncate, mode='constant').astype('float32')
                     for image in img_batch]) 
Example #25
Source File: gauss.py    From logo-gen with MIT License
def _gkern(sigma, truncate=2, dim=1):
    """Returns a 1D or 2D Gaussian kernel array."""
    size = truncate * 2 + 1
    if dim == 1:
        delta = np.eye(size)[truncate]
    if dim == 2:
        # create nxn zeros
        delta = np.zeros((size, size))
        # set element at the middle to one, a kronecker delta
        delta[truncate, truncate] = 1
    # gaussian-smooth the dirac, resulting in a gaussian filter mask
    return gaussian_filter(delta, sigma, truncate=truncate, mode='constant').astype('float32') 
Example #26
Source File: gauss.py    From logo-gen with MIT License
def gauss_kernel(sigma, eps, truncate):
    # Adaptive kernel size based on sigma,
    # for fixed kernel size, hardcode N
    # truncate limits kernel size as in scipy's gaussian_filter

    N = np.clip(np.ceil(sigma * np.sqrt(2 * np.log(1 / eps))), 1, truncate)
    x = np.arange(-N, N + 1, 1.0)
    g = np.exp(-x * x / (2 * sigma * sigma))
    g = g / np.sum(np.abs(g))
    return g 
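A rough sanity check, as an illustrative sketch (the signal, sigma, eps, and truncate values are assumptions): convolving with the kernel returned by gauss_kernel should approximately match scipy's 1D Gaussian filtering for the same sigma, up to boundary handling and kernel-size differences.

import numpy as np
from scipy.ndimage import gaussian_filter1d

sigma = 2.0
g = gauss_kernel(sigma, eps=1e-2, truncate=10)
signal = np.random.rand(100).astype("float32")
smoothed = np.convolve(signal, g, mode="same")          # zero-padded convolution with the explicit kernel
reference = gaussian_filter1d(signal, sigma, mode="constant")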
Example #27
Source File: input.py    From TensorFlow_DCIGN with MIT License
def apply_gaussian(images, sigma):
  if sigma == 0:
    return images

  res = images.copy()
  for i, image in enumerate(res):
    for channel in range(image.shape[-1]):
      image[:, :, channel] = filters.gaussian_filter(image[:, :, channel], sigma)
  return res 
Example #28
Source File: process.py    From plumo with BSD 3-Clause "New" or "Revised" License
def save_mesh (binary, path):
    binary = mesh.pad(binary, dtype=np.float) 
    binary = gaussian_filter(binary, 2, mode='constant')
    verts, faces = measure.marching_cubes(binary, 0.5)
    Three(path, verts, faces) 
Example #29
Source File: 4_CreateResizedNumpyArrays.py    From kaggle-rsna18 with MIT License
def resize_image(img, size, smooth=None):
  """
  Resizes image to new_length x new_length and pads with black. 
  Only works with grayscale right now. 

  Arguments:
    - smooth (float/None) : sigma value for Gaussian smoothing
  """
  resize_factor = float(size) / np.max(img.shape)
  if resize_factor > 1: 
    # Cubic spline interpolation
    resized_img = zoom(img, resize_factor)
  else:
    # Linear interpolation 
    resized_img = zoom(img, resize_factor, order=1, prefilter=False)
  if smooth is not None: 
    resized_img = gaussian_filter(resized_img, sigma=smooth) 
  l = resized_img.shape[0] ; w = resized_img.shape[1] 
  if l != w: 
    ldiff = (size-l) // 2  # integer division so np.pad receives integer pad widths
    wdiff = (size-w) // 2
    pad_list = [(ldiff, size-l-ldiff), (wdiff, size-w-wdiff)] 
    resized_img = np.pad(resized_img, pad_list, "constant", 
                         constant_values=0)
  assert size == resized_img.shape[0] == resized_img.shape[1]
  return resized_img.astype("uint8") 
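A hypothetical call to resize_image (the input shape, target size, and smoothing sigma are assumptions): a 400 x 300 grayscale image is downscaled so its longest side is 256 pixels, lightly smoothed, and padded to a 256 x 256 square.

import numpy as np

img = (np.random.rand(400, 300) * 255).astype("uint8")
out = resize_image(img, size=256, smooth=1.0)  # out.shape == (256, 256)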
Example #30
Source File: PredictOneClassifier.py    From kaggle-rsna18 with MIT License
def data_augmentation(image):
    # Input should be ONE image with shape: (L, W, CH)
    options = ["gaussian_smooth", "vertical_flip", "rotate", "zoom", "adjust_gamma"] 
    # Probabilities for each augmentation were arbitrarily assigned 
    which_option = np.random.choice(options)
    if which_option == "vertical_flip":
        image = np.fliplr(image)
    if which_option == "horizontal_flip": 
        image = np.flipud(image) 
    elif which_option == "gaussian_smooth": 
        sigma = np.random.uniform(0.2, 1.0)
        image = gaussian_filter(image, sigma)
    elif which_option == "zoom": 
      # Assumes image is square
        min_crop = int(image.shape[0]*0.85)
        max_crop = int(image.shape[0]*0.95)
        crop_size = np.random.randint(min_crop, max_crop) 
        crop = crop_center(image, crop_size, crop_size)
        if crop.shape[-1] == 1: crop = crop[:,:,0]
        image = scipy.misc.imresize(crop, image.shape) 
    elif which_option == "rotate":
        angle = np.random.uniform(-15, 15)
        image = rotate(image, angle, reshape=False)
    elif which_option == "adjust_gamma": 
        image = image / 255. 
        image = exposure.adjust_gamma(image, np.random.uniform(0.75,1.25))
        image = image * 255. 
    if len(image.shape) == 2: image = np.expand_dims(image, axis=2)
    return image