Python scipy.ndimage.filters.uniform_filter() Examples
The following are 30 code examples of scipy.ndimage.filters.uniform_filter().
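For orientation, here is a minimal standalone sketch of the call itself. It is not taken from any of the projects below; the array values and the 3x3 window are illustrative. uniform_filter replaces each element with the unweighted mean of a neighborhood of the given size; recent SciPy releases expose it as scipy.ndimage.uniform_filter, and the scipy.ndimage.filters namespace used in many of the examples is a deprecated alias.

import numpy as np
from scipy.ndimage import uniform_filter  # deprecated alias: scipy.ndimage.filters.uniform_filter

a = np.arange(25, dtype=float).reshape(5, 5)
# Each output element is the mean of a 3x3 window around the corresponding input element;
# boundaries are handled by the default 'reflect' mode.
smoothed = uniform_filter(a, size=3)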
Example #1
Source File: ft2d.py From nussl with MIT License | 6 votes |
def filter_local_maxima_with_std(self, ft2d):
    data = np.abs(np.fft.fftshift(ft2d))
    data /= (np.max(data) + 1e-7)
    data_max = maximum_filter(data, self.neighborhood_size)
    data_mean = uniform_filter(data, self.neighborhood_size)
    data_mean_sq = uniform_filter(data ** 2, self.neighborhood_size)
    data_std = np.sqrt(data_mean_sq - data_mean ** 2) + 1e-7
    maxima = ((data_max - data_mean) / data_std)
    fraction_of_local_max = (data == data_max)
    maxima *= fraction_of_local_max
    maxima = maxima.astype(float)
    maxima /= (np.max(maxima) + 1e-7)
    maxima = np.maximum(maxima, np.fliplr(maxima), np.flipud(maxima))
    maxima = np.fft.ifftshift(maxima)

    background_ft2d = np.multiply(maxima, ft2d)
    foreground_ft2d = np.multiply(1 - maxima, ft2d)
    return background_ft2d, foreground_ft2d
Example #2
Source File: Filter.py From PyRAT with Mozilla Public License 2.0 | 6 votes |
def filter(self, array, *args, **kwargs):
    accuracy = 0.9  # initial classification accuracy. Fixed, doesn't influence the result a lot
    win = [3, 3, 1]
    nc = array.max() + 1  # number of classes
    pxx = np.zeros((nc, nc), dtype='f4') + 1.0 / (self.compatibility + nc - 1.0)  # init compatibility matrix
    np.fill_diagonal(pxx, self.compatibility / (self.compatibility + nc - 1.0))
    pxx /= pxx.max()
    ms = np.zeros(array.shape + (nc, ), dtype='f4') + (1 - accuracy)  # probability of class membership
    for k in range(nc):
        ms[..., k][array == k] = accuracy  # initialisation

    for k in range(self.niter):  # PLR iteration
        mss = sp.ndimage.filters.uniform_filter(ms, win)
        q = np.dot(mss, pxx)
        ms *= q
        ms /= np.sum(ms, axis=2)[..., np.newaxis]
    return np.ndarray.astype(np.argmax(ms, axis=2), dtype=array.dtype)
Example #3
Source File: Filter.py From PyRAT with Mozilla Public License 2.0 | 6 votes |
def filter(self, array, *args, **kwargs):
    win = self.win
    if array.ndim == 3:
        win = [1] + self.win
    if array.ndim == 4:
        win = [1, 1] + self.win
    array[np.isnan(array)] = 0.0
    if np.iscomplexobj(array):
        return sp.ndimage.filters.uniform_filter(array.real, win) + 1j * sp.ndimage.filters.uniform_filter(
            array.imag, win)
    elif self.phase is True:
        tmp = np.exp(1j * array)
        tmp = sp.ndimage.filters.uniform_filter(tmp.real, win) + 1j * sp.ndimage.filters.uniform_filter(tmp.imag, win)
        return np.angle(tmp)
    else:
        return sp.ndimage.filters.uniform_filter(array.real, win)
Example #4
Source File: test_eclipse_depth_calculator.py From platon with GNU General Public License v3.0 | 6 votes |
def test_ktables_unbinned(self):
    profile = Profile()
    profile.set_from_radiative_solution(
        5052, 0.75 * R_sun, 0.03142 * AU, 1.129 * M_jup, 1.115 * R_jup,
        0.983, -1.77, -0.44, -0.56, 0.23)

    xsec_calc = EclipseDepthCalculator(method="xsec")
    ktab_calc = EclipseDepthCalculator(method="ktables")
    xsec_wavelengths, xsec_depths = xsec_calc.compute_depths(
        profile, 0.75 * R_sun, 1.129 * M_jup, 1.115 * R_jup, 5052)

    N = 10
    smoothed_xsec_wavelengths = uniform_filter(xsec_wavelengths, N)[::N]
    smoothed_xsec_depths = uniform_filter(xsec_depths, N)[::N]

    ktab_wavelengths, ktab_depths = ktab_calc.compute_depths(
        profile, 0.75 * R_sun, 1.129 * M_jup, 1.115 * R_jup, 5052)

    rel_diffs = np.abs(ktab_depths - smoothed_xsec_depths[:-1]) / ktab_depths
    self.assertTrue(np.median(rel_diffs) < 0.05)
Example #5
Source File: test_transit_depth_calculator.py From platon with GNU General Public License v3.0 | 6 votes |
def test_k_coeffs_unbinned(self):
    xsec_calc = TransitDepthCalculator(method="xsec")
    ktab_calc = TransitDepthCalculator(method="ktables")
    xsec_wavelengths, xsec_depths = xsec_calc.compute_depths(R_sun, M_jup, R_jup, 1000)

    # Smooth from R=1000 to R=100 to match ktables
    N = 10
    smoothed_xsec_wavelengths = uniform_filter(xsec_wavelengths, N)[::N]
    smoothed_xsec_depths = uniform_filter(xsec_depths, N)[::N]

    ktab_wavelengths, ktab_depths = ktab_calc.compute_depths(R_sun, M_jup, R_jup, 1000)

    diffs = np.abs(ktab_depths - smoothed_xsec_depths[:-1])
    self.assertTrue(np.median(diffs) < 20e-6)
    self.assertTrue(np.percentile(diffs, 95) < 50e-6)
    self.assertTrue(np.max(diffs) < 150e-6)
Example #6
Source File: Denoise.py From PyRAT with Mozilla Public License 2.0 | 6 votes |
def filter(self, array, *args, **kwargs):
    array = np.exp(1j * array)
    win = (7, 7)
    coh = filters.uniform_filter(
        np.abs(filters.uniform_filter(array.real, win) + 1j * filters.uniform_filter(array.imag, win)), win)
    bp = Blocxy(array, (self.bs, self.bs), (self.bs // 2, self.bs // 2))
    bc = Blocxy(coh, (self.bs, self.bs), (self.bs // 2, self.bs // 2))
    for block, cblock in zip(bp.getiterblocks(), bc.getiterblocks()):
        block = np.fft.fft2(block)
        block *= abs(block) ** (1.0 - np.mean(cblock[self.bs // 4:self.bs // 4 * 3, self.bs // 4:self.bs // 4 * 3]))
        block = np.fft.ifft2(block)
        bp.setiterblocks(block)
    output = bp.getresult()
    output = np.angle(output)
    return output
Example #7
Source File: ocrd_anybaseocr_textline.py From ocrd_anybaseocr with Apache License 2.0 | 6 votes |
def compute_gradmaps(self, binary, scale):
    # use gradient filtering to find baselines
    boxmap = psegutils.compute_boxmap(binary, scale, (0.4, 5))
    cleaned = boxmap * binary
    if self.parameter['usegauss']:
        # this uses Gaussians
        grad = gaussian_filter(1.0 * cleaned,
                               (self.parameter['vscale'] * 0.3 * scale,
                                self.parameter['hscale'] * 6 * scale),
                               order=(1, 0))
    else:
        # this uses non-Gaussian oriented filters
        grad = gaussian_filter(1.0 * cleaned,
                               (max(4, self.parameter['vscale'] * 0.3 * scale),
                                self.parameter['hscale'] * scale),
                               order=(1, 0))
        grad = uniform_filter(grad, (self.parameter['vscale'], self.parameter['hscale'] * 6 * scale))
    bottom = ocrolib.norm_max((grad < 0) * (-grad))
    top = ocrolib.norm_max((grad > 0) * grad)
    testseeds = np.zeros(binary.shape, 'i')
    return bottom, top, boxmap
Example #8
Source File: test__smooth.py From dask-image with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_uniform_comprehensions():
    da_func = lambda arr: da_ndf.uniform_filter(arr, 1, origin=0)  # noqa: E731

    np.random.seed(0)

    a = np.random.random((3, 12, 14))
    d = da.from_array(a, chunks=(3, 6, 7))

    l2s = [da_func(d[i]) for i in range(len(d))]
    l2c = [da_func(d[i])[None] for i in range(len(d))]

    dau.assert_eq(np.stack(l2s), da.stack(l2s))
    dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
Example #9
Source File: tools.py From PyRAT with Mozilla Public License 2.0 | 5 votes |
def smooth(array, box, phase=False):
    """
    Imitates IDL's smooth function. Can also (correctly) smooth
    interferometric phases with the phase=True keyword.
    """
    if np.iscomplexobj(array):
        return filters.uniform_filter(array.real, box) + 1j * filters.uniform_filter(array.imag, box)
    elif phase is True:
        return np.angle(smooth(np.exp(1j * array), box))
    else:
        return filters.uniform_filter(array.real, box)
Example #10
Source File: Spectrum.py From PyRAT with Mozilla Public License 2.0 | 5 votes |
def spec_correction(array, alpha=1.00, fix=True, cutoff=0.05, func=(None, None)):
    if array.ndim == 1:
        array = array[np.newaxis, ...]
    corr = np.empty_like(array)
    cent = [None] * len(array)
    for k, arr in enumerate(array):
        spec = arr / np.mean(arr)
        spec = filters.uniform_filter(spec, len(array) // 16, mode='wrap')
        offset = np.argmax(spec)
        spec = np.roll(spec, -offset)
        up_bound = 0
        while up_bound < len(spec) and spec[up_bound] > cutoff * spec[0]:
            up_bound += 1
        lo_bound = -1
        while lo_bound > -len(spec) and spec[lo_bound] > cutoff * spec[0]:
            lo_bound -= 1
        if func[0] == "Hamming" or func[0] is None:
            w = func[1] - (1.0 - func[1]) * np.cos(
                2 * np.pi * np.arange(up_bound - lo_bound) / (up_bound - lo_bound - 1))
        elif func[0] == "Lanczos":
            w = np.sinc(2 * np.arange(up_bound - lo_bound) / (up_bound - lo_bound - 1) - 1)
        if fix is True:
            corr[k, ...] = 1.0 / spec
        else:
            corr[k, ...] = 1.0
        corr[k, up_bound - 1:lo_bound + 1] = 0.0
        corr[k, lo_bound:] *= w[0:-lo_bound]
        corr[k, :up_bound] *= w[-up_bound:]
        corr[k, ...] = np.roll(corr[k, ...], offset)
        corr[k, ...] /= np.mean(corr[k, ...])
        if offset > len(spec) // 2:
            offset -= len(spec)
        cent[k] = (up_bound + lo_bound) // 2 + offset
    return np.squeeze(corr), cent
Example #11
Source File: Filter.py From PyRAT with Mozilla Public License 2.0 | 5 votes |
def filter(self, array, *args, **kwargs):
    win = self.win
    if array.ndim == 3:
        win = [1] + self.win
    if array.ndim == 4:
        win = [1, 1] + self.win
    if np.iscomplexobj(array):
        return filters.median_filter(array.real, size=win) + 1j * filters.median_filter(array.imag, size=win)
    elif self.phase is True:
        tmp = np.exp(1j * array)
        tmp = filters.uniform_filter(tmp.real, win) + 1j * filters.uniform_filter(tmp.imag, win)
        return np.angle(tmp)
    else:
        return filters.median_filter(array.real, size=win)
Example #12
Source File: Denoise.py From PyRAT with Mozilla Public License 2.0 | 5 votes |
def fluc_sub(self, pha, upha, a1, alpha1, a2):
    cum = np.exp(1j * (pha - upha))
    if a1 > 1:
        cum = self.smooth(cum, a1)
    if alpha1 > 1:
        cum = np.angle(self.median(cum, alpha1))
    else:
        cum = np.angle(cum)
    ucum = self.ergo_m(cum)
    upha += ucum
    if a2 > 1:
        upha = filters.uniform_filter(upha, a2)
    return upha
Example #13
Source File: Denoise.py From PyRAT with Mozilla Public License 2.0 | 5 votes |
def filter(self, array, *args, **kwargs):
    res = np.zeros_like(array)
    res = self.fluc_sub(array, res, 21, 3, 11)
    res = self.fluc_sub(array, res, 9, 3, 11)
    res = self.fluc_sub(array, res, 3, 3, 11)
    res = filters.uniform_filter(res, 11)
    qef = np.exp(1j * (array - res))
    feq = self.median(qef, self.win)
    qef = feq * self.strength + qef * (1 - self.strength)
    return np.angle(qef * np.exp(1j * res))
Example #14
Source File: Denoise.py From PyRAT with Mozilla Public License 2.0 | 5 votes |
def filter(self, array, *args, **kwargs):
    win = self.win
    if array.ndim == 3:
        win = [1] + self.win
    if array.ndim == 4:
        win = [1, 1] + self.win
    array[np.isnan(array)] = 0.0
    if np.iscomplexobj(array):
        return filters.uniform_filter(array.real, win) + 1j * filters.uniform_filter(array.imag, win)
    else:
        tmp = np.exp(1j * array)
        tmp = filters.uniform_filter(tmp.real, win) + 1j * filters.uniform_filter(tmp.imag, win)
        return np.angle(tmp)
Example #15
Source File: Parameters.py From PyRAT with Mozilla Public License 2.0 | 5 votes |
def filter(self, array, *args, **kwargs):
    meta = kwargs['meta']
    pol = meta['CH_pol']
    idx_hh = pol.index('HH')
    idx_vv = pol.index('VV')
    idx_xx = pol.index('XX')
    Srr = (array[idx_hh, ...] - array[idx_vv, ...] + 1j * 2.0 * array[idx_xx, ...]) / 2.0
    Sll = (array[idx_vv, ...] - array[idx_hh, ...] + 1j * 2.0 * array[idx_xx, ...]) / 2.0
    a1 = Srr * np.conj(Sll)
    a1 = filters.uniform_filter(a1.real, self.win) + 1j * filters.uniform_filter(a1.imag, self.win)
    return np.angle(a1) / 4
Example #16
Source File: test__smooth.py From dask-image with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_uniform_compare(size, origin):
    s = (100, 110)
    a = np.arange(float(np.prod(s))).reshape(s)
    d = da.from_array(a, chunks=(50, 55))

    dau.assert_eq(
        sp_ndf.uniform_filter(a, size, origin=origin),
        da_ndf.uniform_filter(d, size, origin=origin)
    )
Example #17
Source File: test__smooth.py From dask-image with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_uniform_identity(size, origin):
    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))

    dau.assert_eq(
        d, da_ndf.uniform_filter(d, size, origin=origin)
    )

    dau.assert_eq(
        sp_ndf.uniform_filter(a, size, origin=origin),
        da_ndf.uniform_filter(d, size, origin=origin)
    )
Example #18
Source File: center_normalizer.py From calamari with Apache License 2.0 | 5 votes |
def measure(self, line):
    h, w = line.shape
    smoothed = filters.gaussian_filter(line, (h * 0.5, h * self.smoothness), mode='constant')
    smoothed += 0.001 * filters.uniform_filter(smoothed, (h * 0.5, w), mode='constant')
    a = np.argmax(smoothed, axis=0)
    a = filters.gaussian_filter(a, h * self.extra)
    center = np.array(a, 'i')
    deltas = abs(np.arange(h)[:, np.newaxis] - center[np.newaxis, :])
    mad = np.mean(deltas[line != 0])
    r = int(1 + self.range * mad)
    return center, r
Example #19
Source File: test__smooth.py From dask-image with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_uniform_shape_type():
    size = 1
    origin = 0

    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))

    assert all([(type(s) is int) for s in d.shape])

    d2 = da_ndf.uniform_filter(d, size, origin=origin)

    assert all([(type(s) is int) for s in d2.shape])
Example #20
Source File: test__smooth.py From dask-image with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_uniform_filter_params(err_type, size, origin):
    a = np.arange(140.0).reshape(10, 14)
    d = da.from_array(a, chunks=(5, 7))

    with pytest.raises(err_type):
        da_ndf.uniform_filter(d, size, origin=origin)
Example #21
Source File: sparsedetect.py From suite2p with GNU General Public License v3.0 | 5 votes |
def square_conv2(mov, lx):
    """ convolve in pixels binned movie

    Parameters
    ----------------
    mov : 3D array
        binned movie, size [nbinned x Lyc x Lxc]
    lx : int
        filter size

    Returns
    ----------------
    movt : 3D array
        convolved + binned movie, size [nbinned x Lyc x Lxc]
    """
    if len(mov.shape) < 3:
        mov = mov[np.newaxis, :, :]
    nbinned, Ly, Lx = mov.shape

    movt = np.zeros((nbinned, Ly, Lx), 'float32')
    for t in range(nbinned):
        movt[t] = lx * uniform_filter(mov[t], size=[lx, lx], mode='constant')
    return movt
Example #22
Source File: sparsedetect.py From suite2p with GNU General Public License v3.0 | 5 votes |
def neuropil_subtraction(mov, lx):
    """ subtract low-pass filtered version of binned movie

    low-pass filtered version ~ neuropil
    subtract to help ignore neuropil

    Parameters
    ----------------
    mov : 3D array
        binned movie, size [nbins x Ly x Lx]
    lx : int
        size of filter

    Returns
    ----------------
    mov : 3D array
        binned movie with "neuropil" subtracted, size [nbins x Ly x Lx]
    """
    if len(mov.shape) < 3:
        mov = mov[np.newaxis, :, :]
    nbinned, Ly, Lx = mov.shape

    c1 = uniform_filter(np.ones((Ly, Lx)), size=[lx, lx], mode='constant')
    for j in range(nbinned):
        mov[j] -= uniform_filter(mov[j], size=[lx, lx], mode='constant') / c1
    return mov
Example #23
Source File: mean_high_pass.py From starfish with MIT License | 5 votes |
def _high_pass(
        image: xr.DataArray, size: Number, rescale: bool = False
) -> xr.DataArray:
    """
    Applies a mean high pass filter to an image

    Parameters
    ----------
    image : xr.DataArray
        2-d or 3-d image data
    size : Union[Number, Tuple[Number]]
        width of the kernel
    rescale : bool
        If true scales data by max value, if false clips max values to one

    Returns
    -------
    xr.DataArray :
        Filtered image, same shape as input
    """
    blurred: np.ndarray = uniform_filter(image, size)

    filtered: np.ndarray = image - blurred

    return filtered
Example #24
Source File: no_ref.py From sewar with MIT License | 5 votes |
def d_s(pan, ms, fused, q=1, r=4, ws=7):
    """calculates Spatial Distortion Index (D_S).

    :param pan: high resolution panchromatic image.
    :param ms: low resolution multispectral image.
    :param fused: high resolution fused image.
    :param q: parameter to emphasize large spatial differences (default = 1).
    :param r: ratio of high resolution to low resolution (default=4).
    :param ws: sliding window size (default = 7).

    :returns: float -- D_S.
    """
    pan = pan.astype(np.float64)
    fused = fused.astype(np.float64)

    pan_degraded = uniform_filter(pan.astype(np.float64), size=ws) / (ws ** 2)
    pan_degraded = imresize(pan_degraded, (pan.shape[0] // r, pan.shape[1] // r))

    L = ms.shape[2]

    M1 = np.zeros(L)
    M2 = np.zeros(L)

    for l in range(L):
        M1[l] = uqi(fused[:, :, l], pan)
        M2[l] = uqi(ms[:, :, l], pan_degraded)

    diff = np.abs(M1 - M2) ** q
    return ((1. / L) * (np.sum(diff))) ** (1. / q)
Example #25
Source File: lineest.py From kraken with Apache License 2.0 | 5 votes |
def measure(self, line):
    h, w = line.shape
    # XXX: this filter is awfully slow
    smoothed = filters.gaussian_filter(line, (h * 0.5, h * self.smoothness), mode='constant')
    smoothed += 0.001 * filters.uniform_filter(smoothed, (h * 0.5, w), mode='constant')
    self.shape = (h, w)
    a = np.argmax(smoothed, axis=0)
    a = filters.gaussian_filter(a, h * self.extra)
    self.center = np.array(a, 'i')
    deltas = np.abs(np.arange(h)[:, np.newaxis] - self.center[np.newaxis, :])
    self.mad = np.mean(deltas[line != 0])
    self.r = int(1 + self.range * self.mad)
Example #26
Source File: morph.py From kraken with Apache License 2.0 | 5 votes |
def rb_erosion(image, size, origin=0):
    """Binary erosion using linear filters."""
    output = np.zeros(image.shape, 'f')
    filters.uniform_filter(image, size, output=output, origin=origin,
                           mode='constant', cval=1)
    return np.array(output == 1, 'i')
Example #27
Source File: morph.py From kraken with Apache License 2.0 | 5 votes |
def rb_dilation(image, size, origin=0):
    """Binary dilation using linear filters."""
    output = np.zeros(image.shape, 'f')
    filters.uniform_filter(image, size, output=output, origin=origin,
                           mode='constant', cval=0)
    return np.array(output > 0, 'i')
Example #28
Source File: functions.py From prnu-python with MIT License | 5 votes |
def wiener_adaptive(x: np.ndarray, noise_var: float, **kwargs) -> np.ndarray:
    """
    WaveNoise as from Binghamton toolbox.
    Wiener adaptive filter aimed at extracting the noise component.
    For each input pixel the average variance over neighborhoods of different window sizes is first computed.
    The smallest average variance is taken into account when filtering according to Wiener.
    :param x: 2D matrix
    :param noise_var: Power spectral density of the noise we wish to extract (S)
    :param window_size_list: list of window sizes
    :return: wiener filtered version of input x
    """
    window_size_list = list(kwargs.pop('window_size_list', [3, 5, 7, 9]))

    energy = x ** 2

    avg_win_energy = np.zeros(x.shape + (len(window_size_list),))
    for window_idx, window_size in enumerate(window_size_list):
        avg_win_energy[:, :, window_idx] = filters.uniform_filter(energy, window_size, mode='constant')

    coef_var = threshold(avg_win_energy, noise_var)
    coef_var_min = np.min(coef_var, axis=2)

    x = x * noise_var / (coef_var_min + noise_var)

    return x
Example #29
Source File: test_generic_separable_filters.py From gputools with BSD 3-Clause "New" or "Revised" License | 4 votes |
def test_all():
    print("~" * 40, " maximum filter")
    _test_some(max_filter, spf.maximum_filter, cval=-np.inf)
    print("~" * 40, " minimum filter")
    _test_some(min_filter, spf.minimum_filter, cval=np.inf)
    print("~" * 40, " uniform filter")
    _test_some(uniform_filter, spf.uniform_filter, cval=0.)
Example #30
Source File: spatialscores.py From pysteps with BSD 3-Clause "New" or "Revised" License | 4 votes |
def fss_accum(fss, X_f, X_o):
    """Accumulate forecast-observation pairs to an FSS object.

    Parameters
    -----------
    fss : dict
        The FSS object initialized with
        :py:func:`pysteps.verification.spatialscores.fss_init`.
    X_f : array_like
        Array of shape (m, n) containing the forecast field.
    X_o : array_like
        Array of shape (m, n) containing the observation field.
    """
    if len(X_f.shape) != 2 or len(X_o.shape) != 2 or X_f.shape != X_o.shape:
        message = "X_f and X_o must be two-dimensional arrays"
        message += " having the same shape"
        raise ValueError(message)

    X_f = X_f.copy()
    X_f[~np.isfinite(X_f)] = fss["thr"] - 1
    X_o = X_o.copy()
    X_o[~np.isfinite(X_o)] = fss["thr"] - 1

    # Convert to binary fields with the given intensity threshold
    I_f = (X_f >= fss["thr"]).astype(float)
    I_o = (X_o >= fss["thr"]).astype(float)

    # Compute fractions of pixels above the threshold within a square
    # neighboring area by applying a 2D moving average to the binary fields
    if fss["scale"] > 1:
        S_f = uniform_filter(I_f, size=fss["scale"], mode="constant", cval=0.0)
        S_o = uniform_filter(I_o, size=fss["scale"], mode="constant", cval=0.0)
    else:
        S_f = I_f
        S_o = I_o

    fss["sum_obs_sq"] += np.nansum(S_o ** 2)
    fss["sum_fct_obs"] += np.nansum(S_f * S_o)
    fss["sum_fct_sq"] += np.nansum(S_f ** 2)