Python scipy.fftpack.next_fast_len() Examples
The following are 23
code examples of scipy.fftpack.next_fast_len().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
scipy.fftpack
, or try the search function.
Example #1
Source File: mesh_affine_equ.py From pyscf with Apache License 2.0 | 6 votes |
def __init__(self, **kw):
    """Constructor of an affine, equidistant 3-D mesh.

    Keyword Args:
        ucell : (3, 3) array — unit cell vectors (coordinate space);
            defaults to a 30.0 * identity cell.
        Ecut : float — energy cutoff (Hartree) that parametrizes the
            discretization; defaults to 50.0.
        origin : (3,) array — mesh origin; defaults to zeros.
    """
    from scipy.fftpack import next_fast_len
    self.ucell = kw['ucell'] if 'ucell' in kw else 30.0*np.eye(3) # Not even unit cells vectors are required by default
    self.Ecut = Ecut = kw['Ecut'] if 'Ecut' in kw else 50.0 # 50.0 Hartree by default
    # lengths of the three cell vectors
    luc = np.sqrt(np.einsum('ix,ix->i', self.ucell, self.ucell))
    # grid points per axis: proportional to cell length and sqrt(Ecut),
    # rounded up to an FFT-friendly (5-smooth) size
    self.shape = nn = np.array([next_fast_len( int(np.rint(l * np.sqrt(Ecut)/2))) for l in luc], dtype=int)
    self.size  = np.prod(self.shape)
    gc = self.ucell/(nn)  # This is probable the best for finite systems, for PBC use nn, not (nn-1)
    # volume element of one grid cell (scalar triple product)
    self.dv = np.abs(np.dot(gc[0], np.cross(gc[1], gc[2] )))
    # per-axis coordinate arrays of the mesh nodes
    rr = [np.array([gc[i]*j for j in range(nn[i])]) for i in range(3)]
    self.rr = rr
    self.origin = kw['origin'] if 'origin' in kw else np.zeros(3)
Example #2
Source File: fourier_transform.py From diffsims with GNU General Public License v3.0 | 6 votes |
def fast_fft_len(n):
    """Return the smallest even integer >= ``n`` permitting an efficient FFT.

    Parameters
    ----------
    n : `int`
        Minimum size.

    Returns
    -------
    N : `int`
        Smallest even 5-smooth integer >= ``n``.
    """
    # Iterative equivalent of the recursive search: keep bumping to the
    # next fast length until an even one is found.
    candidate = next_fast_len(n)
    while candidate % 2:
        candidate = next_fast_len(candidate + 1)
    return candidate
Example #3
Source File: windows.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def _fftautocorr(x):
    """Compute the autocorrelation of a real array and crop the result."""
    n_samples = x.shape[-1]
    # zero-pad to a fast length for the full (2N-1)-point linear correlation
    fft_len = fftpack.next_fast_len(2 * n_samples - 1)
    spectrum = np.fft.rfft(x, fft_len, axis=-1)
    # Wiener–Khinchin: autocorrelation = inverse FFT of the power spectrum;
    # keep only the non-negative lags.
    acorr = np.fft.irfft(spectrum * spectrum.conj(), n=fft_len)[:, :n_samples]
    # Or equivalently (but in most cases slower):
    # acorr = np.array([np.convolve(xx, yy[::-1], mode='full')
    #                   for xx, yy in zip(x, x)])[:, n_samples-1:2*n_samples-1]
    return acorr
Example #4
Source File: test_diagnostics.py From numpyro with Apache License 2.0 | 5 votes |
def test_fft_next_fast_len(target):
    # Parity check: numpyro's internal _fft_next_fast_len must agree with
    # scipy.fftpack.next_fast_len for each `target` size (presumably
    # supplied via a parametrize decorator not visible in this chunk).
    assert _fft_next_fast_len(target) == next_fast_len(target)
Example #5
Source File: noise_module.py From NoisePy with MIT License | 5 votes |
def pws(arr, sampling_rate, power=2, pws_timegate=5.):
    """Phase-weighted stack (Schimmel & Paulssen, 1997) of 2-D time-series data.

    Each sample of the linear stack is weighted by the coherence of the
    instantaneous phases across traces, raised to ``power``.

    :type arr: numpy.ndarray
    :param arr: N x M array of time series data
    :type sampling_rate: float
    :param sampling_rate: sampling rate of time series
    :type power: float
    :param power: exponent for phase stack
    :type pws_timegate: float
    :param pws_timegate: number of seconds to smooth phase stack
    :Returns: Phase weighted stack of time series data
    :rtype: numpy.ndarray
    """
    if arr.ndim == 1:
        return arr
    n_traces, n_samples = arr.shape
    # analytic signal; pad the Hilbert transform to a fast FFT length, then crop
    analytic = hilbert(arr, axis=1, N=next_fast_len(n_samples))[:, :n_samples]
    inst_phase = np.angle(analytic)
    # coherence of instantaneous phases across traces, sharpened by `power`
    coherence = np.abs(np.mean(np.exp(1j * inst_phase), axis=0)) ** power
    # smoothing (disabled in this version)
    #timegate_samples = int(pws_timegate * sampling_rate)
    #coherence = moving_ave(coherence, timegate_samples)
    return np.mean(arr * coherence, axis=0)
Example #6
Source File: noise_module.py From NoisePy with MIT License | 5 votes |
def pws(arr, power=2., sampling_rate=20., pws_timegate=5.):
    """Phase-weighted stack (Schimmel & Paulssen, 1997) of 2-D time-series data.

    :type arr: numpy.ndarray
    :param arr: N x M array of time series data
    :type power: float
    :param power: exponent for phase stack
    :type sampling_rate: float
    :param sampling_rate: sampling rate of time series
    :type pws_timegate: float
    :param pws_timegate: number of seconds to smooth phase stack
    :Returns: Phase weighted stack of time series data
    :rtype: numpy.ndarray
    """
    if arr.ndim == 1:
        return arr
    N, M = arr.shape
    # BUG FIX: scipy.signal.hilbert already returns the analytic signal
    # s(t) + i*H(s(t)); the original computed arr + 1j*hilbert(arr,...),
    # which doubles the real part and corrupts the instantaneous phase.
    analytic = hilbert(arr, axis=1, N=next_fast_len(M))[:, :M]
    phase = np.angle(analytic)
    # BUG FIX: np.mean already divides by N — the original divided by N a
    # second time, and used a hard-coded exponent 2 instead of `power`.
    # This now matches the other pws implementations in this module.
    phase_stack = np.abs(np.mean(np.exp(1j * phase), axis=0)) ** power
    # smoothing of the phase-coherence weights over a time gate
    timegate_samples = int(pws_timegate * sampling_rate)
    phase_stack = runningMean(phase_stack, timegate_samples)
    weighted = np.multiply(arr, phase_stack)
    return np.mean(weighted, axis=0)
Example #7
Source File: noise_module.py From NoisePy with MIT License | 5 votes |
def pws(arr, sampling_rate, power=2, pws_timegate=5.):
    '''
    Phase-weighted stack following Schimmel and Paulssen (1997): each sample
    of the linear stack is weighted by |mean of exp(i*phi_k(t))|**power,
    the coherence of instantaneous phases across traces.
    PARAMETERS:
    ---------------------
    arr: N x M array of time series data (numpy.ndarray)
    sampling_rate: sampling rate of time series arr (int)
    power: exponent for phase stack (int)
    pws_timegate: number of seconds to smooth phase stack (float)
    RETURNS:
    ---------------------
    weighted: Phase weighted stack of time series data (numpy.ndarray)
    '''
    # nothing to stack for a single trace
    if arr.ndim == 1:
        return arr
    ntr, npts = arr.shape
    fast_n = next_fast_len(npts)
    # analytic signal of every trace (cropped back to npts samples)
    analytic = hilbert(arr, axis=1, N=fast_n)[:, :npts]
    phi = np.angle(analytic)
    weights = np.mean(np.exp(1j * phi), axis=0)
    weights = np.abs(weights) ** (power)
    # smoothing (disabled in this version)
    #timegate_samples = int(pws_timegate * sampling_rate)
    #weights = moving_ave(weights, timegate_samples)
    return (arr * weights).mean(axis=0)
Example #8
Source File: noise_module.py From NoisePy with MIT License | 5 votes |
def noise_processing(fft_para, dataS):
    '''
    Performs time-domain and/or frequency-domain normalization as requested.
    In practice we prefer to include the normalization in the cross-correlation
    step by selecting coherency or decon (Prieto et al, 2008, 2009;
    Denolle et al, 2013).
    PARMAETERS:
    ------------------------
    fft_para: dictionary containing all useful variables used for fft and cc
              (keys read here: 'time_norm', 'freq_norm', 'smooth_N')
    dataS: 2D matrix of all segmented noise data
    OUTPUT VARIABLES:
    source_white: 2D matrix of data spectra
    '''
    # load parameters first
    time_norm = fft_para['time_norm']
    freq_norm = fft_para['freq_norm']
    smooth_N = fft_para['smooth_N']
    N = dataS.shape[0]

    # ------to normalize in time or not------
    if time_norm == 'no':
        white = dataS
    elif time_norm == 'one_bit':  # sign normalization
        white = np.sign(dataS)
    elif time_norm == 'rma':  # running mean: normalization over smoothed absolute average
        white = np.zeros(shape=dataS.shape, dtype=dataS.dtype)
        for kkk in range(N):
            white[kkk, :] = dataS[kkk, :] / moving_ave(np.abs(dataS[kkk, :]), smooth_N)
    else:
        # BUG FIX: the original left `white` unbound for any other value,
        # crashing later with a confusing NameError.
        raise ValueError('unsupported time_norm option: %s' % time_norm)

    # -----to whiten or not------
    if freq_norm != 'no':
        source_white = whiten(white, fft_para)  # whiten and return FFT
    else:
        Nfft = int(next_fast_len(int(dataS.shape[1])))
        source_white = scipy.fftpack.fft(white, Nfft, axis=1)  # return FFT

    return source_white
Example #9
Source File: noise_module.py From NoisePy with MIT License | 5 votes |
def pws(arr, sampling_rate, power=2, pws_timegate=5.):
    '''
    Phase-weighted stack on an array of time series, after Schimmel and
    Paulssen (1997). With S(t) = A(t)*exp(i*phi(t)) the analytic signal of
    each trace, the stack is g(t) = mean_j s_j(t) * |mean_k exp(i*phi_k(t))|^v.
    PARAMETERS:
    ---------------------
    arr: N x M array of time series data (numpy.ndarray)
    sampling_rate: sampling rate of time series arr (int)
    power: exponent for phase stack (int)
    pws_timegate: number of seconds to smooth phase stack (float)
    RETURNS:
    ---------------------
    weighted: Phase weighted stack of time series data (numpy.ndarray)
    '''
    if arr.ndim != 1:
        num_traces, num_pts = arr.shape
        # analytic signal via Hilbert transform (fast FFT length, then crop)
        sig = hilbert(arr, axis=1, N=next_fast_len(num_pts))[:, :num_pts]
        # phase coherence across traces, raised to the sharpness exponent
        coh = np.abs(np.exp(1j * np.angle(sig)).mean(axis=0)) ** (power)
        # smoothing (disabled in this version)
        #timegate_samples = int(pws_timegate * sampling_rate)
        #coh = moving_ave(coh, timegate_samples)
        return np.mean(np.multiply(arr, coh), axis=0)
    # a 1-D input is returned unchanged
    return arr
Example #10
Source File: noise_module.py From NoisePy with MIT License | 5 votes |
def pws(arr, sampling_rate, power=2, pws_timegate=5.):
    '''
    Phase-weighted stack (Schimmel and Paulssen, 1997): weights the linear
    stack by the modulus of the mean unit phasor of the instantaneous
    phases, raised to `power`.
    PARAMETERS:
    ---------------------
    arr: N x M array of time series data (numpy.ndarray)
    sampling_rate: sampling rate of time series arr (int)
    power: exponent for phase stack (int)
    pws_timegate: number of seconds to smooth phase stack (float)
    RETURNS:
    ---------------------
    weighted: Phase weighted stack of time series data (numpy.ndarray)
    '''
    # single trace: nothing to stack
    if arr.ndim == 1:
        return arr
    _, npts = arr.shape
    # Hilbert transform padded to a 5-smooth FFT size, cropped afterwards
    analytic_sig = hilbert(arr, axis=1, N=next_fast_len(npts))[:, :npts]
    unit_phasors = np.exp(1j * np.angle(analytic_sig))
    phase_weight = np.abs(unit_phasors.mean(axis=0)) ** (power)
    # smoothing (disabled in this version)
    #timegate_samples = int(pws_timegate * sampling_rate)
    #phase_weight = moving_ave(phase_weight, timegate_samples)
    stacked = np.multiply(arr, phase_weight).mean(axis=0)
    return stacked
Example #11
Source File: noise_module.py From NoisePy with MIT License | 5 votes |
def noise_processing(fft_para, dataS):
    '''
    Performs time-domain and/or frequency-domain normalization as requested.
    In practice we prefer to include the normalization in the cross-correlation
    step by selecting coherency or decon (Prieto et al, 2008, 2009;
    Denolle et al, 2013).
    PARMAETERS:
    ------------------------
    fft_para: dictionary containing all useful variables used for fft and cc
              (keys read here: 'time_norm', 'freq_norm', 'smooth_N')
    dataS: 2D matrix of all segmented noise data
    OUTPUT VARIABLES:
    source_white: 2D matrix of data spectra
    '''
    # load parameters first
    time_norm = fft_para['time_norm']
    freq_norm = fft_para['freq_norm']
    smooth_N = fft_para['smooth_N']
    N = dataS.shape[0]

    # ------to normalize in time or not------
    if time_norm == 'no':
        white = dataS
    elif time_norm == 'one_bit':  # sign normalization
        white = np.sign(dataS)
    elif time_norm == 'rma':  # running mean: normalization over smoothed absolute average
        white = np.zeros(shape=dataS.shape, dtype=dataS.dtype)
        for kkk in range(N):
            white[kkk, :] = dataS[kkk, :] / moving_ave(np.abs(dataS[kkk, :]), smooth_N)
    else:
        # BUG FIX: the original left `white` unbound for any other value,
        # crashing later with a confusing NameError.
        raise ValueError('unsupported time_norm option: %s' % time_norm)

    # -----to whiten or not------
    if freq_norm != 'no':
        source_white = whiten(white, fft_para)  # whiten and return FFT
    else:
        Nfft = int(next_fast_len(int(dataS.shape[1])))
        source_white = scipy.fftpack.fft(white, Nfft, axis=1)  # return FFT

    return source_white
Example #12
Source File: comp_stacking.py From NoisePy with MIT License | 5 votes |
def pws(cc_array, sampling_rate, power=2, pws_timegate=5.):
    '''
    Phase-weighted stack on an array of time series, following Schimmel and
    Paulssen (1997): the linear stack is weighted by the coherence of the
    instantaneous phases across traces, sharpened by `power`.
    PARAMETERS:
    ---------------------
    cc_array: N x M array of time series data (numpy.ndarray)
    sampling_rate: sampling rate of time series arr (int)
    power: exponent for phase stack (int)
    pws_timegate: number of seconds to smooth phase stack (float)
    RETURNS:
    ---------------------
    weighted: Phase weighted stack of time series data (numpy.ndarray)

    Originally written by Tim Clements
    Modified by Chengxin Jiang @Harvard
    '''
    # a single trace cannot be phase-weight stacked
    if cc_array.ndim == 1:
        print('2D matrix is needed for pws')
        return cc_array
    ntraces, npts = cc_array.shape

    # construct analytical signal (fast FFT length, cropped back to npts)
    analytic = hilbert(cc_array, axis=1, N=next_fast_len(npts))[:, :npts]
    inst_phase = np.angle(analytic)
    coherence = np.abs(np.mean(np.exp(1j * inst_phase), axis=0)) ** (power)

    # weighted is the final waveforms
    weighted = cc_array * coherence
    return weighted.mean(axis=0)
Example #13
Source File: transforms.py From spectral_connectivity with GNU General Public License v3.0 | 5 votes |
def _auto_correlation(data, axis=-1):
    """Autocorrelation of `data` along `axis` via the Wiener–Khinchin theorem.

    Note: the result keeps the padded FFT length along `axis` (it is not
    cropped back to the input length).
    """
    window_len = data.shape[axis]
    # full linear correlation needs 2N-1 points; round up to a fast size
    fft_len = next_fast_len(2 * window_len - 1)
    spectrum = fft(data, fft_len, axis=axis)
    power_spectrum = spectrum * spectrum.conj()
    return np.real(ifft(power_spectrum, axis=axis))
Example #14
Source File: transforms.py From spectral_connectivity with GNU General Public License v3.0 | 5 votes |
def n_fft_samples(self):
    # Lazily compute and cache the FFT length: the smallest fast
    # (5-smooth) integer >= the window length in samples.
    cached = self._n_fft_samples
    if cached is None:
        cached = next_fast_len(self.n_time_samples_per_window)
        self._n_fft_samples = cached
    return cached
Example #15
Source File: rigid.py From suite2p with GNU General Public License v3.0 | 5 votes |
def shift_data_subpixel(inputs):
    ''' rigid shift of X by ymax and xmax; allows subpixel shifts
    ** not being used **

    Shifts each frame of X by (ymax, xmax) using the Fourier shift
    theorem: multiply the 2-D FFT by a linear phase ramp and invert.
    inputs is a tuple (X, ymax, xmax, pad_fft).
    '''
    X, ymax, xmax, pad_fft = inputs
    ymax = ymax.flatten()
    xmax = xmax.flatten()
    # promote a single frame to a 1-frame stack
    if X.ndim<3:
        X = X[np.newaxis,:,:]
    nimg, Ly0, Lx0 = X.shape
    # optionally pad to fast FFT sizes before transforming
    if pad_fft:
        X = fft2(X.astype('float32'), (next_fast_len(Ly0), next_fast_len(Lx0)))
    else:
        X = fft2(X.astype('float32'))
    nimg, Ly, Lx = X.shape
    # frequency coordinates (cycles per image), ifftshifted to FFT order
    Ny = fft.ifftshift(np.arange(-np.fix(Ly/2), np.ceil(Ly/2)))
    Nx = fft.ifftshift(np.arange(-np.fix(Lx/2), np.ceil(Lx/2)))
    [Nx,Ny] = np.meshgrid(Nx,Ny)
    Nx = Nx.astype('float32') / Lx
    Ny = Ny.astype('float32') / Ly
    # per-frame phase ramp encoding the (possibly subpixel) shift
    dph = Nx * np.reshape(xmax, (-1,1,1)) + Ny * np.reshape(ymax, (-1,1,1))
    Y = np.real(ifft2(X * np.exp((2j * np.pi) * dph)))
    # crop back to original size (centered) when padding enlarged the FFT
    if Ly0<Ly or Lx0<Lx:
        Lyhalf = int(np.floor(Ly/2))
        Lxhalf = int(np.floor(Lx/2))
        Y = Y[np.ix_(np.arange(0,nimg,1,int),
                     np.arange(-np.fix(Ly0/2), np.ceil(Ly0/2),1,int) + Lyhalf,
                     np.arange(-np.fix(Lx0/2), np.ceil(Lx0/2),1,int) + Lxhalf)]
    return Y
Example #16
Source File: stats_utils.py From arviz with Apache License 2.0 | 5 votes |
def autocov(ary, axis=-1):
    """Compute autocovariance estimates for every lag for the input array.

    Parameters
    ----------
    ary : Numpy array
        An array containing MCMC samples
    axis : int
        Axis along which to compute the autocovariance (may be negative).

    Returns
    -------
    acov: Numpy array same size as the input array
    """
    # BUG FIX: the original used `axis if axis > 0 else len(ary.shape) + axis`,
    # which maps axis=0 to ndim (out of range) and breaks both the FFT calls
    # and the lag-cropping below. Modulo handles all valid axes uniformly.
    axis = axis % ary.ndim
    n = ary.shape[axis]
    # pad to a fast FFT size at least twice n so the circular correlation
    # equals the linear one over the first n lags
    m = next_fast_len(2 * n)

    ary = ary - ary.mean(axis, keepdims=True)

    # added to silence tuple warning for a submodule
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        ifft_ary = np.fft.rfft(ary, n=m, axis=axis)
        ifft_ary *= np.conjugate(ifft_ary)

        # crop to the first n lags along `axis` only
        shape = tuple(
            slice(None) if dim_len != axis else slice(0, n)
            for dim_len, _ in enumerate(ary.shape)
        )
        cov = np.fft.irfft(ifft_ary, n=m, axis=axis)[shape]
        # biased estimator: normalize by n
        cov /= n

    return cov
Example #17
Source File: indexation_utils.py From pyxem with GNU General Public License v3.0 | 5 votes |
def optimal_fft_size(target, real=False):
    """Wrapper around scipy function next_fast_len() for calculating optimal FFT padding.

    scipy.fft was only added in 1.4.0, so we fall back to scipy.fftpack
    if it is not available. The main difference is that next_fast_len()
    does not take a second argument in the older implementation.

    Parameters
    ----------
    target : int
        Length to start searching from. Must be a positive integer.
    real : bool, optional
        True if the FFT involves real input or output, only available
        for scipy > 1.4.0

    Returns
    -------
    int
        Optimal FFT size.
    """
    try:  # pragma: no cover
        # modern scipy: next_fast_len understands the `real` flag
        from scipy.fft import next_fast_len
        return next_fast_len(target, real)
    except ImportError:  # pragma: no cover
        # old scipy: single-argument fallback, `real` is ignored
        from scipy.fftpack import next_fast_len
        return next_fast_len(target)


# Functions used in correlate_library.
Example #18
Source File: noise_module.py From NoisePy with MIT License | 4 votes |
def adaptive_filter(arr, g):
    '''
    Adaptive covariance filter to enhance coherent signals, following
    Nakata et al., 2015 (Appendix B). The filtered signal is
    x1 = ifft(P * x1(w)), where x1(w) is the spectrum and P a filter built
    from the temporal covariance matrix.
    PARAMETERS:
    ----------------------
    arr: numpy.ndarray, 2D traces of daily/hourly cross-correlation functions
    g: a positive number to adjust the filter harshness
    RETURNS:
    ----------------------
    narr: numpy vector, the stacked cross correlation function
    '''
    if arr.ndim == 1:
        return arr
    n_traces, n_pts = arr.shape
    nfft = next_fast_len(n_pts)

    # spectrum of every trace (cropped to n_pts bins)
    spec = scipy.fftpack.fft(arr, axis=1, n=nfft)[:, :n_pts]

    # flattened cross-spectrum matrix: row ii*N+jj holds spec_i * conj(spec_j)
    cspec = np.zeros(shape=(n_traces * n_traces, n_pts), dtype=np.complex64)
    for ii in range(n_traces):
        for jj in range(n_traces):
            cspec[ii * n_traces + jj] = spec[ii] * np.conjugate(spec[jj])

    S1 = np.zeros(n_pts, dtype=np.complex64)  # sum over all pairs
    S2 = np.zeros(n_pts, dtype=np.complex64)  # sum over the diagonal (auto) terms
    for ii in range(n_traces):
        S2 += cspec[ii * n_traces + ii]
        for jj in range(n_traces):
            S1 += cspec[ii * n_traces + jj]

    # filter P: ratio of cross- to auto-power, sharpened by g
    filt = np.power((S1 - S2) / (S2 * (n_traces - 1)), g)

    # apply P in the frequency domain and go back to time
    filtered = np.real(scipy.fftpack.ifft(np.multiply(filt, spec), nfft, axis=1)[:, :n_pts])
    return np.mean(filtered, axis=0)
Example #19
Source File: comp_stacking.py From NoisePy with MIT License | 4 votes |
def adaptive_filter(cc_array, g):
    '''
    Adaptive covariance filter to enhance coherent signals, after
    Nakata et al., 2015 (Appendix B): the filtered signal is
    x1 = ifft(P * x1(w)) where P is constructed from the temporal
    covariance matrix of the traces.
    PARAMETERS:
    ----------------------
    cc_array: numpy.ndarray, 2D traces of daily/hourly cross-correlation functions
    g: a positive number to adjust the filter harshness
    RETURNS:
    ----------------------
    narr: numpy vector, the stacked cross correlation function

    Written by Chengxin Jiang @Harvard (Oct2019)
    '''
    if cc_array.ndim == 1:
        print('2D matrix is needed for adaptive filtering')
        return cc_array
    ntr, npts = cc_array.shape
    nfft = next_fast_len(npts)

    # per-trace spectra, cropped back to npts frequency bins
    spectra = scipy.fftpack.fft(cc_array, axis=1, n=nfft)[:, :npts]

    # cross-spectrum matrix flattened to (ntr**2, npts)
    xspec = np.zeros(shape=(ntr * ntr, npts), dtype=np.complex64)
    for ii in range(ntr):
        for jj in range(ntr):
            xspec[ii * ntr + jj] = spectra[ii] * np.conjugate(spectra[jj])

    S1 = np.zeros(npts, dtype=np.complex64)  # all-pairs sum
    S2 = np.zeros(npts, dtype=np.complex64)  # diagonal (auto-spectra) sum
    for ii in range(ntr):
        S2 += xspec[ii * ntr + ii]
        for jj in range(ntr):
            S1 += xspec[ii * ntr + jj]

    # construct the filter P and sharpen with exponent g
    p = np.power((S1 - S2) / (S2 * (ntr - 1)), g)

    # back to the time domain and stack
    narr = np.real(scipy.fftpack.ifft(np.multiply(p, spectra), nfft, axis=1)[:, :npts])
    return np.mean(narr, axis=0)
Example #20
Source File: noise_module.py From NoisePy with MIT License | 4 votes |
def adaptive_filter(arr, g):
    '''
    Adaptive covariance filter (Nakata et al., 2015, Appendix B) to enhance
    coherent signals: filter the spectra with P built from the temporal
    covariance matrix, then stack.
    PARAMETERS:
    ----------------------
    arr: numpy.ndarray, 2D traces of daily/hourly cross-correlation functions
    g: a positive number to adjust the filter harshness
    RETURNS:
    ----------------------
    narr: numpy vector, the stacked cross correlation function
    '''
    if arr.ndim == 1:
        return arr
    num_tr, num_pts = arr.shape
    fftlen = next_fast_len(num_pts)

    # forward FFT of the whole 2D array, keep num_pts bins
    spec = scipy.fftpack.fft(arr, axis=1, n=fftlen)[:, :num_pts]

    # build the flattened cross-spectrum matrix
    cspec = np.zeros(shape=(num_tr * num_tr, num_pts), dtype=np.complex64)
    for ii in range(num_tr):
        for jj in range(num_tr):
            cspec[ii * num_tr + jj] = spec[ii] * np.conjugate(spec[jj])

    S1 = np.zeros(num_pts, dtype=np.complex64)  # sum of all cross-spectra
    S2 = np.zeros(num_pts, dtype=np.complex64)  # sum of auto-spectra only
    for ii in range(num_tr):
        S2 += cspec[ii * num_tr + ii]
        for jj in range(num_tr):
            S1 += cspec[ii * num_tr + jj]

    # the filter P, sharpened by g
    p = np.power((S1 - S2) / (S2 * (num_tr - 1)), g)

    # inverse FFT of the filtered spectra and stack
    narr = np.real(scipy.fftpack.ifft(np.multiply(p, spec), fftlen, axis=1)[:, :num_pts])
    return np.mean(narr, axis=0)
Example #21
Source File: noise_module.py From NoisePy with MIT License | 4 votes |
def adaptive_filter(arr, g):
    '''
    Adaptive covariance filter following Nakata et al., 2015 (Appendix B).
    The filtered signal is x1 = ifft(P * x1(w)); P is derived from the
    temporal covariance matrix of the input traces.
    PARAMETERS:
    ----------------------
    arr: numpy.ndarray, 2D traces of daily/hourly cross-correlation functions
    g: a positive number to adjust the filter harshness
    RETURNS:
    ----------------------
    narr: numpy vector, the stacked cross correlation function
    '''
    # 1-D input: nothing to filter
    if arr.ndim == 1:
        return arr
    nrow, ncol = arr.shape
    padded = next_fast_len(ncol)

    # spectra of all traces, cropped to ncol bins
    spec = scipy.fftpack.fft(arr, axis=1, n=padded)[:, :ncol]

    # cross-spectrum matrix (row index = ii*nrow + jj)
    cspec = np.zeros(shape=(nrow * nrow, ncol), dtype=np.complex64)
    for ii in range(nrow):
        for jj in range(nrow):
            cspec[ii * nrow + jj] = spec[ii] * np.conjugate(spec[jj])

    S1 = np.zeros(ncol, dtype=np.complex64)  # total of all pair products
    S2 = np.zeros(ncol, dtype=np.complex64)  # total of the diagonal terms
    for ii in range(nrow):
        S2 += cspec[ii * nrow + ii]
        for jj in range(nrow):
            S1 += cspec[ii * nrow + jj]

    # filter P: off-diagonal energy relative to auto-spectra, exponent g
    p = np.power((S1 - S2) / (S2 * (nrow - 1)), g)

    # apply the filter, invert, and stack
    narr = np.real(scipy.fftpack.ifft(np.multiply(p, spec), padded, axis=1)[:, :ncol])
    return np.mean(narr, axis=0)
Example #22
Source File: noise_module.py From NoisePy with MIT License | 4 votes |
def noise_processing(fft_para, dataS, flag):
    '''
    perform time domain and frequency normalization according to user's need. note that
    this step is not recommended if deconv or coherency method is selected for
    calculating cross-correlation functions.

    fft_para: dictionary containing all useful variables used for fft
              (keys read here: 'time_norm', 'to_whiten', 'smooth_N')
    dataS: data matrix containing all segmented noise data
    flag: boolen variable to output intermediate variables or not
    '''
    # load parameters first
    time_norm = fft_para['time_norm']
    to_whiten = fft_para['to_whiten']
    smooth_N = fft_para['smooth_N']
    N = dataS.shape[0]

    # ------to normalize in time or not------
    if time_norm:
        t0 = time.time()
        if time_norm == 'one_bit':
            white = np.sign(dataS)
        elif time_norm == 'running_mean':
            # --------convert to 1D array for smoothing in time-domain---------
            white = np.zeros(shape=dataS.shape, dtype=dataS.dtype)
            for kkk in range(N):
                white[kkk, :] = dataS[kkk, :] / moving_ave(np.abs(dataS[kkk, :]), smooth_N)
        else:
            # BUG FIX: any other truthy value left `white` unbound and
            # crashed later with a NameError; fail fast with a clear message.
            raise ValueError('unsupported time_norm option: %s' % time_norm)
        t1 = time.time()
        if flag:
            print("temporal normalization takes %f s"%(t1-t0))
    else:
        white = dataS

    # -----to whiten or not------
    if to_whiten:
        t0 = time.time()
        source_white = whiten(white, fft_para)
        t1 = time.time()
        if flag:
            print("spectral whitening takes %f s"%(t1-t0))
    else:
        Nfft = int(next_fast_len(int(dataS.shape[1])))
        source_white = scipy.fftpack.fft(white, Nfft, axis=1)

    return source_white
Example #23
Source File: rigid.py From suite2p with GNU General Public License v3.0 | 4 votes |
def phasecorr_reference(refImg0, ops):
    """ computes masks and fft'ed reference image for phasecorr

    Parameters
    ----------
    refImg0 : 2D array, int16
        reference image
    ops : dictionary
        'smooth_sigma'
        (if ```ops['1Preg']```, need 'spatial_taper', 'spatial_hp', 'pre_smooth')

    Returns
    -------
    maskMul : 2D array
        mask that is multiplied to spatially taper
    maskOffset : 2D array
        shifts in x from cfRefImg to data for each frame
    cfRefImg : 2D array, complex64
        reference image fft'ed and complex conjugate and multiplied by gaussian
        filter in the fft domain with standard deviation 'smooth_sigma'
    """
    refImg = refImg0.copy()
    if '1Preg' in ops and ops['1Preg']:
        maskSlope = ops['spatial_taper'] # slope of taper mask at the edges
    else:
        maskSlope = 3 * ops['smooth_sigma'] # slope of taper mask at the edges
    Ly,Lx = refImg.shape
    # spatial taper mask applied to frames before correlation
    maskMul = utils.spatial_taper(maskSlope, Ly, Lx)

    # NOTE(review): unlike the guarded check above, this accesses
    # ops['1Preg'] directly and would KeyError if the key is absent —
    # confirm callers always populate it.
    if ops['1Preg']:
        refImg = utils.one_photon_preprocess(refImg[np.newaxis,:,:], ops).squeeze()
    # offset that compensates the taper's attenuation toward the edges
    maskOffset = refImg.mean() * (1. - maskMul);

    # reference image in fourier domain
    if 'pad_fft' in ops and ops['pad_fft']:
        # pad to fast FFT sizes
        cfRefImg   = np.conj(fft2(refImg, (next_fast_len(Ly), next_fast_len(Lx))))
    else:
        cfRefImg   = np.conj(fft2(refImg))

    # normalize spectrum magnitude (phase correlation); 1e-5 avoids /0
    absRef     = np.absolute(cfRefImg)
    cfRefImg   = cfRefImg / (1e-5 + absRef)

    # gaussian filter in space
    fhg = utils.gaussian_fft(ops['smooth_sigma'], cfRefImg.shape[0], cfRefImg.shape[1])
    cfRefImg *= fhg

    maskMul = maskMul.astype('float32')
    maskOffset = maskOffset.astype('float32')
    cfRefImg = cfRefImg.astype('complex64')
    # add a leading frame axis so downstream code can broadcast over frames
    cfRefImg = np.reshape(cfRefImg, (1, cfRefImg.shape[0], cfRefImg.shape[1]))
    return maskMul, maskOffset, cfRefImg