Python scipy.special.erf() Examples

The following are 30 code examples of scipy.special.erf(), drawn from open-source projects. The source file, project, and license for each example are noted above it. You may also want to check out the other available functions and classes of the scipy.special module.
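Before the project examples, here is a minimal sketch of calling scipy.special.erf directly (nothing project-specific is assumed). It shows the vectorized call, the odd symmetry erf(-x) = -erf(x), and the relation to the standard normal CDF, Phi(x) = 0.5 * (1 + erf(x / sqrt(2))).

import numpy as np
from scipy.special import erf

x = np.linspace(-3, 3, 7)

# erf is a vectorized ufunc: it accepts scalars or arrays.
y = erf(x)

# Odd symmetry: erf(-x) = -erf(x).
assert np.allclose(erf(-x), -erf(x))

# Relation to the standard normal CDF: Phi(x) = 0.5 * (1 + erf(x / sqrt(2))).
phi = 0.5 * (1 + erf(x / np.sqrt(2)))
print(y)
print(phi)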
Example #1
Source File: tpe.py    From auptimizer with GNU General Public License v3.0
def lognormal_cdf(x, mu, sigma):
    # wikipedia claims cdf is
    # .5 + .5 * erf((log(x) - mu) / sqrt(2 * sigma^2))
    #
    # the maximum is used to move negative values and 0 up to a point
    # where they do not cause nan or inf, but also don't contribute much
    # to the cdf.
    if len(x) == 0:
        return np.asarray([])
    if x.min() < 0:
        raise ValueError('negative arg to lognormal_cdf', x)
    olderr = np.seterr(divide='ignore')
    try:
        top = np.log(np.maximum(x, EPS)) - mu
        bottom = np.maximum(np.sqrt(2) * sigma, EPS)
        z = old_div(top, bottom)
        return .5 + .5 * erf(z)
    finally:
        np.seterr(**olderr) 
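As a quick sanity check of the formula used above (a sketch, not part of the auptimizer project): the same CDF can be obtained from scipy.stats.lognorm with shape s = sigma and scale = exp(mu).

import numpy as np
from scipy.special import erf
from scipy.stats import lognorm

mu, sigma = 0.3, 0.8
x = np.array([0.1, 0.5, 1.0, 2.0, 5.0])

# Closed form used above: 0.5 + 0.5 * erf((log(x) - mu) / (sqrt(2) * sigma))
cdf_erf = 0.5 + 0.5 * erf((np.log(x) - mu) / (np.sqrt(2) * sigma))

# Same CDF via scipy.stats: lognorm with shape s=sigma and scale=exp(mu)
cdf_scipy = lognorm.cdf(x, s=sigma, scale=np.exp(mu))

assert np.allclose(cdf_erf, cdf_scipy)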
Example #2
Source File: _continuous_distns.py    From lambda-packs with MIT License
def _stats(self, c):
        # Regina C. Elandt, Technometrics 3, 551 (1961)
        # http://www.jstor.org/stable/1266561
        #
        c2 = c*c
        expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)

        mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
        mu2 = c2 + 1 - mu*mu

        g1 = 2. * (mu*mu*mu - c2*mu - expfac)
        g1 /= np.power(mu2, 1.5)

        g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
        g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
        g2 = g2 / mu2**2.0 - 3.

        return mu, mu2, g1, g2 
Example #3
Source File: test_transf.py    From vnpy_crypto with MIT License
def foldnorm_stats(self, c):
    arr, where, inf, sqrt, nan = np.array, np.where, np.inf, np.sqrt, np.nan
    exp = np.exp
    pi = np.pi

    fac = special.erf(c/sqrt(2))
    mu = sqrt(2.0/pi)*exp(-0.5*c*c)+c*fac
    mu2 = c*c + 1 - mu*mu
    c2 = c*c
    g1 = sqrt(2/pi)*exp(-1.5*c2)*(4-pi*exp(c2)*(2*c2+1.0))
    g1 += 2*c*fac*(6*exp(-c2) + 3*sqrt(2*pi)*c*exp(-c2/2.0)*fac + \
                   pi*c*(fac*fac-1))
    g1 /= pi*mu2**1.5

    g2 = c2*c2+6*c2+3+6*(c2+1)*mu*mu - 3*mu**4
    g2 -= 4*exp(-c2/2.0)*mu*(sqrt(2.0/pi)*(c2+2)+c*(c2+3)*exp(c2/2.0)*fac)
    g2 /= mu2**2.0
    g2 -= 3.
    return mu, mu2, g1, g2

#stats.distributions.foldnorm_gen._stats = foldnorm_stats 
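For reference, scipy exposes the same moments through scipy.stats.foldnorm. A short comparison (a sketch, assuming the foldnorm_stats function above is defined in the session); the commented-out monkeypatch above suggests this was written to correct an older scipy implementation, so close agreement is expected only with a reasonably recent scipy.

import numpy as np
from scipy import stats, special

c = 1.7  # an arbitrary shape parameter

# scipy's built-in folded normal: mean, variance, skew, excess kurtosis
m, v, s, k = stats.foldnorm.stats(c, moments='mvsk')

# the hand-written version above (self is unused inside, so None is fine here)
mu, mu2, g1, g2 = foldnorm_stats(None, c)

print(m, v, s, k)
print(mu, mu2, g1, g2)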
Example #4
Source File: test_pulse_simulator.py    From qiskit-aer with Apache License 2.0
def _analytic_gaussian_statevector(self, total_samples, gauss_sigma, omega_a):
        r"""Computes analytic statevector for gaussian drive. Solving the Schrodinger equation in
        the rotating frame leads to the analytic solution `(\cos(x), -i\sin(x))` with
        `x = \frac{1}{2}\sqrt{\frac{\pi}{2}}\sigma\omega_a \mathrm{erf}(\frac{t}{\sqrt{2}\sigma})`.

        Args:
            total_samples (int): length of pulses
            gauss_sigma (float): std dev for the gaussian drive
            omega_a (float): Q0 drive amplitude
        Returns:
            exp_statevector (list): analytic form of the statevector computed for gaussian drive
                (Returned in the rotating frame)
        """
        time = total_samples
        arg = 1 / 2 * np.sqrt(np.pi / 2) * gauss_sigma * omega_a * erf(
            time / np.sqrt(2) / gauss_sigma)
        exp_statevector = [np.cos(arg), -1j * np.sin(arg)]
        return exp_statevector 
Example #5
Source File: test_special.py    From mars with Apache License 2.0
    def testErf(self):
        raw = np.random.rand(10, 8, 5)
        t = tensor(raw, chunk_size=3)

        r = erf(t)
        expect = scipy_erf(raw)

        self.assertEqual(r.shape, raw.shape)
        self.assertEqual(r.dtype, expect.dtype)

        r = r.tiles()
        t = get_tiled(t)

        self.assertEqual(r.nsplits, t.nsplits)
        for c in r.chunks:
            self.assertIsInstance(c.op, TensorErf)
            self.assertEqual(c.index, c.inputs[0].index)
            self.assertEqual(c.shape, c.inputs[0].shape) 
Example #6
Source File: resampling.py    From spectral with MIT License
def erf_local(x):
    # save the sign of x
    sign = 1 if x >= 0 else -1
    x = abs(x)

    # constants
    a1 =  0.254829592
    a2 = -0.284496736
    a3 =  1.421413741
    a4 = -1.453152027
    a5 =  1.061405429
    p  =  0.3275911

    # A&S formula 7.1.26
    t = 1.0/(1.0 + p*x)
    y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)
    return sign*y # erf(-x) = -erf(x) 
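A short accuracy check (a sketch, assuming the erf_local function above and its math import are available in the session) of the Abramowitz & Stegun 7.1.26 approximation against scipy.special.erf; the formula's stated maximum absolute error is about 1.5e-7.

import numpy as np
from scipy.special import erf as scipy_erf

xs = np.linspace(-4.0, 4.0, 2001)

# erf_local is scalar-valued, so evaluate it point by point.
approx = np.array([erf_local(x) for x in xs])
exact = scipy_erf(xs)

max_err = np.max(np.abs(approx - exact))
print("max abs error:", max_err)   # expected to be below ~1.5e-7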
Example #7
Source File: test_erf.py    From fastats with MIT License
def test_erf_basic_sanity():
    """
    Taken from literature directory 'error_functions.pdf'
    which is from U. Waterloo, Canada.
    """
    test_data = ((0.0, 0.0000000000),
                 (0.5, 0.5204998778),
                 (1.0, 0.8427007929),
                 (1.5, 0.9661051465),
                 (2.0, 0.9953222650),
                 (2.5, 0.9995930480),
                 (3.0, 0.9999779095),
                 (3.5, 0.9999992569),
                 (4.0, 0.9999999846),
                 (4.5, 0.9999999998))

    x, expected = zip(*test_data)
    output = erf(np.array(x))
    assert np.allclose(expected, output, atol=1.5e-7)  # max error 1.5e-7 
Example #8
Source File: vis_topic.py    From corex_topic with Apache License 2.0 6 votes vote down vote up
def anomalies(log_z, row_label=None, prefix=''):
    from scipy.special import erf

    ns = log_z.shape[0]
    if row_label is None:
        row_label = list(map(str, range(ns)))
    a_score = np.sum(log_z[:, :], axis=1)
    mean, std = np.mean(a_score), np.std(a_score)
    a_score = (a_score - mean) / std
    percentile = 1. / ns
    anomalies = np.where(0.5 * (1 - erf(a_score / np.sqrt(2)) ) < percentile)[0]
    f = safe_open(prefix + '/anomalies.txt', 'w+')
    for i in anomalies:
        f.write(row_label[i] + ', %0.1f\n' % a_score[i])
    f.close()


# Utilities
# IT UTILITIES 
Example #9
Source File: gaussian_fitting.py    From FRETBursts with GNU General Public License v2.0
def gaussian_fit_cdf(s, mu0=0, sigma0=1, return_all=False, **leastsq_kwargs):
    """Gaussian fit of samples s fitting the empirical CDF.
    Additional kwargs are passed to the leastsq() function.
    If return_all=False then return only the fitted (mu,sigma) values
    If return_all=True (or full_output=True is passed to leastsq)
    then the full output of leastsq and the histogram is returned.
    """
    ## Empirical CDF
    ecdf = [np.sort(s), np.arange(0.5, s.size+0.5)*1./s.size]

    ## Analytical Gaussian CDF
    gauss_cdf = lambda x, mu, sigma: 0.5*(1+erf((x-mu)/(np.sqrt(2)*sigma)))

    ## Fitting the empirical CDF
    err_func = lambda p, x, y: y - gauss_cdf(x, p[0], p[1])
    res = leastsq(err_func, x0=[mu0, sigma0], args=(ecdf[0], ecdf[1]),
            **leastsq_kwargs)
    if return_all: return res, ecdf
    return res[0] 
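A minimal, self-contained usage sketch of the same idea (the sample parameters here are illustrative, not from FRETBursts): draw Gaussian samples, build the empirical CDF the same way, and least-squares fit the analytical Gaussian CDF.

import numpy as np
from scipy.special import erf
from scipy.optimize import leastsq

rng = np.random.default_rng(0)
s = rng.normal(loc=2.0, scale=0.5, size=5000)

# Empirical CDF, as in gaussian_fit_cdf above
x_ecdf = np.sort(s)
y_ecdf = np.arange(0.5, s.size + 0.5) / s.size

# Analytical Gaussian CDF and residual function
gauss_cdf = lambda x, mu, sigma: 0.5 * (1 + erf((x - mu) / (np.sqrt(2) * sigma)))
err_func = lambda p, x, y: y - gauss_cdf(x, p[0], p[1])

(mu_fit, sigma_fit), _ = leastsq(err_func, x0=[0.0, 1.0], args=(x_ecdf, y_ecdf))
print(mu_fit, sigma_fit)   # should be close to (2.0, 0.5)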
Example #10
Source File: gaussian_fitting.py    From FRETBursts with GNU General Public License v2.0
def gaussian2d_fit(sx, sy, guess=[0.5,1]):
    """2D-Gaussian fit of samples S using a fit to the empirical CDF."""
    assert sx.size == sy.size

    ## Empirical CDF
    ecdfx = [np.sort(sx), np.arange(0.5,sx.size+0.5)*1./sx.size]
    ecdfy = [np.sort(sy), np.arange(0.5,sy.size+0.5)*1./sy.size]

    ## Analytical gaussian CDF
    gauss_cdf = lambda x, mu, sigma: 0.5*(1+erf((x-mu)/(np.sqrt(2)*sigma)))

    ## Fitting the empirical CDF
    fitfunc = lambda p, x: gauss_cdf(x, p[0], p[1])
    errfunc = lambda p, x, y: fitfunc(p, x) - y
    px,v = leastsq(errfunc, x0=guess, args=(ecdfx[0],ecdfx[1]))
    py,v = leastsq(errfunc, x0=guess, args=(ecdfy[0],ecdfy[1]))
    print("2D Gaussian CDF fit", px, py)

    mux, sigmax = px[0], px[1]
    muy, sigmay = py[0], py[1]
    return mux, sigmax, muy, sigmay 
Example #11
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License
def _stats(self, c):
        # Regina C. Elandt, Technometrics 3, 551 (1961)
        # http://www.jstor.org/stable/1266561
        #
        c2 = c*c
        expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)

        mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
        mu2 = c2 + 1 - mu*mu

        g1 = 2. * (mu*mu*mu - c2*mu - expfac)
        g1 /= np.power(mu2, 1.5)

        g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
        g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
        g2 = g2 / mu2**2.0 - 3.

        return mu, mu2, g1, g2 
Example #12
Source File: test_mpmath.py    From GraphicDesignPatternByPython with MIT License
def test_erf_complex():
    # need to increase mpmath precision for this test
    old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
    try:
        mpmath.mp.dps = 70
        x1, y1 = np.meshgrid(np.linspace(-10, 1, 31), np.linspace(-10, 1, 11))
        x2, y2 = np.meshgrid(np.logspace(-80, .8, 31), np.logspace(-80, .8, 11))
        points = np.r_[x1.ravel(),x2.ravel()] + 1j*np.r_[y1.ravel(), y2.ravel()]

        assert_func_equal(sc.erf, lambda x: complex(mpmath.erf(x)), points,
                          vectorized=False, rtol=1e-13)
        assert_func_equal(sc.erfc, lambda x: complex(mpmath.erfc(x)), points,
                          vectorized=False, rtol=1e-13)
    finally:
        mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec


# ------------------------------------------------------------------------------
# lpmv
# ------------------------------------------------------------------------------ 
Example #13
Source File: gmpe_residuals.py    From gmpe-smtk with GNU Affero General Public License v3.0
def _get_likelihood_values_for(self, gmpe, imt):
        """
        Returns the likelihood values for Total, plus inter- and intra-event
        residuals according to Equation 9 of Scherbaum et al (2004) for the
        given gmpe and the given intensity measure type.
        `gmpe` must be in this object's gmpe(s) list and imt must be defined
        for the given gmpe: these two conditions are not checked here.

        :return: a dict mapping each residual type (string) to the tuple
        (lh, median_lh), where the former is the array of likelihood values
        and the latter is the median of those values
        """

        ret = {}
        for res_type in self.types[gmpe][imt]:
            zvals = np.fabs(self.residuals[gmpe][imt][res_type])
            l_h = 1.0 - erf(zvals / sqrt(2.))
            median_lh = np.nanpercentile(l_h, 50.0)
            ret[res_type] = l_h, median_lh
        return ret 
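For intuition (a sketch, not from gmpe-smtk): 1 - erf(|z|/sqrt(2)) equals erfc(|z|/sqrt(2)), the two-sided tail probability of a standard normal at |z|, so well-behaved normalized residuals yield likelihood values spread roughly uniformly over [0, 1] with a median near 0.5.

import numpy as np
from scipy.special import erf, erfc
from scipy.stats import norm

rng = np.random.default_rng(1)
z = np.fabs(rng.standard_normal(100000))

lh = 1.0 - erf(z / np.sqrt(2.0))

# Equivalent forms: erfc, and twice the upper tail of the standard normal.
assert np.allclose(lh, erfc(z / np.sqrt(2.0)))
assert np.allclose(lh, 2.0 * norm.sf(z))

# For standard-normal residuals, LH is ~uniform on [0, 1]; median close to 0.5.
print(np.nanpercentile(lh, 50.0))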
Example #14
Source File: bandpass_filter.py    From spiketoolkit with MIT License
def _create_filter_kernel(N, sampling_frequency, freq_min, freq_max, freq_wid=1000):
    # Matches ahb's code /matlab/processors/ms_bandpass_filter.m
    # improved ahb, changing tanh to erf, correct -3dB pts  6/14/16
    T = N / sampling_frequency  # total time
    df = 1 / T  # frequency grid
    relwid = 3.0  # relative bottom-end roll-off width param, kills low freqs by factor 1e-5.

    k_inds = np.arange(0, N)
    k_inds = np.where(k_inds <= (N + 1) / 2, k_inds, k_inds - N)

    fgrid = df * k_inds
    absf = np.abs(fgrid)

    val = np.ones(fgrid.shape)
    if freq_min != 0:
        val = val * (1 + special.erf(relwid * (absf - freq_min) / freq_min)) / 2
        val = np.where(np.abs(k_inds) < 0.1, 0, val)  # kill DC part exactly
    if freq_max != 0:
        val = val * (1 - special.erf((absf - freq_max) / freq_wid)) / 2
    val = np.sqrt(val)  # note sqrt of filter func to apply to spectral intensity not ampl
    return val 
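A usage sketch (assuming the _create_filter_kernel function above is available in the session; the trace and frequencies below are illustrative): the returned kernel is defined on the full FFT frequency grid, so it can be applied to a trace by multiplication in the frequency domain.

import numpy as np
from scipy import special

fs = 30000.0                      # sampling frequency in Hz (illustrative value)
n = 4096
t = np.arange(n) / fs
trace = np.sin(2 * np.pi * 1000 * t) + 0.5 * np.sin(2 * np.pi * 50 * t)

kernel = _create_filter_kernel(n, fs, freq_min=300, freq_max=6000)

# Apply in the frequency domain; the kernel matches np.fft.fft's ordering.
filtered = np.real(np.fft.ifft(np.fft.fft(trace) * kernel))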
Example #15
Source File: step_and_impulse.py    From empymod with Apache License 2.0
def ee_xx_step(res, aniso, off, time):
    """VTI-Halfspace step response, xx, inline.

    res   : horizontal resistivity [Ohm.m]
    aniso : anisotropy [-]
    off   : offset [m]
    time  : time(s) [s]
    """
    tau_h = np.sqrt(mu_0*off**2/(res*time))
    t0 = erf(tau_h/2)
    t1 = 2*aniso*erf(tau_h/(2*aniso))
    t2 = tau_h/np.sqrt(np.pi)*np.exp(-tau_h**2/(4*aniso**2))
    Exx = res/(2*np.pi*off**3)*(2*aniso + t0 - t1 + t2)
    return Exx


###############################################################################
# Example 1: Source and receiver at z=0m
# --------------------------------------
#
# Comparison with the analytical solution; source and receiver are put 1 mm
# below the interface, as they would otherwise be regarded as in the air by
# `emmod`. 
Example #16
Source File: transform.py    From empymod with Apache License 2.0
def test_time(res, off, t, signal):
    r"""Time domain analytical half-space solution.
    - Source at x = y = z = 0 m
    - Receiver at y = z = 0 m; x = off
    - Resistivity of halfspace res
    - Times t, t > 0 s
    - Impulse response if signal = 0
    - Switch-on response if signal = 1
    """
    tau = np.sqrt(mu_0*off**2/(res*t))
    fact1 = res/(2*np.pi*off**3)
    fact2 = tau/np.sqrt(np.pi)
    if signal == 0:
        return fact1*tau**3/(4*t*np.sqrt(np.pi))*np.exp(-tau**2/4)
    else:
        resp = fact1*(2 - special.erf(tau/2) + fact2*np.exp(-tau**2/4))
        if signal < 0:
            DC = test_time(res, off, 1000000, 1)
            resp = DC-resp
        return resp


# Time-domain solution 
Example #17
Source File: quadrature_rbf.py    From emukit with Apache License 2.0
def dqK_dx(self, x2: np.ndarray) -> np.ndarray:
        """
        gradient of the kernel mean (integrated in first argument) evaluated at x2
        :param x2: points at which to evaluate, shape (n_point N, input_dim)
        :return: the gradient with shape (input_dim, N)
        """
        lower_bounds = self.integral_bounds.lower_bounds
        upper_bounds = self.integral_bounds.upper_bounds
        exp_lo = np.exp(- self._scaled_vector_diff(x2, lower_bounds) ** 2)
        exp_up = np.exp(- self._scaled_vector_diff(x2, upper_bounds) ** 2)
        erf_lo = erf(self._scaled_vector_diff(lower_bounds, x2))
        erf_up = erf(self._scaled_vector_diff(upper_bounds, x2))

        fraction = ((exp_lo - exp_up) / (self.lengthscale * np.sqrt(np.pi / 2.) * (erf_up - erf_lo))).T

        return self.qK(x2) * fraction 
Example #18
Source File: get_selu_parameters.py    From AmusingPythonCodes with MIT License
def get_selu_parameters(fixed_point_mean=0.0, fixed_point_var=1.0):
    """ Finding the parameters of the SELU activation function. The function returns alpha and lambda for the desired
    fixed point. """
    aa = Symbol('aa')
    ll = Symbol('ll')
    nu = fixed_point_mean
    tau = fixed_point_var
    mean = 0.5 * ll * (nu + np.exp(-nu ** 2 / (2 * tau)) * np.sqrt(2 / np.pi) * np.sqrt(tau) +
                       nu * erf(nu / (np.sqrt(2 * tau))) - aa * erfc(nu / (np.sqrt(2 * tau))) +
                       np.exp(nu + tau / 2) * aa * erfc((nu + tau) / (np.sqrt(2 * tau))))
    var = 0.5 * ll ** 2 * (np.exp(-nu ** 2 / (2 * tau)) * np.sqrt(2 / np.pi * tau) * nu + (nu ** 2 + tau) *
                           (1 + erf(nu / (np.sqrt(2 * tau)))) + aa ** 2 * erfc(nu / (np.sqrt(2 * tau))) -
                           aa ** 2 * 2 * np.exp(nu + tau / 2) * erfc((nu + tau) / (np.sqrt(2 * tau))) +
                           aa ** 2 * np.exp(2 * (nu + tau)) * erfc((nu + 2 * tau) / (np.sqrt(2 * tau)))) - mean ** 2
    eq1 = mean - nu
    eq2 = var - tau
    res = nsolve((eq2, eq1), (aa, ll), (1.67, 1.05))
    return float(res[0]), float(res[1]) 
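A brief usage note (a sketch, assuming the function above and its sympy/numpy imports are available in the session): for the default fixed point (mean 0, variance 1) the solver should return values close to the published SELU constants, alpha ≈ 1.6733 and lambda ≈ 1.0507.

alpha, lam = get_selu_parameters(fixed_point_mean=0.0, fixed_point_var=1.0)
print(alpha, lam)   # expected to be approximately 1.6733 and 1.0507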
Example #19
Source File: _continuous_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _stats(self, c):
        # Regina C. Elandt, Technometrics 3, 551 (1961)
        # http://www.jstor.org/stable/1266561
        #
        c2 = c*c
        expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)

        mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
        mu2 = c2 + 1 - mu*mu

        g1 = 2. * (mu*mu*mu - c2*mu - expfac)
        g1 /= np.power(mu2, 1.5)

        g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
        g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
        g2 = g2 / mu2**2.0 - 3.

        return mu, mu2, g1, g2 
Example #20
Source File: UI_corriente.py    From pychemqt with GNU General Public License v3.0
def aceptar(self):
        if self.standard.currentIndex() < 6:
            d = self.estandares
        else:
            # TODO:
            pass
        if self.modelo.currentIndex() == 0:
            funcion = lambda p, d: 1.-exp(-(d/p[0]/1000.)**p[1])
            parametros = [i.value for i in self.entries["Rosin Rammler Sperling"]]
        elif self.modelo.currentIndex() == 1:
            funcion = lambda p, d: (d/p[0]/1000.)**p[1]
            parametros = [i.value for i in self.entries["Gates Gaudin Schumann"]]
        elif self.modelo.currentIndex() == 2:
            funcion = lambda p, d: 1-(1-d/p[0]/1000.)**p[1]
            parametros = [i.value for i in self.entries["Broadbent Callcott"]]
        elif self.modelo.currentIndex() == 3:
            funcion = lambda p, d: 1.-exp(-(d/p[0]/1000.)**p[1])/(1-exp(-1.))
            parametros = [i.value for i in self.entries["Gaudin Meloy"]]
        elif self.modelo.currentIndex() == 4:
            funcion = lambda p, d: erf(log(d/p[0]/1000.)/p[1])
            parametros = [i.value for i in self.entries["Lognormal"]]
        elif self.modelo.currentIndex() == 5:
            funcion = lambda p, d: 1-(1-d/(p[0]/1000.)**p[1])**p[2]
            parametros = [i.value for i in self.entries["Harris"]]

        diametros = [unidades.Length(x) for x in d]
        acumulado = [0]+[funcion(parametros, x) for x in d]
        if acumulado[-1] < 1.:
            acumulado[-1] = 1.
        diferencia = [acumulado[i+1]-acumulado[i] for i in range(len(d))]
        self.matriz = [[di.config("ParticleDiameter"), diff] for di, diff in
                       zip(diametros, diferencia)]
        self.accept() 
Example #21
Source File: kernels.py    From HpBandSter with BSD 3-Clause "New" or "Revised" License
def _compute_weights(self):
		if not self.fix_boundary:
			return(1.)

		weights = np.zeros(self.data.shape[0])
		for i,d in enumerate(self.data):
			weights[i] = 2./(erf((1-d)/(np.sqrt(2)*self.bw)) + erf(d/(np.sqrt(2)*self.bw)))
		
		return(weights[:,None]) 
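For context (a sketch, not from HpBandSter): each weight is the reciprocal of the probability mass that a Gaussian kernel N(d, bw^2) places inside [0, 1], which is what renormalizes kernels near the boundary. A quick numerical check with scipy.stats.norm:

import numpy as np
from scipy.special import erf
from scipy.stats import norm

d, bw = 0.05, 0.1   # a data point near the lower boundary, illustrative bandwidth

w = 2.0 / (erf((1 - d) / (np.sqrt(2) * bw)) + erf(d / (np.sqrt(2) * bw)))

# Mass of N(d, bw^2) inside [0, 1]:
mass = norm.cdf(1, loc=d, scale=bw) - norm.cdf(0, loc=d, scale=bw)

assert np.isclose(w, 1.0 / mass)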
Example #22
Source File: __init__.py    From fluids with MIT License
def erf(*args, **kwargs):
            from scipy.special import erf
            return erf(*args, **kwargs)


#    from scipy.special import lambertw, ellipe, gammaincc, gamma # fluids
#    from scipy.special import i1, i0, k1, k0, iv # ht
#    from scipy.special import hyp2f1    
#    if erf is None:
#        from scipy.special import erf 
Example #23
Source File: fem_tem.py    From empymod with Apache License 2.0
def hz(t, res, r, m=1.):
    r"""Return equation 4.69a, Ward and Hohmann, 1988.

    Switch-off response (i.e., Hz(t)) of a homogeneous isotropic half-space,
    where the vertical magnetic source and receiver are at the interface.

    Parameters
    ----------
    t : array
        Times (t)
    res : float
        Halfspace resistivity (Ohm.m)
    r : float
        Offset (m)
    m : float, optional
        Magnetic moment, default is 1.

    Returns
    -------
    hz : array
        Vertical magnetic field (A/m)

    """
    theta = np.sqrt(mu_0/(4*res*t))
    theta_r = theta*r

    s = -(9/theta_r+4*theta_r)*np.exp(-theta_r**2)/np.sqrt(np.pi)
    s += erf(theta_r)*(9/(2*theta_r**2)-1)
    s *= m/(4*np.pi*r**3)

    return s


############################################################################### 
Example #24
Source File: fem_tem.py    From empymod with Apache License 2.0
def dhzdt(t, res, r, m=1.):
    r"""Return equation 4.70, Ward and Hohmann, 1988.

    Impulse response (i.e., dHz(t)/dt) of a homogeneous isotropic half-space,
    where the vertical magnetic source and receiver are at the interface.

    Parameters
    ----------
    t : array
        Times (t)
    res : float
        Halfspace resistivity (Ohm.m)
    r : float
        Offset (m)
    m : float, optional
        Magnetic moment, default is 1.

    Returns
    -------
    dhz : array
        Time-derivative of the vertical magnetic field (A/m/s)

    """
    theta = np.sqrt(mu_0/(4*res*t))
    theta_r = theta*r

    s = (9 + 6 * theta_r**2 + 4 * theta_r**4) * np.exp(-theta_r**2)
    s *= -2 * theta_r / np.sqrt(np.pi)
    s += 9 * erf(theta_r)
    s *= -(m*res)/(2*np.pi*mu_0*r**5)

    return s


###############################################################################
# Survey parameters
# ~~~~~~~~~~~~~~~~~ 
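A brief usage sketch for the two half-space responses above (assuming the hz and dhzdt functions from Examples #23 and #24 are defined in the session; the resistivity, offset, and time range below are illustrative): evaluate both responses of a 100 Ohm.m half-space at a 100 m offset.

import numpy as np
from scipy.special import erf
from scipy.constants import mu_0

times = np.logspace(-6, -2, 50)      # times in s
res = 100.0                          # half-space resistivity in Ohm.m
off = 100.0                          # offset in m

hz_step = hz(times, res, off)        # switch-off response, Eq. 4.69a
hz_impulse = dhzdt(times, res, off)  # impulse response, Eq. 4.70

print(hz_step[:3], hz_impulse[:3])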
Example #25
Source File: test_model.py    From empymod with Apache License 2.0
def test_iso_hs(self):
        # 3. Test with isotropic half-space solution, Ward and Hohmann, 1988.
        # => mm with ab=66; Eq. 4.70, Ward and Hohmann, 1988.

        # Survey parameters.
        # time: cut out zero crossing.
        mu_0 = 4e-7*np.pi
        time = np.r_[np.logspace(-7.3, -5.7, 101), np.logspace(-4.3, 0, 101)]
        src = [0, 0, 0, 0, 90]
        rec = [100, 0, 0, 0, 90]
        res = 100.

        # Calculation.
        fhz_num1 = loop(src, rec, 0, [2e14, res], time, xdirect=True, verb=1,
                        epermH=[0, 1], epermV=[0, 1], signal=0)

        # Analytical solution.
        theta = np.sqrt(mu_0/(4*res*time))
        theta_r = theta*rec[0]
        ana_sol1 = (9 + 6 * theta_r**2 + 4 * theta_r**4) * np.exp(-theta_r**2)
        ana_sol1 *= -2 * theta_r / np.sqrt(np.pi)
        ana_sol1 += 9 * erf(theta_r)
        ana_sol1 *= -res/(2*np.pi*mu_0*rec[0]**5)

        # Check.
        assert_allclose(fhz_num1, ana_sol1, rtol=1e-4) 
Example #26
Source File: test_basic.py    From Computable with MIT License
def test_erf(self):
        assert_equal(cephes.erf(0),0.0) 
Example #27
Source File: probls.py    From kusanagi with MIT License 5 votes vote down vote up
def gauss_cdf(z):
    return 0.5*(1.0 + erf(z/np.sqrt(2.0))) 
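A one-line equivalence check (a sketch): this expression is exactly the standard normal CDF, so it should match scipy.stats.norm.cdf.

import numpy as np
from scipy.special import erf
from scipy.stats import norm

z = np.linspace(-5, 5, 11)
assert np.allclose(0.5 * (1.0 + erf(z / np.sqrt(2.0))), norm.cdf(z))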
Example #28
Source File: utility.py    From idr with GNU General Public License v2.0
def py_cdf(x, mu, sigma, lamda):
    norm_x = (x-mu)/sigma
    return 0.5*( (1-lamda)*erf(0.707106781186547461715*norm_x) 
             + lamda*erf(0.707106781186547461715*x) + 1 ) 
Example #29
Source File: models.py    From GSTools with GNU Lesser General Public License v3.0
def spectral_rad_cdf(self, r):
        """Radial spectral cdf."""
        r = np.array(r, dtype=np.double)
        if self.dim == 1:
            return sps.erf(self.len_scale * r / np.sqrt(np.pi))
        if self.dim == 2:
            return 1.0 - np.exp(-((r * self.len_scale) ** 2) / np.pi)
        if self.dim == 3:
            return sps.erf(
                self.len_scale * r / np.sqrt(np.pi)
            ) - 2 * r * self.len_scale / np.pi * np.exp(
                -((r * self.len_scale) ** 2) / np.pi
            )
        return None 
Example #30
Source File: ri_utils.py    From pyxem with GNU General Public License v3.0
def damp_ri_low_q_region_erfc(
    z, scale, offset, s_scale, s_size, s_offset, *args, **kwargs
):
    """Used by hs.map in the ReducedIntensity1D to damp the reduced
    intensity signal in the low q region as a correction to central beam
    effects. The reduced intensity profile is damped by
    (erf(scale * s - offset) + 1) / 2

    Parameters
    ----------
    z : np.array
        A reduced intensity np.array to be transformed.
    scale : float
        A scalar multiplier for s in the error function.
    offset : float
        A scalar offset affecting the error function.
    s_scale : float
        The scattering vector calibration of the reduced intensity array.
    s_size : int
        The size of the reduced intensity signal (in pixels).
    s_offset : float
        The scattering vector offset of the reduced intensity array.
    *args:
        Arguments to be passed to map().
    **kwargs:
        Keyword arguments to be passed to map().
    """

    scattering_axis = s_scale * np.arange(s_size, dtype="float64") + s_offset

    damping_term = (special.erf(scattering_axis * scale - offset) + 1) / 2
    return z * damping_term
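A direct-call sketch (the numbers are illustrative, not from pyxem; it assumes the function above is defined in the session): since the function only needs numpy and scipy.special, it can be exercised outside of hyperspy's map by passing a reduced-intensity array and scalar parameters.

import numpy as np
from scipy import special

s_size = 200
s_scale = 0.01       # scattering-vector step per pixel (illustrative)
s_offset = 0.0
z = np.ones(s_size)  # a dummy reduced-intensity profile

damped = damp_ri_low_q_region_erfc(z, scale=50, offset=1.3,
                                   s_scale=s_scale, s_size=s_size,
                                   s_offset=s_offset)

# The damping factor rises from ~0 at low q toward ~1 at higher q.
print(damped[:5], damped[-5:])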