Python numdifftools.Jacobian() Examples

The following are 30 code examples of numdifftools.Jacobian(), collected from open-source projects. You may also want to check out all available functions/classes of the module numdifftools, or try the search function.
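Before diving into the project examples, here is a minimal, self-contained sketch of the basic API: nd.Jacobian(f) builds a callable that evaluates the numerical Jacobian of f at a point. The toy function below is made up for illustration.

import numpy as np
import numdifftools as nd

# f maps R^3 -> R^2, so its Jacobian at a point is a 2 x 3 matrix
f = lambda x: np.array([x[0] * x[1], np.sin(x[2])])

jac_fun = nd.Jacobian(f)                 # callable that evaluates the Jacobian
J = jac_fun(np.array([1.0, 2.0, 0.5]))

# Analytic Jacobian for comparison: [[x1, x0, 0], [0, 0, cos(x2)]]
print(np.allclose(J, [[2.0, 1.0, 0.0], [0.0, 0.0, np.cos(0.5)]]))   # True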
Example #1
Source File: uv_bayes.py    From hawkeslib with MIT License
def marginal_likelihood(self, t, T=None):
        """
        Calculate log marginal likelihood of the process under given data, using Laplace's approximation.
        This method uses a Gaussian approximation around the currently fit parameters (i.e. expects MAP
        parameters to already have been fit, e.g. through :meth:`fit`).

        :param numpy.array[float] t: Observation timestamps of the process up to time T. 1-d array of
            timestamps, must be sorted in ascending order.
        :param T: (optional) maximum time
        :type T: float or None

        :return: the log marginal likelihood
        :rtype: float
        """
        t, T = self._prep_t_T(t, T)

        f = self._log_posterior(t, T)
        g = self._log_posterior_grad(t, T)

        xopt = np.array(self.get_params())
        # xopt = self._fit_grad_desc(t, T).x
        H = nd.Jacobian(g)(xopt)

        return f(xopt) + 1.5 * np.log(2 * np.pi) - .5 * np.linalg.slogdet(H)[1] 
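Two details of this example are worth spelling out: the Hessian of the log posterior is obtained as the Jacobian of its gradient, and np.linalg.slogdet(H)[1] returns log|det H| in a numerically stable way. A minimal sketch of the same pattern on a toy function (all names below are illustrative):

import numpy as np
import numdifftools as nd

logpost = lambda x: -0.5 * np.sum(x ** 2)   # toy log posterior, mode at 0
grad = nd.Gradient(logpost)                 # numerical gradient of logpost

H = nd.Jacobian(grad)(np.zeros(3))          # Jacobian of the gradient = Hessian
sign, logdet = np.linalg.slogdet(-H)        # -H is approximately the identity
print(np.allclose(H, -np.eye(3)), logdet)   # True, logdet approximately 0.0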
Example #2
Source File: run_benchmark.py    From numdifftools with BSD 3-Clause "New" or "Revised" License
def run_gradient_and_hessian_benchmarks(gradient_funs, hessian_funs,
                                        problem_sizes=(4, 8, 16, 32, 64, 96)):

    symbols = ('-kx', ':k>', ':k<', '--k^', '--kv', '-kp', '-ks',
               'b', '--b', '-b+', 'r', '--r', '-r+')

    results_gradients = compute_gradients(gradient_funs, problem_sizes)
    results_hessians = compute_hessians(hessian_funs, problem_sizes)

    print(results_gradients.shape)

    for i, txt in enumerate(['run times', 'errors']):
        objects = [('Jacobian ' + txt, gradient_funs, results_gradients[..., i].T),
                   ('Hessian ' + txt, hessian_funs, results_hessians[..., i].T)]
        if i == 0:
            plot_runtimes(objects, problem_sizes, symbols)
        else:
            plot_errors(objects, problem_sizes, symbols) 
Example #3
Source File: garch.py    From vnpy_crypto with MIT License
def score(self, params):
        """
        Notes
        -----
        Need to generalize for AR(p) and for a constant.
        Not correct yet.  Returns numerical gradient.  Depends on package
        numdifftools.
        """
        y = self.endog
        ylag = self.exog
        nobs = self.nobs
        diffsumsq = sumofsq(y-np.dot(ylag,params))
        dsdr = 1/nobs * -2 *np.sum(ylag*(y-np.dot(ylag,params))[:,None])+\
                2*params*ylag[0]**2
        sigma2 = 1/nobs*(diffsumsq-ylag[0]**2*(1-params**2))
        gradient = -nobs/(2*sigma2)*dsdr + params/(1-params**2) + \
                1/sigma2*np.sum(ylag*(y-np.dot(ylag, params))[:,None])+\
                .5*sigma2**-2*diffsumsq*dsdr+\
                ylag[0]**2*params/sigma2 +\
                ylag[0]**2*(1-params**2)/(2*sigma2**2)*dsdr
        if self.penalty:
            pass
        j = Jacobian(self.loglike)
        return j(params)
#        return gradient 
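The final lines, Jacobian(self.loglike)(params), are a common fallback: differentiate the scalar log-likelihood numerically rather than trusting the (here unfinished) analytic gradient. A standalone sketch of that idea, with a made-up log-likelihood:

import numpy as np
from numdifftools import Jacobian

loglike = lambda params: -np.sum((params - 1.0) ** 2)   # toy log-likelihood
score = Jacobian(loglike)                               # numerical gradient of a scalar function

# Approximately [2., 2.], i.e. -2 * (params - 1); some numdifftools versions
# return it as a 1 x 2 array, which is why the source above indexes with [-1].
print(score(np.zeros(2)))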
Example #4
Source File: differentiation.py    From threeML with BSD 3-Clause "New" or "Revised" License
def get_jacobian(function, point, minima, maxima):

    wrapper, scaled_deltas, scaled_point, orders_of_magnitude, n_dim = _get_wrapper(
        function, point, minima, maxima
    )

    # Compute the Jacobian matrix at best_fit_values
    jacobian_vector = nd.Jacobian(wrapper, scaled_deltas, method="central")(
        scaled_point
    )

    # Transform it to numpy matrix

    jacobian_vector = np.array(jacobian_vector)

    # Now correct back the Jacobian for the scales

    jacobian_vector /= orders_of_magnitude

    return jacobian_vector[0] 
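The division by orders_of_magnitude at the end is just the chain rule: the wrapper is differentiated with respect to the scaled variable u = x / scale, so df/dx = (df/du) / scale. A self-contained illustration of the same trick (the function and scales below are made up):

import numpy as np
import numdifftools as nd

f = lambda x: np.array([x[0] ** 2 + x[1]])
scales = np.array([1e6, 1e-3])              # rough orders of magnitude of x

wrapper = lambda u: f(u * scales)           # differentiate in the well-scaled u-space
x0 = np.array([2e6, 5e-3])

J = nd.Jacobian(wrapper)(x0 / scales) / scales   # chain rule undoes the scaling
print(np.allclose(J, [[2 * x0[0], 1.0]]))        # True: analytic Jacobian is [2*x0, 1]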
Example #5
Source File: garch.py    From Splunking-Crime with GNU Affero General Public License v3.0
def score(self, params):
        """
        Notes
        -----
        Need to generalize for AR(p) and for a constant.
        Not correct yet.  Returns numerical gradient.  Depends on package
        numdifftools.
        """
        y = self.endog
        ylag = self.exog
        nobs = self.nobs
        diffsumsq = sumofsq(y-np.dot(ylag,params))
        dsdr = 1/nobs * -2 *np.sum(ylag*(y-np.dot(ylag,params))[:,None])+\
                2*params*ylag[0]**2
        sigma2 = 1/nobs*(diffsumsq-ylag[0]**2*(1-params**2))
        gradient = -nobs/(2*sigma2)*dsdr + params/(1-params**2) + \
                1/sigma2*np.sum(ylag*(y-np.dot(ylag, params))[:,None])+\
                .5*sigma2**-2*diffsumsq*dsdr+\
                ylag[0]**2*params/sigma2 +\
                ylag[0]**2*(1-params**2)/(2*sigma2**2)*dsdr
        if self.penalty:
            pass
        j = Jacobian(self.loglike)
        return j(params)
#        return gradient 
Example #6
Source File: mlemodel.py    From vnpy_crypto with MIT License
def hessian(self, params):
        """
        Hessian of arma model.  Currently uses numdifftools
        """
        #return None
        Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
        return Hfun(params)[-1] 
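Note that stepMax comes from a very old numdifftools API and is not accepted by recent releases. Assuming the current interface, a rough equivalent fixes the step size through the step argument instead (the toy score below is illustrative):

import numpy as np
import numdifftools as ndt

score = lambda params: -params              # toy score (gradient) function
Hfun = ndt.Jacobian(score, step=1e-4)       # 'step' plays the role of the old stepMax
print(Hfun(np.array([0.5, 1.5])))           # approximately -I, the toy Hessian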
Example #7
Source File: test.py    From newton_admm with Apache License 2.0
def test_PSD():
    from numdifftools import Jacobian
    x = np.random.randn(6)
    J = Jacobian(lambda x: PSD.P(x))

    assert np.allclose(J(x), PSD.J(x)) 
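This test pattern, checking a hand-derived Jacobian against a numerical one, works for any mapping. Since PSD is specific to newton_admm, here is a self-contained variant with a stand-in function:

import numpy as np
from numdifftools import Jacobian

P = lambda x: np.array([x[0] * x[1], x[1] ** 2, np.exp(x[2])])
P_jac = lambda x: np.array([[x[1], x[0], 0.0],
                            [0.0, 2 * x[1], 0.0],
                            [0.0, 0.0, np.exp(x[2])]])

x = np.random.randn(3)
assert np.allclose(Jacobian(P)(x), P_jac(x))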
Example #8
Source File: vector_calculus.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def curl2d(f, x):
    """Curl of the reconstructed vector field f evaluated at x in 2D"""
    jac = nd.Jacobian(f)(x)
    curl = jac[1, 0] - jac[0, 1]

    return curl 
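A quick sanity check reusing curl2d from above: the rigid-rotation field f(x) = (-x2, x1) has constant curl 2 everywhere.

import numpy as np

f = lambda x: np.array([-x[1], x[0]])        # rigid rotation
print(curl2d(f, np.array([0.3, -1.2])))      # approximately 2.0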
Example #9
Source File: vector_calculus.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def _curl(f, x):
    """Curl of the reconstructed vector field f evaluated at x in 3D"""
    jac = nd.Jacobian(f)(x)
    return np.array([jac[2, 1] - jac[1, 2], jac[0, 2] - jac[2, 0], jac[1, 0] - jac[0, 1]]) 
Example #10
Source File: vector_calculus.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def compute_divergence(f_jac, X, vectorize=True):
    """calculate divergence for many samples by taking the trace of a Jacobian matrix"""
    if vectorize:
        J = f_jac(X)
        div = np.trace(J)
    else:
        div = np.zeros(len(X))
        for i in tqdm(range(len(X)), desc="Calculating divergence"):
            J = f_jac(X[i])
            div[i] = np.trace(J)

    return div 
Example #11
Source File: vector_calculus.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def _divergence(f, x):
    """Divergence of the reconstructed vector field function f evaluated at x"""
    jac = nd.Jacobian(f)(x)
    return np.trace(jac) 
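Another one-line check, this time reusing _divergence from above: for the identity field f(x) = x in R^n the Jacobian is the identity matrix, so the divergence is n.

import numpy as np

f = lambda x: x                          # identity field, div f = n
print(_divergence(f, np.ones(4)))        # approximately 4.0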
Example #12
Source File: vector_calculus.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def elementwise_jacobian_transformation(fjac, X, qi, qj):
    """Inverse transform low dimension Jacobian matrix (:math:`\partial F_i / \partial x_j`) back to original space.
    The formula used to inverse transform Jacobian matrix calculated from low dimension (PCs) is:
                                            :math:`Jac = Q J Q^T`,
    where `Q, J, Jac` are the PCA loading matrix, low dimensional Jacobian matrix and the inverse transformed high
    dimensional Jacobian matrix. This function takes only one row from Q to form qi or qj.

    Parameters
    ----------
        fjac: `function`:
            The function for calculating numerical Jacobian matrix.
        X: `np.ndarray`:
            The samples coordinates with dimension n_obs x n_PCs, from which Jacobian will be calculated.
        qi: `np.ndarray`:
            One sampled gene's PC loading vector (a single row of Q, with n_PCs entries), from which the
            low-dimensional Jacobian matrix will be inverse transformed back to the high-dimensional space.
        qj: `np.ndarray`
            Another gene's PC loading vector (the gene can be the same as in qi or different), used in the
            same way.

    Returns
    -------
        ret: `np.ndarray`
            The calculated Jacobian entries (:math:`\partial F_i / \partial x_j`), one value for each cell.
    """

    Js = fjac(X)
    ret = np.zeros(len(X))
    for i in tqdm(range(len(X)), "calculating Jacobian for each cell"):
        J = Js[:, :, i]
        ret[i] = qi @ J @ qj
    return ret 
Example #13
Source File: vector_calculus.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def get_fjac(f, input_vector_convention='row'):
    '''
        Get the numerical Jacobian of the vector field function.
        If the input_vector_convention is 'row', it means that fjac takes row vectors
        as input, otherwise the input should be an array of column vectors. Note that
        the returned Jacobian behaves exactly the same if the input is a 1d array.

        The column vector convention is slightly faster than the row vector convention.
        So the matrix of row vector convention is converted into column vector convention
        under the hood.

        No matter the input vector convention, the returned Jacobian is of the following
        format:
                df_1/dx_1   df_1/dx_2   df_1/dx_3   ...
                df_2/dx_1   df_2/dx_2   df_2/dx_3   ...
                df_3/dx_1   df_3/dx_2   df_3/dx_3   ...
                ...         ...         ...         ...
    '''
    fjac = nd.Jacobian(lambda x: f(x.T).T)
    if input_vector_convention == 'row' or input_vector_convention == 0:
        def f_aux(x):
            x = x.T
            return fjac(x)

        return f_aux
    else:
        return fjac 
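A short usage sketch for get_fjac, checking the layout documented in the docstring (the vector field below is made up):

import numpy as np

f = lambda x: np.array([x[0] ** 2, x[0] * x[1]])   # field on R^2, row convention
fjac = get_fjac(f, input_vector_convention='row')

J = fjac(np.array([2.0, 3.0]))
# Expected layout: [[df1/dx1, df1/dx2], [df2/dx1, df2/dx2]] = [[4, 0], [3, 2]]
print(np.allclose(J, [[4.0, 0.0], [3.0, 2.0]]))    # True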
Example #14
Source File: metric_velocity.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def curl(f, x):
    jac = nd.Jacobian(f)(x)
    return np.array([jac[1, 0] - jac[0, 1]])  # 2D curl; np.array replaces the scipy.array alias removed from modern SciPy
Example #15
Source File: Wang.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def V_jacobina(F, X):
    import numdifftools as nda

    V_jacobina = nda.Jacobian(F)

    return V_jacobina(X) 
Example #16
Source File: mlemodel.py    From vnpy_crypto with MIT License
def score(self, params):
        """
        Score vector for Arma model
        """
        #return None
        #print(params)
        jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
        return jac(params)[-1] 
Example #17
Source File: garch.py    From Splunking-Crime with GNU Affero General Public License v3.0
def hessian(self, params):
        """
        Hessian of arma model.  Currently uses numdifftools
        """
        #return None
        Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
        return Hfun(params)[-1] 
Example #18
Source File: garch.py    From Splunking-Crime with GNU Affero General Public License v3.0
def score(self, params):
        """
        Score vector for Arma model
        """
        #return None
        #print(params)
        jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
        return jac(params)[-1] 
Example #19
Source File: mlemodel.py    From Splunking-Crime with GNU Affero General Public License v3.0
def hessian(self, params):
        """
        Hessian of arma model.  Currently uses numdifftools
        """
        #return None
        Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
        return Hfun(params)[-1] 
Example #20
Source File: mlemodel.py    From Splunking-Crime with GNU Affero General Public License v3.0
def score(self, params):
        """
        Score vector for Arma model
        """
        #return None
        #print(params)
        jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
        return jac(params)[-1] 
Example #21
Source File: test_numdiff_np.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def test_first_derivative_jacobian_richardson(example_function_jacobian_fixtures):
    f = example_function_jacobian_fixtures["func"]
    fprime = example_function_jacobian_fixtures["func_prime"]

    true_grad = fprime(np.ones(3))
    numdifftools_grad = Jacobian(f, order=2, n=3, method="central")(np.ones(3))
    grad = first_derivative(f, np.ones(3), n_steps=3, method="central")

    aaae(numdifftools_grad, grad)
    aaae(true_grad, grad) 
Example #22
Source File: test_numdiff_np.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def example_function_jacobian_fixtures():
    def f(x):
        """f:R^3 -> R^2"""
        x1, x2, x3 = x[0], x[1], x[2]
        y1, y2 = np.sin(x1) + np.cos(x2), np.exp(x3)
        return np.array([y1, y2])

    def fprime(x):
        """Jacobian(f)(x):R^3 -> R^(2x3)"""
        x1, x2, x3 = x[0], x[1], x[2]
        jac = np.array([[np.cos(x1), -np.sin(x2), 0], [0, 0, np.exp(x3)]])
        return jac

    return {"func": f, "func_prime": fprime} 
Example #23
Source File: garch.py    From Splunking-Crime with GNU Affero General Public License v3.0
def hessian(self, params):
        """
        Hessian of arma model.  Currently uses numdifftools
        """
        #return None
        Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
        return Hfun(params)[-1] 
Example #24
Source File: garch.py    From vnpy_crypto with MIT License
def score(self, params):
        """
        Score vector for Arma model
        """
        #return None
        #print(params)
        jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
        return jac(params)[-1] 
Example #25
Source File: garch.py    From vnpy_crypto with MIT License
def hessian(self, params):
        """
        Hessian of arma model.  Currently uses numdifftools
        """
        #return None
        Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
        return Hfun(params)[-1] 
Example #26
Source File: garch.py    From vnpy_crypto with MIT License
def hessian(self, params):
        """
        Hessian of arma model.  Currently uses numdifftools
        """
        #return None
        Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
        return Hfun(params)[-1] 
Example #27
Source File: differentiation.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def jacobian(
    func,
    params,
    method="central",
    extrapolation=True,
    func_kwargs=None,
    step_options=None,
):
    """
    Calculate the jacobian of *func*.

    Args:
        func (function): A function that maps params into a numpy array or pd.Series.

        params (DataFrame): see :ref:`params`
        method (string): The method for the computation of the derivative. Default is
                         central as it gives the highest accuracy.
        extrapolation (bool): This variable allows to specify the use of the
                                richardson extrapolation.
        func_kwargs (dict): additional positional arguments for func.
        step_options (dict): Options for the numdifftools step generator.
            See :ref:`step_options`

    Returns:
        DataFrame: If func returns a Series, the index is the index of this Series or
        the index is 0,1,2... if func returns a numpy array. The columns are the
        index of params.

    """
    step_options = step_options if step_options is not None else {}

    if method not in ["central", "forward", "backward"]:
        raise ValueError("Method has to be in ['central', 'forward', 'backward']")

    func_kwargs = {} if func_kwargs is None else func_kwargs

    f_x0 = func(params, **func_kwargs)

    internal_func = _create_internal_func(func, params, func_kwargs)
    params_value = params["value"].to_numpy()

    if extrapolation:
        jac_np = nd.Jacobian(internal_func, method=method, **step_options)(params_value)
    else:
        jac_np = _no_extrapolation_jacobian(internal_func, params_value, method)

    if isinstance(f_x0, pd.Series):
        return pd.DataFrame(index=f_x0.index, columns=params.index, data=jac_np)
    else:
        return pd.DataFrame(columns=params.index, data=jac_np) 
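A hedged usage sketch, assuming estimagic's convention of a params DataFrame with a "value" column (the function, values and index below are made up):

import pandas as pd

params = pd.DataFrame({"value": [1.0, 2.0]}, index=["a", "b"])

def func(params):
    x = params["value"].to_numpy()
    return pd.Series({"y1": x[0] ** 2, "y2": x[0] * x[1]})

jac = jacobian(func, params)   # DataFrame: rows y1, y2; columns a, b
print(jac)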
Example #28
Source File: run_benchmark.py    From numdifftools with BSD 3-Clause "New" or "Revised" License
def main(problem_sizes=(4, 8, 16, 32, 64, 96)):
    fixed_step = MinStepGenerator(num_steps=1, use_exact_steps=True, offset=0)
    epsilon = MaxStepGenerator(num_steps=14, use_exact_steps=True,
                               step_ratio=1.6, offset=0)
    adaptiv_txt = '_adaptive_{0:d}_{1!s}_{2:d}'.format(epsilon.num_steps,
                                                       str(epsilon.step_ratio),
                                                       epsilon.offset)
    gradient_funs = OrderedDict()
    hessian_funs = OrderedDict()

    # hessian_fun = 'Hessdiag'
    hessian_fun = 'Hessian'

    if nda is not None:
        nda_method = 'forward'
        nda_txt = 'algopy_' + nda_method
        gradient_funs[nda_txt] = nda.Jacobian(1, method=nda_method)

        hessian_funs[nda_txt] = getattr(nda, hessian_fun)(1, method=nda_method)
    ndc_hessian = getattr(nd, hessian_fun)

    order = 2
    for method in ['forward', 'central', 'complex']:
        method2 = method + adaptiv_txt
        options = dict(method=method, order=order)
        gradient_funs[method] = nd.Jacobian(1, step=fixed_step, **options)
        gradient_funs[method2] = nd.Jacobian(1, step=epsilon, **options)
        hessian_funs[method] = ndc_hessian(1, step=fixed_step, **options)
        hessian_funs[method2] = ndc_hessian(1, step=epsilon, **options)

    hessian_funs['forward_statsmodels'] = nds.Hessian(1, method='forward')
    hessian_funs['central_statsmodels'] = nds.Hessian(1, method='central')
    hessian_funs['complex_statsmodels'] = nds.Hessian(1, method='complex')

    gradient_funs['forward_statsmodels'] = nds.Jacobian(1, method='forward')
    gradient_funs['central_statsmodels'] = nds.Jacobian(1, method='central')
    gradient_funs['complex_statsmodels'] = nds.Jacobian(1, method='complex')
    gradient_funs['forward_scipy'] = nsc.Jacobian(1, method='forward')
    gradient_funs['central_scipy'] = nsc.Jacobian(1, method='central')
    gradient_funs['complex_scipy'] = nsc.Jacobian(1, method='complex')

    run_gradient_and_hessian_benchmarks(gradient_funs, hessian_funs, problem_sizes) 
Example #29
Source File: Ao.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def Ao_pot_map(vecFunc, X, D=None):
    """Mapping potential landscape with the algorithm developed by Ao method.
    References: Potential in stochastic differential equations: novel construction. Journal of physics A: mathematical and
        general, Ao Ping, 2004

    Parameters
    ----------
        vecFunc: `function`
            The vector field function
        X: `numpy.ndarray`
            A matrix of coordinates to calculate potential values for. Rows are observations (cells), columns are features (genes)
        D: None or `numpy.ndarray`
            Diffusion matrix. It must be a square matrix with size corresponding to the number of columns (features) in the X matrix.

    Returns
    -------
        X: `numpy.ndarray`
            A matrix storing the x-coordinates on the two-dimensional grid.
        U: `numpy.ndarray`
            A matrix storing the potential value at each position.
        P: `numpy.ndarray`
            Steady state distribution or the Boltzmann-Gibbs distribution for the state variable.
        vecMat: `list`
            List of velocity vectors at each position in X.
        S: `list`
            List of constant symmetric and positive semi-definite matrices (friction matrices), corresponding
            to the divergence part, at each position in X.
        A: `list`
            List of constant antisymmetric matrices (transverse matrices), corresponding to the curl part, at
            each position in X.
    """

    import numdifftools as nda

    nobs, ndim = X.shape
    D = 0.1 * np.eye(ndim) if D is None else D
    U = np.zeros((nobs, 1))
    vecMat, S, A = [None] * nobs, [None] * nobs, [None] * nobs

    for i in range(nobs):
        X_s = X[i, :]
        F = nda.Jacobian(vecFunc)(X_s)
        Q, _ = solveQ(D, F)
        H = np.linalg.inv(D + Q).dot(F)
        U[i] = -0.5 * X_s.dot(H).dot(X_s)

        vecMat[i] = vecFunc(X_s)
        S[i], A[i] = (
            (np.linalg.inv(D + Q) + np.linalg.inv((D + Q).T)) / 2,
            (np.linalg.inv(D + Q) - np.linalg.inv((D + Q).T)) / 2,
        )

    P = np.exp(-U)
    P = P / np.sum(P)

    return X, U, P, vecMat, S, A 
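The key numerical step inside the loop is F = nda.Jacobian(vecFunc)(X_s). For a linear field vecFunc(x) = A x the recovered Jacobian is just A, which makes an easy standalone sanity check (not the full Ao construction):

import numpy as np
import numdifftools as nda

A = np.array([[-1.0, 2.0], [0.0, -3.0]])
vecFunc = lambda x: A @ x

F = nda.Jacobian(vecFunc)(np.array([0.5, -0.2]))
print(np.allclose(F, A))   # True: the Jacobian of a linear field is its matrix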
Example #30
Source File: scPotential.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def search_fixed_points(func, domain, x0, x0_method='lhs', 
    reverse=False, return_x0=False, fval_tol=1e-8, 
    remove_outliers=True, ignore_fsolve_err=False, **fsolve_kwargs):
    import numdifftools as nda

    func_ = (lambda x: -func(x)) if reverse else func
    k = domain.shape[1]

    if np.isscalar(x0):
        n = x0

        if k > 2 and x0_method == 'grid':
            warn('The dimensionality is too high (%dD). Using lhs instead...'%k)
            x0_method = 'lhs'
        
        if x0_method == 'lhs':
            print('Sampling initial points using latin hypercube sampling...')
            x0 = lhsclassic(n, k) 
        elif x0_method == 'grid':
            print('Sampling initial points on a grid...')
            pass
        else:
            print('Sampling initial points randomly (uniform distribution)...')
            x0 = np.random.rand(n, k)

        x0 = x0 * (domain[1] - domain[0]) + domain[0]

    fp = FixedPoints()
    succeed = 0
    for i in range(len(x0)):
        x, fval_dict, ier, mesg = sp.optimize.fsolve(
                func_, x0[i], full_output=True, **fsolve_kwargs)

        if ignore_fsolve_err:
            ier = 1
        if fval_dict["fvec"].dot(fval_dict["fvec"]) > fval_tol and ier == 1:
            ier = -1
            mesg = 'Function evaluated at the output is larger than the tolerance.'
        elif remove_outliers and is_outside_domain(x, domain) and ier == 1:
            ier = -2
            mesg = 'The output is outside the domain.'

        if ier == 1:
            jacobian_mat = nda.Jacobian(func_)(np.array(x))
            fp.add_fixed_points([x], [jacobian_mat])
            succeed += 1
        else:
            #jacobian_mat = nda.Jacobian(func_)(np.array(x))
            #fp.add_fixed_points([x], [jacobian_mat])
            print('Solution not found: ' + mesg)

    print('%d/%d solutions found.'%(succeed, len(x0)))
        
    if return_x0:
        return fp, x0
    else:
        return fp
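The Jacobian stored with each fixed point is what determines its stability. A minimal standalone sketch of the same root-plus-Jacobian step, classifying the root by the real parts of the Jacobian's eigenvalues (the toy field below is made up):

import numpy as np
import scipy.optimize
import numdifftools as nda

func = lambda x: np.array([x[0] * (1 - x[0]), -x[1]])   # fixed points at (0, 0) and (1, 0)
x = scipy.optimize.fsolve(func, np.array([0.9, 0.1]))

J = nda.Jacobian(func)(x)
stable = np.all(np.linalg.eigvals(J).real < 0)
print(x, stable)   # approximately [1. 0.], True (eigenvalues -1 and -1)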