Python scipy.optimize.check_grad() Examples

The following are 30 code examples of scipy.optimize.check_grad(), drawn from open-source projects. The original project and source file are noted above each example. You may also want to check out the other available functions and classes of the scipy.optimize module.
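
check_grad(func, grad, x0, *args, epsilon=...) compares the analytical gradient grad against a finite-difference approximation of func at the point x0 and returns the 2-norm of the difference, so a value close to zero indicates a correct gradient. Before the project examples, here is a minimal, self-contained sketch of that pattern; the quadratic function and the names func, grad, and x0 are illustrative and are not taken from any project below.

import numpy as np
from scipy.optimize import check_grad


def func(x):
    # f(x) = x0**2 + 2 * x1**2
    return x[0] ** 2 + 2.0 * x[1] ** 2


def grad(x):
    # analytical gradient of f
    return np.array([2.0 * x[0], 4.0 * x[1]])


x0 = np.array([1.5, -0.5])
err = check_grad(func, grad, x0)
# err is the 2-norm of (analytical gradient - finite-difference gradient);
# for a correct gradient it should be tiny, on the order of the step size epsilon
assert err < 1e-6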
Example #1
Source File: test_huber.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_huber_gradient():
    # Test that the gradient calculated by _huber_loss_and_gradient is correct
    rng = np.random.RandomState(1)
    X, y = make_regression_with_outliers()
    sample_weight = rng.randint(1, 3, (y.shape[0]))

    def loss_func(x, *args):
        return _huber_loss_and_gradient(x, *args)[0]

    def grad_func(x, *args):
        return _huber_loss_and_gradient(x, *args)[1]

    # Check using optimize.check_grad that the gradients are equal.
    for _ in range(5):
        # Check for both fit_intercept and otherwise.
        for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
            w = rng.randn(n_features)
            w[-1] = np.abs(w[-1])
            grad_same = optimize.check_grad(
                loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
            assert_almost_equal(grad_same, 1e-6, 4) 
Example #2
Source File: test_huber.py    From twitter-stock-recommendation with MIT License
def test_huber_gradient():
    # Test that the gradient calculated by _huber_loss_and_gradient is correct
    rng = np.random.RandomState(1)
    X, y = make_regression_with_outliers()
    sample_weight = rng.randint(1, 3, (y.shape[0]))
    loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
    grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]

    # Check using optimize.check_grad that the gradients are equal.
    for _ in range(5):
        # Check for both fit_intercept and otherwise.
        for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
            w = rng.randn(n_features)
            w[-1] = np.abs(w[-1])
            grad_same = optimize.check_grad(
                loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
            assert_almost_equal(grad_same, 1e-6, 4) 
Example #3
Source File: test_t_sne.py    From twitter-stock-recommendation with MIT License
def test_gradient():
    # Test gradient of Kullback-Leibler divergence.
    random_state = check_random_state(0)

    n_samples = 50
    n_features = 2
    n_components = 2
    alpha = 1.0

    distances = random_state.randn(n_samples, n_features).astype(np.float32)
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)

    P = _joint_probabilities(distances, desired_perplexity=25.0,
                             verbose=0)

    def fun(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[0]

    def grad(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[1]

    assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
                        decimal=5) 
Example #4
Source File: densities.py    From picard with BSD 3-Clause "New" or "Revised" License
def check_density(density, tol=1e-6, n_test=10, rng=None):
    if rng is None:
        rng = np.random.RandomState(0)
    Y = rng.randn(n_test)

    def score(Y):
        return density.score_and_der(Y)[0]

    def score_der(Y):
        return density.score_and_der(Y)[1]

    err_msgs = ['score', 'score derivative']
    for f, fprime, err_msg in zip([density.log_lik, score], [score, score_der],
                                  err_msgs):
        for y in Y:
            err = check_grad(f, fprime, np.array([y]))
            assert_allclose(err, 0, atol=tol, rtol=0,
                            err_msg='Wrong %s' % err_msg) 
Example #5
Source File: test_nca.py    From scikit-hubness with BSD 3-Clause "New" or "Revised" License
def test_finite_differences():
    """Test gradient of loss function

    Assert that the gradient is almost equal to its finite differences
    approximation.
    """
    # Initialize the transformation `M`, as well as `X` and `y` and `NCA`
    rng = np.random.RandomState(42)
    X, y = make_classification()
    M = rng.randn(rng.randint(1, X.shape[1] + 1),
                  X.shape[1])
    nca = NeighborhoodComponentsAnalysis()
    nca.n_iter_ = 0
    mask = y[:, np.newaxis] == y[np.newaxis, :]

    def fun(M):
        return nca._loss_grad_lbfgs(M, X, mask)[0]

    def grad(M):
        return nca._loss_grad_lbfgs(M, X, mask)[1]

    # compute relative error
    rel_diff = check_grad(fun, grad, M.ravel()) / np.linalg.norm(grad(M))
    np.testing.assert_almost_equal(rel_diff, 0., decimal=5) 
Example #6
Source File: test_update_d_multi.py    From alphacsc with BSD 3-Clause "New" or "Revised" License
def test_simple():
    T = 100
    L = 10
    S = T - L + 1
    x = np.random.random(T)
    z = np.random.random(S)
    d = np.random.random(L)

    def func(d0):
        xr = signal.convolve(z, d0)
        residual = x - xr
        return .5 * np.sum(residual * residual)

    def grad(d0):
        xr = signal.convolve(z, d0)
        residual = x - xr
        grad_d = - signal.convolve(residual, z[::-1], mode='valid')
        return grad_d

    error = optimize.check_grad(func, grad, d, epsilon=1e-8)
    assert error < 1e-4, "Gradient is false: {:.4e}".format(error) 
Example #7
Source File: test_non_linear_models.py    From emukit with Apache License 2.0
def test_non_linear_model_variance_gradient(self, non_linear_model):
        """
        Check the gradient of the predictive variance is correct
        """

        np.random.seed(1234)
        x0 = np.random.rand(2)

        # wrap function so fidelity index doesn't change
        def wrap_func(x):
            x_full = np.concatenate([x[None, :], [[2]]], axis=1)
            return non_linear_model.predict(x_full)[1]

        def wrap_gradients(x):
            x_full = np.concatenate([x[None, :], [[2]]], axis=1)
            return non_linear_model.get_prediction_gradients(x_full)[1]

        assert np.all(check_grad(wrap_func, wrap_gradients, x0) < 1e-6) 
Example #8
Source File: test_non_linear_models.py    From emukit with Apache License 2.0
def test_non_linear_model_mean_gradient(self, non_linear_model):
        """
        Check the gradient of the mean prediction is correct
        """

        np.random.seed(1234)
        x0 = np.random.rand(2)

        # wrap function so fidelity index doesn't change
        def wrap_func(x):
            x_full = np.concatenate([x[None, :], [[2]]], axis=1)
            return non_linear_model.predict(x_full)[0]

        def wrap_gradients(x):
            x_full = np.concatenate([x[None, :], [[2]]], axis=1)
            return non_linear_model.get_prediction_gradients(x_full)[0]
        assert np.all(check_grad(wrap_func, wrap_gradients, x0) < 1e-6) 
Example #9
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License
def test_check_grad():
    # Verify if check_grad is able to estimate the derivative of the
    # logistic function.

    def logit(x):
        return 1 / (1 + np.exp(-x))

    def der_logit(x):
        return np.exp(-x) / (1 + np.exp(-x))**2

    x0 = np.array([1.5])

    r = optimize.check_grad(logit, der_logit, x0)
    assert_almost_equal(r, 0)

    r = optimize.check_grad(logit, der_logit, x0, epsilon=1e-6)
    assert_almost_equal(r, 0)

    # Check if the epsilon parameter is being considered.
    r = abs(optimize.check_grad(logit, der_logit, x0, epsilon=1e-1) - 0)
    assert_(r > 1e-7) 
Example #10
Source File: model_hawkes_sumexpkern_leastsq_test.py    From tick with BSD 3-Clause "New" or "Revised" License
def test_model_hawkes_varying_baseline_least_sq_grad(self):
        """...Test that ModelHawkesExpKernLeastSq gradient is consistent
        with loss
        """
        for model in [self.model, self.model_list]:
            model.period_length = 1.
            model.n_baselines = 3
            coeffs = np.random.rand(model.n_coeffs)

            self.assertLess(check_grad(model.loss, model.grad, coeffs), 1e-5)

            coeffs_min = fmin_bfgs(model.loss, coeffs, fprime=model.grad,
                                   disp=False)

            self.assertAlmostEqual(
                norm(model.grad(coeffs_min)), .0, delta=1e-4) 
Example #11
Source File: model_hawkes_expkern_leastsq_test.py    From tick with BSD 3-Clause "New" or "Revised" License
def test_ModelHawkesExpKernLeastSqHess(self):
        """...Numerical consistency check of hessian for Hawkes contrast
        """
        for model in [self.model, self.model_list]:
            # this hessian is independent of x, but for more generality
            # we still pass an unused coeff as argument
            hessian = model.hessian(self.coeffs).todense()

            # Check that hessian is equal to its transpose
            np.testing.assert_array_almost_equal(hessian, hessian.T,
                                                 decimal=10)

            # Check that, for each dimension, the hessian row is consistent
            # with its corresponding gradient coordinate.
            for i in range(model.n_coeffs):

                def g_i(x):
                    return model.grad(x)[i]

                def h_i(x):
                    return np.asarray(hessian)[i, :]

                self.assertLess(check_grad(g_i, h_i, self.coeffs), 1e-5) 
Example #12
Source File: model_hawkes_expkern_loglik_test.py    From tick with BSD 3-Clause "New" or "Revised" License
def test_ModelHawkesExpKernLogLik_hessian(self):
        """...Numerical consistency check of hessian for Hawkes loglik
        """
        for model in [self.model]:
            hessian = model.hessian(self.coeffs).todense()
            # Check that hessian is equal to its transpose
            np.testing.assert_array_almost_equal(hessian, hessian.T,
                                                 decimal=10)

            # Check that, for each dimension, the hessian row is consistent
            # with its corresponding gradient coordinate.
            for i in range(model.n_coeffs):

                def g_i(x):
                    return model.grad(x)[i]

                def h_i(x):
                    h = model.hessian(x).todense()
                    return np.asarray(h)[i, :]

                self.assertLess(check_grad(g_i, h_i, self.coeffs), 1e-5) 
Example #13
Source File: test_t_sne.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_gradient():
    # Test gradient of Kullback-Leibler divergence.
    random_state = check_random_state(0)

    n_samples = 50
    n_features = 2
    n_components = 2
    alpha = 1.0

    distances = random_state.randn(n_samples, n_features).astype(np.float32)
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)

    P = _joint_probabilities(distances, desired_perplexity=25.0,
                             verbose=0)

    def fun(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[0]

    def grad(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[1]

    assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
                        decimal=5) 
Example #14
Source File: model_hawkes_sumexpkern_loglik_test.py    From tick with BSD 3-Clause "New" or "Revised" License
def test_ModelHawkesSumExpKernLogLik_hessian(self):
        """...Numerical consistency check of hessian for Hawkes loglik
        """
        for model in [self.model]:
            hessian = model.hessian(self.coeffs).todense()
            # Check that hessian is equal to its transpose
            np.testing.assert_array_almost_equal(hessian, hessian.T,
                                                 decimal=10)

            np.set_printoptions(precision=3, linewidth=200)

            # Check that, for each dimension, the hessian row is consistent
            # with its corresponding gradient coordinate.
            for i in range(model.n_coeffs):

                def g_i(x):
                    return model.grad(x)[i]

                def h_i(x):
                    h = model.hessian(x).todense()
                    return np.asarray(h)[i, :]

                self.assertLess(check_grad(g_i, h_i, self.coeffs), 1e-5) 
Example #15
Source File: test_nca.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_finite_differences():
    """Test gradient of loss function

    Assert that the gradient is almost equal to its finite differences
    approximation.
    """
    # Initialize the transformation `M`, as well as `X` and `y` and `NCA`
    rng = np.random.RandomState(42)
    X, y = make_classification()
    M = rng.randn(rng.randint(1, X.shape[1] + 1),
                  X.shape[1])
    nca = NeighborhoodComponentsAnalysis()
    nca.n_iter_ = 0
    mask = y[:, np.newaxis] == y[np.newaxis, :]

    def fun(M):
        return nca._loss_grad_lbfgs(M, X, mask)[0]

    def grad(M):
        return nca._loss_grad_lbfgs(M, X, mask)[1]

    # compute relative error
    rel_diff = check_grad(fun, grad, M.ravel()) / np.linalg.norm(grad(M))
    np.testing.assert_almost_equal(rel_diff, 0., decimal=5) 
Example #16
Source File: test_dtw.py    From didyprog with MIT License
def test_viterbi_hessian(operator):
    theta = make_data()
    Z = np.random.randn(*theta.shape)

    def func(X):
        X = X.reshape(theta.shape)
        _, grad, _, _ = dtw_grad(X, operator=operator)
        return np.sum(grad * Z)

    def grad(X):
        X = X.reshape(theta.shape)
        v, H = dtw_hessian_prod(X, Z, operator=operator)
        return H.ravel()

    # check_grad does not work with ndarray of dim > 2
    err = check_grad(func, grad, theta.ravel())
    assert err < 1e-6 
Example #17
Source File: test_viterbi.py    From didyprog with MIT License
def test_viterbi_grad(operator):
    states, emissions, theta = make_data()
    theta /= 100

    def func(X):
        X = X.reshape(theta.shape)
        return viterbi_value(X, operator=operator)

    def grad(X):
        X = X.reshape(theta.shape)
        _, grad, _, _ = viterbi_grad(X, operator=operator)
        return grad.ravel()

    # check_grad does not work with ndarray of dim > 2
    err = check_grad(func, grad, theta.ravel())
    if operator == 'sparsemax':
        assert err < 1e-4
    else:
        assert err < 1e-6 
Example #18
Source File: test_viterbi.py    From didyprog with MIT License
def test_viterbi_hessian(operator):
    states, emissions, theta = make_data()

    theta /= 100
    Z = np.random.randn(*theta.shape)

    def func(X):
        X = X.reshape(theta.shape)
        _, grad, _, _ = viterbi_grad(X, operator=operator)
        return np.sum(grad * Z)

    def grad(X):
        X = X.reshape(theta.shape)
        _, H = viterbi_hessian_prod(X, Z, operator=operator)
        return H.ravel()

    # check_grad does not work with ndarray of dim > 2
    err = check_grad(func, grad, theta.ravel())
    if operator == 'sparsemax':
        assert err < 1e-4
    else:
        assert err < 1e-6 
Example #19
Source File: model_hawkes_expkern_leastsq_test.py    From tick with BSD 3-Clause "New" or "Revised" License
def test_model_hawkes_least_sq_grad(self):
        """...Test that ModelHawkesExpKernLeastSq gradient is consistent
        with loss
        """

        for model in [self.model, self.model_list]:
            self.assertLess(
                check_grad(model.loss, model.grad, self.coeffs), 1e-5) 
Example #20
Source File: quasistatic.py    From tyssue with GNU General Public License v3.0
def check_grad(self, eptm, geom, model):
        pos0 = eptm.vert_df.loc[
            eptm.vert_df.is_active.astype(bool), eptm.coords
        ].values.ravel()
        grad_err = optimize.check_grad(
            self._opt_energy, self._opt_grad, pos0.flatten(), eptm, geom, model
        )
        return grad_err

    # The functions below are for a periodic square tissue in 2D 
Example #21
Source File: sheet_vertex_solver.py    From tyssue with GNU General Public License v3.0
def check_grad(cls, sheet, geom, model):

        pos_idx = sheet.vert_df[sheet.vert_df["is_active"] == 1].index
        pos0 = sheet.vert_df.loc[pos_idx, sheet.coords].values.ravel()

        grad_err = optimize.check_grad(
            cls.opt_energy, cls.opt_grad, pos0.flatten(), pos_idx, sheet, geom, model
        )
        return grad_err 
Example #22
Source File: test_dtw.py    From didyprog with MIT License
def test_dtw_grad(operator):
    C = make_data()

    def func(X):
        X = X.reshape(C.shape)
        return dtw_value(X, operator=operator)

    def grad(X):
        X = X.reshape(C.shape)
        _, g, _, _ = dtw_grad(X, operator=operator)
        return g.ravel()

    err = check_grad(func, grad, C.ravel())
    assert err < 1e-6 
Example #23
Source File: test_prior.py    From pyPESTO with BSD 3-Clause "New" or "Revised" License
def test_derivatives():
    """
    Tests the finite gradients and second order derivatives.
    """

    scales = ['lin', 'log', 'log10']
    prior_types = ['uniform', 'normal', 'laplace', 'logNormal']

    for prior_type, scale in itertools.product(prior_types, scales):

        if prior_type == 'uniform':
            prior_parameters = [-1, 1]
        else:
            prior_parameters = [1, 1]

        prior_dict = get_parameter_prior_dict(
            0, prior_type, prior_parameters, scale)

        # use this x0, since it is a moderate value both in linear
        # and in log scale...
        x0 = np.array([0.5])

        err_grad = opt.check_grad(prior_dict['density_fun'],
                                  prior_dict['density_dx'], x0)
        err_hes = opt.check_grad(prior_dict['density_dx'],
                                 prior_dict['density_ddx'], x0)

        assert err_grad < 1e-3
        assert err_hes < 1e-3 
Example #24
Source File: test_crps.py    From properscoring with Apache License 2.0
def test_grad(self):
        from scipy import optimize
        f = lambda z: crps_gaussian(self.obs[0, 0], z[0], z[1], grad=False)
        g = lambda z: crps_gaussian(self.obs[0, 0], z[0], z[1], grad=True)[1]
        x0 = np.array([self.mu.reshape(-1),
                       self.sig.reshape(-1)]).T
        for x in x0:
            self.assertLessEqual(optimize.check_grad(f, g, x), 1e-6) 
Example #25
Source File: test_chainer_func.py    From soft-dtw with BSD 2-Clause "Simplified" License
def test_grad():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)
    Z = rng.randn(8, 2)
    print(check_grad(_func, _grad, Z.ravel(), X)) 
Example #26
Source File: test_local_penalization.py    From emukit with Apache License 2.0
def _check_grad(lp, tol, x0):
    grad_error = check_grad(lambda x: lp.evaluate_with_gradients(x[None, :])[0],
                            lambda x: lp.evaluate_with_gradients(x[None, :])[1], x0)
    assert np.all(grad_error < tol) 
Example #27
Source File: generalized_linear_model.py    From tick with BSD 3-Clause "New" or "Revised" License
def _test_grad(self, model, coeffs, delta_check_grad=None,
                   delta_model_grad=None):
        """Test that gradient is consistent with loss and that minimum is
        achievable with a small gradient
        """
        if coeffs.dtype is np.dtype("float32"):
            check_grad_epsilon = 3e-3
        else:
            check_grad_epsilon = 1e-7

        if delta_check_grad is None:
            delta_check_grad = self.delta_check_grad

        if delta_model_grad is None:
            delta_model_grad = self.delta_model_grad

        with warnings.catch_warnings(record=True):
            grad_check = check_grad(model.loss, model.grad, coeffs,
                                    epsilon=check_grad_epsilon)

        self.assertAlmostEqual(grad_check, 0., delta=delta_check_grad)
        # Check that minimum is achievable with a small gradient

        with warnings.catch_warnings(record=True):
            coeffs_min = fmin_bfgs(model.loss, coeffs, fprime=model.grad,
                                   disp=False)
            coeffs_min = coeffs_min.astype(self.dtype)

        self.assertAlmostEqual(
            norm(model.grad(coeffs_min)), .0, delta=delta_model_grad) 
Example #28
Source File: test_non_linear_models.py    From emukit with Apache License 2.0
def test_non_linear_sample_fidelities_gradient(self, non_linear_model, fidelity_idx, func_idx, grad_idx):
        np.random.seed(1234)
        x0 = np.random.rand(2)

        func = lambda x: np.sum(non_linear_model._predict_samples_with_gradients(x[None, :], fidelity_idx)[func_idx],
                                axis=0)
        grad = lambda x: np.sum(non_linear_model._predict_samples_with_gradients(x[None, :], fidelity_idx)[grad_idx],
                                axis=0)
        assert check_grad(func, grad, x0) < 1e-6 
Example #29
Source File: test_acquisitions.py    From emukit with Apache License 2.0
def test_acquisition_gradient_computation(acquisition, n_dims, tol):
    rng = np.random.RandomState(43)
    x_test = rng.rand(10, n_dims)

    acq = lambda x: acquisition.evaluate(np.array([x]))[0][0]
    grad = lambda x: acquisition.evaluate_with_gradients(np.array([x]))[1][0]

    for xi in x_test:
        err = check_grad(acq, grad, xi, epsilon=gradient_check_step_size)
        assert err < tol 
Example #30
Source File: test_opt.py    From choix with MIT License
def _test_hessian(n_items, fcts):
    """Helper for testing the hessian of objective functions."""
    for sigma in np.linspace(1, 20, num=10):
        xs = sigma * RND.randn(n_items)
        for i in range(n_items):
            obj = lambda xs: fcts.gradient(xs)[i]
            grad = lambda xs: fcts.hessian(xs)[i]
            val = approx_fprime(xs, obj, EPS)
            err = check_grad(obj, grad, xs, epsilon=EPS)
            assert abs(err / np.linalg.norm(val)) < 1e-5