Python cvxpy.Parameter() Examples

The following are 30 code examples of cvxpy.Parameter(), drawn from open-source projects. The source file, project, and license for each example are listed above its code.
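Every example below follows the same basic pattern: a cvxpy.Parameter is declared symbolically, a value is assigned through its .value attribute, and the problem is solved (and can be re-solved) for different parameter values. A minimal, self-contained sketch of that pattern (not taken from any of the projects below; the penalty problem here is purely illustrative):

import cvxpy as cp
import numpy as np

x = cp.Variable()
lam = cp.Parameter(nonneg=True)  # declared symbolically, no value yet

# The parameter enters the problem like a constant whose value can change later.
problem = cp.Problem(cp.Minimize((x - 1.0) ** 2 + lam * cp.abs(x)))

for value in [0.1, 1.0, 10.0]:
    lam.value = value    # assign / update the parameter value
    problem.solve()      # re-solve without rebuilding the problem
    print(value, np.round(x.value, 4))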
Example #1
Source File: suitesparse_lasso.py    From osqp_benchmarks with Apache License 2.0
def _generate_cvxpy_problem(self):
        '''
        Generate QP problem
        '''

        x = cvxpy.Variable(self.n)
        y = cvxpy.Variable(self.m)
        t = cvxpy.Variable(self.n)

        # Create parameter and assign value
        lambda_cvxpy = cvxpy.Parameter()
        lambda_cvxpy.value = self.lambda_param

        objective = cvxpy.Minimize(cvxpy.quad_form(y, spa.eye(self.m))
                                   + self.lambda_param * (np.ones(self.n) * t))
        constraints = [y == self.Ad * x - self.bd,
                       -t <= x, x <= t]
        problem = cvxpy.Problem(objective, constraints)

        return problem, (x, y, t), lambda_cvxpy 
Example #2
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_example(self):
        n, m = 2, 3
        x = cp.Variable(n)
        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        constraints = [x >= 0]
        objective = cp.Minimize(0.5 * cp.pnorm(A @ x - b, p=1))
        problem = cp.Problem(objective, constraints)
        assert problem.is_dpp()

        cvxpylayer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
        A_tch = torch.randn(m, n, requires_grad=True)
        b_tch = torch.randn(m, requires_grad=True)

        # solve the problem
        solution, = cvxpylayer(A_tch, b_tch)

        # compute the gradient of the sum of the solution with respect to A, b
        solution.sum().backward() 
Example #3
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_simple_batch_socp(self):
        set_seed(243)
        n = 5
        m = 1
        batch_size = 4

        P_sqrt = cp.Parameter((n, n), name='P_sqrt')
        q = cp.Parameter((n, 1), name='q')
        A = cp.Parameter((m, n), name='A')
        b = cp.Parameter((m, 1), name='b')

        x = cp.Variable((n, 1), name='x')

        objective = 0.5 * cp.sum_squares(P_sqrt @ x) + q.T @ x
        constraints = [A@x == b, cp.norm(x) <= 1]
        prob = cp.Problem(cp.Minimize(objective), constraints)

        prob_tch = CvxpyLayer(prob, [P_sqrt, q, A, b], [x])

        P_sqrt_tch = torch.randn(batch_size, n, n, requires_grad=True)
        q_tch = torch.randn(batch_size, n, 1, requires_grad=True)
        A_tch = torch.randn(batch_size, m, n, requires_grad=True)
        b_tch = torch.randn(batch_size, m, 1, requires_grad=True)

        torch.autograd.gradcheck(prob_tch, (P_sqrt_tch, q_tch, A_tch, b_tch)) 
Example #4
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_lml(self):
        tf.random.set_seed(0)
        k = 2
        x = cp.Parameter(4)
        y = cp.Variable(4)
        obj = -x * y - cp.sum(cp.entr(y)) - cp.sum(cp.entr(1. - y))
        cons = [cp.sum(y) == k]
        problem = cp.Problem(cp.Minimize(obj), cons)
        lml = CvxpyLayer(problem, [x], [y])
        x_tf = tf.Variable([1., -1., -1., -1.], dtype=tf.float64)

        with tf.GradientTape() as tape:
            y_opt = lml(x_tf, solver_args={'eps': 1e-10})[0]
            loss = -tf.math.log(y_opt[1])

        def f():
            problem.solve(solver=cp.SCS, eps=1e-10)
            return -np.log(y.value[1])

        grad = tape.gradient(loss, [x_tf])
        numgrad = numerical_grad(f, [x], [x_tf])
        np.testing.assert_almost_equal(grad, numgrad, decimal=3) 
Example #5
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_entropy_maximization(self):
        set_seed(243)
        n, m, p = 5, 3, 2

        tmp = np.random.rand(n)
        A_np = np.random.randn(m, n)
        b_np = A_np.dot(tmp)
        F_np = np.random.randn(p, n)
        g_np = F_np.dot(tmp) + np.random.rand(p)

        x = cp.Variable(n)
        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        F = cp.Parameter((p, n))
        g = cp.Parameter(p)
        obj = cp.Maximize(cp.sum(cp.entr(x)) - .01 * cp.sum_squares(x))
        constraints = [A * x == b,
                       F * x <= g]
        prob = cp.Problem(obj, constraints)
        layer = CvxpyLayer(prob, [A, b, F, g], [x])

        A_tch, b_tch, F_tch, g_tch = map(
            lambda x: torch.from_numpy(x).requires_grad_(True), [
                A_np, b_np, F_np, g_np])
        torch.autograd.gradcheck(
            lambda *x: layer(*x, solver_args={"eps": 1e-12,
                                              "max_iters": 10000}),
            (A_tch,
             b_tch,
             F_tch,
             g_tch),
            eps=1e-4,
            atol=1e-3,
            rtol=1e-3) 
Example #6
Source File: lasso.py    From osqp_benchmarks with Apache License 2.0
def _generate_cvxpy_problem(self):
        '''
        Generate QP problem
        '''

        x = cvxpy.Variable(self.n)
        y = cvxpy.Variable(self.m)
        t = cvxpy.Variable(self.n)

        # Create parameter and assign value
        lambda_cvxpy = cvxpy.Parameter()
        lambda_cvxpy.value = self.lambda_param

        objective = cvxpy.Minimize(cvxpy.quad_form(y, spa.eye(self.m))
                                   + self.lambda_param * (np.ones(self.n) * t))
        constraints = [y == self.Ad * x - self.bd,
                       -t <= x, x <= t]
        problem = cvxpy.Problem(objective, constraints)

        return problem, (x, y, t), lambda_cvxpy 
Example #7
Source File: portfolio.py    From osqp_benchmarks with Apache License 2.0
def _generate_cvxpy_problem(self):
        '''
        Generate QP problem
        '''

        x = cvxpy.Variable(self.n)
        y = cvxpy.Variable(self.k)

        # Create parameter mu and assign value
        mu = cvxpy.Parameter(self.n)
        mu.value = self.mu

        objective = cvxpy.Minimize(cvxpy.quad_form(x, self.D) +
                                   cvxpy.quad_form(y, spa.eye(self.k)) +
                                   - 1 / self.gamma * (mu.T * x))
        constraints = [np.ones(self.n) * x == 1,
                       self.F.T * x == y,
                       0 <= x, x <= 1]
        problem = cvxpy.Problem(objective, constraints)

        return problem, mu 
Example #8
Source File: cvxpylayer.py    From cvxpylayers with Apache License 2.0
def __call__(self, *parameters, solver_args={}):
        """Solve problem (or a batch of problems) corresponding to `parameters`

        Args:
          parameters: a sequence of tf.Tensors; the n-th Tensor specifies
                      the value for the n-th CVXPY Parameter. These Tensors
                      can be batched: if a Tensor has 3 dimensions, then its
                      first dimension is interpreted as the batch size.
          solver_args: a dict of optional arguments, to send to `diffcp`. Keys
                       should be the names of keyword arguments.

        Returns:
          a list of optimal variable values, one for each CVXPY Variable
          supplied to the constructor.
        """
        if len(parameters) != len(self.params):
            raise ValueError('A tensor must be provided for each CVXPY '
                             'parameter; received %d tensors, expected %d' % (
                                 len(parameters), len(self.params)))
        compute = tf.custom_gradient(
            lambda *parameters: self._compute(parameters, solver_args))
        return compute(*parameters) 
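The batching behavior described in the docstring above can be sketched as follows. This is an illustrative example only (the problem, shapes, and solver_args values are assumptions, not part of the source), using the TensorFlow CvxpyLayer from cvxpylayers:

import cvxpy as cp
import tensorflow as tf
from cvxpylayers.tensorflow import CvxpyLayer

m, n, batch_size = 3, 2, 4
x = cp.Variable(n)
A = cp.Parameter((m, n))
b = cp.Parameter(m)
problem = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b)), [x >= 0])
layer = CvxpyLayer(problem, parameters=[A, b], variables=[x])

# Unbatched call: one tensor per CVXPY parameter, shaped like that parameter.
A_tf = tf.random.normal((m, n), dtype=tf.float64)
b_tf = tf.random.normal((m,), dtype=tf.float64)
x_star, = layer(A_tf, b_tf)

# Batched call: an extra leading dimension is interpreted as the batch size,
# and the returned solution carries the same leading batch dimension.
A_batch = tf.random.normal((batch_size, m, n), dtype=tf.float64)
b_batch = tf.random.normal((batch_size, m), dtype=tf.float64)
x_batch, = layer(A_batch, b_batch, solver_args={'eps': 1e-8})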
Example #9
Source File: cvxpy_examples.py    From cvxpylayers with Apache License 2.0
def running_example():
    print("running example")
    m = 20
    n = 10
    x = cp.Variable((n, 1))
    F = cp.Parameter((m, n))
    g = cp.Parameter((m, 1))
    lambd = cp.Parameter((1, 1), nonneg=True)
    objective_fn = cp.norm(F @ x - g) + lambd * cp.norm(x)
    constraints = [x >= 0]
    problem = cp.Problem(cp.Minimize(objective_fn), constraints)
    assert problem.is_dcp()
    assert problem.is_dpp()
    print("is_dpp: ", problem.is_dpp())

    F_t = torch.randn(m, n, requires_grad=True)
    g_t = torch.randn(m, 1, requires_grad=True)
    lambd_t = torch.rand(1, 1, requires_grad=True)
    layer = CvxpyLayer(problem, parameters=[F, g, lambd], variables=[x])
    x_star, = layer(F_t, g_t, lambd_t)
    x_star.sum().backward()
    print("F_t grad: ", F_t.grad)
    print("g_t grad: ", g_t.grad) 
Example #10
Source File: cvxpy_examples.py    From cvxpylayers with Apache License 2.0
def relu():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('relu')
    npr.seed(0)

    n = 4
    _x = cp.Parameter(n)
    _y = cp.Variable(n)
    obj = cp.Minimize(cp.sum_squares(_y - _x))
    cons = [_y >= 0]
    prob = cp.Problem(obj, cons)

    _x.value = npr.randn(n)

    prob.solve(solver=cp.SCS)
    print(_y.value) 
Example #11
Source File: cvxpy_examples.py    From cvxpylayers with Apache License 2.0
def ball_con():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('ball con')
    npr.seed(0)

    n = 2

    A = cp.Parameter((n, n))
    z = cp.Parameter(n)
    p = cp.Parameter(n)
    x = cp.Variable(n)
    t = cp.Variable(n)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - p))
    # TODO automate introduction of variables.
    cons = [0.5 * cp.sum_squares(A * t) <= 1, t == (x - z)]
    prob = cp.Problem(obj, cons)

    L = npr.randn(n, n)
    A.value = L.T
    z.value = npr.randn(n)
    p.value = npr.randn(n)

    prob.solve(solver=cp.SCS)
    print(x.value) 
Example #12
Source File: l1lsq.py    From doatools.py with MIT License
def __init__(self, m, k, n, complex=False):
        if not cvx_available:
            raise RuntimeError('Cannot initialize when cvxpy is not available.')
        # Initialize parameters and variables
        A = cvx.Parameter((m, k), complex=complex)
        B = cvx.Parameter((m, n), complex=complex)
        l = cvx.Parameter(nonneg=True)
        X = cvx.Variable((k, n), complex=complex)
        # Create the problem
        # CVXPY issue:
        #   cvx.norm does not work if axis is not 0.
        # Workaround:
        #   use cvx.norm(X.T, 2, axis=0) instead of cvx.norm(X, 2, axis=1)
        obj_func = 0.5 * cvx.norm(cvx.matmul(A, X) - B, 'fro')**2 + \
                   l * cvx.sum(cvx.norm(X.T, 2, axis=0))
        self._problem = cvx.Problem(cvx.Minimize(obj_func))
        self._A = A
        self._B = B
        self._l = l
        self._X = X 
Example #13
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_shared_parameter(self):
        set_seed(243)
        m, n = 10, 5

        A = cp.Parameter((m, n))
        x = cp.Variable(n)
        b1 = np.random.randn(m)
        b2 = np.random.randn(m)
        prob1 = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b1)))
        layer1 = CvxpyLayer(prob1, parameters=[A], variables=[x])
        prob2 = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b2)))
        layer2 = CvxpyLayer(prob2, parameters=[A], variables=[x])

        A_tch = torch.randn(m, n, requires_grad=True)
        solver_args = {
            "eps": 1e-10,
            "acceleration_lookback": 0,
            "max_iters": 10000
        }

        torch.autograd.gradcheck(lambda A: torch.cat(
            [layer1(A, solver_args=solver_args)[0],
             layer2(A, solver_args=solver_args)[0]]), (A_tch,)) 
Example #14
Source File: scproblem.py    From SuccessiveConvexificationFreeFinalTime with MIT License
def set_parameters(self, **kwargs):
        """
        All parameters have to be filled before calling solve().
        Takes the following arguments as keywords:

        A_bar
        B_bar
        C_bar
        S_bar
        z_bar
        X_last
        U_last
        sigma_last
        E
        weight_sigma
        weight_nu
        radius_trust_region
        """

        for key in kwargs:
            if key in self.par:
                self.par[key].value = kwargs[key]
            else:
                print(f'Parameter \'{key}\' does not exist.') 
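The setter above assumes the object keeps its cvxpy Parameters in a dict self.par keyed by name. A minimal, hypothetical sketch of that pattern (the class, parameter names, and problem here are illustrative, not the actual SCProblem interface):

import cvxpy
import numpy as np

class ParametrizedProblem:
    def __init__(self, n):
        self.x = cvxpy.Variable(n)
        # Parameters stored by name so they can be filled via keyword arguments.
        self.par = {
            'weight': cvxpy.Parameter(nonneg=True),
            'target': cvxpy.Parameter(n),
        }
        objective = cvxpy.Minimize(
            cvxpy.sum_squares(self.x - self.par['target'])
            + self.par['weight'] * cvxpy.norm1(self.x))
        self.prob = cvxpy.Problem(objective)

    def set_parameters(self, **kwargs):
        # Same lookup pattern as above: unknown names are reported, not silently ignored.
        for key, value in kwargs.items():
            if key in self.par:
                self.par[key].value = value
            else:
                print(f"Parameter '{key}' does not exist.")

problem = ParametrizedProblem(3)
problem.set_parameters(weight=0.1, target=np.ones(3))
problem.prob.solve()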
Example #15
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_basic_gp(self):
        set_seed(243)

        x = cp.Variable(pos=True)
        y = cp.Variable(pos=True)
        z = cp.Variable(pos=True)

        a = cp.Parameter(pos=True, value=2.0)
        b = cp.Parameter(pos=True, value=1.0)
        c = cp.Parameter(value=0.5)

        objective_fn = 1/(x*y*z)
        constraints = [a*(x*y + x*z + y*z) <= b, x >= y**c]
        problem = cp.Problem(cp.Minimize(objective_fn), constraints)
        problem.solve(cp.SCS, gp=True, eps=1e-12)

        layer = CvxpyLayer(
            problem, parameters=[a, b, c], variables=[x, y, z], gp=True)
        a_tch = torch.tensor(2.0, requires_grad=True)
        b_tch = torch.tensor(1.0, requires_grad=True)
        c_tch = torch.tensor(0.5, requires_grad=True)
        with torch.no_grad():
            x_tch, y_tch, z_tch = layer(a_tch, b_tch, c_tch)

        self.assertAlmostEqual(x.value, x_tch.detach().numpy(), places=5)
        self.assertAlmostEqual(y.value, y_tch.detach().numpy(), places=5)
        self.assertAlmostEqual(z.value, z_tch.detach().numpy(), places=5)

        torch.autograd.gradcheck(lambda a, b, c: layer(
            a, b, c, solver_args={
                "eps": 1e-12, "acceleration_lookback": 0})[0].sum(),
                (a_tch, b_tch, c_tch), atol=1e-3, rtol=1e-3) 
Example #16
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_infeasible(self):
        x = cp.Variable(1)
        param = cp.Parameter(1)
        prob = cp.Problem(cp.Minimize(param), [x >= 1, x <= -1])
        layer = CvxpyLayer(prob, [param], [x])
        param_tf = tf.ones(1)
        with self.assertRaises(diffcp.SolverError):
            layer(param_tf) 
Example #17
Source File: l1lsq.py    From doatools.py with MIT License
def __init__(self, m, k, formulation='penalizedl1', nonnegative=False):
        if not cvx_available:
            raise RuntimeError('Cannot initialize when cvxpy is not available.')
        # Initialize parameters and variables
        A = cvx.Parameter((m, k))
        b = cvx.Parameter((m, 1))
        l = cvx.Parameter(nonneg=True)
        x = cvx.Variable((k, 1))
        # Create the problem
        if formulation == 'penalizedl1':
            obj_func = 0.5 * cvx.sum_squares(cvx.matmul(A, x) - b) + l * cvx.norm1(x)
            constraints = []
        elif formulation == 'constrainedl1':
            obj_func = cvx.sum_squares(cvx.matmul(A, x) - b)
            constraints = [cvx.norm1(x) <= l]
        elif formulation == 'constrainedl2':
            obj_func = cvx.norm1(x)
            constraints = [cvx.norm(cvx.matmul(A, x) - b) <= l]
        else:
            raise ValueError("Unknown formulation '{0}'.".format(formulation))
        if nonnegative:
            constraints.append(x >= 0)
        problem = cvx.Problem(cvx.Minimize(obj_func), constraints)
        self._formulation = formulation
        self._A = A
        self._b = b
        self._l = l
        self._x = x
        self._obj_func = obj_func
        self._constraints = constraints
        self._problem = problem 
Example #18
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_least_squares(self):
        set_seed(243)
        m, n = 100, 20

        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        x = cp.Variable(n)
        obj = cp.sum_squares(A@x - b) + cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(obj))
        prob_th = CvxpyLayer(prob, [A, b], [x])

        A_th = torch.randn(m, n).double().requires_grad_()
        b_th = torch.randn(m).double().requires_grad_()

        x = prob_th(A_th, b_th, solver_args={"eps": 1e-10})[0]

        def lstsq(A, b):
            return torch.solve(
                (A_th.t() @ b_th).unsqueeze(1),
                A_th.t() @ A_th + torch.eye(n).double())[0]
        x_lstsq = lstsq(A_th, b_th)

        grad_A_cvxpy, grad_b_cvxpy = grad(x.sum(), [A_th, b_th])
        grad_A_lstsq, grad_b_lstsq = grad(x_lstsq.sum(), [A_th, b_th])

        self.assertAlmostEqual(
            torch.norm(
                grad_A_cvxpy -
                grad_A_lstsq).item(),
            0.0)
        self.assertAlmostEqual(
            torch.norm(
                grad_b_cvxpy -
                grad_b_lstsq).item(),
            0.0) 
Example #19
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_logistic_regression(self):
        set_seed(243)
        N, n = 10, 2
        X_np = np.random.randn(N, n)
        a_true = np.random.randn(n, 1)
        y_np = np.round(sigmoid(X_np @ a_true + np.random.randn(N, 1) * 0.5))

        X_tch = torch.from_numpy(X_np)
        X_tch.requires_grad_(True)
        lam_tch = 0.1 * torch.ones(1, requires_grad=True, dtype=torch.double)

        a = cp.Variable((n, 1))
        X = cp.Parameter((N, n))
        lam = cp.Parameter(1, nonneg=True)
        y = y_np

        log_likelihood = cp.sum(
            cp.multiply(y, X @ a) -
            cp.log_sum_exp(cp.hstack([np.zeros((N, 1)), X @ a]).T, axis=0,
                           keepdims=True).T
        )
        prob = cp.Problem(
            cp.Minimize(-log_likelihood + lam * cp.sum_squares(a)))

        fit_logreg = CvxpyLayer(prob, [X, lam], [a])

        def layer_eps(*x):
            return fit_logreg(*x, solver_args={"eps": 1e-12})

        torch.autograd.gradcheck(layer_eps,
                                 (X_tch,
                                  lam_tch),
                                 eps=1e-4,
                                 atol=1e-3,
                                 rtol=1e-3) 
Example #20
Source File: robust_pca.py    From learning-circuits with Apache License 2.0
def sparse_lowrank_mse(name_size):
    name, size = name_size
    print(name, size)
    matrix = named_target_matrix(name, size)
    M = matrix
    lambda1 = cp.Parameter(nonneg=True)
    lambda2 = cp.Parameter(nonneg=True)
    L = cp.Variable((size, size))
    S = cp.Variable((size, size))
    prob = cp.Problem(cp.Minimize(cp.sum_squares(M - L - S) / size**2 + lambda1 / size * cp.norm(L, 'nuc') + lambda2 / size**2 * cp.norm(S, 1)))

    result = []
    for _ in range(ntrials):
        l1 = np.exp(np.random.uniform(np.log(1e-2), np.log(1e4)))
        l2 = np.exp(np.random.uniform(np.log(1e-2), np.log(1e4)))
        lambda1.value = l1
        lambda2.value = l2
        try:
            prob.solve()
            nnz = (np.abs(S.value) >= 1e-7).sum()
            singular_values = np.linalg.svd(L.value, compute_uv=False)
            rank = (singular_values >= 1e-7).sum()
            n_params = nnz + 2 * rank * size
            mse = np.sum((matrix - L.value - S.value)**2) / size**2
            result.append((n_params, mse))
        except:
            pass
    budget = 2 * size * np.log2(size)
    if model[name] == 'BPBP':
        budget *= 2
    eligible = [res for res in result if res[0] <= budget]
    if eligible:
        mse = min(m for (n_params, m) in eligible)
    else:
        mse = np.sum(matrix**2) / size**2
    print(name, size, 'done')
    return (name, size, mse) 
Example #21
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_sdp(self):
        set_seed(2)

        n = 3
        p = 3
        C = cp.Parameter((n, n))
        A = [cp.Parameter((n, n)) for _ in range(p)]
        b = [cp.Parameter((1, 1)) for _ in range(p)]

        C_tch = torch.randn(n, n, requires_grad=True).double()
        A_tch = [torch.randn(n, n, requires_grad=True).double()
                 for _ in range(p)]
        b_tch = [torch.randn(1, 1, requires_grad=True).double()
                 for _ in range(p)]

        X = cp.Variable((n, n), symmetric=True)
        constraints = [X >> 0]
        constraints += [
            cp.trace(A[i]@X) == b[i] for i in range(p)
        ]
        prob = cp.Problem(cp.Minimize(cp.trace(C@X) + cp.sum_squares(X)),
                          constraints)
        layer = CvxpyLayer(prob, [C] + A + b, [X])
        torch.autograd.gradcheck(lambda *x: layer(*x,
                                                  solver_args={'eps': 1e-12}),
                                 [C_tch] + A_tch + b_tch,
                                 eps=1e-6,
                                 atol=1e-3,
                                 rtol=1e-3) 
Example #22
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_not_enough_parameters(self):
        x = cp.Variable(1)
        lam = cp.Parameter(1, nonneg=True)
        lam2 = cp.Parameter(1, nonneg=True)
        objective = lam * cp.norm(x, 1) + lam2 * cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(objective))
        with self.assertRaises(ValueError):
            layer = CvxpyLayer(prob, [lam], [x])  # noqa: F841 
Example #23
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_incorrect_parameter_shape(self):
        set_seed(243)
        m, n = 100, 20

        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        x = cp.Variable(n)
        obj = cp.sum_squares(A@x - b) + cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(obj))
        prob_th = CvxpyLayer(prob, [A, b], [x])

        A_th = torch.randn(32, m, n).double().requires_grad_()
        b_th = torch.randn(20, m).double().requires_grad_()

        with self.assertRaises(ValueError):
            prob_th(A_th, b_th)

        A_th = torch.randn(32, m, n).double().requires_grad_()
        b_th = torch.randn(32, 2 * m).double().requires_grad_()

        with self.assertRaises(ValueError):
            prob_th(A_th, b_th)

        A_th = torch.randn(m, n).double().requires_grad_()
        b_th = torch.randn(2 * m).double().requires_grad_()

        with self.assertRaises(ValueError):
            prob_th(A_th, b_th)

        A_th = torch.randn(32, m, n).double().requires_grad_()
        b_th = torch.randn(32, 32, m).double().requires_grad_()

        with self.assertRaises(ValueError):
            prob_th(A_th, b_th) 
Example #24
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_equality(self):
        set_seed(243)
        n = 10
        A = np.eye(n)
        x = cp.Variable(n)
        b = cp.Parameter(n)
        prob = cp.Problem(cp.Minimize(cp.sum_squares(x)), [A@x == b])
        layer = CvxpyLayer(prob, parameters=[b], variables=[x])
        b_tch = torch.randn(n, requires_grad=True)
        torch.autograd.gradcheck(lambda b: layer(
            b, solver_args={"eps": 1e-10,
                            "acceleration_lookback": 0})[0].sum(),
            (b_tch,)) 
Example #25
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_not_enough_parameters_at_call_time(self):
        x = cp.Variable(1)
        lam = cp.Parameter(1, nonneg=True)
        lam2 = cp.Parameter(1, nonneg=True)
        objective = lam * cp.norm(x, 1) + lam2 * cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(objective))
        layer = CvxpyLayer(prob, [lam, lam2], [x])  # noqa: F841
        with self.assertRaisesRegex(
                ValueError,
                'A tensor must be provided for each CVXPY parameter.*'):
            layer(lam) 
Example #26
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_too_many_variables(self):
        x = cp.Variable(1)
        y = cp.Variable(1)
        lam = cp.Parameter(1, nonneg=True)
        objective = lam * cp.norm(x, 1)
        prob = cp.Problem(cp.Minimize(objective))
        with self.assertRaises(ValueError):
            layer = CvxpyLayer(prob, [lam], [x, y])  # noqa: F841 
Example #27
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_infeasible(self):
        x = cp.Variable(1)
        param = cp.Parameter(1)
        prob = cp.Problem(cp.Minimize(param), [x >= 1, x <= -1])
        layer = CvxpyLayer(prob, [param], [x])
        param_tch = torch.ones(1)
        with self.assertRaises(diffcp.SolverError):
            layer(param_tch) 
Example #28
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_broadcasting(self):
        set_seed(243)
        n_batch, m, n = 2, 100, 20

        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        x = cp.Variable(n)
        obj = cp.sum_squares(A@x - b) + cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(obj))
        prob_th = CvxpyLayer(prob, [A, b], [x])

        A_th = torch.randn(m, n).double().requires_grad_()
        b_th = torch.randn(m).double().unsqueeze(0).repeat(n_batch, 1) \
            .requires_grad_()
        b_th_0 = b_th[0]

        x = prob_th(A_th, b_th, solver_args={"eps": 1e-10})[0]

        def lstsq(A, b):
            return torch.solve(
                (A.t() @ b).unsqueeze(1),
                A.t() @ A + torch.eye(n).double())[0]
        x_lstsq = lstsq(A_th, b_th_0)

        grad_A_cvxpy, grad_b_cvxpy = grad(x.sum(), [A_th, b_th])
        grad_A_lstsq, grad_b_lstsq = grad(x_lstsq.sum(), [A_th, b_th_0])

        self.assertAlmostEqual(
            torch.norm(
                grad_A_cvxpy / n_batch -
                grad_A_lstsq).item(),
            0.0)
        self.assertAlmostEqual(
            torch.norm(
                grad_b_cvxpy[0] -
                grad_b_lstsq).item(),
            0.0) 
Example #29
Source File: cvxpy_examples.py    From cvxpylayers with Apache License 2.0
def sigmoid():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('sigmoid')
    npr.seed(0)

    n = 4
    _x = cp.Parameter((n, 1))
    _y = cp.Variable(n)
    obj = cp.Minimize(-_x.T * _y - cp.sum(cp.entr(_y) + cp.entr(1. - _y)))
    prob = cp.Problem(obj)

    _x.value = npr.randn(n, 1)

    prob.solve(solver=cp.SCS)
    print(_y.value) 
Example #30
Source File: noncvx_variable.py    From ncvx with GNU General Public License v3.0
def __init__(self, rows, cols, *args, **kwargs):
        super(NonCvxVariable, self).__init__((rows, cols,), *args, **kwargs)
        self.noncvx = True
        self.z = cvxpy.Parameter(self.shape)
        self.u = cvxpy.Parameter(self.shape)
        self.u.value = np.zeros(self.shape)