Python cvxpy.Minimize() Examples

The following are 30 code examples of cvxpy.Minimize(), drawn from open-source projects. The source file, project, and license for each example are listed above its code. You may also want to check out the other available functions and classes of the cvxpy module.
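Before diving into the project snippets, here is a minimal, self-contained sketch of the usual pattern: cvxpy.Minimize() wraps a convex expression to form an objective, which is then passed to cvxpy.Problem() together with a list of constraints. The data and names below are purely illustrative and are not taken from any of the projects that follow.

import numpy as np
import cvxpy as cp

# Nonnegative least squares: minimize ||A x - b||^2 subject to x >= 0
np.random.seed(0)
A = np.random.randn(10, 3)
b = np.random.randn(10)

x = cp.Variable(3)
objective = cp.Minimize(cp.sum_squares(A @ x - b))  # Minimize wraps the convex objective
problem = cp.Problem(objective, [x >= 0])
problem.solve()
print(problem.status, problem.value)
print(x.value)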
Example #1
Source File: cvxpy_solver.py    From checkmate with Apache License 2.0
def __init__(self, g: DFGraph, budget: int):
        self.budget = budget
        self.g = g
        self.T = self.g.size

        self.R = cp.Variable((self.T, self.T), name="R")
        self.S = cp.Variable((self.T, self.T), name="S")
        self.Free_E = cp.Variable((self.T, len(self.g.edge_list)), name="FREE_E")
        self.U = cp.Variable((self.T, self.T), name="U")

        cpu_cost_vec = np.asarray([self.g.cost_cpu[i] for i in range(self.T)])[np.newaxis, :].T
        assert cpu_cost_vec.shape == (self.T, 1)
        objective = cp.Minimize(cp.sum(self.R @ cpu_cost_vec))
        constraints = self.make_constraints(budget)
        self.problem = cp.Problem(objective, constraints)
        self.num_vars = self.problem.size_metrics.num_scalar_variables
        self.num_constraints = self.problem.size_metrics.num_scalar_eq_constr + self.problem.size_metrics.num_scalar_leq_constr 
Example #2
Source File: diffdrive_2d.py    From SCvx with MIT License
def get_objective(self, X_v, U_v, X_last_p, U_last_p):
        """
        Get model specific objective to be minimized.

        :param X_v: cvx variable for current states
        :param U_v: cvx variable for current inputs
        :param X_last_p: cvx parameter for last states
        :param U_last_p: cvx parameter for last inputs
        :return: A cvx objective function.
        """

        slack = 0
        for j in range(len(self.obstacles)):
            slack += cvx.sum(self.s_prime[j])

        objective = cvx.Minimize(1e5 * slack)
        # objective += cvx.Minimize(cvx.sum(cvx.square(U_v)))
        return objective 
Example #3
Source File: measure_utils.py    From ambient-gan with MIT License
def get_inpaint_func_tv():
    def inpaint_func(image, mask):
        """Total variation inpainting"""
        inpainted = np.zeros_like(image)
        for c in range(image.shape[2]):
            image_c = image[:, :, c]
            mask_c = mask[:, :, c]
            if np.min(mask_c) > 0:
                # if mask is all ones, no need to inpaint
                inpainted[:, :, c] = image_c
            else:
                h, w = image_c.shape
                inpainted_c_var = cvxpy.Variable(h, w)
                obj = cvxpy.Minimize(cvxpy.tv(inpainted_c_var))
                constraints = [cvxpy.mul_elemwise(mask_c, inpainted_c_var) == cvxpy.mul_elemwise(mask_c, image_c)]
                prob = cvxpy.Problem(obj, constraints)
                # prob.solve(solver=cvxpy.SCS, max_iters=100, eps=1e-2)  # scs solver
                prob.solve()  # default solver
                inpainted[:, :, c] = inpainted_c_var.value
        return inpainted
    return inpaint_func 
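Note that this snippet targets the legacy CVXPY 0.x API: cvxpy.Variable(h, w) and cvxpy.mul_elemwise were removed in CVXPY 1.0. Below is a standalone sketch of the same per-channel subproblem under CVXPY 1.x, using made-up toy data rather than ambient-gan's pipeline.

import numpy as np
import cvxpy

# CVXPY 1.x version of the per-channel TV inpainting subproblem (illustrative data)
h, w = 8, 8
rng = np.random.RandomState(0)
image_c = rng.rand(h, w)                              # one image channel
mask_c = (rng.rand(h, w) > 0.5).astype(float)         # 1.0 = observed pixel, 0.0 = missing

inpainted_c_var = cvxpy.Variable((h, w))              # shape is passed as a tuple
obj = cvxpy.Minimize(cvxpy.tv(inpainted_c_var))
constraints = [cvxpy.multiply(mask_c, inpainted_c_var) ==
               cvxpy.multiply(mask_c, image_c)]       # multiply replaces mul_elemwise
prob = cvxpy.Problem(obj, constraints)
prob.solve()
print(inpainted_c_var.value.shape)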
Example #4
Source File: cvxpy_examples.py    From cvxpylayers with Apache License 2.0
def ball_con():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('ball con')
    npr.seed(0)

    n = 2

    A = cp.Parameter((n, n))
    z = cp.Parameter(n)
    p = cp.Parameter(n)
    x = cp.Variable(n)
    t = cp.Variable(n)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - p))
    # TODO automate introduction of variables.
    cons = [0.5 * cp.sum_squares(A * t) <= 1, t == (x - z)]
    prob = cp.Problem(obj, cons)

    L = npr.randn(n, n)
    A.value = L.T
    z.value = npr.randn(n)
    p.value = npr.randn(n)

    prob.solve(solver=cp.SCS)
    print(x.value) 
Example #5
Source File: cvxpy_examples.py    From cvxpylayers with Apache License 2.0
def relu():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('relu')
    npr.seed(0)

    n = 4
    _x = cp.Parameter(n)
    _y = cp.Variable(n)
    obj = cp.Minimize(cp.sum_squares(_y - _x))
    cons = [_y >= 0]
    prob = cp.Problem(obj, cons)

    _x.value = npr.randn(n)

    prob.solve(solver=cp.SCS)
    print(_y.value) 
Example #6
Source File: cvxpy_examples.py    From cvxpylayers with Apache License 2.0
def running_example():
    print("running example")
    m = 20
    n = 10
    x = cp.Variable((n, 1))
    F = cp.Parameter((m, n))
    g = cp.Parameter((m, 1))
    lambd = cp.Parameter((1, 1), nonneg=True)
    objective_fn = cp.norm(F @ x - g) + lambd * cp.norm(x)
    constraints = [x >= 0]
    problem = cp.Problem(cp.Minimize(objective_fn), constraints)
    assert problem.is_dcp()
    assert problem.is_dpp()
    print("is_dpp: ", problem.is_dpp())

    F_t = torch.randn(m, n, requires_grad=True)
    g_t = torch.randn(m, 1, requires_grad=True)
    lambd_t = torch.rand(1, 1, requires_grad=True)
    layer = CvxpyLayer(problem, parameters=[F, g, lambd], variables=[x])
    x_star, = layer(F_t, g_t, lambd_t)
    x_star.sum().backward()
    print("F_t grad: ", F_t.grad)
    print("g_t grad: ", g_t.grad) 
Example #7
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_lml(self):
        tf.random.set_seed(0)
        k = 2
        x = cp.Parameter(4)
        y = cp.Variable(4)
        obj = -x * y - cp.sum(cp.entr(y)) - cp.sum(cp.entr(1. - y))
        cons = [cp.sum(y) == k]
        problem = cp.Problem(cp.Minimize(obj), cons)
        lml = CvxpyLayer(problem, [x], [y])
        x_tf = tf.Variable([1., -1., -1., -1.], dtype=tf.float64)

        with tf.GradientTape() as tape:
            y_opt = lml(x_tf, solver_args={'eps': 1e-10})[0]
            loss = -tf.math.log(y_opt[1])

        def f():
            problem.solve(solver=cp.SCS, eps=1e-10)
            return -np.log(y.value[1])

        grad = tape.gradient(loss, [x_tf])
        numgrad = numerical_grad(f, [x], [x_tf])
        np.testing.assert_almost_equal(grad, numgrad, decimal=3) 
Example #8
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_example(self):
        n, m = 2, 3
        x = cp.Variable(n)
        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        constraints = [x >= 0]
        objective = cp.Minimize(0.5 * cp.pnorm(A @ x - b, p=1))
        problem = cp.Problem(objective, constraints)
        assert problem.is_dpp()

        cvxpylayer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
        A_tch = torch.randn(m, n, requires_grad=True)
        b_tch = torch.randn(m, requires_grad=True)

        # solve the problem
        solution, = cvxpylayer(A_tch, b_tch)

        # compute the gradient of the sum of the solution with respect to A, b
        solution.sum().backward() 
Example #9
Source File: linear_model.py    From scikit-lego with MIT License
def fit(self, X, y):
        """
        Fit the model using X, y as training data.

        :param X: array-like, shape=(n_samples, n_columns) training data.
        :param y: array-like, shape=(n_samples, ) training data.
        :return: Returns an instance of self.
        """
        X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)

        # Construct the problem.
        betas = cp.Variable(X.shape[1])
        objective = cp.Minimize(cp.sum_squares(X * betas - y))
        constraints = [sum(betas) == 1]
        if self.non_negative:
            constraints.append(0 <= betas)

        # Solve the problem.
        prob = cp.Problem(objective, constraints)
        prob.solve()
        self.coefs_ = betas.value
        return self 
Example #10
Source File: intervalencoder.py    From scikit-lego with MIT License
def _mk_monotonic_average(xs, ys, intervals, method="increasing", **kwargs):
    """
    Creates smoothed averages of `ys` at the intervals given by `intervals`.
    :param xs: all the datapoints of a feature (represents the x-axis)
    :param ys: all the datapoints that we'd like to predict (represents the y-axis)
    :param intervals: the intervals at which we'd like to get a good average value
    :param method: the method that is used for smoothing, can be either `increasing` or `decreasing`.
    :return:
        An array as long as `intervals` that represents the average `y`-values at those intervals,
        keeping the constraint in mind.
    """
    x_internal = np.array([xs >= i for i in intervals]).T.astype(float)  # np.float was removed from NumPy; the builtin float works here
    betas = cp.Variable(x_internal.shape[1])
    objective = cp.Minimize(cp.sum_squares(x_internal * betas - ys))
    if method == "increasing":
        constraints = [betas[i + 1] >= 0 for i in range(betas.shape[0] - 1)]
    elif method == "decreasing":
        constraints = [betas[i + 1] <= 0 for i in range(betas.shape[0] - 1)]
    else:
        raise ValueError(
            f"method must be either `increasing` or `decreasing`, got: {method}"
        )
    prob = cp.Problem(objective, constraints)
    prob.solve()
    return betas.value.cumsum() 
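A hypothetical toy call to the helper above, assuming numpy is imported as np and cvxpy as cp (as the function itself requires); this is illustrative only and not part of scikit-lego's tests.

import numpy as np

rng = np.random.RandomState(0)
xs = np.linspace(0, 10, 200)
ys = xs + rng.normal(scale=0.5, size=xs.shape)        # noisy, roughly increasing signal
intervals = np.linspace(0, 10, 15)

averages = _mk_monotonic_average(xs, ys, intervals, method="increasing")
# By construction the result is non-decreasing: every beta after the first is
# constrained to be >= 0 and the return value is their cumulative sum.
print(averages)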
Example #11
Source File: svm.py    From osqp_benchmarks with Apache License 2.0
def _generate_cvxpy_problem(self):
        '''
        Generate QP problem
        '''

        n = self.n
        m = self.m
        x = cvxpy.Variable(n)
        t = cvxpy.Variable(m)

        objective = cvxpy.Minimize(.5 * cvxpy.quad_form(x, spa.eye(n))
                                   + .5 * self.gamma * np.ones(m) * t)
        constraints = [t >= spa.diags(self.b_svm).dot(self.A_svm) * x + 1,
                       t >= 0]

        problem = cvxpy.Problem(objective, constraints)

        return problem, (x, t) 
Example #12
Source File: test_analysis.py    From controlpy with GNU General Public License v3.0
def sys_norm_h2_LMI(Acl, Bdisturbance, C):
    # Doesn't work very well; if the problem is poorly scaled, the Riccati approach works better.
    #Dullerud p 210
    n = Acl.shape[0]
    X = cvxpy.Semidef(n)
    Y = cvxpy.Semidef(n)

    constraints = [ Acl*X + X*Acl.T + Bdisturbance*Bdisturbance.T == -Y,
                  ]

    obj = cvxpy.Minimize(cvxpy.trace(Y))

    prob = cvxpy.Problem(obj, constraints)
    
    prob.solve()
    eps = 1e-16
    if np.max(np.linalg.eigvals((-Acl*X - X*Acl.T - Bdisturbance*Bdisturbance.T).value)) > -eps:
        print('Acl*X + X*Acl.T +Bdisturbance*Bdisturbance.T is not neg def.')
        return np.Inf

    if np.min(np.linalg.eigvals(X.value)) < eps:
        print('X is not pos def.')
        return np.Inf

    return np.sqrt(np.trace(C*X.value*C.T)) 
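This example also relies on the pre-1.0 API: cvxpy.Semidef(n) no longer exists, and matrix products written with `*` are now spelled `@`. A rough standalone adaptation of the LMI setup in CVXPY 1.x, with illustrative system matrices rather than controlpy's test systems, might look like this.

import numpy as np
import cvxpy

# CVXPY 1.x sketch of the H2-norm LMI (Dullerud p. 210), with made-up stable dynamics
np.random.seed(0)
n = 3
Acl = -np.eye(n) + 0.1 * np.random.randn(n, n)        # stable closed-loop matrix
Bdisturbance = np.random.randn(n, 1)
C = np.eye(n)

X = cvxpy.Variable((n, n), PSD=True)                  # replaces cvxpy.Semidef(n)
Y = cvxpy.Variable((n, n), PSD=True)
constraints = [Acl @ X + X @ Acl.T + Bdisturbance @ Bdisturbance.T == -Y]
obj = cvxpy.Minimize(cvxpy.trace(Y))
cvxpy.Problem(obj, constraints).solve()
print(np.sqrt(np.trace(C @ X.value @ C.T)))           # approximate H2 norm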
Example #13
Source File: tests.py    From diffcp with Apache License 2.0
def test_proj_psd(self):
        import cvxpy as cp
        np.random.seed(0)
        n = 10
        for _ in range(15):
            x = np.random.randn(n, n)
            x = x + x.T
            x_vec = cone_lib.vec_symm(x)
            z = cp.Variable((n, n), PSD=True)
            objective = cp.Minimize(cp.sum_squares(z - x))
            prob = cp.Problem(objective)
            prob.solve(solver="SCS", eps=1e-10)
            p = cone_lib.unvec_symm(
                cone_lib._proj(x_vec, cone_lib.PSD, dual=False), n)
            np.testing.assert_allclose(p, z.value, atol=1e-5, rtol=1e-5)
            np.testing.assert_allclose(p, cone_lib.unvec_symm(
                cone_lib._proj(x_vec, cone_lib.PSD, dual=True), n)) 
Example #14
Source File: tests.py    From diffcp with Apache License 2.0
def test_proj_soc(self):
        import cvxpy as cp
        np.random.seed(0)
        n = 100
        for _ in range(15):
            x = np.random.randn(n)
            z = cp.Variable(n)
            objective = cp.Minimize(cp.sum_squares(z - x))
            constraints = [cp.norm(z[1:], 2) <= z[0]]
            prob = cp.Problem(objective, constraints)
            prob.solve(solver="SCS", eps=1e-10)
            p = cone_lib._proj(x, cone_lib.SOC, dual=False)
            np.testing.assert_allclose(
                p, np.array(z.value))
            np.testing.assert_allclose(
                p, cone_lib._proj(x, cone_lib.SOC, dual=True)) 
Example #15
Source File: base_fitter.py    From qiskit-ignis with Apache License 2.0
def _check_for_sdp_solver(cls):
        """Check if CVXPY solver is available"""
        if cls._HAS_SDP_SOLVER is None:
            if _HAS_CVX:
                # pylint:disable=import-error
                import cvxpy
                solvers = cvxpy.installed_solvers()
                if 'CVXOPT' in solvers:
                    cls._HAS_SDP_SOLVER = True
                    return
                if 'SCS' in solvers:
                    # Try example problem to see if built with BLAS
                    # SCS solver cannot solve larger than 2x2 matrix
                    # problems without BLAS
                    try:
                        var = cvxpy.Variable((4, 4), PSD=True)
                        obj = cvxpy.Minimize(cvxpy.norm(var))
                        cvxpy.Problem(obj).solve(solver='SCS')
                        cls._HAS_SDP_SOLVER = True
                        return
                    except cvxpy.error.SolverError:
                        pass
            cls._HAS_SDP_SOLVER = False 
Example #16
Source File: suitesparse_huber.py    From osqp_benchmarks with Apache License 2.0
def _generate_cvxpy_problem(self):
        '''
        Generate QP problem
        '''

        # Construct the problem
        #       minimize    1/2 z.T * z + np.ones(m).T * (r + s)
        #       subject to  Ax - b - z = r - s
        #                   r >= 0
        #                   s >= 0
        # The problem reformulation follows from Eq. (24) of the following paper:
        # https://doi.org/10.1109/34.877518
        x = cvxpy.Variable(self.n)
        z = cvxpy.Variable(self.m)
        r = cvxpy.Variable(self.m)
        s = cvxpy.Variable(self.m)

        objective = cvxpy.Minimize(.5 * cvxpy.sum_squares(z) + cvxpy.sum(r + s))
        constraints = [self.Ad@x - self.bd - z == r - s,
                       r >= 0, s >= 0]
        problem = cvxpy.Problem(objective, constraints)

        return problem, (x, z, r, s) 
Example #17
Source File: portfolio.py    From osqp_benchmarks with Apache License 2.0
def _generate_cvxpy_problem(self):
        '''
        Generate QP problem
        '''

        x = cvxpy.Variable(self.n)
        y = cvxpy.Variable(self.k)

        # Create parameters m
        mu = cvxpy.Parameter(self.n)
        mu.value = self.mu

        objective = cvxpy.Minimize(cvxpy.quad_form(x, self.D) +
                                   cvxpy.quad_form(y, spa.eye(self.k)) +
                                   - 1 / self.gamma * (mu.T * x))
        constraints = [np.ones(self.n) * x == 1,
                       self.F.T * x == y,
                       0 <= x, x <= 1]
        problem = cvxpy.Problem(objective, constraints)

        return problem, mu 
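As in the other osqp_benchmarks problems listed here, `*` denotes matrix-vector multiplication in the CVXPY 0.x style. A standalone sketch of the same portfolio QP in CVXPY 1.x notation, using made-up problem data instead of the benchmark's own, could read as follows.

import numpy as np
import cvxpy

# CVXPY 1.x style portfolio QP sketch with illustrative data
np.random.seed(0)
n, k, gamma = 20, 5, 1.0
F = np.random.randn(n, k)                 # factor loading matrix
D = np.diag(np.random.rand(n))            # diagonal idiosyncratic risk
mu = cvxpy.Parameter(n)
mu.value = np.random.randn(n)

x = cvxpy.Variable(n)
y = cvxpy.Variable(k)
objective = cvxpy.Minimize(cvxpy.quad_form(x, D)
                           + cvxpy.quad_form(y, np.eye(k))
                           - 1 / gamma * (mu @ x))
constraints = [cvxpy.sum(x) == 1, F.T @ x == y, 0 <= x, x <= 1]
cvxpy.Problem(objective, constraints).solve()
print(x.value.round(3))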
Example #18
Source File: lasso.py    From osqp_benchmarks with Apache License 2.0
def _generate_cvxpy_problem(self):
        '''
        Generate QP problem
        '''

        x = cvxpy.Variable(self.n)
        y = cvxpy.Variable(self.m)
        t = cvxpy.Variable(self.n)

        # Create parameter and assign value
        lambda_cvxpy = cvxpy.Parameter()
        lambda_cvxpy.value = self.lambda_param

        objective = cvxpy.Minimize(cvxpy.quad_form(y, spa.eye(self.m))
                                   + self.lambda_param * (np.ones(self.n) * t))
        constraints = [y == self.Ad * x - self.bd,
                       -t <= x, x <= t]
        problem = cvxpy.Problem(objective, constraints)

        return problem, (x, y, t), lambda_cvxpy 
Example #19
Source File: suitesparse_lasso.py    From osqp_benchmarks with Apache License 2.0
def _generate_cvxpy_problem(self):
        '''
        Generate QP problem
        '''

        x = cvxpy.Variable(self.n)
        y = cvxpy.Variable(self.m)
        t = cvxpy.Variable(self.n)

        # Create parameter and assign value
        lambda_cvxpy = cvxpy.Parameter()
        lambda_cvxpy.value = self.lambda_param

        objective = cvxpy.Minimize(cvxpy.quad_form(y, spa.eye(self.m))
                                   + self.lambda_param * (np.ones(self.n) * t))
        constraints = [y == self.Ad * x - self.bd,
                       -t <= x, x <= t]
        problem = cvxpy.Problem(objective, constraints)

        return problem, (x, y, t), lambda_cvxpy 
Example #20
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_incorrect_parameter_shape(self):
        set_seed(243)
        m, n = 100, 20

        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        x = cp.Variable(n)
        obj = cp.sum_squares(A@x - b) + cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(obj))
        prob_th = CvxpyLayer(prob, [A, b], [x])

        A_th = torch.randn(32, m, n).double().requires_grad_()
        b_th = torch.randn(20, m).double().requires_grad_()

        with self.assertRaises(ValueError):
            prob_th(A_th, b_th)

        A_th = torch.randn(32, m, n).double().requires_grad_()
        b_th = torch.randn(32, 2 * m).double().requires_grad_()

        with self.assertRaises(ValueError):
            prob_th(A_th, b_th)

        A_th = torch.randn(m, n).double().requires_grad_()
        b_th = torch.randn(2 * m).double().requires_grad_()

        with self.assertRaises(ValueError):
            prob_th(A_th, b_th)

        A_th = torch.randn(32, m, n).double().requires_grad_()
        b_th = torch.randn(32, 32, m).double().requires_grad_()

        with self.assertRaises(ValueError):
            prob_th(A_th, b_th) 
Example #21
Source File: calibration_tools.py    From pre-training with Apache License 2.0
def tune_temp(logits, labels, binary_search=True, lower=0.2, upper=5.0, eps=0.0001):
    logits = np.array(logits)

    if binary_search:
        import torch
        import torch.nn.functional as F

        logits = torch.FloatTensor(logits)
        labels = torch.LongTensor(labels)
        t_guess = torch.FloatTensor([0.5*(lower + upper)]).requires_grad_()

        while upper - lower > eps:
            if torch.autograd.grad(F.cross_entropy(logits / t_guess, labels), t_guess)[0] > 0:
                upper = 0.5 * (lower + upper)
            else:
                lower = 0.5 * (lower + upper)
            t_guess = t_guess * 0 + 0.5 * (lower + upper)

        t = min([lower, 0.5 * (lower + upper), upper], key=lambda x: float(F.cross_entropy(logits / x, labels)))
    else:
        import cvxpy as cx

        set_size = np.array(logits).shape[0]

        t = cx.Variable()

        expr = sum((cx.Minimize(cx.log_sum_exp(logits[i, :] * t) - logits[i, labels[i]] * t)
                    for i in range(set_size)))
        p = cx.Problem(expr, [lower <= t, t <= upper])

        p.solve()   # p.solve(solver=cx.SCS)
        t = 1 / t.value

    return t 
Example #22
Source File: rocket_landing_3d.py    From SCvx with MIT License
def get_objective(self, X_v, U_v, X_last_p, U_last_p):
        """
        Get model specific objective to be minimized.

        :param X_v: cvx variable for current states
        :param U_v: cvx variable for current inputs
        :param X_last_p: cvx parameter for last states
        :param U_last_p: cvx parameter for last inputs
        :return: A cvx objective function.
        """
        return cvx.Minimize(1e5 * cvx.sum(self.s_prime)) 
Example #23
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_basic_gp(self):
        set_seed(243)

        x = cp.Variable(pos=True)
        y = cp.Variable(pos=True)
        z = cp.Variable(pos=True)

        a = cp.Parameter(pos=True, value=2.0)
        b = cp.Parameter(pos=True, value=1.0)
        c = cp.Parameter(value=0.5)

        objective_fn = 1/(x*y*z)
        constraints = [a*(x*y + x*z + y*z) <= b, x >= y**c]
        problem = cp.Problem(cp.Minimize(objective_fn), constraints)
        problem.solve(cp.SCS, gp=True, eps=1e-12)

        layer = CvxpyLayer(
            problem, parameters=[a, b, c], variables=[x, y, z], gp=True)
        a_tch = torch.tensor(2.0, requires_grad=True)
        b_tch = torch.tensor(1.0, requires_grad=True)
        c_tch = torch.tensor(0.5, requires_grad=True)
        with torch.no_grad():
            x_tch, y_tch, z_tch = layer(a_tch, b_tch, c_tch)

        self.assertAlmostEqual(x.value, x_tch.detach().numpy(), places=5)
        self.assertAlmostEqual(y.value, y_tch.detach().numpy(), places=5)
        self.assertAlmostEqual(z.value, z_tch.detach().numpy(), places=5)

        torch.autograd.gradcheck(lambda a, b, c: layer(
            a, b, c, solver_args={
                "eps": 1e-12, "acceleration_lookback": 0})[0].sum(),
                (a_tch, b_tch, c_tch), atol=1e-3, rtol=1e-3) 
Example #24
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_equality(self):
        set_seed(243)
        n = 10
        A = np.eye(n)
        x = cp.Variable(n)
        b = cp.Parameter(n)
        prob = cp.Problem(cp.Minimize(cp.sum_squares(x)), [A@x == b])
        layer = CvxpyLayer(prob, parameters=[b], variables=[x])
        b_tch = torch.randn(n, requires_grad=True)
        torch.autograd.gradcheck(lambda b: layer(
            b, solver_args={"eps": 1e-10,
                            "acceleration_lookback": 0})[0].sum(),
            (b_tch,)) 
Example #25
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_broadcasting(self):
        set_seed(243)
        n_batch, m, n = 2, 100, 20

        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        x = cp.Variable(n)
        obj = cp.sum_squares(A@x - b) + cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(obj))
        prob_th = CvxpyLayer(prob, [A, b], [x])

        A_th = torch.randn(m, n).double().requires_grad_()
        b_th = torch.randn(m).double().unsqueeze(0).repeat(n_batch, 1) \
            .requires_grad_()
        b_th_0 = b_th[0]

        x = prob_th(A_th, b_th, solver_args={"eps": 1e-10})[0]

        def lstsq(A, b):
            return torch.solve((A.t() @ b).unsqueeze(1),
                               A.t() @ A + torch.eye(n).double())[0]
        x_lstsq = lstsq(A_th, b_th_0)

        grad_A_cvxpy, grad_b_cvxpy = grad(x.sum(), [A_th, b_th])
        grad_A_lstsq, grad_b_lstsq = grad(x_lstsq.sum(), [A_th, b_th_0])

        self.assertAlmostEqual(
            torch.norm(
                grad_A_cvxpy / n_batch -
                grad_A_lstsq).item(),
            0.0)
        self.assertAlmostEqual(
            torch.norm(
                grad_b_cvxpy[0] -
                grad_b_lstsq).item(),
            0.0) 
Example #26
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_too_many_variables(self):
        x = cp.Variable(1)
        y = cp.Variable(1)
        lam = cp.Parameter(1, nonneg=True)
        objective = lam * cp.norm(x, 1)
        prob = cp.Problem(cp.Minimize(objective))
        with self.assertRaises(ValueError):
            layer = CvxpyLayer(prob, [lam], [x, y])  # noqa: F841 
Example #27
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_sdp(self):
        set_seed(2)

        n = 3
        p = 3
        C = cp.Parameter((n, n))
        A = [cp.Parameter((n, n)) for _ in range(p)]
        b = [cp.Parameter((1, 1)) for _ in range(p)]

        C_tch = torch.randn(n, n, requires_grad=True).double()
        A_tch = [torch.randn(n, n, requires_grad=True).double()
                 for _ in range(p)]
        b_tch = [torch.randn(1, 1, requires_grad=True).double()
                 for _ in range(p)]

        X = cp.Variable((n, n), symmetric=True)
        constraints = [X >> 0]
        constraints += [
            cp.trace(A[i]@X) == b[i] for i in range(p)
        ]
        prob = cp.Problem(cp.Minimize(cp.trace(C@X) + cp.sum_squares(X)),
                          constraints)
        layer = CvxpyLayer(prob, [C] + A + b, [X])
        torch.autograd.gradcheck(lambda *x: layer(*x,
                                                  solver_args={'eps': 1e-12}),
                                 [C_tch] + A_tch + b_tch,
                                 eps=1e-6,
                                 atol=1e-3,
                                 rtol=1e-3) 
Example #28
Source File: test_cvxpylayer.py    From cvxpylayers with Apache License 2.0
def test_not_enough_parameters(self):
        x = cp.Variable(1)
        lam = cp.Parameter(1, nonneg=True)
        lam2 = cp.Parameter(1, nonneg=True)
        objective = lam * cp.norm(x, 1) + lam2 * cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(objective))
        with self.assertRaises(ValueError):
            layer = CvxpyLayer(prob, [lam], [x])  # noqa: F841 
Example #29
Source File: qcqp.py    From qcqp with MIT License
def improve_dccp(x0, prob, *args, **kwargs):
    try:
        import dccp
    except ImportError:
        raise Exception("DCCP package is not installed.")

    use_eigen_split = kwargs.get('use_eigen_split', False)
    tau = kwargs.get('tau', 0.005)

    x = cvx.Variable(prob.n)
    x.value = x0
    # dummy objective
    T = cvx.Variable()
    T.value = prob.f0.eval(x0)

    obj = cvx.Minimize(T)
    f0p, f0m = prob.f0.dc_split(use_eigen_split)
    cons = [f0p.eval_cvx(x) <= f0m.eval_cvx(x) + T]

    for f in prob.fs:
        fp, fm = f.dc_split(use_eigen_split)
        if f.relop == '==':
            cons.append(fp.eval_cvx(x) == fm.eval_cvx(x))
        else:
            cons.append(fp.eval_cvx(x) <= fm.eval_cvx(x))

    dccp_prob = cvx.Problem(obj, cons)
    bestx = np.copy(x0)
    try:
        result = dccp_prob.solve(method='dccp', tau=tau)
        if dccp_prob.status == "Converged":
            bestx = prob.better(bestx, np.asarray(x.value).flatten())
    except cvx.error.SolverError:
        pass
    return bestx 
Example #30
Source File: nuclear_norm_minimization.py    From fancyimpute with Apache License 2.0
def _create_objective(self, m, n):
        """
        Parameters
        ----------
        m, n : int
            Dimensions of the solution matrix.
        Returns the objective function and a variable representing the
        solution to the convex optimization problem.
        """
        # S is the completed matrix
        shape = (m, n)
        S = cvxpy.Variable(shape, name="S")
        norm = cvxpy.norm(S, "nuc")
        objective = cvxpy.Minimize(norm)
        return S, objective
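The objective returned above is only half of the completion problem; fancyimpute pairs it elsewhere with constraints that pin down the observed entries. A hypothetical, self-contained sketch of that overall pattern (illustrative data, not fancyimpute's actual solve path):

import numpy as np
import cvxpy

# Nuclear-norm matrix completion sketch with made-up low-rank data
np.random.seed(0)
m, n, rank = 20, 15, 2
X_true = np.random.randn(m, rank) @ np.random.randn(rank, n)
mask = (np.random.rand(m, n) < 0.6).astype(float)     # 1.0 where an entry is observed

S = cvxpy.Variable((m, n), name="S")
objective = cvxpy.Minimize(cvxpy.norm(S, "nuc"))
constraints = [cvxpy.multiply(mask, S) == cvxpy.multiply(mask, X_true)]
cvxpy.Problem(objective, constraints).solve()
print(np.linalg.norm(S.value - X_true) / np.linalg.norm(X_true))  # relative recovery error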