Python cvxpy.SCS Examples
The following are 30 code examples of cvxpy.SCS(), collected from open-source projects. The original source file and project for each example are noted above it.
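Before the repository examples, here is a minimal, self-contained sketch of the pattern they all share: build a CVXPY problem, then pass cp.SCS to problem.solve(), optionally forwarding solver-specific options. The keyword names below (eps, max_iters) follow the SCS 2.x interface used throughout these examples; newer SCS releases split eps into eps_abs and eps_rel, so treat the exact option names as version-dependent.

import cvxpy as cp
import numpy as np

# A tiny nonnegative least-squares problem.
np.random.seed(0)
A = np.random.randn(5, 3)
b = np.random.randn(5)
x = cp.Variable(3)
problem = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b)), [x >= 0])

# Solve with SCS, forwarding SCS-specific options (SCS 2.x spelling).
problem.solve(solver=cp.SCS, eps=1e-6, max_iters=5000, verbose=False)
print(problem.status, x.value)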
Example #1
Source File: test_cvxpylayer.py, from cvxpylayers (Apache License 2.0)

def test_lml(self):
    tf.random.set_seed(0)
    k = 2
    x = cp.Parameter(4)
    y = cp.Variable(4)
    obj = -x * y - cp.sum(cp.entr(y)) - cp.sum(cp.entr(1. - y))
    cons = [cp.sum(y) == k]
    problem = cp.Problem(cp.Minimize(obj), cons)
    lml = CvxpyLayer(problem, [x], [y])
    x_tf = tf.Variable([1., -1., -1., -1.], dtype=tf.float64)

    with tf.GradientTape() as tape:
        y_opt = lml(x_tf, solver_args={'eps': 1e-10})[0]
        loss = -tf.math.log(y_opt[1])

    def f():
        problem.solve(solver=cp.SCS, eps=1e-10)
        return -np.log(y.value[1])

    grad = tape.gradient(loss, [x_tf])
    numgrad = numerical_grad(f, [x], [x_tf])
    np.testing.assert_almost_equal(grad, numgrad, decimal=3)
Example #2
Source File: glrm.py, from GLRM (MIT License)

def fit(self, max_iters=100, eps=1e-2, use_indirect=False, warm_start=False):
    Xv, Yp, pX = self.probX
    Xp, Yv, pY = self.probY
    self.converge.reset()

    # alternating minimization
    while not self.converge.d():
        objX = pX.solve(solver=cp.SCS, eps=eps, max_iters=max_iters,
                        use_indirect=use_indirect, warm_start=warm_start)
        Xp.value[:, :-1] = copy(Xv.value)

        # can parallelize this
        for ypj, yvj, pyj in zip(Yp, Yv, pY):
            objY = pyj.solve(solver=cp.SCS, eps=eps, max_iters=max_iters,
                             use_indirect=use_indirect, warm_start=warm_start)
            ypj.value = copy(yvj.value)
        self.converge.obj.append(objX)

    self._finalize_XY(Xv, Yv)
    return self.X, self.Y
Example #3
Source File: nuclear_norm_minimization.py, from ME-Net (MIT License)

def solve(self, X, missing_mask):
    X = check_array(X, force_all_finite=False)

    m, n = X.shape
    S, objective = self._create_objective(m, n)
    constraints = self._constraints(
        X=X,
        missing_mask=missing_mask,
        S=S,
        error_tolerance=self.error_tolerance)
    problem = cvxpy.Problem(objective, constraints)
    problem.solve(
        verbose=self.verbose,
        solver=cvxpy.SCS,
        max_iters=self.max_iters,
        # use_indirect, see: https://github.com/cvxgrp/cvxpy/issues/547
        use_indirect=False)
    return S.value
Example #4
Source File: nuclear_norm_minimization.py, from fancyimpute (Apache License 2.0)

def solve(self, X, missing_mask):
    X = check_array(X, force_all_finite=False)

    m, n = X.shape
    S, objective = self._create_objective(m, n)
    constraints = self._constraints(
        X=X,
        missing_mask=missing_mask,
        S=S,
        error_tolerance=self.error_tolerance)
    problem = cvxpy.Problem(objective, constraints)
    problem.solve(
        verbose=self.verbose,
        solver=cvxpy.SCS,
        max_iters=self.max_iters,
        # use_indirect, see: https://github.com/cvxgrp/cvxpy/issues/547
        use_indirect=False)
    return S.value
Example #5
Source File: measure_utils.py, from ambient-gan (MIT License)

def get_inpaint_func_tv():
    def inpaint_func(image, mask):
        """Total variation inpainting"""
        inpainted = np.zeros_like(image)
        for c in range(image.shape[2]):
            image_c = image[:, :, c]
            mask_c = mask[:, :, c]
            if np.min(mask_c) > 0:  # if mask is all ones, no need to inpaint
                inpainted[:, :, c] = image_c
            else:
                h, w = image_c.shape
                inpainted_c_var = cvxpy.Variable(h, w)
                obj = cvxpy.Minimize(cvxpy.tv(inpainted_c_var))
                constraints = [cvxpy.mul_elemwise(mask_c, inpainted_c_var) ==
                               cvxpy.mul_elemwise(mask_c, image_c)]
                prob = cvxpy.Problem(obj, constraints)
                # prob.solve(solver=cvxpy.SCS, max_iters=100, eps=1e-2)  # scs solver
                prob.solve()  # default solver
                inpainted[:, :, c] = inpainted_c_var.value
        return inpainted
    return inpaint_func
Example #6
Source File: cvxpy_examples.py, from cvxpylayers (Apache License 2.0)

def ball_con():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('ball con')
    npr.seed(0)

    n = 2

    A = cp.Parameter((n, n))
    z = cp.Parameter(n)
    p = cp.Parameter(n)
    x = cp.Variable(n)
    t = cp.Variable(n)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - p))
    # TODO automate introduction of variables.
    cons = [0.5 * cp.sum_squares(A * t) <= 1, t == (x - z)]
    prob = cp.Problem(obj, cons)

    L = npr.randn(n, n)
    A.value = L.T
    z.value = npr.randn(n)
    p.value = npr.randn(n)

    prob.solve(solver=cp.SCS)
    print(x.value)
Example #7
Source File: cvxpy_examples.py, from cvxpylayers (Apache License 2.0)

def relu():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('relu')
    npr.seed(0)

    n = 4
    _x = cp.Parameter(n)
    _y = cp.Variable(n)
    obj = cp.Minimize(cp.sum_squares(_y - _x))
    cons = [_y >= 0]
    prob = cp.Problem(obj, cons)

    _x.value = npr.randn(n)
    prob.solve(solver=cp.SCS)
    print(_y.value)
Example #8
Source File: test.py, from newton_admm (Apache License 2.0)

def test_logistic_regression():
    """ This test will construct inequality, and exponential cone constraints """
    prob = problems.logistic_regression(5, 2, 1.0)[1]
    data = prob.get_problem_data(cp.SCS)
    out = newton_admm(data, data['dims'])
    cvx_out = prob.solve()
    assert np.allclose(out['info']['fstar'], cvx_out), _s(
        out['info']['fstar'], cvx_out)
Example #9
Source File: test_cvxpylayer.py, from cvxpylayers (Apache License 2.0)

def test_sdp(self):
    tf.random.set_seed(5)

    n = 3
    p = 3
    C = cp.Parameter((n, n))
    A = [cp.Parameter((n, n)) for _ in range(p)]
    b = [cp.Parameter((1, 1)) for _ in range(p)]

    C_tf = tf.Variable(tf.random.normal((n, n), dtype=tf.float64))
    A_tf = [tf.Variable(tf.random.normal((n, n), dtype=tf.float64))
            for _ in range(p)]
    b_tf = [tf.Variable(tf.random.normal((1, 1), dtype=tf.float64))
            for _ in range(p)]

    X = cp.Variable((n, n), symmetric=True)
    constraints = [X >> 0]
    constraints += [
        cp.trace(A[i] @ X) == b[i] for i in range(p)
    ]
    problem = cp.Problem(cp.Minimize(
        cp.trace(C @ X) - cp.log_det(X) + cp.sum_squares(X)), constraints)
    layer = CvxpyLayer(problem, [C] + A + b, [X])
    values = [C_tf] + A_tf + b_tf

    with tf.GradientTape() as tape:
        soln = layer(*values,
                     solver_args={'eps': 1e-10, 'max_iters': 10000})[0]
        summed = tf.math.reduce_sum(soln)
    grads = tape.gradient(summed, values)

    def f():
        problem.solve(cp.SCS, eps=1e-10, max_iters=10000)
        return np.sum(X.value)

    numgrads = numerical_grad(f, [C] + A + b, values, delta=1e-4)
    for g, ng in zip(grads, numgrads):
        np.testing.assert_allclose(g, ng, atol=1e-1)
Example #10
Source File: test_cvxpylayer.py, from cvxpylayers (Apache License 2.0)

def test_basic_gp(self):
    tf.random.set_seed(243)

    x = cp.Variable(pos=True)
    y = cp.Variable(pos=True)
    z = cp.Variable(pos=True)

    a = cp.Parameter(pos=True, value=2.0)
    b = cp.Parameter(pos=True, value=1.0)
    c = cp.Parameter(value=0.5)

    objective_fn = 1 / (x * y * z)
    constraints = [a * (x * y + x * z + y * z) <= b, x >= y**c]
    problem = cp.Problem(cp.Minimize(objective_fn), constraints)
    problem.solve(cp.SCS, gp=True, eps=1e-12)

    layer = CvxpyLayer(
        problem, parameters=[a, b, c], variables=[x, y, z], gp=True)
    a_tf = tf.Variable(2.0, dtype=tf.float64)
    b_tf = tf.Variable(1.0, dtype=tf.float64)
    c_tf = tf.Variable(0.5, dtype=tf.float64)

    with tf.GradientTape() as tape:
        x_tf, y_tf, z_tf = layer(a_tf, b_tf, c_tf)
        summed = x_tf + y_tf + z_tf
    grads = tape.gradient(summed, [a_tf, b_tf, c_tf])

    def f():
        problem.solve(cp.SCS, eps=1e-12, max_iters=10000, gp=True)
        return x.value + y.value + z.value

    numgrads = numerical_grad(f, [a, b, c], [a_tf, b_tf, c_tf])
    for g, ng in zip(grads, numgrads):
        np.testing.assert_allclose(g, ng, atol=1e-2)
Example #11
Source File: problems.py, from newton_admm (Apache License 2.0)

def cvxpy_beta_from_x(prob, beta, m):
    def beta_from_x(x):
        """ From x, return beta """
        scs_output = {
            "info": {
                'status': 'Solved',
                'statusVal': 1,
                'resPri': 1,
                'resInfeas': 1,
                'solveTime': 1,
                'relGap': 1,
                'iter': 1,
                'dobj': 1,
                'pobj': 1,
                'setupTime': 1,
                'resUnbdd': 1,
                'resDual': 1
            },
            "y": np.zeros(m),
            "x": x,
            "s": np.zeros(m)
        }
        prob.unpack_results(cp.SCS, scs_output)
        if isinstance(beta, cp.Variable):
            return beta.value
        else:
            return tuple(b.value for b in beta)
    return beta_from_x
Example #12
Source File: problems.py, from newton_admm (Apache License 2.0)

def robust_pca(p, suppfrac):
    """ Create a robust PCA problem with a low rank matrix """
    # First, create a rank = "rk" matrix:
    rk = int(round(p * 0.5))
    assert rk <= min(p, p)
    Lstar = np.zeros((p, p))
    for i in range(rk):
        vi = np.random.randn(p, 1)
        mati = vi.dot(vi.T)
        Lstar += mati

    # Then, create a sparse matrix:
    Mstar = np.random.randn(p, p)
    Mstar_vec = Mstar.T.ravel()
    nnz = int(np.floor((1.0 - suppfrac) * p * p))  # Num. nonzeros
    assert nnz <= p * p
    idxes = np.random.randint(0, p * p, nnz)
    Mstar_vec[idxes] = 0
    Mstar = np.reshape(Mstar_vec, (p, p)).T

    # Finally, sum the two matrices "L" and "M":
    X = Lstar + Mstar
    lam = 1.0

    Lhat = cp.Variable(p, p)
    Mhat = cp.Variable(p, p)
    prob = cp.Problem(cp.Minimize(cp.norm(Lhat, "nuc") + cp.sum_squares(Lhat)),
                      [cp.norm(Mhat, 1) <= lam, Lhat + Mhat == X])
    data = prob.get_problem_data(cp.SCS)
    data['beta_from_x'] = cvxpy_beta_from_x(prob, (Lhat, Mhat),
                                            data['A'].shape[0])
    return ((Lhat, Mhat), prob, data)
Example #13
Source File: test.py, from newton_admm (Apache License 2.0)

def test_least_squares():
    """ This test will construct second order cone constraints """
    prob = problems.least_squares(10, 5)[1]
    data = prob.get_problem_data(cp.SCS)
    out = newton_admm(data, data['dims'])
    cvx_out = prob.solve()
    assert np.allclose(out['info']['fstar'], cvx_out), _s(
        out['info']['fstar'], cvx_out)
Example #14
Source File: test.py, from newton_admm (Apache License 2.0)

def test_lp():
    """ This test will construct equality, inequality, and second order cone constraints """
    prob = problems.lp(30, 60)[1]
    data = prob.get_problem_data(cp.SCS)
    out = newton_admm(data, data['dims'])
    cvx_out = prob.solve()
    assert np.allclose(out['info']['fstar'], cvx_out), _s(
        out['info']['fstar'], cvx_out)
Example #15
Source File: calibration_tools.py, from pre-training (Apache License 2.0)

def tune_temp(logits, labels, binary_search=True, lower=0.2, upper=5.0, eps=0.0001):
    logits = np.array(logits)

    if binary_search:
        import torch
        import torch.nn.functional as F

        logits = torch.FloatTensor(logits)
        labels = torch.LongTensor(labels)
        t_guess = torch.FloatTensor([0.5 * (lower + upper)]).requires_grad_()

        while upper - lower > eps:
            if torch.autograd.grad(F.cross_entropy(logits / t_guess, labels), t_guess)[0] > 0:
                upper = 0.5 * (lower + upper)
            else:
                lower = 0.5 * (lower + upper)
            t_guess = t_guess * 0 + 0.5 * (lower + upper)

        t = min([lower, 0.5 * (lower + upper), upper],
                key=lambda x: float(F.cross_entropy(logits / x, labels)))
    else:
        import cvxpy as cx

        set_size = np.array(logits).shape[0]

        t = cx.Variable()

        expr = sum((cx.Minimize(cx.log_sum_exp(logits[i, :] * t) - logits[i, labels[i]] * t)
                    for i in range(set_size)))
        p = cx.Problem(expr, [lower <= t, t <= upper])

        p.solve()  # p.solve(solver=cx.SCS)
        t = 1 / t.value

    return t
Example #16
Source File: test.py, from newton_admm (Apache License 2.0)

def test_robust_pca():
    """ This test will construct positive semi-definite cone constraints """
    prob = problems.robust_pca(5, 0.5)[1]
    data = prob.get_problem_data(cp.SCS)
    # solve the problem with ADMM instead for better accuracy
    out0 = newton_admm(data, data['dims'], admm_maxiters=4000, maxiters=4000)
    out = newton_admm(data, data['dims'])
    fstar, fstar0 = out['info']['fstar'], out0['info']['fstar']
    assert np.allclose(fstar, fstar0), _s(fstar, fstar0)
Example #17
Source File: calibration_tools.py, from outlier-exposure (Apache License 2.0)

def tune_temp(logits, labels, binary_search=True, lower=0.2, upper=5.0, eps=0.0001):
    logits = np.array(logits)

    if binary_search:
        import torch
        import torch.nn.functional as F

        logits = torch.FloatTensor(logits)
        labels = torch.LongTensor(labels)
        t_guess = torch.FloatTensor([0.5 * (lower + upper)]).requires_grad_()

        while upper - lower > eps:
            if torch.autograd.grad(F.cross_entropy(logits / t_guess, labels), t_guess)[0] > 0:
                upper = 0.5 * (lower + upper)
            else:
                lower = 0.5 * (lower + upper)
            t_guess = t_guess * 0 + 0.5 * (lower + upper)

        t = min([lower, 0.5 * (lower + upper), upper],
                key=lambda x: float(F.cross_entropy(logits / x, labels)))
    else:
        import cvxpy as cx

        set_size = np.array(logits).shape[0]

        t = cx.Variable()

        expr = sum((cx.Minimize(cx.log_sum_exp(logits[i, :] * t) - logits[i, labels[i]] * t)
                    for i in range(set_size)))
        p = cx.Problem(expr, [lower <= t, t <= upper])

        p.solve()  # p.solve(solver=cx.SCS)
        t = 1 / t.value

    return t
Example #18
Source File: utils.py, from diffcp (Apache License 2.0)

def scs_data_from_cvxpy_problem(problem):
    import cvxpy as cp
    data = problem.get_problem_data(cp.SCS)[0]
    cone_dims = cp.reductions.solvers.conic_solvers.scs_conif.dims_to_solver_dict(
        data["dims"])
    return data["A"], data["b"], data["c"], cone_dims
Example #19
Source File: sdp.py, from diffcp (Apache License 2.0)

def scs_data_from_cvxpy_problem(problem):
    data = problem.get_problem_data(cp.SCS)[0]
    cone_dims = cp.reductions.solvers.conic_solvers.scs_conif.dims_to_solver_dict(
        data["dims"])
    return data["A"], data["b"], data["c"], cone_dims
Example #20
Source File: test_cvxpylayer.py, from cvxpylayers (Apache License 2.0)

def test_logistic_regression(self):
    np.random.seed(243)
    N, n = 10, 2

    def sigmoid(z):
        return 1 / (1 + np.exp(-z))

    X_np = np.random.randn(N, n)
    a_true = np.random.randn(n, 1)
    y_np = np.round(sigmoid(X_np @ a_true + np.random.randn(N, 1) * 0.5))

    X_tf = tf.Variable(X_np)
    lam_tf = tf.Variable(1.0 * tf.ones(1))

    a = cp.Variable((n, 1))
    X = cp.Parameter((N, n))
    lam = cp.Parameter(1, nonneg=True)
    y = y_np

    log_likelihood = cp.sum(
        cp.multiply(y, X @ a) -
        cp.log_sum_exp(cp.hstack([np.zeros((N, 1)), X @ a]).T, axis=0,
                       keepdims=True).T
    )
    prob = cp.Problem(
        cp.Minimize(-log_likelihood + lam * cp.sum_squares(a)))
    fit_logreg = CvxpyLayer(prob, [X, lam], [a])

    with tf.GradientTape(persistent=True) as tape:
        weights = fit_logreg(X_tf, lam_tf, solver_args={'eps': 1e-8})[0]
        summed = tf.math.reduce_sum(weights)
    grad_X_tf, grad_lam_tf = tape.gradient(summed, [X_tf, lam_tf])

    def f_train():
        prob.solve(solver=cp.SCS, eps=1e-8)
        return np.sum(a.value)

    numgrad_X_tf, numgrad_lam_tf = numerical_grad(
        f_train, [X, lam], [X_tf, lam_tf], delta=1e-6)
    np.testing.assert_allclose(grad_X_tf, numgrad_X_tf, atol=1e-2)
    np.testing.assert_allclose(grad_lam_tf, numgrad_lam_tf, atol=1e-2)
Example #21
Source File: test_cvxpylayer.py, from cvxpylayers (Apache License 2.0)

def test_docstring_example(self):
    np.random.seed(0)
    tf.random.set_seed(0)

    n, m = 2, 3
    x = cp.Variable(n)
    A = cp.Parameter((m, n))
    b = cp.Parameter(m)
    constraints = [x >= 0]
    objective = cp.Minimize(0.5 * cp.pnorm(A @ x - b, p=1))
    problem = cp.Problem(objective, constraints)
    assert problem.is_dpp()

    cvxpylayer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
    A_tf = tf.Variable(tf.random.normal((m, n)))
    b_tf = tf.Variable(tf.random.normal((m,)))

    with tf.GradientTape() as tape:
        # solve the problem, setting the values of A and b to A_tf and b_tf
        solution, = cvxpylayer(A_tf, b_tf)
        summed_solution = tf.math.reduce_sum(solution)
    gradA, gradb = tape.gradient(summed_solution, [A_tf, b_tf])

    def f():
        problem.solve(solver=cp.SCS, eps=1e-10)
        return np.sum(x.value)

    numgradA, numgradb = numerical_grad(f, [A, b], [A_tf, b_tf])
    np.testing.assert_almost_equal(gradA, numgradA, decimal=4)
    np.testing.assert_almost_equal(gradb, numgradb, decimal=4)
Example #22
Source File: cvxpy_examples.py, from cvxpylayers (Apache License 2.0)

def sdp():
    print('sdp')
    npr.seed(0)

    d = 2
    X = cp.Variable((d, d), PSD=True)
    Y = cp.Parameter((d, d))
    obj = cp.Minimize(cp.trace(Y * X))
    prob = cp.Problem(obj, [X >= 1])

    Y.value = np.abs(npr.randn(d, d))
    print(Y.value.sum())
    prob.solve(solver=cp.SCS, verbose=True)
    print(X.value)
Example #23
Source File: cvxpy_examples.py, from cvxpylayers (Apache License 2.0)

def sigmoid():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('sigmoid')
    npr.seed(0)

    n = 4
    _x = cp.Parameter((n, 1))
    _y = cp.Variable(n)
    obj = cp.Minimize(-_x.T * _y - cp.sum(cp.entr(_y) + cp.entr(1. - _y)))
    prob = cp.Problem(obj)

    _x.value = npr.randn(n, 1)
    prob.solve(solver=cp.SCS)
    print(_y.value)
Example #24
Source File: cvxpy_examples.py, from cvxpylayers (Apache License 2.0)

def full_qp():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('full qp')
    npr.seed(0)

    nx, ncon_eq, ncon_ineq = 5, 2, 3
    Q = cp.Parameter((nx, nx))
    p = cp.Parameter((nx, 1))
    A = cp.Parameter((ncon_eq, nx))
    b = cp.Parameter(ncon_eq)
    G = cp.Parameter((ncon_ineq, nx))
    h = cp.Parameter(ncon_ineq)
    x = cp.Variable(nx)
    # obj = cp.Minimize(0.5*cp.quad_form(x, Q) + p.T * x)
    obj = cp.Minimize(0.5 * cp.sum_squares(Q @ x) + p.T * x)
    cons = [A * x == b, G * x <= h]
    prob = cp.Problem(obj, cons)

    x0 = npr.randn(nx)
    s0 = npr.randn(ncon_ineq)

    G.value = npr.randn(ncon_ineq, nx)
    h.value = G.value.dot(x0) + s0
    A.value = npr.randn(ncon_eq, nx)
    b.value = A.value.dot(x0)
    L = npr.randn(nx, nx)
    Q.value = L.T  # L.dot(L.T)
    p.value = npr.randn(nx, 1)

    prob.solve(solver=cp.SCS)
    print(x.value)
Example #25
Source File: cvxpy_examples.py, from cvxpylayers (Apache License 2.0)

def simple_qp():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('simple qp')
    npr.seed(0)

    nx, ncon = 2, 3
    G = cp.Parameter((ncon, nx))
    h = cp.Parameter(ncon)
    x = cp.Variable(nx)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - 1))
    cons = [G * x <= h]
    prob = cp.Problem(obj, cons)
    data, chain, inv_data = prob.get_problem_data(solver=cp.SCS)
    param_prob = data[cp.settings.PARAM_PROB]
    print(param_prob.A.A)

    x0 = npr.randn(nx)
    s0 = npr.randn(ncon)
    G.value = npr.randn(ncon, nx)
    h.value = G.value.dot(x0) + s0

    prob.solve(solver=cp.SCS)

    delC = npr.randn(param_prob.c.shape[0])[:-1]
    delA = npr.randn(param_prob.A.shape[0])
    num_con = delA.size // (param_prob.x.size + 1)
    delb = delA[-num_con:]
    delA = delA[:-num_con]
    delA = sp.csc_matrix(np.reshape(delA, (num_con, param_prob.x.size)))
    del_param_dict = param_prob.apply_param_jac(delC, delA, delb)
    print(del_param_dict)

    var_map = param_prob.split_solution(npr.randn(param_prob.x.size))
    print(var_map)
    print(param_prob.split_adjoint(var_map))
    print(x.value)
Example #26
Source File: calibration_tools.py, from natural-adv-examples (MIT License)

def tune_temp(logits, labels, binary_search=True, lower=0.2, upper=5.0, eps=0.0001):
    logits = np.array(logits)

    if binary_search:
        import torch
        import torch.nn.functional as F

        logits = torch.FloatTensor(logits)
        labels = torch.LongTensor(labels)
        t_guess = torch.FloatTensor([0.5 * (lower + upper)]).requires_grad_()

        while upper - lower > eps:
            if torch.autograd.grad(F.cross_entropy(logits / t_guess, labels), t_guess)[0] > 0:
                upper = 0.5 * (lower + upper)
            else:
                lower = 0.5 * (lower + upper)
            t_guess = t_guess * 0 + 0.5 * (lower + upper)

        t = min([lower, 0.5 * (lower + upper), upper],
                key=lambda x: float(F.cross_entropy(logits / x, labels)))
    else:
        import cvxpy as cx

        set_size = np.array(logits).shape[0]

        t = cx.Variable()

        expr = sum((cx.Minimize(cx.log_sum_exp(logits[i, :] * t) - logits[i, labels[i]] * t)
                    for i in range(set_size)))
        p = cx.Problem(expr, [lower <= t, t <= upper])

        p.solve()  # p.solve(solver=cx.SCS)
        t = 1 / t.value

    return t
Example #27
Source File: cvxpy.py, from qpth (Apache License 2.0)

def forward_single_np(Q, p, G, h, A, b):
    nz, neq, nineq = p.shape[0], A.shape[0] if A is not None else 0, G.shape[0]

    z_ = cp.Variable(nz)

    obj = cp.Minimize(0.5 * cp.quad_form(z_, Q) + p.T * z_)
    eqCon = A * z_ == b if neq > 0 else None
    if nineq > 0:
        slacks = cp.Variable(nineq)
        ineqCon = G * z_ + slacks == h
        slacksCon = slacks >= 0
    else:
        ineqCon = slacks = slacksCon = None

    cons = [x for x in [eqCon, ineqCon, slacksCon] if x is not None]
    prob = cp.Problem(obj, cons)
    prob.solve()  # solver=cp.SCS, max_iters=5000, verbose=False)
    # prob.solve(solver=cp.SCS, max_iters=10000, verbose=True)
    assert('optimal' in prob.status)
    zhat = np.array(z_.value).ravel()
    nu = np.array(eqCon.dual_value).ravel() if eqCon is not None else None
    if ineqCon is not None:
        lam = np.array(ineqCon.dual_value).ravel()
        slacks = np.array(slacks.value).ravel()
    else:
        lam = slacks = None

    return prob.value, zhat, nu, lam, slacks
Example #28
Source File: test_cvxpylayer.py, from cvxpylayers (Apache License 2.0)

def test_simple_qp_with_solver_args(self):
    np.random.seed(0)
    tf.random.set_seed(0)

    nx, ncon = 2, 3
    G = cp.Parameter((ncon, nx), name='G')
    h = cp.Parameter(ncon, name='h')
    x = cp.Variable(nx)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - 1))
    cons = [G * x <= h]
    problem = cp.Problem(obj, cons)

    cvxlayer = CvxpyLayer(problem, [G, h], [x])

    x0 = tf.random.normal((nx, 1))
    s0 = tf.random.normal((ncon, 1))
    G_t = tf.random.normal((ncon, nx))
    h_t = tf.squeeze(tf.matmul(G_t, x0) + s0)

    with tf.GradientTape() as tape:
        tape.watch(G_t)
        tape.watch(h_t)
        soln = cvxlayer(G_t, h_t, solver_args={'eps': 1e-10})
        soln = {x.name(): soln[0]}
    grads = tape.gradient(soln, [G_t, h_t])
    gradG = grads[0]
    gradh = grads[1]

    G.value = G_t.numpy()
    h.value = h_t.numpy()
    problem.solve(solver=cp.SCS)
    self.assertEqual(len(soln.values()), len(problem.variables()))
    np.testing.assert_almost_equal(
        x.value, list(soln.values())[0], decimal=5)

    def f():
        problem.solve(solver=cp.SCS, eps=1e-10)
        return np.sum(x.value)

    numgradG, numgradh = numerical_grad(f, [G, h], [G_t, h_t])
    np.testing.assert_almost_equal(gradG, numgradG, decimal=3)
    np.testing.assert_almost_equal(gradh, numgradh, decimal=3)
Example #29
Source File: test_cvxpylayer.py, from cvxpylayers (Apache License 2.0)

def test_simple_qp(self):
    np.random.seed(0)
    tf.random.set_seed(0)

    nx, ncon = 2, 3
    G = cp.Parameter((ncon, nx), name='G')
    h = cp.Parameter(ncon, name='h')
    x = cp.Variable(nx)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - 1))
    cons = [G * x <= h]
    problem = cp.Problem(obj, cons)

    cvxlayer = CvxpyLayer(problem, [G, h], [x])

    x0 = tf.random.normal((nx, 1))
    s0 = tf.random.normal((ncon, 1))
    G_t = tf.random.normal((ncon, nx))
    h_t = tf.squeeze(tf.matmul(G_t, x0) + s0)

    with tf.GradientTape() as tape:
        tape.watch(G_t)
        tape.watch(h_t)
        soln = cvxlayer(G_t, h_t, solver_args={'eps': 1e-10})
        soln = {x.name(): soln[0]}
    grads = tape.gradient(soln, [G_t, h_t])
    gradG = grads[0]
    gradh = grads[1]

    G.value = G_t.numpy()
    h.value = h_t.numpy()
    problem.solve(solver=cp.SCS)
    self.assertEqual(len(soln.values()), len(problem.variables()))
    np.testing.assert_almost_equal(
        x.value, list(soln.values())[0], decimal=5)

    def f():
        problem.solve(solver=cp.SCS, eps=1e-10)
        return np.sum(x.value)

    numgradG, numgradh = numerical_grad(f, [G, h], [G_t, h_t])
    np.testing.assert_almost_equal(gradG, numgradG, decimal=3)
    np.testing.assert_almost_equal(gradh, numgradh, decimal=3)
Example #30
Source File: cvxpylayer.py, from cvxpylayers (Apache License 2.0)

def __init__(self, problem, parameters, variables, gp=False):
    """Construct a CvxpyLayer

    Args:
      problem: The CVXPY problem; must be DPP.
      parameters: A list of CVXPY Parameters in the problem; the order
                  of the Parameters determines the order in which parameter
                  values must be supplied in the forward pass. Must include
                  every parameter involved in problem.
      variables: A list of CVXPY Variables in the problem; the order of the
                 Variables determines the order of the optimal variable
                 values returned from the forward pass.
      gp: Whether to parse the problem using DGP (True or False).
    """
    super(CvxpyLayer, self).__init__()
    self.gp = gp
    if self.gp:
        if not problem.is_dgp(dpp=True):
            raise ValueError('Problem must be DPP.')
    else:
        if not problem.is_dcp(dpp=True):
            raise ValueError('Problem must be DPP.')
    if not set(problem.parameters()) == set(parameters):
        raise ValueError("The layer's parameters must exactly match "
                         "problem.parameters")
    if not set(variables).issubset(set(problem.variables())):
        raise ValueError("Argument variables must be a subset of "
                         "problem.variables")
    if not isinstance(parameters, list) and \
            not isinstance(parameters, tuple):
        raise ValueError("The layer's parameters must be provided as "
                         "a list or tuple")
    if not isinstance(variables, list) and \
            not isinstance(variables, tuple):
        raise ValueError("The layer's variables must be provided as "
                         "a list or tuple")

    self.param_order = parameters
    self.variables = variables
    self.var_dict = {v.id for v in self.variables}

    # Construct compiler
    self.dgp2dcp = None
    if self.gp:
        for param in parameters:
            if param.value is None:
                raise ValueError("An initial value for each parameter is "
                                 "required when gp=True.")
        data, solving_chain, _ = problem.get_problem_data(
            solver=cp.SCS, gp=True)
        self.compiler = data[cp.settings.PARAM_PROB]
        self.dgp2dcp = solving_chain.get(cp.reductions.Dgp2Dcp)
        self.param_ids = [p.id for p in self.compiler.parameters]
    else:
        data, _, _ = problem.get_problem_data(solver=cp.SCS)
        self.compiler = data[cp.settings.PARAM_PROB]
        self.param_ids = [p.id for p in self.param_order]
    self.cone_dims = dims_to_solver_dict(data["dims"])
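The constructor docstring above spells out the CvxpyLayer calling convention. A minimal usage sketch, assuming the TensorFlow backend and mirroring the docstring example in Example #21 (parameter and variable names here are illustrative):

import cvxpy as cp
import tensorflow as tf
from cvxpylayers.tensorflow import CvxpyLayer

n, m = 2, 3
x = cp.Variable(n)
A = cp.Parameter((m, n))
b = cp.Parameter(m)
problem = cp.Problem(cp.Minimize(cp.pnorm(A @ x - b, p=1)), [x >= 0])
assert problem.is_dpp()  # the constructor rejects non-DPP problems

# Parameter values are supplied positionally in the order given here.
layer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
A_tf = tf.Variable(tf.random.normal((m, n)))
b_tf = tf.Variable(tf.random.normal((m,)))

with tf.GradientTape() as tape:
    solution, = layer(A_tf, b_tf)  # differentiable solve (SCS under the hood)
    loss = tf.math.reduce_sum(solution)
grads = tape.gradient(loss, [A_tf, b_tf])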