Python cvxpy.sum_squares() Examples
The following are 30 code examples of cvxpy.sum_squares(), drawn from open-source projects; each example notes its original project and source file. You may also want to check out all available functions/classes of the module cvxpy.
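
Before the project excerpts, a minimal self-contained illustration of the atom (written for this page rather than taken from any project below): cvxpy.sum_squares(expr) evaluates to the sum of the squared entries of expr, i.e. the squared Euclidean norm, and is the usual way to express least-squares objectives.

import numpy as np
import cvxpy as cp

# Ordinary least squares: minimize ||Ax - b||_2^2
A = np.random.randn(30, 10)
b = np.random.randn(30)
x = cp.Variable(10)
prob = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b)))
prob.solve()
print(x.value)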
Example #1
Source File: huber.py From osqp_benchmarks with Apache License 2.0 | 6 votes |
def _generate_cvxpy_problem(self):
    '''
    Generate QP problem
    '''
    # Construct the problem
    #   minimize    1/2 z.T * z + np.ones(m).T * (r + s)
    #   subject to  Ax - b - z = r - s
    #               r >= 0
    #               s >= 0
    # The problem reformulation follows from Eq. (24) of the following paper:
    # https://doi.org/10.1109/34.877518
    x = cvxpy.Variable(self.n)
    z = cvxpy.Variable(self.m)
    r = cvxpy.Variable(self.m)
    s = cvxpy.Variable(self.m)

    objective = cvxpy.Minimize(.5 * cvxpy.sum_squares(z) + cvxpy.sum(r + s))
    constraints = [self.Ad@x - self.bd - z == r - s,
                   r >= 0, s >= 0]
    problem = cvxpy.Problem(objective, constraints)

    return problem, (x, z, r, s)
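
For context, this splitting is the standard QP form of Huber fitting: minimizing over r and s for fixed z leaves min_z z²/2 + |aᵢᵀx − bᵢ − z| per residual, which equals half the Huber penalty with threshold 1. An equivalent direct formulation with cvxpy's built-in atom would be the sketch below (reusing self.Ad and self.bd from above; the benchmark deliberately keeps the explicit QP form instead):

# Sketch of the equivalent unconstrained Huber formulation (not part of the benchmark)
x = cvxpy.Variable(self.n)
objective = cvxpy.Minimize(0.5 * cvxpy.sum(cvxpy.huber(self.Ad @ x - self.bd)))
problem = cvxpy.Problem(objective)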
Example #2
Source File: suitesparse_huber.py From osqp_benchmarks with Apache License 2.0 | 6 votes |
def _generate_cvxpy_problem(self):
    '''
    Generate QP problem
    '''
    # Construct the problem
    #   minimize    1/2 z.T * z + np.ones(m).T * (r + s)
    #   subject to  Ax - b - z = r - s
    #               r >= 0
    #               s >= 0
    # The problem reformulation follows from Eq. (24) of the following paper:
    # https://doi.org/10.1109/34.877518
    x = cvxpy.Variable(self.n)
    z = cvxpy.Variable(self.m)
    r = cvxpy.Variable(self.m)
    s = cvxpy.Variable(self.m)

    objective = cvxpy.Minimize(.5 * cvxpy.sum_squares(z) + cvxpy.sum(r + s))
    constraints = [self.Ad@x - self.bd - z == r - s,
                   r >= 0, s >= 0]
    problem = cvxpy.Problem(objective, constraints)

    return problem, (x, z, r, s)
Example #3
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 6 votes |
def test_simple_batch_socp(self):
    set_seed(243)
    n = 5
    m = 1
    batch_size = 4

    P_sqrt = cp.Parameter((n, n), name='P_sqrt')
    q = cp.Parameter((n, 1), name='q')
    A = cp.Parameter((m, n), name='A')
    b = cp.Parameter((m, 1), name='b')

    x = cp.Variable((n, 1), name='x')

    objective = 0.5 * cp.sum_squares(P_sqrt @ x) + q.T @ x
    constraints = [A@x == b, cp.norm(x) <= 1]
    prob = cp.Problem(cp.Minimize(objective), constraints)

    prob_tch = CvxpyLayer(prob, [P_sqrt, q, A, b], [x])

    P_sqrt_tch = torch.randn(batch_size, n, n, requires_grad=True)
    q_tch = torch.randn(batch_size, n, 1, requires_grad=True)
    A_tch = torch.randn(batch_size, m, n, requires_grad=True)
    b_tch = torch.randn(batch_size, m, 1, requires_grad=True)

    torch.autograd.gradcheck(prob_tch, (P_sqrt_tch, q_tch, A_tch, b_tch))
Example #4
Source File: cvxpy_examples.py From cvxpylayers with Apache License 2.0 | 6 votes |
def relu():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('relu')
    npr.seed(0)

    n = 4
    _x = cp.Parameter(n)
    _y = cp.Variable(n)
    obj = cp.Minimize(cp.sum_squares(_y - _x))
    cons = [_y >= 0]
    prob = cp.Problem(obj, cons)

    _x.value = npr.randn(n)
    prob.solve(solver=cp.SCS)
    print(_y.value)
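
The solution has a closed form: projecting _x onto the nonnegative orthant under a squared-error objective gives _y = max(_x, 0) elementwise, which is exactly the ReLU. A quick check one might append, assuming numpy is imported as np (an illustration, not in the original file):

assert np.allclose(_y.value, np.maximum(_x.value, 0), atol=1e-4)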
Example #5
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 6 votes |
def test_shared_parameter(self):
    set_seed(243)
    m, n = 10, 5

    A = cp.Parameter((m, n))
    x = cp.Variable(n)
    b1 = np.random.randn(m)
    b2 = np.random.randn(m)

    prob1 = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b1)))
    layer1 = CvxpyLayer(prob1, parameters=[A], variables=[x])

    prob2 = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b2)))
    layer2 = CvxpyLayer(prob2, parameters=[A], variables=[x])

    A_tch = torch.randn(m, n, requires_grad=True)
    solver_args = {
        "eps": 1e-10,
        "acceleration_lookback": 0,
        "max_iters": 10000
    }
    torch.autograd.gradcheck(
        lambda A: torch.cat(
            [layer1(A, solver_args=solver_args)[0],
             layer2(A, solver_args=solver_args)[0]]),
        (A_tch,))
Example #6
Source File: cvxpy_examples.py From cvxpylayers with Apache License 2.0 | 6 votes |
def ball_con():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('ball con')
    npr.seed(0)

    n = 2
    A = cp.Parameter((n, n))
    z = cp.Parameter(n)
    p = cp.Parameter(n)
    x = cp.Variable(n)
    t = cp.Variable(n)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - p))
    # TODO automate introduction of variables.
    cons = [0.5 * cp.sum_squares(A * t) <= 1, t == (x - z)]
    prob = cp.Problem(obj, cons)

    L = npr.randn(n, n)
    A.value = L.T
    z.value = npr.randn(n)
    p.value = npr.randn(n)

    prob.solve(solver=cp.SCS)
    print(x.value)
Example #7
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 6 votes |
def test_entropy_maximization(self):
    set_seed(243)
    n, m, p = 5, 3, 2

    tmp = np.random.rand(n)
    A_np = np.random.randn(m, n)
    b_np = A_np.dot(tmp)
    F_np = np.random.randn(p, n)
    g_np = F_np.dot(tmp) + np.random.rand(p)

    x = cp.Variable(n)
    A = cp.Parameter((m, n))
    b = cp.Parameter(m)
    F = cp.Parameter((p, n))
    g = cp.Parameter(p)
    obj = cp.Maximize(cp.sum(cp.entr(x)) - .01 * cp.sum_squares(x))
    constraints = [A * x == b,
                   F * x <= g]
    prob = cp.Problem(obj, constraints)
    layer = CvxpyLayer(prob, [A, b, F, g], [x])

    A_tch, b_tch, F_tch, g_tch = map(
        lambda x: torch.from_numpy(x).requires_grad_(True),
        [A_np, b_np, F_np, g_np])
    torch.autograd.gradcheck(
        lambda *x: layer(*x, solver_args={"eps": 1e-12,
                                          "max_iters": 10000}),
        (A_tch, b_tch, F_tch, g_tch),
        eps=1e-4, atol=1e-3, rtol=1e-3)
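
Here cp.entr(x) is the elementwise entropy −x log x, so the test maximizes entropy subject to linear equality and inequality constraints; the small −.01 · sum_squares(x) term makes the objective strongly concave, which presumably keeps the solution map well behaved for the numerical gradient comparison.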
Example #8
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 5 votes |
def test_incorrect_parameter_shape(self):
    set_seed(243)
    m, n = 100, 20

    A = cp.Parameter((m, n))
    b = cp.Parameter(m)
    x = cp.Variable(n)
    obj = cp.sum_squares(A@x - b) + cp.sum_squares(x)
    prob = cp.Problem(cp.Minimize(obj))
    prob_th = CvxpyLayer(prob, [A, b], [x])

    A_th = torch.randn(32, m, n).double().requires_grad_()
    b_th = torch.randn(20, m).double().requires_grad_()
    with self.assertRaises(ValueError):
        prob_th(A_th, b_th)

    A_th = torch.randn(32, m, n).double().requires_grad_()
    b_th = torch.randn(32, 2 * m).double().requires_grad_()
    with self.assertRaises(ValueError):
        prob_th(A_th, b_th)

    A_th = torch.randn(m, n).double().requires_grad_()
    b_th = torch.randn(2 * m).double().requires_grad_()
    with self.assertRaises(ValueError):
        prob_th(A_th, b_th)

    A_th = torch.randn(32, m, n).double().requires_grad_()
    b_th = torch.randn(32, 32, m).double().requires_grad_()
    with self.assertRaises(ValueError):
        prob_th(A_th, b_th)
Example #9
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 5 votes |
def test_not_enough_parameters_at_call_time(self):
    x = cp.Variable(1)
    lam = cp.Parameter(1, nonneg=True)
    lam2 = cp.Parameter(1, nonneg=True)
    objective = lam * cp.norm(x, 1) + lam2 * cp.sum_squares(x)
    prob = cp.Problem(cp.Minimize(objective))
    layer = CvxpyLayer(prob, [lam, lam2], [x])
    with self.assertRaisesRegex(
            ValueError,
            'A tensor must be provided for each CVXPY parameter.*'):
        layer(lam)
Example #10
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 5 votes |
def test_sdp(self):
    tf.random.set_seed(5)

    n = 3
    p = 3
    C = cp.Parameter((n, n))
    A = [cp.Parameter((n, n)) for _ in range(p)]
    b = [cp.Parameter((1, 1)) for _ in range(p)]

    C_tf = tf.Variable(tf.random.normal((n, n), dtype=tf.float64))
    A_tf = [tf.Variable(tf.random.normal((n, n), dtype=tf.float64))
            for _ in range(p)]
    b_tf = [tf.Variable(tf.random.normal((1, 1), dtype=tf.float64))
            for _ in range(p)]

    X = cp.Variable((n, n), symmetric=True)
    constraints = [X >> 0]
    constraints += [
        cp.trace(A[i]@X) == b[i] for i in range(p)
    ]
    problem = cp.Problem(cp.Minimize(
        cp.trace(C @ X) - cp.log_det(X) + cp.sum_squares(X)), constraints)
    layer = CvxpyLayer(problem, [C] + A + b, [X])
    values = [C_tf] + A_tf + b_tf

    with tf.GradientTape() as tape:
        soln = layer(*values,
                     solver_args={'eps': 1e-10, 'max_iters': 10000})[0]
        summed = tf.math.reduce_sum(soln)
    grads = tape.gradient(summed, values)

    def f():
        problem.solve(cp.SCS, eps=1e-10, max_iters=10000)
        return np.sum(X.value)

    numgrads = numerical_grad(f, [C] + A + b, values, delta=1e-4)
    for g, ng in zip(grads, numgrads):
        np.testing.assert_allclose(g, ng, atol=1e-1)
Example #11
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 5 votes |
def test_least_squares(self):
    set_seed(243)
    m, n = 100, 20

    A = cp.Parameter((m, n))
    b = cp.Parameter(m)
    x = cp.Variable(n)
    obj = cp.sum_squares(A@x - b) + cp.sum_squares(x)
    prob = cp.Problem(cp.Minimize(obj))
    prob_th = CvxpyLayer(prob, [A, b], [x])

    A_th = torch.randn(m, n).double().requires_grad_()
    b_th = torch.randn(m).double().requires_grad_()

    x = prob_th(A_th, b_th, solver_args={"eps": 1e-10})[0]

    def lstsq(A, b):
        return torch.solve(
            (A.t() @ b).unsqueeze(1),
            A.t() @ A + torch.eye(n).double())[0]
    x_lstsq = lstsq(A_th, b_th)

    grad_A_cvxpy, grad_b_cvxpy = grad(x.sum(), [A_th, b_th])
    grad_A_lstsq, grad_b_lstsq = grad(x_lstsq.sum(), [A_th, b_th])

    self.assertAlmostEqual(
        torch.norm(grad_A_cvxpy - grad_A_lstsq).item(), 0.0)
    self.assertAlmostEqual(
        torch.norm(grad_b_cvxpy - grad_b_lstsq).item(), 0.0)
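
A portability note: torch.solve(B, A), which solves AX = B, was deprecated and later removed from PyTorch. The comparison target is the closed-form ridge solution x = (AᵀA + I)⁻¹Aᵀb, the minimizer of ‖Ax − b‖² + ‖x‖², which against the current API could be written as (a sketch, not part of the original test):

def lstsq(A, b):
    return torch.linalg.solve(A.t() @ A + torch.eye(n).double(),
                              (A.t() @ b).unsqueeze(1))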
Example #12
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 5 votes |
def test_logistic_regression(self):
    set_seed(243)
    N, n = 10, 2

    X_np = np.random.randn(N, n)
    a_true = np.random.randn(n, 1)
    y_np = np.round(sigmoid(X_np @ a_true + np.random.randn(N, 1) * 0.5))

    X_tch = torch.from_numpy(X_np)
    X_tch.requires_grad_(True)
    lam_tch = 0.1 * torch.ones(1, requires_grad=True, dtype=torch.double)

    a = cp.Variable((n, 1))
    X = cp.Parameter((N, n))
    lam = cp.Parameter(1, nonneg=True)
    y = y_np

    log_likelihood = cp.sum(
        cp.multiply(y, X @ a) -
        cp.log_sum_exp(cp.hstack([np.zeros((N, 1)), X @ a]).T,
                       axis=0, keepdims=True).T
    )
    prob = cp.Problem(
        cp.Minimize(-log_likelihood + lam * cp.sum_squares(a)))
    fit_logreg = CvxpyLayer(prob, [X, lam], [a])

    def layer_eps(*x):
        return fit_logreg(*x, solver_args={"eps": 1e-12})

    torch.autograd.gradcheck(layer_eps,
                             (X_tch, lam_tch),
                             eps=1e-4, atol=1e-3, rtol=1e-3)
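
To unpack the likelihood term: each column of cp.hstack([np.zeros((N, 1)), X @ a]).T stacks 0 on top of xᵢᵀa, so the log_sum_exp along axis 0 evaluates to log(1 + exp(xᵢᵀa)) for each sample. The objective is therefore the standard negative Bernoulli log-likelihood −Σᵢ [yᵢ xᵢᵀa − log(1 + exp(xᵢᵀa))] plus the ridge penalty lam · ‖a‖².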
Example #13
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 5 votes |
def test_sdp(self):
    set_seed(2)
    n = 3
    p = 3
    C = cp.Parameter((n, n))
    A = [cp.Parameter((n, n)) for _ in range(p)]
    b = [cp.Parameter((1, 1)) for _ in range(p)]

    C_tch = torch.randn(n, n, requires_grad=True).double()
    A_tch = [torch.randn(n, n, requires_grad=True).double()
             for _ in range(p)]
    b_tch = [torch.randn(1, 1, requires_grad=True).double()
             for _ in range(p)]

    X = cp.Variable((n, n), symmetric=True)
    constraints = [X >> 0]
    constraints += [
        cp.trace(A[i]@X) == b[i] for i in range(p)
    ]
    prob = cp.Problem(cp.Minimize(cp.trace(C@X) + cp.sum_squares(X)),
                      constraints)
    layer = CvxpyLayer(prob, [C] + A + b, [X])
    torch.autograd.gradcheck(
        lambda *x: layer(*x, solver_args={'eps': 1e-12}),
        [C_tch] + A_tch + b_tch,
        eps=1e-6, atol=1e-3, rtol=1e-3)
Example #14
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 5 votes |
def test_not_enough_parameters_at_call_time(self):
    x = cp.Variable(1)
    lam = cp.Parameter(1, nonneg=True)
    lam2 = cp.Parameter(1, nonneg=True)
    objective = lam * cp.norm(x, 1) + lam2 * cp.sum_squares(x)
    prob = cp.Problem(cp.Minimize(objective))
    layer = CvxpyLayer(prob, [lam, lam2], [x])  # noqa: F841
    with self.assertRaisesRegex(
            ValueError,
            'A tensor must be provided for each CVXPY parameter.*'):
        layer(lam)
Example #15
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 5 votes |
def test_not_enough_parameters(self):
    x = cp.Variable(1)
    lam = cp.Parameter(1, nonneg=True)
    lam2 = cp.Parameter(1, nonneg=True)
    objective = lam * cp.norm(x, 1) + lam2 * cp.sum_squares(x)
    prob = cp.Problem(cp.Minimize(objective))
    with self.assertRaisesRegex(ValueError, "The layer's parameters.*"):
        CvxpyLayer(prob, [lam], [x])  # noqa: F841
Example #16
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 5 votes |
def test_broadcasting(self):
    set_seed(243)
    n_batch, m, n = 2, 100, 20

    A = cp.Parameter((m, n))
    b = cp.Parameter(m)
    x = cp.Variable(n)
    obj = cp.sum_squares(A@x - b) + cp.sum_squares(x)
    prob = cp.Problem(cp.Minimize(obj))
    prob_th = CvxpyLayer(prob, [A, b], [x])

    A_th = torch.randn(m, n).double().requires_grad_()
    b_th = torch.randn(m).double().unsqueeze(0).repeat(n_batch, 1) \
        .requires_grad_()
    b_th_0 = b_th[0]

    x = prob_th(A_th, b_th, solver_args={"eps": 1e-10})[0]

    def lstsq(A, b):
        return torch.solve(
            (A.t() @ b).unsqueeze(1),
            A.t() @ A + torch.eye(n).double())[0]
    x_lstsq = lstsq(A_th, b_th_0)

    grad_A_cvxpy, grad_b_cvxpy = grad(x.sum(), [A_th, b_th])
    grad_A_lstsq, grad_b_lstsq = grad(x_lstsq.sum(), [A_th, b_th_0])

    self.assertAlmostEqual(
        torch.norm(grad_A_cvxpy / n_batch - grad_A_lstsq).item(), 0.0)
    self.assertAlmostEqual(
        torch.norm(grad_b_cvxpy[0] - grad_b_lstsq).item(), 0.0)
Example #17
Source File: test_cvxpylayer.py From cvxpylayers with Apache License 2.0 | 5 votes |
def test_equality(self):
    set_seed(243)
    n = 10
    A = np.eye(n)
    x = cp.Variable(n)
    b = cp.Parameter(n)
    prob = cp.Problem(cp.Minimize(cp.sum_squares(x)), [A@x == b])
    layer = CvxpyLayer(prob, parameters=[b], variables=[x])
    b_tch = torch.randn(n, requires_grad=True)
    torch.autograd.gradcheck(
        lambda b: layer(b, solver_args={
            "eps": 1e-10, "acceleration_lookback": 0})[0].sum(),
        (b_tch,))
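
Since A is the identity, the feasible set is the single point x = b; the layer therefore acts as the identity map, and the gradcheck verifies that the derivative computed through the equality-constrained QP matches that identity behavior.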
Example #18
Source File: robust_pca.py From learning-circuits with Apache License 2.0 | 5 votes |
def sparse_lowrank_mse(name_size):
    name, size = name_size
    print(name, size)
    matrix = named_target_matrix(name, size)
    M = matrix
    lambda1 = cp.Parameter(nonneg=True)
    lambda2 = cp.Parameter(nonneg=True)
    L = cp.Variable((size, size))
    S = cp.Variable((size, size))
    prob = cp.Problem(cp.Minimize(cp.sum_squares(M - L - S) / size**2
                                  + lambda1 / size * cp.norm(L, 'nuc')
                                  + lambda2 / size**2 * cp.norm(S, 1)))
    result = []
    for _ in range(ntrials):
        l1 = np.exp(np.random.uniform(np.log(1e-2), np.log(1e4)))
        l2 = np.exp(np.random.uniform(np.log(1e-2), np.log(1e4)))
        lambda1.value = l1
        lambda2.value = l2
        try:
            prob.solve()
            nnz = (np.abs(S.value) >= 1e-7).sum()
            singular_values = np.linalg.svd(L.value, compute_uv=False)
            rank = (singular_values >= 1e-7).sum()
            n_params = nnz + 2 * rank * size
            mse = np.sum((matrix - L.value - S.value)**2) / size**2
            result.append((n_params, mse))
        except:
            pass
    budget = 2 * size * np.log2(size)
    if model[name] == 'BPBP':
        budget *= 2
    eligible = [res for res in result if res[0] <= budget]
    if eligible:
        mse = min(m for (n_params, m) in eligible)
    else:
        mse = np.sum(matrix**2) / size**2
    print(name, size, 'done')
    return (name, size, mse)
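
The objective encodes the usual robust-PCA trade-off: cp.norm(L, 'nuc') (the nuclear norm) promotes a low-rank L, cp.norm(S, 1) promotes a sparse S, and the random sweep over (lambda1, lambda2) keeps the best MSE among decompositions whose parameter count nnz(S) + 2 · rank(L) · size fits the budget.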
Example #19
Source File: l1lsq.py From doatools.py with MIT License | 5 votes |
def __init__(self, m, k, formulation='penalizedl1', nonnegative=False):
    if not cvx_available:
        raise RuntimeError('Cannot initialize when cvxpy is not available.')
    # Initialize parameters and variables
    A = cvx.Parameter((m, k))
    b = cvx.Parameter((m, 1))
    l = cvx.Parameter(nonneg=True)
    x = cvx.Variable((k, 1))
    # Create the problem
    if formulation == 'penalizedl1':
        obj_func = 0.5 * cvx.sum_squares(cvx.matmul(A, x) - b) + l * cvx.norm1(x)
        constraints = []
    elif formulation == 'constrainedl1':
        obj_func = cvx.sum_squares(cvx.matmul(A, x) - b)
        constraints = [cvx.norm1(x) <= l]
    elif formulation == 'constrainedl2':
        obj_func = cvx.norm1(x)
        constraints = [cvx.norm(cvx.matmul(A, x) - b) <= l]
    else:
        raise ValueError("Unknown formulation '{0}'.".format(formulation))
    if nonnegative:
        constraints.append(x >= 0)
    problem = cvx.Problem(cvx.Minimize(obj_func), constraints)
    self._formulation = formulation
    self._A = A
    self._b = b
    self._l = l
    self._x = x
    self._obj_func = obj_func
    self._constraints = constraints
    self._problem = problem
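
For reference, a minimal standalone version of the 'penalizedl1' branch, with constants in place of the cvxpy parameters (a sketch for illustration; the surrounding class and its solve interface are not shown in this excerpt):

import numpy as np
import cvxpy as cvx

m, k = 20, 50
A = np.random.randn(m, k)
b = np.random.randn(m, 1)
lam = 0.1
x = cvx.Variable((k, 1))
# Penalized l1 (LASSO): least-squares fit plus an l1 sparsity penalty
prob = cvx.Problem(cvx.Minimize(0.5 * cvx.sum_squares(A @ x - b) + lam * cvx.norm1(x)))
prob.solve()
print(np.count_nonzero(np.abs(x.value) > 1e-6), 'nonzero coefficients')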
Example #20
Source File: problems.py From newton_admm with Apache License 2.0 | 5 votes |
def least_squares(m, n):
    """ Create a least squares problem with m datapoints and n dimensions """
    A = np.random.randn(m, n)
    _x = np.random.randn(n)
    b = A.dot(_x)
    x = cp.Variable(n)
    return (x, cp.Problem(cp.Minimize(
        cp.sum_squares(A * x - b) + cp.norm(x, 2))))
Example #21
Source File: problems.py From newton_admm with Apache License 2.0 | 5 votes |
def robust_pca(p, suppfrac):
    """ Create a robust PCA problem with a low rank matrix """
    # First, create a rank = "rk" matrix:
    rk = int(round(p * 0.5))
    assert rk <= min(p, p)
    Lstar = np.zeros((p, p))
    for i in range(rk):
        vi = np.random.randn(p, 1)
        mati = vi.dot(vi.T)
        Lstar += mati
    # Then, create a sparse matrix:
    Mstar = np.random.randn(p, p)
    Mstar_vec = Mstar.T.ravel()
    nnz = int(np.floor((1.0 - suppfrac) * p * p))  # Num. nonzeros
    assert nnz <= p * p
    idxes = np.random.randint(0, p * p, nnz)
    Mstar_vec[idxes] = 0
    Mstar = np.reshape(Mstar_vec, (p, p)).T
    # Finally, sum the two matrices "L" and "M":
    X = Lstar + Mstar
    lam = 1.0
    Lhat = cp.Variable(p, p)
    Mhat = cp.Variable(p, p)
    prob = cp.Problem(
        cp.Minimize(cp.norm(Lhat, "nuc") + cp.sum_squares(Lhat)),
        [cp.norm(Mhat, 1) <= lam, Lhat + Mhat == X])
    data = prob.get_problem_data(cp.SCS)
    data['beta_from_x'] = cvxpy_beta_from_x(prob, (Lhat, Mhat),
                                            data['A'].shape[0])
    return ((Lhat, Mhat), prob, data)
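
Note that cp.Variable(p, p) is the pre-1.0 cvxpy API; in cvxpy ≥ 1.0 the shape is passed as a tuple, cp.Variable((p, p)), so running this example today requires that adjustment.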
Example #22
Source File: test_transforms.py From ProxImaL with MIT License | 5 votes |
def test_merge(self):
    """Test merging functions.
    """
    # sum_entries
    x = Variable(10)
    fn1 = sum_entries(x, gamma=1.0)
    fn2 = norm1(x)
    assert can_merge(fn1, fn2)
    merged = merge_fns(fn1, fn2)
    v = np.arange(10) * 1.0 - 5.0
    prox_val1 = merged.prox(1.0, v.copy())
    tmp = norm1(x, c=np.ones(10), gamma=1.0)
    prox_val2 = tmp.prox(1.0, v.copy())
    self.assertItemsAlmostEqual(prox_val1, prox_val2)

    # sum_squares
    x = Variable(10)
    val = np.arange(10)
    fn1 = sum_squares(x, gamma=1.0, beta=2.0, alpha=3.0, b=val)
    fn2 = norm1(x)
    assert can_merge(fn1, fn2)
    merged = merge_fns(fn1, fn2)
    v = np.arange(10) * 1.0 - 5.0
    prox_val1 = merged.prox(1.0, v.copy())
    tmp = norm1(x, c=-12 * val, gamma=1.0 + 12, d=val.dot(val))
    prox_val2 = tmp.prox(1.0, v.copy())
    self.assertItemsAlmostEqual(prox_val1, prox_val2)
Example #23
Source File: mle.py From e2e-model-learning with Apache License 2.0 | 5 votes |
def linear_softmax_reg(X, Y, params):
    m, n = X.shape[0], X.shape[1]
    Theta = cp.Variable(n, len(params['d']))
    f = cp.sum_entries(
        cp.log_sum_exp(X*Theta, axis=1) -
        cp.sum_entries(cp.mul_elemwise(Y, X*Theta), axis=1)) / m
    lam = 1e-5  # regularization
    cp.Problem(cp.Minimize(f + lam * cp.sum_squares(Theta)), []).solve()
    Theta = np.asarray(Theta.value)
    return Theta

# Optimize expected value of inventory allocation
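
This example likewise targets the pre-1.0 cvxpy API (sum_entries, mul_elemwise, and * for matrix products). A rough cvxpy ≥ 1.0 equivalent of the objective would be (a sketch, not from the original project):

Theta = cp.Variable((n, len(params['d'])))
f = cp.sum(cp.log_sum_exp(X @ Theta, axis=1)
           - cp.sum(cp.multiply(Y, X @ Theta), axis=1)) / m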
Example #24
Source File: reg.py From GLRM with MIT License | 5 votes |
def reg(self, X):
    return self.nu * cp.sum_squares(X)
Example #25
Source File: test_problem.py From ProxImaL with MIT License | 5 votes |
def test_single_func(self):
    """Test problems with only a single function to minimize.
    """
    X = Variable((4, 2))
    B = np.reshape(np.arange(8), (4, 2)) * 1.
    prox_fns = [sum_squares(X - B)]
    prob = Problem(prox_fns[0])
    prob.solve(solver="admm", eps_rel=1e-6, eps_abs=1e-6)
    self.assertItemsAlmostEqual(X.value, B, places=2)
Example #26
Source File: test_problem.py From ProxImaL with MIT License | 5 votes |
def test_multiple_vars(self):
    """Test problems with multiple variables."""
    x = Variable(3)
    y = Variable(6)
    rhs = np.array([1, 2, 3])
    prob = Problem([sum_squares(x - rhs),
                    sum_squares(subsample(y, [2]) - x)])
    prob.solve(solver="admm", eps_rel=1e-6, eps_abs=1e-6)
    self.assertItemsAlmostEqual(x.value, [1, 2, 3], places=3)
    self.assertItemsAlmostEqual(y.value, [1, 0, 2, 0, 3, 0], places=3)
Example #27
Source File: test_prox_fn.py From ProxImaL with MIT License | 5 votes |
def test_sum_squares(self):
    """Test sum squares prox fn.
    """
    # No modifiers.
    tmp = Variable(10)
    fn = sum_squares(tmp)
    rho = 1
    v = np.arange(10) * 1.0
    x = fn.prox(rho, v.copy())
    self.assertItemsAlmostEqual(x, v * rho / (2 + rho))

    rho = 2
    x = fn.prox(rho, v.copy())
    self.assertItemsAlmostEqual(x, v * rho / (2 + rho))

    # With modifiers.
    mod_fn = sum_squares(tmp, alpha=2, beta=-1,
                         c=np.ones(10) * 1.0, b=np.ones(10) * 1.0, gamma=1)

    rho = 2
    v = np.arange(10) * 1.0
    x = mod_fn.prox(rho, v.copy())

    # vhat = mod_fn.beta*(v - mod_fn.c/rho)*rho/(rho+2*mod_fn.gamma) - mod_fn.b
    # rho_hat = rho/(mod_fn.alpha*np.sqrt(np.abs(mod_fn.beta)))
    # xhat = fn.prox(rho_hat, vhat)
    x_var = cvx.Variable(10)
    cost = 2 * cvx.sum_squares(-x_var - np.ones(10)) + \
        np.ones(10).T * x_var + cvx.sum_squares(x_var) + \
        (rho / 2) * cvx.sum_squares(x_var - v)
    prob = cvx.Problem(cvx.Minimize(cost))
    prob.solve()

    self.assertItemsAlmostEqual(x, x_var.value, places=3)
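
The expected value in the unmodified case follows directly: prox solves argmin_x ‖x‖² + (ρ/2)‖x − v‖², and setting the gradient 2x + ρ(x − v) to zero gives x = ρv/(ρ + 2), matching the v * rho / (2 + rho) asserted above.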
Example #28
Source File: test_transforms.py From ProxImaL with MIT License | 5 votes |
def test_merge_all(self):
    """Test function to merge all prox operators possible.
    """
    # merge all
    x = Variable(10)
    lin_op = grad(x)
    fns = [sum_squares(lin_op), sum_entries(lin_op), nonneg(lin_op)]
    merged = merge_all(fns)
    assert len(merged) == 1
    v = np.reshape(np.arange(10) * 1.0 - 5.0, (10, 1))
    prox_val1 = merged[0].prox(1.0, v.copy())
    tmp = nonneg(lin_op, c=np.ones((10, 1)), gamma=1.0)
    prox_val2 = tmp.prox(1.0, v.copy())
    self.assertItemsAlmostEqual(prox_val1, prox_val2)
Example #29
Source File: test_transforms.py From ProxImaL with MIT License | 5 votes |
def test_const_val(self):
    """Test obtaining the constant offset.
    """
    x = Variable(10)
    b = np.arange(10)
    expr = x - b
    self.assertItemsAlmostEqual(-b, expr.get_offset())
    fn = sum_squares(expr)
    new_fn = absorb_offset(fn)
    self.assertItemsAlmostEqual(b, new_fn.b)
Example #30
Source File: cvxpy_examples.py From cvxpylayers with Apache License 2.0 | 5 votes |
def simple_qp():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('simple qp')
    npr.seed(0)

    nx, ncon = 2, 3
    G = cp.Parameter((ncon, nx))
    h = cp.Parameter(ncon)
    x = cp.Variable(nx)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - 1))
    cons = [G * x <= h]
    prob = cp.Problem(obj, cons)

    data, chain, inv_data = prob.get_problem_data(solver=cp.SCS)
    param_prob = data[cp.settings.PARAM_PROB]
    print(param_prob.A.A)

    x0 = npr.randn(nx)
    s0 = npr.randn(ncon)
    G.value = npr.randn(ncon, nx)
    h.value = G.value.dot(x0) + s0
    prob.solve(solver=cp.SCS)

    delC = npr.randn(param_prob.c.shape[0])[:-1]
    delA = npr.randn(param_prob.A.shape[0])
    num_con = delA.size // (param_prob.x.size + 1)
    delb = delA[-num_con:]
    delA = delA[:-num_con]
    delA = sp.csc_matrix(np.reshape(delA, (num_con, param_prob.x.size)))
    del_param_dict = param_prob.apply_param_jac(delC, delA, delb)
    print(del_param_dict)

    var_map = param_prob.split_solution(npr.randn(param_prob.x.size))
    print(var_map)
    print(param_prob.split_adjoint(var_map))

    print(x.value)