Python cvxpy.Constant() Examples
The following are 9 code examples of cvxpy.Constant(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cvxpy, or try the search function.
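
For orientation, here is a minimal, self-contained sketch that is not taken from any of the projects below. It uses the current cvxpy 1.x API (several examples below rely on the older 0.x interface, e.g. sum_entries and mul_elemwise) and shows the basic role of cvxpy.Constant(): wrapping fixed numeric data so it can be combined with Variables in an expression.

import cvxpy as cp
import numpy as np

# Fixed problem data wrapped as cvxpy Constants.
A = cp.Constant(np.array([[1.0, 2.0], [3.0, 4.0]]))
b = cp.Constant(np.array([1.0, 1.0]))

# A 2-dimensional decision variable.
x = cp.Variable(2)

# Least-squares objective mixing the Constants with the Variable.
problem = cp.Problem(cp.Minimize(cp.norm(A @ x - b, 2)))
problem.solve()
print(x.value)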
Example #1
Source File: loss.py From GLRM with MIT License | 5 votes |
def loss(self, A, U): return cp.norm(cp.Constant(A) - U, "fro")/2.0
Example #2
Source File: loss.py From GLRM with MIT License | 5 votes |
def loss(self, A, U): return cp.sum_entries(cp.huber(cp.Constant(A) - U, self.a))
Example #3
Source File: loss.py From GLRM with MIT License | 5 votes |
def __str__(self): return "huber loss"

# class FractionalLoss(Loss):
#     PRECISION = 1e-10
#     def loss(self, A, U):
#         B = cp.Constant(A)
#         U = cp.max_elemwise(U, self.PRECISION) # to avoid dividing by zero
#         return cp.max_elemwise(cp.mul_elemwise(cp.inv_pos(cp.pos(U)), B-U), \
#         # return maximum((A - U)/U, (U - A)/A)
Example #4
Source File: loss.py From GLRM with MIT License | 5 votes |
def loss(self, A, U): return cp.sum_entries(cp.pos(ones(A.shape)-cp.mul_elemwise(cp.Constant(A), U)))
Example #5
Source File: glrm.py From GLRM with MIT License | 5 votes |
def _initialize_probs(self, A, k, missing_list, regX, regY):

    # useful parameters
    m = A[0].shape[0]
    ns = [a.shape[1] for a in A]
    if missing_list == None: missing_list = [[]]*len(self.L)

    # initialize A, X, Y
    B = self._initialize_A(A, missing_list)
    X0, Y0 = self._initialize_XY(B, k, missing_list)
    self.X0, self.Y0 = X0, Y0

    # cvxpy problems
    Xv, Yp = cp.Variable(m,k), [cp.Parameter(k+1,ni) for ni in ns]
    Xp, Yv = cp.Parameter(m,k+1), [cp.Variable(k+1,ni) for ni in ns]
    Xp.value = copy(X0)
    for yj, yj0 in zip(Yp, Y0): yj.value = copy(yj0)
    onesM = cp.Constant(ones((m,1)))

    obj = sum(L(Aj, cp.mul_elemwise(mask, Xv*yj[:-1,:] \
            + onesM*yj[-1:,:]) + offset) + ry(yj[:-1,:])\
            for L, Aj, yj, mask, offset, ry in \
            zip(self.L, A, Yp, self.masks, self.offsets, regY)) + regX(Xv)
    pX = cp.Problem(cp.Minimize(obj))
    pY = [cp.Problem(cp.Minimize(\
            L(Aj, cp.mul_elemwise(mask, Xp*yj) + offset) \
            + ry(yj[:-1,:]) + regX(Xp))) \
            for L, Aj, yj, mask, offset, ry in \
            zip(self.L, A, Yv, self.masks, self.offsets, regY)]

    self.probX = (Xv, Yp, pX)
    self.probY = (Xp, Yv, pY)
Example #6
Source File: reinforcement_learning.py From safe_learning with MIT License | 5 votes |
def _run_cvx_optimization(self, next_states, rewards, **solver_options):
    """Tensorflow wrapper around a cvxpy value function optimization.

    Parameters
    ----------
    next_states : ndarray
    rewards : ndarray

    Returns
    -------
    values : ndarray
        The optimal values at the states.
    """
    # Define random variables; convert index from np.int64 to regular
    # python int to avoid strange cvxpy error; see:
    # https://github.com/cvxgrp/cvxpy/issues/380
    values = cvxpy.Variable(rewards.shape)

    value_matrix = self.value_function.tri.parameter_derivative(
        next_states)
    # Make cvxpy work with sparse matrices
    value_matrix = cvxpy.Constant(value_matrix)

    objective = cvxpy.Maximize(cvxpy.sum(values))
    constraints = [values <= rewards + self.gamma * value_matrix * values]
    prob = cvxpy.Problem(objective, constraints)

    # Solve optimization problem
    prob.solve(**solver_options)

    # Some error checking
    if not prob.status == cvxpy.OPTIMAL:
        raise OptimizationError('Optimization problem is {}'
                                .format(prob.status))

    return np.array(values.value)
Example #7
Source File: gen_bid_curve.py From cvxpower with GNU General Public License v3.0 | 5 votes |
def cost(self):
    p = -self.terminals[0].power_var
    segments = [cvx.Constant(self.no_load_cost)]
    prev_power = None
    for power, price in self.bid_curve[1:]:
        if prev_power is None:
            offset = self.no_load_cost
        else:
            offset += (power - prev_power)*prev_price
        segments.append(price*(p - power) + offset)
        prev_power = power
        prev_price = price
    return cvx.max_elemwise(*segments)
Example #8
Source File: admm_problem.py From ncvx with GNU General Public License v3.0 | 4 votes |
def admm(self, rho=None, max_iter=50, restarts=5, alpha=1.8,
         random=False, sigma=1.0, gamma=1e6, polish_best=True,
         num_procs=None, parallel=True, seed=1, show_progress=False,
         prox_polished=False, polish_depth=5,
         neighbor_func=None, polish_func=None,
         *args, **kwargs):
    # rho is a list of values, one for each restart.
    if rho is None:
        rho = [np.random.uniform() for i in range(restarts)]
    else:
        assert len(rho) == restarts
    # num_procs is the number of processors to launch.
    if num_procs is None:
        num_procs = multiprocessing.cpu_count()

    # Construct the relaxation.
    if type(self.objective) == cvx.Minimize:
        rel_obj = self.objective
    else:
        rel_obj = -self.objective

    rel_constr = self.constraints
    for var in get_noncvx_vars(self):
        rel_constr += var.relax()

    rel_prob = cvx.Problem(rel_obj, rel_constr)

    # HACK skip this.
    # lower_bound = rel_prob.solve(*args, **kwargs)
    lower_bound = -np.inf
    if show_progress:
        print("lower bound =", lower_bound)

    # Algorithm.
    if parallel:
        pool = multiprocessing.Pool(num_procs)
        tmp_prob = cvx.Problem(rel_prob.objective, rel_prob.constraints)
        best_per_rho = pool.map(admm_inner_iter,
            [(idx, tmp_prob, None, rho_val, gamma, max_iter, random, polish_best,
              seed, sigma, show_progress, neighbor_func, polish_func,
              prox_polished, polish_depth, lower_bound, alpha,
              args, kwargs) for idx, rho_val in enumerate(rho)])
        pool.close()
        pool.join()
    else:
        xvars = {var.id: var for var in rel_prob.variables()}
        prox = Prox(rel_prob, xvars)
        best_per_rho = list(map(admm_inner_iter,
            [(idx, rel_prob, prox, rho_val, gamma, max_iter, random, polish_best,
              seed, sigma, show_progress, neighbor_func, polish_func,
              prox_polished, polish_depth, lower_bound, alpha,
              args, kwargs) for idx, rho_val in enumerate(rho)]))

    # Merge best so far.
    argmin = min([(val[0], idx) for idx, val in enumerate(best_per_rho)])[1]
    best_so_far = best_per_rho[argmin]
    # print "best found", best_so_far[0]

    # Unpack result.
    for var in self.variables():
        var.value = best_so_far[1][var.id]

    residual = cvx.Constant(0)
    for constr in self.constraints:
        residual += get_constr_error(constr)

    return self.objective.value, residual.value
Example #9
Source File: cvxpy_.py From qpsolvers with GNU Lesser General Public License v3.0 | 4 votes |
def cvxpy_solve_qp(P, q, G=None, h=None, A=None, b=None, initvals=None,
                   solver=None, verbose=False):
    """
    Solve a Quadratic Program defined as:

    .. math::

        \\begin{split}\\begin{array}{ll}
        \\mbox{minimize} & \\frac{1}{2} x^T P x + q^T x \\\\
        \\mbox{subject to} & G x \\leq h \\\\
            & A x = b
        \\end{array}\\end{split}

    calling a given solver using the `CVXPY <http://www.cvxpy.org/>`_
    modelling language.

    Parameters
    ----------
    P : array, shape=(n, n)
        Primal quadratic cost matrix.
    q : array, shape=(n,)
        Primal quadratic cost vector.
    G : array, shape=(m, n)
        Linear inequality constraint matrix.
    h : array, shape=(m,)
        Linear inequality constraint vector.
    A : array, shape=(meq, n), optional
        Linear equality constraint matrix.
    b : array, shape=(meq,), optional
        Linear equality constraint vector.
    initvals : array, shape=(n,), optional
        Warm-start guess vector (not used).
    solver : string, optional
        Solver name in ``cvxpy.installed_solvers()``.
    verbose : bool, optional
        Set to `True` to print out extra information.

    Returns
    -------
    x : array, shape=(n,)
        Solution to the QP, if found, otherwise ``None``.
    """
    if initvals is not None:
        print("CVXPY: note that warm-start values are ignored by wrapper")
    n = q.shape[0]
    x = Variable(n)
    P = Constant(P)  # see http://www.cvxpy.org/en/latest/faq/
    objective = Minimize(0.5 * quad_form(x, P) + q * x)
    constraints = []
    if G is not None:
        constraints.append(G * x <= h)
    if A is not None:
        constraints.append(A * x == b)
    prob = Problem(objective, constraints)
    prob.solve(solver=solver, verbose=verbose)
    x_opt = array(x.value).reshape((n,))
    return x_opt
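
As a rough usage illustration (not part of the qpsolvers source), the wrapper above might be called on a small dense problem as follows; the numeric values are made up for demonstration.

import numpy as np

# Hypothetical data for a 2-variable QP:
#   minimize    x1^2 + x2^2 - 2*x1 - 4*x2
#   subject to  x1 + x2 <= 3
P = np.array([[2.0, 0.0], [0.0, 2.0]])
q = np.array([-2.0, -4.0])
G = np.array([[1.0, 1.0]])
h = np.array([3.0])

x_opt = cvxpy_solve_qp(P, q, G, h)
print(x_opt)  # expected to be close to [1.0, 2.0]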