Python pulp.lpSum() Examples

The following are 27 code examples of pulp.lpSum(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module pulp, or try the search function.
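As a quick orientation before the project examples, here is a minimal, self-contained sketch (all names made up) of what pulp.lpSum() does: it collapses an iterable of terms into one linear expression, which can then serve as an objective or, when compared against a value, as a constraint.

import pulp

# Toy problem: choose quantities x[i] >= 0 that maximise a weighted sum
# subject to a single capacity constraint.
weights = {'a': 3.0, 'b': 5.0, 'c': 2.0}

prob = pulp.LpProblem('lpSum_demo', pulp.LpMaximize)
x = pulp.LpVariable.dicts('x', list(weights), lowBound=0)

# lpSum turns an iterable of affine terms into a single LpAffineExpression.
prob += pulp.lpSum([weights[i] * x[i] for i in weights])        # objective
prob += pulp.lpSum([x[i] for i in weights]) <= 10, 'capacity'   # constraint

prob.solve()
print(pulp.LpStatus[prob.status], {i: x[i].value() for i in weights})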
Example #1
Source File: multiplier_model.py    From pyDEA with MIT License
def get_equality_constraint(self, input_data, dmu_code, input_variables,
                                output_variables):
        ''' Generates equality constraint of input-oriented multiplier model.

            Args:
                input_data (InputData): object that stores input data.
                dmu_code (str): DMU code.
                input_variables (dict of str to pulp.LpVariable): dictionary
                    that maps variable name to pulp variable corresponding
                    to input categories.
                output_variables (dict of str to pulp.LpVariable): dictionary
                    that maps variable name to pulp variable corresponding
                    to output categories.

            Returns:
                pulp.LpConstraint: equality constraint.
        '''
        return pulp.lpSum([input_data.coefficients[dmu_code, category] *
                          input_variables[category]
                          for category in input_data.input_categories]) == 1 
Example #2
Source File: pulp_solver.py    From pydfs-lineup-optimizer with MIT License
def set_objective(self, variables, coefficients):
        self.prob += lpSum([variable * coefficient for variable, coefficient in zip(variables, coefficients)]) 
Example #3
Source File: lp_solve.py    From RevPy with MIT License
def define_lp(fares, product_names):
    """Set up LP.

    Parameters
    ----------
    fares: 2D np array
            contains fares for products, size n_classes*n_products
    product_names: list
            product names (typically relation/class combinations)

    Returns
    -------
    tuple of the form (LP problem, decision variables)
    """
    prob = pulp.LpProblem('network_RM', pulp.LpMaximize)

    # decision variables: available seats per product
    x = pulp.LpVariable.dicts('x', product_names, lowBound=0)

    # objective function
    revenue = pulp.lpSum([x[it]*fares.ravel()[i] for i, it in
                          enumerate(product_names)])
    prob += revenue

    return prob, x 
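A hedged usage sketch for define_lp (the fares and product names below are invented, not RevPy data). The objective pairs x[product_names[i]] with fares.ravel()[i], so len(product_names) must equal fares.size and the names must follow ravel (class-major) order. Since define_lp only bounds the variables from below, the maximisation is unbounded until capacity constraints are added (see Example #12); a crude stand-in cap makes the sketch solvable here.

import numpy as np
import pulp

fares = np.array([[120.0, 200.0],    # class Y fares for itineraries AB, AC (hypothetical)
                  [ 90.0, 150.0]])   # class M fares
product_names = ['AB_Y', 'AC_Y', 'AB_M', 'AC_M']   # fares.ravel() order

prob, x = define_lp(fares, product_names)
prob += pulp.lpSum(x.values()) <= 100, 'total_seats'   # stand-in for real capacity constraints

prob.solve()
print(pulp.LpStatus[prob.status], pulp.value(prob.objective))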
Example #4
Source File: multiplier_model_decorators.py    From pyDEA with MIT License
def _get_equality_constraint(self, dmu_code):
        ''' Returns equality constraint.

            Args:
                dmu_code (str): DMU code.

            Returns:
                pulp.LpConstraint: equality constraint.
        '''
        coeffs = self._model_to_decorate.input_data.coefficients
        variables = self._get_variables()
        sum_vars = pulp.lpSum([coeffs[dmu_code, category] * value
                              for category, value in variables.items()
                              if category not in self.categories])
        assert(sum_vars)
        return sum_vars == 1 
Example #5
Source File: multiplier_model.py    From pyDEA with MIT License
def get_equality_constraint(self, input_data, dmu_code, input_variables,
                                output_variables):
        ''' Generates equality constraint of output-oriented multiplier model.

            Args:
                input_data (InputData): object that stores input data.
                dmu_code (str): DMU code.
                input_variables (dict of str to pulp.LpVariable): dictionary
                    that maps variable name to pulp variable corresponding
                    to input categories.
                output_variables (dict of str to pulp.LpVariable): dictionary
                    that maps variable name to pulp variable corresponding
                    to output categories.

            Returns:
                pulp.LpConstraint: equality constraint.
        '''
        return pulp.lpSum([input_data.coefficients[dmu_code, category] *
                          output_variables[category]
                          for category in input_data.output_categories]) == 1 
Example #6
Source File: multiplier_model.py    From pyDEA with MIT License
def get_objective_function(self, input_data, dmu_code, input_variables,
                               output_variables):
        ''' Generates objective function of output-oriented multiplier model.

            Args:
                input_data (InputData): object that stores input data.
                dmu_code (str): DMU code.
                input_variables (dict of str to pulp.LpVariable): dictionary
                    that maps variable name to pulp variable corresponding
                    to input categories.
                output_variables (dict of str to pulp.LpVariable): dictionary
                    that maps variable name to pulp variable corresponding
                    to output categories.

            Returns:
                pulp.LpAffineExpression: objective function.
        '''
        return pulp.lpSum([input_data.coefficients[dmu_code, category] *
                          input_variables[category]
                          for category in input_data.input_categories]) 
Example #7
Source File: run_mskmeans.py    From MinSizeKmeans with GNU General Public License v3.0
def create_model(self):
        def distances(assignment):
            return l2_distance(self.data[assignment[0]], self.centroids[assignment[1]])

        clusters = list(range(self.k))
        assignments = [(i, j) for i in range(self.n) for j in range(self.k)]

        # outflow variables for data nodes
        self.y = pulp.LpVariable.dicts('data-to-cluster assignments',
                                  assignments,
                                  lowBound=0,
                                  upBound=1,
                                  cat=pulp.LpInteger)

        # outflow variables for cluster nodes
        self.b = pulp.LpVariable.dicts('cluster outflows',
                                  clusters,
                                  lowBound=0,
                                  upBound=self.n-self.min_size,
                                  cat=pulp.LpContinuous)

        # create the model
        self.model = pulp.LpProblem("Model for assignment subproblem", pulp.LpMinimize)

        # objective function
        self.model += pulp.lpSum(distances(assignment) * self.y[assignment] for assignment in assignments)

        # flow balance constraints for data nodes
        for i in range(self.n):
            self.model += pulp.lpSum(self.y[(i, j)] for j in range(self.k)) == 1

        # flow balance constraints for cluster nodes
        for j in range(self.k):
            self.model += pulp.lpSum(self.y[(i, j)] for i in range(self.n)) - self.min_size == self.b[j]

        # flow balance constraint for the sink node
        self.model += pulp.lpSum(self.b[j] for j in range(self.k)) == self.n - (self.k * self.min_size) 
Example #8
Source File: envelopment_model_base.py    From pyDEA with MIT License
def _add_constraints_for_outputs(self, variables, dmu_code,
                                     obj_variable):
        ''' Adds constraints for outputs to linear program.

            Args:
                variables (dict of str to pulp.LpVariable): a dictionary
                    that maps DMU codes to pulp.LpVariable, created with
                    pulp.LpVariable.dicts.
                dmu_code (str): DMU code for which LP is being created.
                obj_variable (pulp.LpVariable): LP variable that is optimised
                    (either efficiency score or inverse of efficiency score).
        '''
        for (count, output_category) in enumerate(
                self.input_data.output_categories):
            current_output = self.input_data.coefficients[(dmu_code,
                                                          output_category)]
            output_coeff = self._concrete_model.get_output_variable_coefficient(
                obj_variable, output_category)
            sum_all_outputs = pulp.lpSum([variables[dmu] *
                                         self.input_data.coefficients
                                         [(dmu, output_category)]
                                         for dmu in self.input_data.DMU_codes])
            name = 'constraint_output_{count}'.format(count=count)
            self.lp_model += (self._constraint_creator.create(
                              -output_coeff * current_output +
                              sum_all_outputs, 0, output_category), name)
            self._constraints[output_category] = name 
Example #9
Source File: LEMON.py    From cdlib with BSD 2-Clause "Simplified" License
def __min_one_norm(B, initial_seed, seed):
    weight_initial = 1 / float(len(initial_seed))
    weight_later_added = weight_initial / float(0.5)
    difference = len(seed) - len(initial_seed)
    [r, c] = B.shape
    prob = pulp.LpProblem("Minimum one norm", pulp.LpMinimize)
    indices_y = range(0, r)
    y = pulp.LpVariable.dicts("y_s", indices_y, 0)
    indices_x = range(0, c)
    x = pulp.LpVariable.dicts("x_s", indices_x)

    f = dict(zip(indices_y, [1.0] * r))

    prob += pulp.lpSum(f[i] * y[i] for i in indices_y)  # objective function

    prob += pulp.lpSum(y[s] for s in initial_seed) >= 1

    prob += pulp.lpSum(y[s] for s in seed) >= 1 + weight_later_added * difference

    for j in range(r):
        temp = dict(zip(indices_x, list(B[j, :])))
        prob += pulp.lpSum([y[j]] + [temp[k] * x[k] for k in indices_x]) == 0

    prob.solve()

    result = []
    for var in indices_y:
        result.append(y[var].value())

    return result 
Example #10
Source File: upper_bound_ilp.py    From acl2017-interactive_summarizer with Apache License 2.0
def solve_ilp(self, N):
        # build the A matrix: a_ij is 1 if j-th gram appears in the i-th sentence

        A = np.zeros((len(self.sentences_idx), len(self.ref_ngrams_idx)))
        for i in self.sentences_idx:
            sent = self.sentences[i].untokenized_form
            sngrams = list(extract_ngrams2([sent], self.stemmer, self.LANGUAGE, N))
            for j in self.ref_ngrams_idx:
                if self.ref_ngrams[j] in sngrams:
                    A[i][j] = 1

        # Define ILP variable, x_i is 1 if sentence i is selected, z_j is 1 if gram j appears in the created summary
        x = pulp.LpVariable.dicts('sentences', self.sentences_idx, lowBound=0, upBound=1, cat=pulp.LpInteger)
        z = pulp.LpVariable.dicts('grams', self.ref_ngrams_idx, lowBound=0, upBound=1, cat=pulp.LpInteger)

        # Define ILP problem, maximum coverage of grams from the reference summaries
        prob = pulp.LpProblem("ExtractiveUpperBound", pulp.LpMaximize)
        prob += pulp.lpSum(z[j] for j in self.ref_ngrams_idx)

        # Define ILP constraints, length constraint and consistency constraint (impose that z_j is 1 if j
        # appears in the created summary)
        prob += pulp.lpSum(x[i] * self.sentences[i].length for i in self.sentences_idx) <= self.sum_length

        for j in self.ref_ngrams_idx:
            prob += pulp.lpSum(A[i][j] * x[i] for i in self.sentences_idx) >= z[j]

        # Solve ILP problem and post-processing to get the summary
        try:
            print('Solving using CPLEX')
            prob.solve(pulp.CPLEX(msg=0))
        except:
            print('Fall back to GLPK')
            prob.solve(pulp.GLPK(msg=0))
                

        summary_idx = []
        for idx in self.sentences_idx:
            if x[idx].value() == 1.0:
                summary_idx.append(idx)

        return summary_idx 
Example #11
Source File: chemistry.py    From chempy with BSD 2-Clause "Simplified" License
def _solve_balancing_ilp_pulp(A):
    import pulp
    x = [pulp.LpVariable('x%d' % i, lowBound=1, cat='Integer') for i in range(A.shape[1])]
    prob = pulp.LpProblem("chempy balancing problem", pulp.LpMinimize)
    prob += reduce(add, x)
    for expr in [pulp.lpSum([x[i]*e for i, e in enumerate(row)]) for row in A.tolist()]:
        prob += expr == 0
    prob.solve()
    return [pulp.value(_) for _ in x] 
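A hedged usage sketch with a made-up reaction. If the function above is lifted into a standalone script it also needs reduce and add (chempy imports them at module level, which the snippet does not show). Columns of A are species and rows are elements, with product columns negated so each row must balance to zero.

import numpy as np
from functools import reduce   # required by the function body above
from operator import add

# Balance H2 + O2 -> H2O: rows are elements (H, O), columns are species
# (H2, O2, H2O), with the product column negated.
A = np.array([[2, 0, -2],
              [0, 2, -1]])

print(_solve_balancing_ilp_pulp(A))   # expected [2, 1, 2], i.e. 2 H2 + O2 -> 2 H2O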
Example #12
Source File: lp_solve.py    From RevPy with MIT License
def add_capacity_constraints(prob, x, A, product_names, capacities,
                             leg_names=None):
    """Add capacity contraints as upper bound on the segments/legs.

  Parameters
  ----------
    prob: pulp.LpProblem
        the LP problem
    x: dict
        contains decision variables (each of which has type pulp.pulp.LpVariable)
    A: 2D np array
        incidence matrix, size n_relations*n_legs
    products_names: list
        list of product names (e.g. relation/class combinations)
    capacities: Iterable
        list of capacity on each leg/segment
    leg_names: list
        list of leg names

    Returns
    -------
    list of tuples of the form (constraint, constraint_name)
    where `constraint` is of type pulp.LpConstraint
    and `constraint_name` is of type str
    """
    n_trips, n_legs = A.shape
    n_classes = int(len(product_names) / n_trips)
    A_ = np.tile(A, (n_classes, 1))

    capacity_constraints = []
    for leg, leg_name in enumerate(leg_names):
        leg_load = pulp.lpSum([x[it]*A_[i, leg] for i, it
                               in enumerate(product_names)])
        capacity_constraint = (leg_load <= capacities[leg],
                               "cap_{}".format(leg_name))
        prob += capacity_constraint
        capacity_constraints.append(capacity_constraint)

    return capacity_constraints 
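A hedged sketch pairing this with define_lp from Example #3 (all data invented): two itineraries AB and AC over legs AB and BC, where AC uses both legs. The incidence matrix rows follow the within-class product order, and np.tile stacks one copy per fare class.

import numpy as np
import pulp

fares = np.array([[120.0, 200.0],    # class Y fares for itineraries AB, AC
                  [ 90.0, 150.0]])   # class M fares
product_names = ['AB_Y', 'AC_Y', 'AB_M', 'AC_M']
A = np.array([[1, 0],                # itinerary AB uses leg AB only
              [1, 1]])               # itinerary AC uses legs AB and BC
capacities = [100, 50]
leg_names = ['AB', 'BC']

prob, x = define_lp(fares, product_names)          # Example #3
add_capacity_constraints(prob, x, A, product_names, capacities, leg_names)

prob.solve()
print(pulp.LpStatus[prob.status], pulp.value(prob.objective))
for name, var in x.items():
    print(name, var.value())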
Example #13
Source File: label_prop_v2.py    From transferlearning with MIT License
def label_prop(C, nt, Dct, lp="linear"):
    
    """Label propagation via linear programming.

    Inputs:
      C      :    Number of shared classes between src and tar
      nt     :    Number of target domain samples
      Dct    :    All d_ct in matrix form, nt * C
      lp     :    Type of linear programming: linear (default) | binary
    Outputs:
      Mcj    :    All M_ct in matrix form, nt * C
    """
    Dct = abs(Dct)
    model = pulp.LpProblem("Cost minimising problem", pulp.LpMinimize)
    Mcj = pulp.LpVariable.dicts("Probability",
                                ((i, j) for i in range(C) for j in range(nt)),
                                lowBound=0,
                                upBound=1,
                                cat='Continuous')
    
    # Objective Function
    model += (
    pulp.lpSum([Dct[j, i]*Mcj[(i, j)] for i in range(C) for j in range(nt)])
    )
    
    # Constraints
    for j in range(nt):
        model += pulp.lpSum([Mcj[(i, j)] for i in range(C)]) == 1
    for i in range(C):
        model += pulp.lpSum([Mcj[(i, j)] for j in range(nt)]) >= 1
    
    # Solve our problem
    model.solve()
    pulp.LpStatus[model.status]
    Output = [[Mcj[i, j].varValue for i in range(C)] for j in range(nt)]
    
    return np.array(Output) 
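A hedged usage sketch with a random cost matrix standing in for the real d_ct values (the snippet's module is assumed to import numpy as np and pulp, as the code above requires).

import numpy as np

C, nt = 3, 5                                      # hypothetical problem size
Dct = np.random.default_rng(0).random((nt, C))    # nt x C cost matrix

Mcj = label_prop(C, nt, Dct)
print(Mcj.shape)          # (5, 3): one row of assignment weights per target sample
print(Mcj.sum(axis=1))    # each row sums to 1 by the equality constraints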
Example #14
Source File: simplex_test.py    From GiMPy with Eclipse Public License 1.0
def solve(g):
    el = g.get_edge_list()
    nl = g.get_node_list()
    p = LpProblem('min_cost', LpMinimize)
    capacity = {}
    cost = {}
    demand = {}
    x = {}
    for e in el:
        capacity[e] = g.get_edge_attr(e[0], e[1], 'capacity')
        cost[e] = g.get_edge_attr(e[0], e[1], 'cost')
    for i in nl:
        demand[i] = g.get_node_attr(i, 'demand')
    for e in el:
        x[e] = LpVariable("x"+str(e), 0, capacity[e])
    # add obj
    objective = lpSum(cost[e]*x[e] for e in el)
    p += objective
    # add constraints
    for i in nl:
        out_neig = g.get_out_neighbors(i)
        in_neig = g.get_in_neighbors(i)
        p += lpSum(x[(i, j)] for j in out_neig) - \
             lpSum(x[(j, i)] for j in in_neig) == demand[i]
    p.solve()
    return x, value(objective) 
Example #15
Source File: maximize_slacks.py    From pyDEA with MIT License
def _create_lp(self):
        ''' See base class.
        '''
        self.model._create_lp()
        self.lp_model_max_slack = self.model.lp_model.deepcopy()

        input_slack_vars = pulp.LpVariable.dicts(
            'input_slack', self.strongly_disposal_input_categories,
            0, None, pulp.LpContinuous)
        output_slack_vars = pulp.LpVariable.dicts(
            'output_slack', self.strongly_disposal_output_categories,
            0, None, pulp.LpContinuous)

        # change objective function
        self.lp_model_max_slack.sense = pulp.LpMaximize
        self.lp_model_max_slack.objective = (
            pulp.lpSum(list(input_slack_vars.values())) +
            pulp.lpSum(list(output_slack_vars.values())))

        # change constraints
        for input_category in self.strongly_disposal_input_categories:
            name = self.model._constraints[input_category]
            self.lp_model_max_slack.constraints[name].addterm(
                input_slack_vars[input_category], 1)
            self.lp_model_max_slack.constraints[name].sense = pulp.LpConstraintEQ

        for output_category in self.strongly_disposal_output_categories:
            name = self.model._constraints[output_category]
            self.lp_model_max_slack.constraints[name].addterm(
                output_slack_vars[output_category], -1)
            self.lp_model_max_slack.constraints[name].sense = pulp.LpConstraintEQ 
Example #16
Source File: envelopment_model_base.py    From pyDEA with MIT License
def _add_constraints_for_inputs(self, variables,
                                    dmu_code, obj_variable):
        ''' Adds constraints for inputs to LP.

            Args:
                variables (dict of {str: pulp.LpVariable}): a dictionary that
                    maps DMU codes to pulp.LpVariable, created with
                    pulp.LpVariable.dicts.
                dmu_code (str): DMU code for which LP is being created.
                obj_variable (pulp.LpVariable): LP variable that is optimised
                    (either efficiency score or inverse of efficiency score).
        '''
        for (count, input_category) in enumerate(
                self.input_data.input_categories):
            current_input = self.input_data.coefficients[(dmu_code,
                                                         input_category)]
            input_coeff = self._concrete_model.get_input_variable_coefficient(
                obj_variable, input_category)
            sum_all_inputs = pulp.lpSum([variables[dmu] *
                                        self.input_data.coefficients
                                        [(dmu, input_category)]
                                        for dmu in
                                        self.input_data.DMU_codes])
            name = 'constraint_input_{count}'.format(count=count)
            self.lp_model += (self._constraint_creator.create(
                              input_coeff * current_input
                              - sum_all_inputs, 0, input_category), name)
            self._constraints[input_category] = name 
Example #17
Source File: minmax_kmeans.py    From MinSizeKmeans with GNU General Public License v3.0
def create_model(self):
        def distances(assignment):
            return l2_distance(self.data[assignment[0]], self.centroids[assignment[1]])

        clusters = list(range(self.k))
        assignments = [(i, j) for i in range(self.n) for j in range(self.k)]

        # outflow variables for data nodes
        self.y = pulp.LpVariable.dicts('data-to-cluster assignments',
                                  assignments,
                                  lowBound=0,
                                  upBound=1,
                                  cat=pulp.LpInteger)

        # outflow variables for cluster nodes
        self.b = pulp.LpVariable.dicts('cluster outflows',
                                  clusters,
                                  lowBound=0,
                                  upBound=self.n-self.min_size,
                                  cat=pulp.LpContinuous)

        # create the model
        self.model = pulp.LpProblem("Model for assignment subproblem", pulp.LpMinimize)

        # objective function
        self.model += pulp.lpSum([distances(assignment) * self.y[assignment] for assignment in assignments])

        # flow balance constraints for data nodes
        for i in range(self.n):
            self.model += pulp.lpSum(self.y[(i, j)] for j in range(self.k)) == 1

        # flow balance constraints for cluster nodes
        for j in range(self.k):
            self.model += pulp.lpSum(self.y[(i, j)] for i in range(self.n)) - self.min_size == self.b[j]
            
        # capacity constraint on outflow of cluster nodes
        for j in range(self.k):
            self.model += self.b[j] <= self.max_size - self.min_size 

        # flow balance constraint for the sink node
        self.model += pulp.lpSum(self.b[j] for j in range(self.k)) == self.n - (self.k * self.min_size) 
Example #18
Source File: pulp_solver.py    From pydfs-lineup-optimizer with MIT License
def add_constraint(self, variables, coefficients, sign, rhs):
        if coefficients:
            lhs = [variable * coefficient for variable, coefficient in zip(variables, coefficients)]
        else:
            lhs = variables
        if sign == SolverSign.EQ:
            self.prob += lpSum(lhs) == rhs
        elif sign == SolverSign.NOT_EQ:
            self.prob += lpSum(lhs) != rhs
        elif sign == SolverSign.GTE:
            self.prob += lpSum(lhs) >= rhs
        elif sign == SolverSign.LTE:
            self.prob += lpSum(lhs) <= rhs
        else:
            raise SolverException('Incorrect constraint sign') 
Example #19
Source File: envelopment_model_decorators.py    From pyDEA with MIT License
def _create_lp(self):
        ''' Creates initial LP.
        '''
        self._model_to_decorate._create_lp()
        self.lp_model = self._model_to_decorate.lp_model
        variables = [self._model_to_decorate._variables[dmu_code] for dmu_code
                     in self.input_data.DMU_codes]

        self._model_to_decorate.lp_model += (pulp.lpSum(variables) == 1,
                                             'VRS_constraint')
        self.lp_model = self._model_to_decorate.lp_model 
Example #20
Source File: optimization_model_pulp.py    From optimization-tutorial with MIT License
def _set_objective_function(self):
        # As with the constraints, saving the cost expressions as attributes
        # lets you retrieve their values at the end of the optimization.
        self.total_holding_cost = self.input_params['holding_cost'] * pulp.lpSum(self.inventory_variables)
        self.total_production_cost = pulp.lpSum(row['production_cost'] * self.production_variables[index]
                                                for index, row in self.input_data.iterrows())

        objective = self.total_holding_cost + self.total_production_cost
        self.model.setObjective(objective)

    # ================== Optimization ================== 
Example #21
Source File: multiplier_model.py    From pyDEA with MIT License
def get_objective_function(self, input_data, dmu_code, input_variables,
                               output_variables):
        ''' Generates objective function of input-oriented multiplier model.

            Args:
                input_data (InputData): object that stores input data.
                dmu_code (str): DMU code.
                input_variables (dict of str to pulp.LpVariable): dictionary
                    that maps variable name to pulp variable corresponding
                    to input categories.
                output_variables (dict of str to pulp.LpVariable): dictionary
                    that maps variable name to pulp variable corresponding
                    to output categories.

            Returns:
                pulp.LpAffineExpression: objective function.
        '''
        return pulp.lpSum([input_data.coefficients[dmu_code, category] *
                          output_variables[category]
                          for category in input_data.output_categories]) 
Example #22
Source File: space.py    From qmpy with MIT License
def get_minima(self, phases, bounds):
        """
        Given a set of Phases, get_minima will determine the minimum
        free energy elemental composition as a weighted sum of these
        compounds
        """

        prob = pulp.LpProblem('GibbsEnergyMin', pulp.LpMinimize)
        pvars = pulp.LpVariable.dicts('phase', phases, 0)
        bvars = pulp.LpVariable.dicts('bound', bounds, 0.0, 1.0)
        prob += pulp.lpSum( self.phase_energy(p)*pvars[p] for p in phases ) - \
                pulp.lpSum( self.phase_energy(bound)*bvars[bound] for bound in bounds ), \
                                "Free Energy"
        for elt in self.bound_space:
            prob += sum([ p.unit_comp.get(elt,0)*pvars[p] for p in phases ])\
                        == \
                sum([ b.unit_comp.get(elt, 0)*bvars[b] for b in bounds ]),\
                            'Constraint to the proper range of '+elt
        prob += sum([ bvars[b] for b in bounds ]) == 1, \
                'sum of bounds must be 1'

        if pulp.GUROBI().available():
            prob.solve(pulp.GUROBI(msg=False))
        elif pulp.COIN_CMD().available():
            prob.solve(pulp.COIN_CMD())
        elif pulp.COINMP_DLL().available():
            prob.solve(pulp.COINMP_DLL())
        else:
            prob.solve()

        E = pulp.value(prob.objective)
        xsoln = defaultdict(float,
            [(p, pvars[p].varValue) for p in phases if
                abs(pvars[p].varValue) > 1e-4])
        return xsoln, E 
Example #23
Source File: space.py    From qmpy with MIT License
def _gclp(self, composition={}, mus={}, phases=[]):
        if not qmpy.FOUND_PULP:
            raise Exception('Cannot do GCLP without installing PuLP and an LP '
                            'solver')
        prob = pulp.LpProblem('GibbsEnergyMin', pulp.LpMinimize)
        phase_vars = pulp.LpVariable.dicts('lib', phases, 0.0)
        prob += pulp.lpSum([ (p.energy -
            sum([ p.unit_comp.get(elt,0)*mu
                for elt, mu in mus.items() ])) * phase_vars[p]
            for p in phases]),\
                    "Free Energy"
        for elt, constraint in composition.items():
            prob += pulp.lpSum([
                p.unit_comp.get(elt,0)*phase_vars[p]
                for p in phases ]) == float(constraint),\
                        'Conservation of '+elt
        ##[vh]
        ##print prob
        if pulp.GUROBI().available():
            prob.solve(pulp.GUROBI(msg=False))
        elif pulp.COIN_CMD().available():
            prob.solve(pulp.COIN_CMD())
        else:
            prob.solve()

        phase_comp = dict([ (p, phase_vars[p].varValue)
            for p in phases if phase_vars[p].varValue > 1e-5])
        
        energy = sum( p.energy*amt for p, amt in phase_comp.items() )
        energy -= sum([ a*composition.get(e, 0) for e,a in mus.items()])
        return energy, phase_comp 
Example #24
Source File: weighted_mm_kmeans.py    From MinSizeKmeans with GNU General Public License v3.0
def create_model(self):
        def distances(assignment):
            return l2_distance(self.data[assignment[0]], self.centroids[assignment[1]])

        assignments = [(i, j) for i in range(self.n) for j in range(self.k)]

        # assignment variables
        self.y = pulp.LpVariable.dicts('data-to-cluster assignments',
                                  assignments,
                                  lowBound=0,
                                  upBound=1,
                                  cat=pulp.LpInteger)

        # create the model
        self.model = pulp.LpProblem("Model for assignment subproblem", pulp.LpMinimize)

        # objective function
        self.model += pulp.lpSum([distances(assignment) * self.weights[assignment[0]] * self.y[assignment]
                                  for assignment in assignments]), 'Objective Function - sum weighted squared distances to assigned centroid'
        # this is also weighted, otherwise the weighted centroid computation doesn't make sense.

        # constraints on the total weights of clusters
        for j in range(self.k):
            self.model += pulp.lpSum([self.weights[i] * self.y[(i, j)] for i in range(self.n)]) >= self.min_weight, "minimum weight for cluster {}".format(j)
            self.model += pulp.lpSum([self.weights[i] * self.y[(i, j)] for i in range(self.n)]) <= self.max_weight, "maximum weight for cluster {}".format(j)

        # make sure each point is assigned at least once, and only once
        for i in range(self.n):
            self.model += pulp.lpSum([self.y[(i, j)] for j in range(self.k)]) == 1, "must assign point {}".format(i) 
Example #25
Source File: multiplier_model_base.py    From pyDEA with MIT License
def _create_lp(self):
        ''' Creates initial LP model.
        '''
        assert len(self.input_data.DMU_codes) != 0
        # create LP for the first DMU - it is the easiest way to
        # adapt current code
        for elem in self.input_data.DMU_codes:
            dmu_code = elem
            break

        self.lp_model = pulp.LpProblem(
            'Multiplier model: {orientation}-oriented'.format(
            orientation=self._concrete_model.get_orientation()),
            self._concrete_model.get_objective_type())

        self._output_variables = pulp.LpVariable.dicts(
            'mu', self.input_data.output_categories, self.tolerance, None,
            pulp.LpContinuous)
        self._input_variables = pulp.LpVariable.dicts(
            'eta', self.input_data.input_categories, self.tolerance, None,
            pulp.LpContinuous)

        self.lp_model += (self._concrete_model.get_objective_function(
            self.input_data, dmu_code, self._input_variables,
            self._output_variables),
            'Efficiency score or inverse of efficiency score')

        self._dmu_constraint_names.clear()
        for dmu in self.input_data.DMU_codes:
            output_sum = pulp.lpSum([self.input_data.coefficients[
                dmu, category] * self._output_variables[category]
                for category in self.input_data.output_categories])
            input_sum = pulp.lpSum([self.input_data.coefficients[
                dmu, category] * self._input_variables[category]
                for category in self.input_data.input_categories])
            name = 'DMU_constraint_{count}'.format(count=dmu)
            self.lp_model += (output_sum - input_sum <= 0, name)
            self._dmu_constraint_names[name] = dmu

        self.lp_model += (self._concrete_model.get_equality_constraint(
            self.input_data, dmu_code, self._input_variables,
            self._output_variables), 'equality_constraint') 
Example #26
Source File: wordmoverdist.py    From PyShortTextCategorization with MIT License
def word_mover_distance_probspec(first_sent_tokens, second_sent_tokens, wvmodel, distancefunc=euclidean, lpFile=None):
    """ Compute the Word Mover's distance (WMD) between the two given lists of tokens, and return the LP problem class.

    Using methods of linear programming, supported by PuLP, calculate the WMD between two lists of words. A word-embedding
    model has to be provided. The problem class is returned, containing all the information about the LP.

    Reference: Matt J. Kusner, Yu Sun, Nicholas I. Kolkin, Kilian Q. Weinberger, "From Word Embeddings to Document Distances," *ICML* (2015).

    :param first_sent_tokens: first list of tokens.
    :param second_sent_tokens: second list of tokens.
    :param wvmodel: word-embedding models.
    :param distancefunc: distance function that takes two numpy ndarray.
    :param lpFile: log file to write out.
    :return: a linear programming problem that contains the solution
    :type first_sent_tokens: list
    :type second_sent_tokens: list
    :type wvmodel: gensim.models.keyedvectors.KeyedVectors
    :type distancefunc: function
    :type lpFile: str
    :rtype: pulp.LpProblem
    """
    all_tokens = list(set(first_sent_tokens+second_sent_tokens))
    wordvecs = {token: wvmodel[token] for token in all_tokens}

    first_sent_buckets = tokens_to_fracdict(first_sent_tokens)
    second_sent_buckets = tokens_to_fracdict(second_sent_tokens)

    T = pulp.LpVariable.dicts('T_matrix', list(product(all_tokens, all_tokens)), lowBound=0)

    prob = pulp.LpProblem('WMD', sense=pulp.LpMinimize)
    prob += pulp.lpSum([T[token1, token2]*distancefunc(wordvecs[token1], wordvecs[token2])
                        for token1, token2 in product(all_tokens, all_tokens)])
    for token2 in second_sent_buckets:
        prob += pulp.lpSum([T[token1, token2] for token1 in first_sent_buckets])==second_sent_buckets[token2]
    for token1 in first_sent_buckets:
        prob += pulp.lpSum([T[token1, token2] for token2 in second_sent_buckets])==first_sent_buckets[token1]

    if lpFile is not None:
        prob.writeLP(lpFile)

    prob.solve()

    return prob 
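A hedged usage sketch. Only wvmodel[token] lookups are performed, so a plain dict of vectors can stand in for a gensim KeyedVectors; the tokens and vectors are invented. The function also relies on module-level helpers not shown in the snippet (tokens_to_fracdict, itertools.product and the default euclidean distance).

import numpy as np
import pulp
from scipy.spatial.distance import euclidean

toy_wv = {                              # made-up 2-d "embeddings"
    'obama':     np.array([1.0, 0.2]),
    'president': np.array([0.9, 0.3]),
    'speaks':    np.array([0.1, 1.0]),
    'talks':     np.array([0.2, 0.9]),
}

prob = word_mover_distance_probspec(['obama', 'speaks'],
                                    ['president', 'talks'],
                                    toy_wv, distancefunc=euclidean)
print(pulp.LpStatus[prob.status], pulp.value(prob.objective))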
Example #27
Source File: covering.py    From pyspatialopt with MIT License
def create_lscp_model(coverage_dict, model_file=None, delineator="$"):
    """
    Creates an LSCP (location set covering problem) model using the provided
    coverage and parameters. Writes a .lp file which can be solved with Gurobi.

    Church, R., & Murray, A. (2009). Coverage Business Site Selection, Location
    Analysis, and GIS (pp. 209-233). Hoboken, New Jersey: Wiley.

    :param coverage_dict: (dictionary) The coverage to use to generate the model
    :param model_file: (string) The model file to output
    :param delineator: (string) The character(s) to use to delineate the layer from the ids
    :return: (Pulp problem) The generated problem to solve
    """
    validate_coverage(coverage_dict, ["coverage"], ["binary"])
    if not isinstance(coverage_dict, dict):
        raise TypeError("coverage_dict is not a dictionary")
    if model_file and not (isinstance(model_file, str)):
        raise TypeError("model_file is not a string")
    if not isinstance(delineator, str):
        raise TypeError("delineator is not a string")
    # create the variables
    demand_vars = {}
    for demand_id in coverage_dict["demand"]:
        demand_vars[demand_id] = pulp.LpVariable("Y{}{}".format(delineator, demand_id), 0, 1, pulp.LpInteger)
    facility_vars = {}
    for facility_type in coverage_dict["facilities"]:
        facility_vars[facility_type] = {}
        for facility_id in coverage_dict["facilities"][facility_type]:
            facility_vars[facility_type][facility_id] = pulp.LpVariable(
                "{}{}{}".format(facility_type, delineator, facility_id), 0, 1, pulp.LpInteger)
    # create the problem
    prob = pulp.LpProblem("LSCP", pulp.LpMinimize)
    # Create objective, minimize number of facilities
    to_sum = []
    for facility_type in coverage_dict["facilities"]:
        for facility_id in coverage_dict["facilities"][facility_type]:
            to_sum.append(facility_vars[facility_type][facility_id])
    prob += pulp.lpSum(to_sum)
    # add coverage constraints
    for demand_id in coverage_dict["demand"]:
        to_sum = []
        for facility_type in coverage_dict["demand"][demand_id]["coverage"]:
            for facility_id in coverage_dict["demand"][demand_id]["coverage"][facility_type]:
                to_sum.append(facility_vars[facility_type][facility_id])
        # Hack to get model to "solve" when infeasible with GLPK.
        # Pulp will automatically add dummy variables when the sum is empty, since these are all the same name,
        # it seems that GLPK doesn't read the lp problem properly and fails
        if not to_sum:
            to_sum = [pulp.LpVariable("__dummy{}{}".format(delineator, demand_id), 0, 0, pulp.LpInteger)]
        prob += pulp.lpSum(to_sum) >= 1, "D{}".format(demand_id)
    if model_file:
        prob.writeLP(model_file)
    return prob