
Edit

I changed the constraints to this:

    def constraint_rule1(m, i):
        return sum(m.x[i] for i in m.data_set) == 0

    def constraint_rule2(m, i):
        return m.u1[i] - m.u2[i] == m.x[i] - m.data_param[i]

    def objective_rule(m):
        return summation(m.u1, m.u2)

and now I get: AttributeError: 'dict' object has no attribute 'is_expression_type'
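
For reference, a minimal sketch (with made-up data) of the two ways Pyomo's Var normally takes bounds: a single numeric (lower, upper) pair applied to every index, or a rule that returns such a pair per index. A tuple containing dicts, as in bounds=({key: 0 for key in m.data_set}, None), is not a supported form as far as I know, and is one way a plain dict can end up where Pyomo expects a number or expression:

    from pyomo.environ import ConcreteModel, Set, Var

    lb = {"A": -400.0, "B": -50.0}
    ub = {"A": 100.0, "B": -10.0}

    m = ConcreteModel()
    m.data_set = Set(initialize=list(lb.keys()))

    # same numeric bounds for every index
    m.u1 = Var(m.data_set, bounds=(0, None))

    # per-index bounds via a rule that returns a (lower, upper) pair
    def bounds_rule(m, i):
        return (lb[i], ub[i])

    m.x = Var(m.data_set, bounds=bounds_rule)

Also note that, if I remember Pyomo's API correctly, summation(m.u1, m.u2) builds the sum of the products u1[i] * u2[i] (a dot product), not the sum of u1 plus the sum of u2.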


I am trying to set up a model inside a while loop and I don't understand why it is not working. I am also not sure whether creating a new model on every iteration, the way I do it, is good practice. When I run the code below I get "'_GeneralVarData' object is not iterable", which I suspect comes from how I formulated the constraints. Could someone also tell me what the recommended way is to build a model several times?

import numpy as np
from dataclasses import dataclass
from typing import Dict
from pyomo.environ import (ConcreteModel, Set, Var, Param, Constraint, Objective,
                           SolverFactory, minimize, value)
from pyomo.opt import SolverStatus, TerminationCondition

# create random number generator
rng = np.random.default_rng()

dict_1 = {"A": (-400, 100.0), "B": (-50, -10.0), "C": (-100, 100.0)}
dict_2 = {"D": (10.0, 180.0), "E": (0.0, 80.0), "F": (0.0, 200.0), "H": 
(0.0, 20.0)}

# making a dataclass to hold the outputs
@dataclass
class NominationGenerationOutput:
    result: Dict
    diff: Dict
    diff1: Dict
    diff2: Dict
    node_lb: Dict
    node_ub: Dict


# number of nominations I want to have
max_noms_count = 10
count = 0
# total iteration counter
iterCount = 0
# maximum number of iterations
maxIter = 100
assert max_noms_count <= maxIter
# list collecting the accepted nominations
noms = []

while count != max_noms_count and iterCount < maxIter:
    # get uniformly distributed sample for each sink/source
    dict_A = {key: rng.uniform(low=val[0], high=val[1]) for key, val in dict_1.items()}
    dict_B = {key: rng.uniform(low=val[0], high=val[1]) for key, val in dict_2.items()}

    # data preparation
    lb1 = {key: val[0] for key, val in dict_1.items()}
    ub1 = {key: val[1] for key, val in dict_1.items()}

    lb2 = {key: val[0] for key, val in dict_2.items()}
    ub2 = {key: val[1] for key, val in dict_2.items()}

    data_dict = {**dict_A, **dict_B}
    dict_lb = {**lb1, **lb2}
    dict_ub = {**ub1, **ub2}

    # create optimization problem
    m = ConcreteModel()

    #Set
    m.data_set = Set(initialize=list(data_dict.keys()))
    
    # nomination vector
    m.x = Var(m.data_set, bounds=(dict_lb, dict_ub))

    # to represent an absolute value term in the objective, we need 2 auxiliary variables
    m.u1 = Var(
        m.data_set,
        bounds=({key: 0 for key in m.data_set}, None),
    )
    m.u2 = Var(
        m.data_set,
        bounds=({key: 0 for key in m.data_set}, None),
    )

    #Parameter
    m.data_param = Param(m.data_set, initialize=data_dict)

    def constraint_rule1(m, i):
        return sum(m.x[i]) == 0

    def constraint_rule2(m, i):
        return m.u1[i] - m.u2[i] == m.x[i] - m.data_param[i]

    def objective_rule(m):
        return (sum(m.u1[i]) + sum(m.u2[i]) for i in m.data_set)

    # add balance constraint
    m.constraint1 = Constraint(m.data_set, rule=constraint_rule1)
    m.constraint2 = Constraint(m.data_set, rule=constraint_rule2)
    # set objective as the sum of absolute distances from each variable to its random candidate and minimize that difference
    m.objective = Objective(rule=objective_rule, sense=minimize)
    results = SolverFactory("gurobi").solve(m, tee=False)
    # output handling
    if (results.solver.status == SolverStatus.ok) and (
        results.solver.termination_condition == TerminationCondition.optimal
    ):
        print("*" * 80 + "\n")
        print("objective: ", value(m.objective))
        print("*" * 80 + "\n")
        # nomination
        result = {i: m.x[i].value for i in m.x}
        # absolute differences
        diff = {i: m.u1[i].value + m.u2[i].value for i in m.x}
        # positive differences
        diff1 = {i: m.u1[i].value for i in m.x}
        # negative differences
        diff2 = {i: m.u2[i].value for i in m.x}
        output = NominationGenerationOutput(
            result, diff, diff1, diff2, node_lb, node_ub
        )

    print("maximum deviation: ", max(diff.values()))
    print("average deviation: ", sum(diff.values()) / len(diff))
    print("*" * 80 + "\n")
    if sum(diff.values()) / len(diff) < 20:
        noms.append(output)
        count += 1
    iterCount += 1

1 Answer

In case anyone needs this, I solved the problem by rewriting it as below. The main changes: the balance constraint sums over the whole index set with a generator expression instead of calling sum() on a single variable, the objective returns an explicit sum over the set rather than a generator, the auxiliary variables get plain numeric bounds (0, None), and the per-index bounds on m.x are supplied through a bounds rule:

while count != max_noms_count and iterCount < maxIter:
    # get uniformly distributed sample for each sink/source
    dict_A = {key: rng.uniform(low=val[0], high=val[1]) for key, val in dict_1.items()}
    dict_B = {key: rng.uniform(low=val[0], high=val[1]) for key, val in dict_2.items()}

    # some data preparation
    lb1 = {key: val[0] for key, val in dict_1.items()}
    ub1 = {key: val[1] for key, val in dict_1.items()}

    lb2 = {key: val[0] for key, val in dict_2.items()}
    ub2 = {key: val[1] for key, val in dict_2.items()}

    data_dict = {**dict_A, **dict_B}
    dict_lb = {**lb1, **lb2}
    dict_ub = {**ub1, **ub2}

    def bounds_rule(m, i):
        return (dict_lb[i], dict_ub[i])

    # create optimization problem
    m = ConcreteModel()

    #Set
    m.data_set = Set(initialize=list(data_dict.keys()))

    # nomination vector
    m.x = Var(m.data_set, bounds=bounds_rule)

    # to represent an absolute value term in the objective, we need 2 auxiliary variables
    m.u1 = Var(
        m.data_set,
        bounds=(0, None),
    )
    m.u2 = Var(
        m.data_set,
        bounds=(0, None),
    )
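    # with u1, u2 >= 0 and u1 - u2 == x - data_param (constraint_rule2 below),
    # minimizing u1 + u2 drives one of the pair to zero, so at the optimum
    # u1 + u2 equals |x - data_param|: the usual LP reformulation of an absolute value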

    #Parameter
    m.data_param = Param(m.data_set, initialize=data_dict)

    def constraint_rule1(m, i):
        return sum(m.x[i] for i in m.data_set) == 0

    def constraint_rule2(m, i):
        return m.u1[i] - m.u2[i] == m.x[i] - m.data_param[i]

    def objective_rule(m):
        return sum(m.u1[i] + m.u2[i] for i in m.data_set)

    # add balance constraint
    m.constraint1 = Constraint(m.data_set, rule=constraint_rule1)
    m.constraint2 = Constraint(m.data_set, rule=constraint_rule2)
    # set objective as the sum of absolute distances from each nomination to its random candidate and minimize that difference
    m.objective = Objective(rule=objective_rule, sense=minimize)
    optimiser = SolverFactory("gurobi")

    results = optimiser.solve(m)

    # output handling
    if (results.solver.status == SolverStatus.ok) and (
        results.solver.termination_condition == TerminationCondition.optimal
    ):
        print("*" * 80 + "\n")
        print("objective: ", value(m.objective))
        print("*" * 80 + "\n")
        # nomination
        result = {i: m.x[i].value for i in m.x}
        # absolute differences
        diff = {i: m.u1[i].value + m.u2[i].value for i in m.x}
        # positive differences
        diff1 = {i: m.u1[i].value for i in m.x}
        # negative differences
        diff2 = {i: m.u2[i].value for i in m.x}
        output = NominationGenerationOutput(
            result, diff, diff1, diff2, node_lb, node_ub
        )

    print("maximum deviation: ", max(diff.values()))
    print("average deviation: ", sum(diff.values()) / len(diff))
    print("*" * 80 + "\n")
    if sum(diff.values()) / len(diff) < 20:
        noms.append(output)
        count += 1
    iterCount += 1
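
As for building the model inside the loop: recreating a ConcreteModel on every iteration works and is a common pattern for a model this small. If construction time ever becomes an issue, one alternative (a rough sketch, not taken from the code above, with names chosen here for illustration) is to build the model once with a mutable Param and only overwrite the sampled values before each solve:

    import numpy as np
    from pyomo.environ import (ConcreteModel, Set, Var, Param, Constraint,
                               Objective, SolverFactory, minimize, value)

    rng = np.random.default_rng()
    dict_1 = {"A": (-400, 100.0), "B": (-50, -10.0), "C": (-100, 100.0)}
    dict_2 = {"D": (10.0, 180.0), "E": (0.0, 80.0), "F": (0.0, 200.0), "H": (0.0, 20.0)}
    bounds_all = {**dict_1, **dict_2}

    m = ConcreteModel()
    m.data_set = Set(initialize=list(bounds_all.keys()))
    m.x = Var(m.data_set, bounds=lambda m, i: bounds_all[i])
    m.u1 = Var(m.data_set, bounds=(0, None))
    m.u2 = Var(m.data_set, bounds=(0, None))
    # mutable=True lets us overwrite the values before every solve
    m.data_param = Param(m.data_set, initialize=0.0, mutable=True)

    m.balance = Constraint(expr=sum(m.x[i] for i in m.data_set) == 0)
    m.abs_link = Constraint(
        m.data_set, rule=lambda m, i: m.u1[i] - m.u2[i] == m.x[i] - m.data_param[i]
    )
    m.objective = Objective(
        expr=sum(m.u1[i] + m.u2[i] for i in m.data_set), sense=minimize
    )

    opt = SolverFactory("gurobi")
    for _ in range(10):
        # draw a new candidate and push it into the existing model
        for key, (lo, hi) in bounds_all.items():
            m.data_param[key] = rng.uniform(low=lo, high=hi)
        opt.solve(m)
        print("objective:", value(m.objective))

Because data_param is mutable, the constraints and objective keep referring to it symbolically and pick up the new values each time the problem is written out for the solver; any LP solver available to Pyomo would work here in place of gurobi.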