Thanks to folks who responded. The answer is to make the vector of free variables bigger and slice from it to get the individual variables as needed (obvious I guess :-). The following works (use at your own risk of course):
import numpy as np
import scipy.optimize as sco
# Make each returned lambda "final" so that it does not change when i (or n) changes later
def makeFinalLambda(i, n, op):
    if op == '+':
        return lambda w: w[n+i] + w[i]
    else:
        return lambda w: w[n+i] - w[i]

def optimize(alphas, cov, maxRisk):
    n = len(alphas)

    def _calcRisk(x):
        w = x[:n]
        var = np.dot(np.dot(w.T, cov), w)
        return var

    def _calcAlpha(x):
        w = x[:n]
        alpha = np.dot(alphas, w)
        return -alpha

    constraints = []
    # Make the constraints that define the absolute-value variables: w[n+i] >= |w[i]|
    for i in range(n):
        # Note that inlining the lambdas does not work: Python closures bind late,
        # so every lambda would see the final value of i (a short illustration
        # follows this listing).
        # constraints.append({'type': 'ineq', 'fun': lambda w: w[n+i] - w[i]})
        # constraints.append({'type': 'ineq', 'fun': lambda w: w[n+i] + w[i]})
        constraints.append({'type': 'ineq', 'fun': makeFinalLambda(i, n, '-')})
        constraints.append({'type': 'ineq', 'fun': makeFinalLambda(i, n, '+')})
    # Add the neutrality, gross-value, and risk constraints
    constraints = constraints + \
        [{'type': 'eq', 'fun': lambda w: np.sum(w[:n])},
         {'type': 'eq', 'fun': lambda w: np.sum(w[n:]) - 1.0},
         {'type': 'ineq', 'fun': lambda w: maxRisk*maxRisk - _calcRisk(w)}]
    bounds = tuple((-1, 1) for x in range(n))
    bounds = bounds + tuple((0, 1) for x in range(n))
    # Try to choose a nice, feasible starting vector
    initw = n * [0.001 / n]
    initw = initw + [abs(w) + 0.001 for w in initw]
    result = sco.minimize(_calcAlpha, initw, method='SLSQP',
                          bounds=bounds, constraints=constraints)
    return result
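Why the factory function is needed: Python closures capture the loop variable by reference, not by value, so lambdas created directly inside the loop would all evaluate with the final value of i by the time SLSQP calls them. A quick standalone illustration (nothing here is specific to scipy):

# Late binding: both lambdas see i == 1 by the time they are called.
late = [lambda w: w[i] for i in range(2)]
print([f([10, 20]) for f in late])    # prints [20, 20]

# A factory function freezes the current i in its own scope.
def make(i):
    return lambda w: w[i]

early = [make(i) for i in range(2)]
print([f([10, 20]) for f in early])   # prints [10, 10]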
This iteratively creates two inequality constraints for each weight variable to define its absolute-value variable. It's nicer to express this as a vector (per-element) constraint using scipy.optimize.NonlinearConstraint, as follows:
def optimize(alphas, cov, maxRisk):
    n = len(alphas)

    def _calcRisk(x):
        w = x[:n]
        var = np.dot(np.dot(w.T, cov), w)
        return var

    def _calcAlpha(x):
        w = x[:n]
        alpha = np.dot(alphas, w)
        return -alpha

    # Per-element constraints 0 <= x[n+i] -/+ x[i], i.e. x[n+i] >= |x[i]|
    absfunpos = lambda x: [x[n+i] - x[i] for i in range(n)]
    absfunneg = lambda x: [x[n+i] + x[i] for i in range(n)]
    constraints = (
        sco.NonlinearConstraint(absfunpos, [0.0]*n, [2.0]*n),
        sco.NonlinearConstraint(absfunneg, [0.0]*n, [2.0]*n),
        {'type': 'eq', 'fun': lambda w: np.sum(w[:n])},
        {'type': 'eq', 'fun': lambda w: np.sum(w[n:]) - 1.0},
        {'type': 'ineq', 'fun': lambda w: maxRisk*maxRisk - _calcRisk(w)})
    bounds = tuple((-1, 1) for x in range(n))
    bounds = bounds + tuple((0, 3) for x in range(n))
    initw = n * [0.01 / n]
    initw = initw + [abs(w) for w in initw]
    result = sco.minimize(_calcAlpha, initw, method='SLSQP',
                          bounds=bounds, constraints=constraints)
    return result
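For completeness, here is a minimal sketch of how either version might be called. The alpha vector, covariance matrix, and risk cap below are made-up numbers purely for illustration:

alphas = np.array([0.02, -0.01, 0.015, 0.005])   # hypothetical expected alphas
cov = np.diag([0.04, 0.09, 0.02, 0.05])          # hypothetical covariance matrix
result = optimize(alphas, cov, maxRisk=0.10)     # risk cap is also made up

n = len(alphas)
weights = result.x[:n]   # signed weights (sum to ~0 by the neutrality constraint)
absvars = result.x[n:]   # absolute-value variables (sum to ~1 by the gross-value constraint)
print(result.success, weights.round(4), absvars.sum())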