/usr/share/pyshared/openopt/examples/nlp_11.py is in python-openopt 0.38+svn1589-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
"""
Example:
(x0-5)^2 + (x1-5)^2 + ... + (x149-5)^2 -> min

subject to

# lb <= x <= ub:
x4 <= 4
8 <= x5 <= 15

# Ax <= b
x0 + ... + x149 >= 825
x9 + x19 <= 7
x10 + x11 <= 9

# Aeq x = beq
x100 + x101 = 11

# c(x) <= 0
2*x0^4 - 32 <= 0
x1^2 + x2^2 - 8 <= 0

# h(x) = 0
(x149-1)^6 = 0
(x148-1.5)^6 = 0
"""
from openopt import NLP
from numpy import cos, arange, ones, zeros, array
N = 150
# objective function:
f = lambda x: ((x-5)**2).sum()
# objective function gradient (optional):
df = lambda x: 2*(x-5)
# start point (initial estimation)
x0 = 8*cos(arange(N))
# c(x) <= 0 constraints
c = [lambda x: 2*x[0]**4 - 32, lambda x: x[1]**2 + x[2]**2 - 8]
# dc(x)/dx: non-lin ineq constraints gradients (optional):
dc0 = lambda x: [8 * x[0]**3] + [0]*(N-1)
dc1 = lambda x: [0, 2 * x[1], 2 * x[2]] + [0]*(N-3)
dc = [dc0, dc1]
# h(x) = 0 constraints
def h(x):
    #return (x[-1]-1)**2
    return (x[-1]-1)**6, (x[-2]-1.5)**6
# other possible return types: numpy array, matrix, Python list, tuple
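# For instance, a hypothetical variant of h returning a numpy array instead
# of a tuple could look like this (a sketch only, not used below):
#def h_array(x):
#    return array([(x[-1]-1)**6, (x[-2]-1.5)**6])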
# dh(x)/dx: non-lin eq constraints gradients (optional):
def dh(x):
    r = zeros((2, N))
    r[0, -1] = 6*(x[-1]-1)**5
    r[1, -2] = 6*(x[-2]-1.5)**5
    return r
#def dh(x):
#    r = zeros((1, N))
#    r[0, -1] = 2*(x[-1]-1)**1
#    #r[1, -2] = 6*(x[148]-1.5)**5
#    return r
# lower and upper bounds on variables
lb = -6*ones(N)
ub = 6*ones(N)
ub[4] = 4
lb[5], ub[5] = 8, 15
#lb[110:120]=5.67
#ub[110:120]=5.67
# general linear inequality constraints Ax <= b
A = zeros((3, N))
A[0, 9] = 1     # x9 + x19 <= 7
A[0, 19] = 1
A[1, 10:12] = 1 # x10 + x11 <= 9
A[2] = -ones(N) # -(x0 + ... + x149) <= -825, i.e. x0 + ... + x149 >= 825
b = [7, 9, -825]
# general linear equality constraints Aeq x = beq
Aeq = zeros((1, N))
Aeq[0, 100:102] = 1 # x100 + x101 = 11
beq = 11
#Aeq[1, 105] = 1
#Aeq[1, 106] = 1.00001
#beq = [11, 11.00005]
# required constraints tolerance, default for NLP is 1e-6
contol = 1e-6
# NB: the solver algencan ignores xtol and ftol; using maxTime, maxCPUTime,
# maxIter, maxFunEvals or fEnough as stop criteria is recommended instead.
# Note that for algencan gtol means the norm of the projected gradient of the
# Augmented Lagrangian, so it should be something like 1e-3...1e-5
gtol = 1e-7 # (default gtol = 1e-6)
# Assign problem:
# 1st arg - objective function
# 2nd arg - start point
p = NLP(f, x0, df=df, c=c, dc=dc, h=h, dh=dh, A=A, b=b, Aeq=Aeq, beq=beq,
lb=lb, ub=ub, gtol=gtol, contol=contol, iprint = 50, maxIter = 10000, maxFunEvals = 1e7, name = 'NLP_1')
#p = NLP(f, x0, df=df, Aeq=Aeq, beq=beq,
# gtol=gtol, contol=contol, iprint = 50, maxIter = 10000, maxFunEvals = 1e7, name = 'NLP_1')
#p = NLP(f, x0, df=df, lb=lb, ub=ub, gtol=gtol, contol=contol, iprint = 50, maxIter = 10000, maxFunEvals = 1e7, name = 'NLP_1')
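# As noted above, the stop criteria recommended for algencan could also be
# set as attributes on the problem object; a sketch with illustrative,
# untuned values (uncomment to use):
#p.maxCPUTime = 150
#p.fEnough = 132.06 # stop once the objective is this good and constraints hold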
#optional: graphic output, requires pylab (matplotlib)
p.plot = 1
#optional: user-supplied 1st derivatives check
p.checkdf()
p.checkdc()
p.checkdh()
def MyIterFcn(p):
    # user callback, invoked by the solver each iteration;
    # return 0 (False) to continue, True/nonzero to request an early stop
    return 0
#    print 'Iteration', p.iter
#    if p.iter == 50:
#        p.user.mylist.append(p.xk.copy())
#    return 0
p.user.mylist = []
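# A hypothetical callback that stops as soon as the objective is good enough
# (a sketch; assumes p.fk holds the objective value at the current iterate):
#def MyStopFcn(p):
#    return p.fk < 132.06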
# solve the problem
#solver = 'algencan'
solver = 'ralg'
#solver = 'scipy_cobyla'
#solver = 'ipopt'
#solver = 'scipy_slsqp'
p.debug = 1
r = p.solve(solver, showRej=1, iprint=1, maxTime = 15000, newLinEq=1, callback = MyIterFcn) # string argument is solver name
#r = p.solve('r2', iprint = 1, plot=0, showLS=1, maxIter=480)
# r.xf and r.ff are optim point and optim objFun value
# r.ff should be something like 132.0522
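# A minimal sketch of inspecting the result fields named above
# (uncomment to use):
#print('optimal objective value: %0.4f' % r.ff)
#print('x[148] = %0.4f, x[149] = %0.4f' % (r.xf[-2], r.xf[-1]))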