/usr/lib/python2.7/dist-packages/dipy/core/optimize.py is in python-dipy 0.10.1-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
""" A unified interface for performing and debugging optimization problems.

Only L-BFGS-B and Powell are supported in this class for versions of
Scipy < 0.12. All optimizers are available for scipy >= 0.12.
"""
import abc
from distutils.version import LooseVersion
import numpy as np
import scipy
import scipy.sparse as sps
import scipy.optimize as opt
from dipy.utils.six import with_metaclass
SCIPY_LESS_0_12 = LooseVersion(scipy.version.short_version) < '0.12'
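
# The unified ``scipy.optimize.minimize`` interface is only used here for
# Scipy >= 0.12; older versions fall back to the function-based optimizers
# (``fmin_l_bfgs_b`` and ``fmin_powell``) imported below.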
if not SCIPY_LESS_0_12:
    from scipy.optimize import minimize
else:
    from scipy.optimize import fmin_l_bfgs_b, fmin_powell


class Optimizer(object):

    def __init__(self, fun, x0, args=(), method='L-BFGS-B', jac=None,
                 hess=None, hessp=None, bounds=None, constraints=(),
                 tol=None, callback=None, options=None, evolution=False):
""" A class for handling minimization of scalar function of one or more
variables.
Parameters
----------
fun : callable
Objective function.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (Jacobian, Hessian).
method : str, optional
Type of solver. Should be one of
- 'Nelder-Mead'
- 'Powell'
- 'CG'
- 'BFGS'
- 'Newton-CG'
- 'Anneal'
- 'L-BFGS-B'
- 'TNC'
- 'COBYLA'
- 'SLSQP'
- 'dogleg'
- 'trust-ncg'
jac : bool or callable, optional
Jacobian of objective function. Only for CG, BFGS, Newton-CG,
dogleg, trust-ncg.
If `jac` is a Boolean and is True, `fun` is assumed to return the
value of Jacobian along with the objective function. If False, the
Jacobian will be estimated numerically.
`jac` can also be a callable returning the Jacobian of the
objective. In this case, it must accept the same arguments
as `fun`.
hess, hessp : callable, optional
Hessian of objective function or Hessian of objective function
times an arbitrary vector p. Only for Newton-CG,
dogleg, trust-ncg.
Only one of `hessp` or `hess` needs to be given. If `hess` is
provided, then `hessp` will be ignored. If neither `hess` nor
`hessp` is provided, then the hessian product will be approximated
using finite differences on `jac`. `hessp` must compute the Hessian
times an arbitrary vector.
bounds : sequence, optional
Bounds for variables (only for L-BFGS-B, TNC and SLSQP).
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None for one of ``min`` or
``max`` when there is no bound in that direction.
constraints : dict or sequence of dict, optional
Constraints definition (only for COBYLA and SLSQP).
Each constraint is defined in a dictionary with fields:
type : str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun : callable
The function defining the constraint.
jac : callable, optional
The Jacobian of `fun` (only for SLSQP).
args : sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be non-negative.
Note that COBYLA only supports inequality constraints.
tol : float, optional
Tolerance for termination. For detailed control, use
solver-specific options.
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is
the current parameter vector. Only available using Scipy >= 0.12.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see
`show_options('minimize', method)`.
evolution : bool, optional
save history of x for each iteration. Only available using Scipy
>= 0.12.
See also
---------
scipy.optimize.minimize
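
        Examples
        --------
        A minimal doctest-style sketch (values chosen for illustration;
        Powell needs no gradient, so a bare objective suffices):

        >>> import numpy as np
        >>> optimizer = Optimizer(lambda x: np.sum(x ** 2), np.ones(2),
        ...                       method='Powell')
        >>> np.allclose(optimizer.xopt, np.zeros(2), atol=1e-3)
        True
        >>> optimizer.nfev > 0
        True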
"""
        self.size_of_x = len(x0)
        self._evol_kx = None
        _eps = np.finfo(float).eps

        if SCIPY_LESS_0_12:

            if evolution is True:
                print('Saving history is available only with Scipy >= 0.12.')
            if method == 'L-BFGS-B':
                default_options = {'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-5,
                                   'eps': 1e-8, 'maxiter': 1000}

                if jac is None:
                    approx_grad = True
                else:
                    approx_grad = False

                if options is None:
                    options = default_options

                if options is not None:
                    for key in options:
                        default_options[key] = options[key]
                    options = default_options

                try:
                    out = fmin_l_bfgs_b(fun, x0, fprime=jac, args=args,
                                        approx_grad=approx_grad,
                                        bounds=bounds,
                                        m=options['maxcor'],
                                        factr=options['ftol'] / _eps,
                                        pgtol=options['gtol'],
                                        epsilon=options['eps'],
                                        maxiter=options['maxiter'])
                except TypeError:
                    msg = 'In Scipy ' + scipy.__version__ + ' the `maxiter` '
                    msg += 'parameter is not available for L-BFGS-B.\n'
                    msg += 'Using `maxfun` instead, set to twice `maxiter`.'
                    print(msg)
                    out = fmin_l_bfgs_b(fun, x0, fprime=jac, args=args,
                                        approx_grad=approx_grad,
                                        bounds=bounds,
                                        m=options['maxcor'],
                                        factr=options['ftol'] / _eps,
                                        pgtol=options['gtol'],
                                        epsilon=options['eps'],
                                        maxfun=options['maxiter'] * 2)

                res = {'x': out[0], 'fun': out[1],
                       'nfev': out[2]['funcalls']}
                try:
                    res['nit'] = out[2]['nit']
                except KeyError:
                    res['nit'] = None
            elif method == 'Powell':
                default_options = {'xtol': 0.0001, 'ftol': 0.0001,
                                   'maxiter': None}

                if options is None:
                    options = default_options

                if options is not None:
                    for key in options:
                        default_options[key] = options[key]
                    options = default_options

                out = fmin_powell(fun, x0, args,
                                  xtol=options['xtol'],
                                  ftol=options['ftol'],
                                  maxiter=options['maxiter'],
                                  full_output=True,
                                  disp=False,
                                  retall=True)

                xopt, fopt, direc, iterations, funcs, warnflag, allvecs = out
                res = {'x': xopt, 'fun': fopt,
                       'nfev': funcs, 'nit': iterations}

            else:
                msg = 'Only L-BFGS-B and Powell are supported in this class '
                msg += 'for versions of Scipy < 0.12.'
                raise ValueError(msg)
        if not SCIPY_LESS_0_12:

            if evolution is True:

                self._evol_kx = []

                def history_of_x(kx):
                    self._evol_kx.append(kx)

                res = minimize(fun, x0, args, method, jac, hess, hessp,
                               bounds, constraints, tol,
                               callback=history_of_x, options=options)

            else:

                res = minimize(fun, x0, args, method, jac, hess, hessp,
                               bounds, constraints, tol, callback, options)

        self.res = res

    @property
    def xopt(self):
        return self.res['x']

    @property
    def fopt(self):
        return self.res['fun']

    @property
    def nit(self):
        return self.res['nit']

    @property
    def nfev(self):
        return self.res['nfev']

    @property
    def message(self):
        return self.res['message']

    def print_summary(self):
        print(self.res)

    @property
    def evolution(self):
        if self._evol_kx is not None:
            return np.asarray(self._evol_kx)
        else:
            return None


def spdot(A, B):
    """The same as np.dot(A, B), except it works even if A or B or both
    are sparse matrices.

    Parameters
    ----------
    A, B : arrays of shape (m, n), (n, k)

    Returns
    -------
    The matrix product AB. If both A and B are sparse, the result will be a
    sparse matrix. Otherwise, a dense result is returned.

    See discussion here:
    http://mail.scipy.org/pipermail/scipy-user/2010-November/027700.html
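
    Examples
    --------
    A small sketch of the mixed sparse/dense cases (values chosen for
    illustration):

    >>> import numpy as np
    >>> import scipy.sparse as sps
    >>> A = sps.csr_matrix(np.eye(3))
    >>> B = np.arange(9.).reshape((3, 3))
    >>> sps.issparse(spdot(A, A))    # sparse * sparse stays sparse
    True
    >>> np.allclose(spdot(A, B), B)  # sparse * dense comes back dense
    True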
"""
    if sps.issparse(A) and sps.issparse(B):
        return A * B
    elif sps.issparse(A) and not sps.issparse(B):
        return (A * B).view(type=B.__class__)
    elif not sps.issparse(A) and sps.issparse(B):
        return (B.T * A.T).T.view(type=A.__class__)
    else:
        return np.dot(A, B)


def sparse_nnls(y, X,
                momentum=1,
                step_size=0.01,
                non_neg=True,
                check_error_iter=10,
                max_error_checks=10,
                converge_on_sse=0.99):
    """
    Solve y=Xh for h, using gradient descent, with X a sparse matrix.

    Parameters
    ----------
    y : 1-d array of shape (N,)
        The data. Needs to be dense.
    X : ndarray of shape (N, M)
        The regressors. May be either sparse or dense.
    momentum : float, optional (default: 1)
        The persistence of the gradient.
    step_size : float, optional (default: 0.01)
        The increment of the parameter update in each iteration.
    non_neg : Boolean, optional (default: True)
        Whether to enforce non-negativity of the solution.
    check_error_iter : int, optional (default: 10)
        How many rounds to run between error evaluations for
        convergence-checking.
    max_error_checks : int, optional (default: 10)
        Don't check errors more than this number of times if no improvement
        in r-squared is seen.
    converge_on_sse : float, optional (default: 0.99)
        The fractional improvement in SSE required at each error check for
        the fit to count as still improving.

    Returns
    -------
    h_best : ndarray of shape (M,)
        The best estimate of the parameters.
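
    Examples
    --------
    A minimal sketch on a small, well-posed dense problem (values chosen for
    illustration; the exact solution is non-negative, so the constraint is
    inactive, and the tolerance is loose because the fixed-step descent only
    gets to within about `step_size` of the solution):

    >>> import numpy as np
    >>> X = np.eye(3)
    >>> y = np.array([1.0, 0.5, 2.0])
    >>> h = sparse_nnls(y, X)
    >>> np.allclose(h, y, atol=0.1)
    True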
"""
num_regressors = X.shape[1]
# Initialize the parameters at the origin:
h = np.zeros(num_regressors)
# If nothing good happens, we'll return that:
h_best = h
gradient = np.zeros(num_regressors)
iteration = 1
ss_residuals_min = np.inf # This will keep track of the best solution
sse_best = np.inf # This will keep track of the best performance so far
count_bad = 0 # Number of times estimation error has gone up.
error_checks = 0 # How many error checks have we done so far
    while 1:
        if iteration > 1:
            # The sum of squared error given the current parameter setting:
            sse = np.sum((y - spdot(X, h)) ** 2)
            # The gradient is (Kay 2008 supplemental page 27):
            gradient = spdot(X.T, spdot(X, h) - y)
            gradient += momentum * gradient
            # Normalize to unit-length
            unit_length_gradient = (gradient /
                                    np.sqrt(np.dot(gradient, gradient)))
            # Update the parameters in the direction of the gradient:
            h -= step_size * unit_length_gradient
            if non_neg:
                # Set negative values to 0:
                h[h < 0] = 0

        # Every once in a while check whether it's converged:
        if np.mod(iteration, check_error_iter):
            # This calculates the sum of squared residuals at this point:
            sse = np.sum((y - spdot(X, h)) ** 2)
            # Did we do better this time around?
            if sse < ss_residuals_min:
                # Update your expectations about the minimum error:
                ss_residuals_min = sse
                h_best = h  # This holds the best params we have so far

                # Are we generally (over iterations) converging on
                # sufficient improvement in r-squared?
                if sse < converge_on_sse * sse_best:
                    sse_best = sse
                    count_bad = 0
                else:
                    count_bad += 1
            else:
                count_bad += 1

            if count_bad >= max_error_checks:
                return h_best
            error_checks += 1

        iteration += 1


class SKLearnLinearSolver(with_metaclass(abc.ABCMeta, object)):
    """
    Provide a sklearn-like uniform interface to algorithms that solve
    problems of the form: $y = Ax$ for $x$.

    Sub-classes of SKLearnLinearSolver should provide a 'fit' method that has
    the following signature: `SKLearnLinearSolver.fit(X, y)`, which would set
    an attribute `SKLearnLinearSolver.coef_`, with the shape (X.shape[1],),
    such that an estimate of y can be calculated as:
    `y_hat = np.dot(X, SKLearnLinearSolver.coef_.T)`
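
    Examples
    --------
    A minimal sketch of a concrete subclass; the name `OLS` and the use of
    `np.linalg.lstsq` are illustrative, not part of dipy:

    >>> import numpy as np
    >>> class OLS(SKLearnLinearSolver):
    ...     def fit(self, X, y):
    ...         # Ordinary least squares fills in the required `coef_`:
    ...         self.coef_ = np.linalg.lstsq(X, y)[0]
    ...         return self
    >>> model = OLS().fit(np.eye(3), np.array([1., 2., 3.]))
    >>> np.allclose(model.predict(np.eye(3)), [1., 2., 3.])
    True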
"""
    def __init__(self, *args, **kwargs):
        self._args = args
        self._kwargs = kwargs

    @abc.abstractmethod
    def fit(self, X, y):
        """Implement for all derived classes """

    def predict(self, X):
        """
        Predict using the result of the model.

        Parameters
        ----------
        X : array-like (n_samples, n_features)
            Samples.

        Returns
        -------
        C : array, shape = (n_samples,)
            Predicted values.
        """
        X = np.asarray(X)
        return np.dot(X, self.coef_.T)


class NonNegativeLeastSquares(SKLearnLinearSolver):
    """
    A sklearn-like interface to scipy.optimize.nnls
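
    Examples
    --------
    A small sketch of intended use (values chosen for illustration; the
    target is non-negative, so nnls recovers it exactly):

    >>> import numpy as np
    >>> X = np.eye(3)
    >>> y = np.array([1., 0., 2.])
    >>> nnls = NonNegativeLeastSquares().fit(X, y)
    >>> np.allclose(nnls.coef_, y)
    True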
"""
    def fit(self, X, y):
        """
        Fit the NonNegativeLeastSquares linear model to data.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            The regressors (design matrix).
        y : ndarray of shape (n_samples,)
            The data.
        """
        coef, rnorm = opt.nnls(X, y)
        self.coef_ = coef
        return self