This file is indexed.

/usr/include/freefem++/BFGS.hpp is in libfreefem++-dev 3.47+dfsg1-2build1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
#ifndef BFGS_HH
#define BFGS_HH

#include "Optima.hpp"
#include "defs.hpp"

// BFGS Quasi-Newton Optimization Method
/* In the current version, you must use the CubicLineSearch
   procedure */

// Requires a "real" (full) matrix class
template <class LS>
class BFGS : public Optima<LS>
{
  // Type aliases taken from the line-search class LS.
  typedef typename LS::Real Real;    // scalar type
  typedef typename LS::Param Param;  // model (parameter set) type
  typedef typename LS::Vect Vect;    // vector type
  typedef typename LS::VMat VMat;    // virtual-matrix type (not used in this header)
  typedef typename LS::Mat Mat;      // dense matrix type; holds the inverse-Hessian estimate
  typedef list<Real> mlist;          // container for the residue history

public:
  
  BFGS(///pointer to the line-search object.
	   LS* ls, 
	   ///maximum number of iterations
	   int iter, 
	   ///minimum accepted gradient norm at the optimum solution
	   Real tol,
	   ///verbose or quiet
	   int verb=0);
  ~BFGS(){;}
  
  // the BFGS search starting from model0, returns an optimum model 
  Param optimizer(Param& model0);

};

// Construct a BFGS optimizer bound to the line-search object `p`.
//   it   : maximum number of iterations allowed
//   eps  : gradient-norm tolerance used as the convergence criterion
//   verb : verbosity flag forwarded to the Optima base (0 = quiet)
template <class LS>
BFGS<LS>::BFGS(LS* p, int it, Real eps, int verb)
  : Optima<LS>(verb)
{
  // Initialize the inherited optimizer state.
  this->iterNum = 0;
  this->iterMax = it;
  this->tol     = eps;
  this->ls      = p;
}

// Requirements on the MAT class:
// - a matrix constructor taking the number of rows and number of columns
// - a matrix constructor taking the diagonal as a parameter
// - the matrix-vector product
// - an outProduct method that builds from two vectors u and v the matrix A_{i,j}=u(i)v(j)
// - a t() (transpose) method
// - an operator +
// - an operator = Real
// - an operator = Vect
// Run the BFGS quasi-Newton iteration starting from model0.
// Maintains an estimate H of the *inverse* Hessian (initialized to the
// identity), searches along s = -H*g via the attached line-search object,
// and stops when the gradient norm drops below this->tol or iterMax
// iterations are reached. Returns the last model produced by the search.
// NOTE(review): expressions such as (g0,g0) rely on an overloaded comma
// operator on Vect — presumably the inner (dot) product; confirm in defs.hpp.
template <class LS>
typename BFGS<LS>::Param BFGS<LS>::optimizer(Param& model0)
{
  //reset the residue history for every new optimizer call
  this->iterNum = 0;
  if (this->residue != NULL)
	{
      delete this->residue;
      this->residue = new mlist;
	}
  
  // Initial settings for some parameters
  int n = model0.size();
  Vect g0(n);
  double lambda = 0.025;   // initial step-length guess handed to the line search
  double descent = 0.;     // directional derivative (s,g) along the search direction
  

  g0= *(this->ls->gradient(model0));
	
  // check the gradient, in case the initial model is the optimal
  double err = (Real)sqrt((g0,g0));
  
  if (this->isVerbose) cerr << "Initial residue : " << err << endl;

  this->appendResidue(err);	// residual

  if (err < this->tol)
	{
      if (this->isVerbose) cerr << "Initial guess was great! \n";
      this->isSuccess = 1;
      return model0;
	}
   
   //initial identity matrix for estimating the inverse of the Hessian
   //Vect diag(n,1.);

   Mat H(n,n);H=0;diagonal(H)=1;   // H starts as the identity
   Real d, dd, scale;
   Param model1(model0);
   Vect s(n),gamma(n),delta(n),g1(n);

   //searching direction: s = -H*g0 (steepest descent on the first pass)
   s=Real();        // zero the direction vector first
   s -= H*g0;
   descent = (s,g0);
   assert(s.max() <1e100);   // sanity check against numerical blow-up
   
   //cubic line search for a new model
   model1 = this->ls->search(model0, s, descent, lambda);
   g1 = *(this->ls->gradient(model1));
   err = (Real)sqrt((g1,g1));
   if (this->isVerbose)
	 cerr << "Iteration (" << this->iterNum << ") : "
		  <<"current value of the objective function: "
		  <<this->ls->currentValue() << "\t current residue: "<< err << endl;

   this->appendResidue(err);	// residual
   this->iterNum ++;

   Mat B(n,n);

   
   // Main BFGS loop: update H with the rank-two correction, recompute the
   // search direction, and line-search until convergence or iterMax.
   while (this->finalResidue() > this->tol && this->iterNum < this->iterMax)
   {
	 gamma = g1 - g0;          // change in gradient
	 delta = model1 - model0;  // change in model (the accepted step)

	 //replace the searching direction with temporary storage: s = H*gamma
	 s = H*gamma;

	 //factor of the denominator: the curvature term (delta,gamma)
	 dd = (delta,gamma);

	 // Change (TONY): originally the test was `if (dd < 0.00000001)`,
	 // but Adel's version does not perform this test either...
	 if (Abs(dd)<1e-20)
	   {
		 // re-initialize the inverse-Hessian estimate to the identity;
		 // it must be zeroed first: see Matrix.hpp
		 // NOTE(review): on this reset branch g0/model0 are left unchanged,
		 // so the next search restarts from the previously accepted point —
		 // confirm this is the intended restart behavior.
		 H = 0.; diagonal(H)=1.;
		
	   }
	 else
	   {
		 assert(dd);
		 d = 1./dd;
		 
		 // scale = (1 + d*(gamma,H*gamma)) * d  — coefficient of the
		 // delta*delta^T term in the BFGS inverse-Hessian update
		 scale = d*((gamma,s));
		 scale += 1;
		 scale *= d;
		 
		 // update the first term
		 H += scale*delta*delta.t();
		 
		 //update the second term (s still holds H*gamma here)
		 H -= d*s*delta.t();
		 H -= d*delta*s.t();
		 
		 //store the current model and gradient
		 g0 = g1;
		 model0 = model1;
	   }
	 // new search direction s = -H*g0, then line-search from model0
	 s=Real();
	 s -= H*g0;
	 descent = (s,g0);
	 model1 = this->ls->search(model0, s, descent, lambda);
	 g1 = *(this->ls->gradient(model1));
	 err = (Real)sqrt((g1,g1));
	 
	 if (this->isVerbose)
	   cerr << "Iteration (" << this->iterNum << ") : "<<"current value of the objective function: "
			<<this->ls->currentValue() << "\t current residue: "<< err << endl;

	 this->appendResidue(err);	// residual
	 this->iterNum ++;
   }
   
   return(model1);
}

#endif