/usr/include/root/TMVA/MethodANNBase.h is in libroot-tmva-dev 5.34.19+dfsg-1.2.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski, Jan Therhaag
/***********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis        *
 * Package: TMVA                                                                    *
 * Class  : MethodANNBase                                                           *
 * Web    : http://tmva.sourceforge.net                                             *
 *                                                                                  *
 * Description:                                                                     *
 *      Artificial neural network base class for the discrimination of signal      *
 *      from background.                                                            *
 *                                                                                  *
 * Authors (alphabetical):                                                          *
 *      Andreas Hoecker  <Andreas.Hocker@cern.ch>   - CERN, Switzerland             *
 *      Matt Jachowski   <jachowski@stanford.edu>   - Stanford University, USA      *
 *      Peter Speckmayer <Peter.Speckmayer@cern.ch> - CERN, Switzerland             *
 *      Joerg Stelzer    <Joerg.Stelzer@cern.ch>    - CERN, Switzerland             *
 *      Jan Therhaag     <Jan.Therhaag@cern.ch>     - U of Bonn, Germany            *
 *                                                                                  *
 * Small changes (regression):                                                      *
 *      Krzysztof Danielowski <danielow@cern.ch>    - IFJ PAN & AGH, Poland         *
 *      Kamil Kraszewski      <kalq@cern.ch>        - IFJ PAN & UJ, Poland          *
 *      Maciej Kruk           <mkruk@cern.ch>       - IFJ PAN & AGH, Poland         *
 *                                                                                  *
 * Copyright (c) 2005-2011:                                                         *
 *      CERN, Switzerland                                                           *
 *                                                                                  *
 * Redistribution and use in source and binary forms, with or without              *
 * modification, are permitted according to the terms listed in LICENSE            *
 * (http://tmva.sourceforge.net/LICENSE)                                            *
 ***********************************************************************************/
#ifndef ROOT_TMVA_MethodANNBase
#define ROOT_TMVA_MethodANNBase
//////////////////////////////////////////////////////////////////////////
//                                                                      //
// MethodANNBase                                                        //
//                                                                      //
// Base class for all TMVA methods using artificial neural networks    //
//                                                                      //
//////////////////////////////////////////////////////////////////////////
#ifndef ROOT_TString
#include "TString.h"
#endif
#include <vector>
#ifndef ROOT_TTree
#include "TTree.h"
#endif
#ifndef ROOT_TObjArray
#include "TObjArray.h"
#endif
#ifndef ROOT_TRandom3
#include "TRandom3.h"
#endif
#ifndef ROOT_TMatrix
#include "TMatrix.h"
#endif
#ifndef ROOT_TMVA_MethodBase
#include "TMVA/MethodBase.h"
#endif
#ifndef ROOT_TMVA_TActivation
#include "TMVA/TActivation.h"
#endif
#ifndef ROOT_TMVA_TNeuron
#include "TMVA/TNeuron.h"
#endif
#ifndef ROOT_TMVA_TNeuronInput
#include "TMVA/TNeuronInput.h"
#endif
class TH1;
class TH1F;
namespace TMVA {
class MethodANNBase : public MethodBase {
public:
// constructors dictated by subclassing off of MethodBase
MethodANNBase( const TString& jobName,
Types::EMVA methodType,
const TString& methodTitle,
DataSetInfo& theData,
const TString& theOption,
TDirectory* theTargetDir );
MethodANNBase( Types::EMVA methodType,
DataSetInfo& theData,
const TString& theWeightFile,
TDirectory* theTargetDir );
virtual ~MethodANNBase();
// this does the real initialization work
void InitANNBase();
// setters for subclasses; both take ownership of the passed object
// and delete any previously set instance
void SetActivation(TActivation* activation) {
if (fActivation != NULL) delete fActivation;
fActivation = activation;
}
void SetNeuronInputCalculator(TNeuronInput* inputCalculator) {
if (fInputCalculator != NULL) delete fInputCalculator;
fInputCalculator = inputCalculator;
}
// this will have to be overridden by every subclass
virtual void Train() = 0;
// print network, for debugging
virtual void PrintNetwork() const;
// call this function like this:
// ...
// MethodMLP* mlp = dynamic_cast<MethodMLP*>(method);
// std::vector<float> layerValues;
// mlp->GetLayerActivation (2, std::back_inserter(layerValues));
// ... now do something with the layerValues
//
template <typename WriteIterator>
void GetLayerActivation (size_t layer, WriteIterator writeIterator);
using MethodBase::ReadWeightsFromStream;
// write weights to / read weights from XML
void AddWeightsXMLTo( void* parent ) const;
void ReadWeightsFromXML( void* wghtnode );
// read weights from stream
virtual void ReadWeightsFromStream( std::istream& istr );
// calculate the MVA value
virtual Double_t GetMvaValue( Double_t* err = 0, Double_t* errUpper = 0 );
virtual const std::vector<Float_t> &GetRegressionValues();
virtual const std::vector<Float_t> &GetMulticlassValues();
// write method specific histos to target file
virtual void WriteMonitoringHistosToFile() const;
// ranking of input variables
const Ranking* CreateRanking();
// the option handling methods
virtual void DeclareOptions();
virtual void ProcessOptions();
Bool_t Debug() const;
enum EEstimator { kMSE=0, kCE }; // mean squared error / cross entropy
protected:
virtual void MakeClassSpecific( std::ostream&, const TString& ) const;
std::vector<Int_t>* ParseLayoutString( TString layerSpec );
virtual void BuildNetwork( std::vector<Int_t>* layout, std::vector<Double_t>* weights=NULL,
Bool_t fromFile = kFALSE );
void ForceNetworkInputs( const Event* ev, Int_t ignoreIndex = -1 );
Double_t GetNetworkOutput() { return GetOutputNeuron()->GetActivationValue(); }
// debugging utilities
void PrintMessage( TString message, Bool_t force = kFALSE ) const;
void ForceNetworkCalculations();
void WaitForKeyboard();
// accessors
Int_t NumCycles() { return fNcycles; }
TNeuron* GetInputNeuron (Int_t index) { return (TNeuron*)fInputLayer->At(index); }
TNeuron* GetOutputNeuron(Int_t index = 0) { return fOutputNeurons.at(index); }
// protected variables
TObjArray* fNetwork; // TObjArray of TObjArrays representing network
TObjArray* fSynapses; // array of pointers to synapses, no structural data
TActivation* fActivation; // activation function to be used for hidden layers
TActivation* fOutput; // activation function to be used for output layers, depending on estimator
TActivation* fIdentity; // activation for input and output layers
TRandom3* frgen; // random number generator for various uses
TNeuronInput* fInputCalculator; // input calculator for all neurons
std::vector<Int_t> fRegulatorIdx; // index of the prior (regulator) for each synapse
std::vector<Double_t> fRegulators; // the priors used as regulators
EEstimator fEstimator; // estimator type (kMSE or kCE)
TString fEstimatorS; // estimator option string
// monitoring histograms
TH1F* fEstimatorHistTrain; // monitors convergence of training sample
TH1F* fEstimatorHistTest; // monitors convergence of independent test sample
// monitoring histograms (not available for regression)
void CreateWeightMonitoringHists( const TString& bulkname, std::vector<TH1*>* hv = 0 ) const;
std::vector<TH1*> fEpochMonHistS; // epoch monitoring histograms for signal
std::vector<TH1*> fEpochMonHistB; // epoch monitoring histograms for background
std::vector<TH1*> fEpochMonHistW; // epoch monitoring histograms for weights
// general
TMatrixD fInvHessian; // zjh
bool fUseRegulator; // zjh
protected:
Int_t fRandomSeed; // random seed for initial synapse weights
Int_t fNcycles; // number of epochs to train
TString fNeuronType; // name of neuron activation function class
TString fNeuronInputType; // name of neuron input calculator class
private:
// helper functions for building network
void BuildLayers(std::vector<Int_t>* layout, Bool_t from_file = false);
void BuildLayer(Int_t numNeurons, TObjArray* curLayer, TObjArray* prevLayer,
Int_t layerIndex, Int_t numLayers, Bool_t from_file = false);
void AddPreLinks(TNeuron* neuron, TObjArray* prevLayer);
// helper functions for weight initialization
void InitWeights();
void ForceWeights(std::vector<Double_t>* weights);
// helper functions for deleting network
void DeleteNetwork();
void DeleteNetworkLayer(TObjArray*& layer);
// debugging utilities
void PrintLayer(TObjArray* layer) const;
void PrintNeuron(TNeuron* neuron) const;
// private variables
TObjArray* fInputLayer; // cache this for fast access
std::vector<TNeuron*> fOutputNeurons; // cache this for fast access
TString fLayerSpec; // layout specification option
// some static flags
static const Bool_t fgDEBUG = kTRUE; // debug flag
ClassDef(MethodANNBase,0) // Base class for TMVA ANNs
};
template <typename WriteIterator>
inline void MethodANNBase::GetLayerActivation (size_t layerNumber, WriteIterator writeIterator)
{
// get the activation values of the nodes in layer "layerNumber"
// write the node activation values into the writeIterator
// assumes that the network has already been computed (by calling
// "GetRegressionValues")
if (layerNumber >= (size_t)fNetwork->GetEntriesFast())
return;
TObjArray* layer = (TObjArray*)fNetwork->At(layerNumber);
UInt_t nNodes = layer->GetEntriesFast();
for (UInt_t iNode = 0; iNode < nNodes; iNode++)
{
(*writeIterator) = ((TNeuron*)layer->At(iNode))->GetActivationValue();
++writeIterator;
}
}
} // namespace TMVA
#endif
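For illustration, here is a minimal sketch of how the GetLayerActivation template above might be called from user code, following the usage comment in the class declaration. This is not part of the packaged header; the helper name DumpLayerActivations, the MethodBase* argument, and the layer index 2 are assumptions for the example.

#include <vector>
#include <iterator>
#include "TMVA/MethodBase.h"
#include "TMVA/MethodMLP.h"

// Copy the activation values of one network layer into a vector.
// The network must have been evaluated beforehand (e.g. via GetMvaValue()
// or GetRegressionValues()), as noted in the header's usage comment.
std::vector<float> DumpLayerActivations(TMVA::MethodBase* method, size_t layer = 2)
{
   std::vector<float> layerValues;
   TMVA::MethodMLP* mlp = dynamic_cast<TMVA::MethodMLP*>(method);
   if (mlp) mlp->GetLayerActivation(layer, std::back_inserter(layerValues));
   return layerValues; // stays empty if the method is not MLP-based
}

GetLayerActivation writes through the iterator and increments it, so any output iterator works here; std::back_inserter is the pattern the header's own comment suggests.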