This file is indexed.

/usr/include/shark/Models/GaussianNoiseModel.h is in libshark-dev 3.0.1+ds1-2ubuntu1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

/*!
 * 
 *
 * \brief       Implements a model corrupting its inputs with Gaussian noise.
 * 
 * 
 *
 * \author      O. Krause
 * \date        2014
 *
 *
 * \par Copyright 1995-2015 Shark Development Team
 * 
 * <BR><HR>
 * This file is part of Shark.
 * <http://image.diku.dk/shark/>
 * 
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published 
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 * 
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 * 
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark.  If not, see <http://www.gnu.org/licenses/>.
 *
 */
#ifndef SHARK_MODELS_GAUSSIANNOISEMODEL_H
#define SHARK_MODELS_GAUSSIANNOISEMODEL_H

#include <shark/Models/AbstractModel.h>
#include <shark/Rng/GlobalRng.h>
#include <shark/Core/OpenMP.h>
namespace shark {

/// \brief Model which corrupts the data using Gaussian noise
///
/// When training autoencoders, it has proven beneficial to add noise to the input
/// and train the model to remove that noise again, instead of only learning an
/// identity transformation. This model represents one choice of noise for this purpose:
/// Gaussian noise. The formula for corrupting an input \f$ x=(x_1,\dots,x_n) \f$ with variances
/// \f$ \sigma = (\sigma_1, \dots, \sigma_n) \f$ is
/// \f[ x_i \leftarrow x_i + N(0,\sigma_i) \f]
///
/// Usage is simple. Given your encoder/decoder pair
///   ConcatenatedModel<RealVector,RealVector> autoencoder = encoder >> decoder;
/// we can just concatenate this model:
///   GaussianNoiseModel noise(inputs, 0.1);//number of inputs and variance of the noise
///   ConcatenatedModel<RealVector,RealVector> denoisingAutoencoder = noise >> autoencoder;
/// and train the model using the standard autoencoder error.
class GaussianNoiseModel : public AbstractModel<RealVector,RealVector>
{
private:
	RealVector m_variances;
public:


	/// Default Constructor; use setStructure later
	GaussianNoiseModel(){
		m_features |= HAS_FIRST_PARAMETER_DERIVATIVE;
	}
	/// Constructor creating a model with given input size and the same variance for all inputs
	GaussianNoiseModel(unsigned int inputs, double variance)
	: m_variances(inputs,variance){
		m_features |= HAS_FIRST_PARAMETER_DERIVATIVE;
	}

	/// \brief From INameable: return the class name.
	std::string name() const
	{ return "GaussianNoiseModel"; }

	/// obtain the input dimension
	size_t inputSize() const{
		return m_variances.size();
	}

	/// obtain the output dimension
	size_t outputSize() const{
		return m_variances.size();
	}

	/// obtain the parameter vector
	RealVector parameterVector() const{
		return RealVector();
	}

	/// overwrite the parameter vector
	void setParameterVector(RealVector const& newParameters)
	{
		SIZE_CHECK(newParameters.size() == 0);
	}

	/// return the number of parameters
	size_t numberOfParameters() const{
		return 0;
	}

	/// overwrite structure and parameters
	void setStructure(unsigned int inputs, double variance){
		m_variances = RealVector(inputs,variance);
	}

	/// overwrite structure and parameters
	void setStructure(RealVector const& variances){
		m_variances = variances;
	}
	
	RealVector const& variances() const{
		return m_variances;
	}
	
	RealVector& variances(){
		return m_variances;
	}
	
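	/// Returns an empty state, since eval stores no intermediate results.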
	boost::shared_ptr<State> createState()const{
		return boost::shared_ptr<State>(new EmptyState());
	}

	/// \brief Add noise to the input
	void eval(BatchInputType const& inputs, BatchOutputType& outputs)const{
		SIZE_CHECK(inputs.size2() == inputSize());
		//we use the global Rng here, so if this is a threaded region we might
		//run into trouble when multiple threads run this. This should not be a bottleneck,
		//as this routine should be quite fast, while the layers that follow it
		//are likely to be far more expensive.
		SHARK_CRITICAL_REGION{
			outputs = inputs;
			for(std::size_t i = 0; i != outputs.size1(); ++i){
				for(std::size_t j = 0; j != outputs.size2(); ++j){
					outputs(i,j) += Rng::gauss(0,m_variances(j));
				}
			}
		}
	}
	/// Add noise to the input; the state object is not used.
	void eval(BatchInputType const& inputs, BatchOutputType& outputs, State& state)const{
		eval(inputs,outputs);
	}
	
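	/// The model has no parameters, so the weighted parameter derivative is always empty.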
	void weightedParameterDerivative(
		BatchInputType const& patterns, RealVector const& coefficients, State const& state, RealVector& gradient
	)const{
		gradient.resize(0);
	}
};


}
#endif
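
For reference, a minimal usage sketch (not part of the packaged header): it constructs the model with three inputs and a variance of 0.1 for each, then corrupts a small batch. It assumes RealMatrix is the batch type of RealVector models and that Rng::seed is available via shark/Rng/GlobalRng.h, as is usual in Shark 3.0.x; the input sizes and variance are arbitrary example values.

#include <shark/Models/GaussianNoiseModel.h>
#include <iostream>

int main(){
	shark::Rng::seed(42);                    // make the noise reproducible

	shark::GaussianNoiseModel noise(3, 0.1); // 3 inputs, variance 0.1 each

	shark::RealMatrix inputs(2, 3);          // batch of two 3-dimensional points
	for(std::size_t i = 0; i != inputs.size1(); ++i)
		for(std::size_t j = 0; j != inputs.size2(); ++j)
			inputs(i, j) = 1.0;

	shark::RealMatrix outputs;
	noise.eval(inputs, outputs);             // outputs = inputs + N(0, variance)

	for(std::size_t i = 0; i != outputs.size1(); ++i){
		for(std::size_t j = 0; j != outputs.size2(); ++j)
			std::cout << outputs(i, j) << " ";
		std::cout << "\n";
	}
	return 0;
}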