/usr/include/caffe/layers/relu_layer.hpp is in libcaffe-cpu-dev 1.0.0-6.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
#ifndef CAFFE_RELU_LAYER_HPP_
#define CAFFE_RELU_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {
/**
 * @brief Rectified Linear Unit non-linearity @f$ y = \max(0, x) @f$.
 *        The simple max is fast to compute, and the function does not saturate.
 */
template <typename Dtype>
class ReLULayer : public NeuronLayer<Dtype> {
 public:
  /**
   * @param param provides ReLUParameter relu_param,
   *     with ReLULayer options:
   *   - negative_slope (\b optional, default 0).
   *     the value @f$ \nu @f$ by which negative values are multiplied.
   */
  explicit ReLULayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}

  virtual inline const char* type() const { return "ReLU"; }
 protected:
  /**
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$
   * @param top output Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the computed outputs @f$
   *        y = \max(0, x)
   *      @f$ by default. If a non-zero negative_slope @f$ \nu @f$ is provided,
   *      the computed outputs are @f$ y = \max(0, x) + \nu \min(0, x) @f$.
   */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /**
   * @brief Computes the error gradient w.r.t. the ReLU inputs.
   *
   * @param top output Blob vector (length 1), providing the error gradient with
   *      respect to the outputs
   *   -# @f$ (N \times C \times H \times W) @f$
   *      containing error gradients @f$ \frac{\partial E}{\partial y} @f$
   *      with respect to computed outputs @f$ y @f$
   * @param propagate_down see Layer::Backward.
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$; Backward fills their diff with
   *      gradients @f$
   *        \frac{\partial E}{\partial x} = \left\{
   *        \begin{array}{lr}
   *            0 & \mathrm{if} \; x \le 0 \\
   *            \frac{\partial E}{\partial y} & \mathrm{if} \; x > 0
   *        \end{array} \right.
   *      @f$ if propagate_down[0], by default.
   *      If a non-zero negative_slope @f$ \nu @f$ is provided,
   *      the computed gradients are @f$
   *        \frac{\partial E}{\partial x} = \left\{
   *        \begin{array}{lr}
   *            \nu \frac{\partial E}{\partial y} & \mathrm{if} \; x \le 0 \\
   *            \frac{\partial E}{\partial y} & \mathrm{if} \; x > 0
   *        \end{array} \right.
   *      @f$.
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};

}  // namespace caffe

#endif  // CAFFE_RELU_LAYER_HPP_
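
For illustration, here is a minimal standalone sketch of the forward rule this header documents, y = max(0, x) + nu * min(0, x). It uses plain std::vector in place of Caffe's Blob<Dtype>, and relu_forward is a name invented for this example; the library's actual implementation lives in the corresponding .cpp and .cu source files, not in this header.

// relu_forward_sketch.cpp -- illustrative only, not Caffe's implementation.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Elementwise y = max(0, x) + negative_slope * min(0, x), matching the
// Forward_cpu documentation above (plain ReLU when negative_slope is 0).
std::vector<float> relu_forward(const std::vector<float>& bottom,
                                float negative_slope) {
  std::vector<float> top(bottom.size());
  for (std::size_t i = 0; i < bottom.size(); ++i) {
    top[i] = std::max(bottom[i], 0.0f)
           + negative_slope * std::min(bottom[i], 0.0f);
  }
  return top;
}

int main() {
  const std::vector<float> x = {-2.0f, -0.5f, 0.0f, 1.5f};
  for (float y : relu_forward(x, 0.1f)) {
    std::printf("%g ", y);  // prints: -0.2 -0.05 0 1.5
  }
  std::printf("\n");
  return 0;
}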
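
The backward rule can be sketched the same way: the gradient passes through unchanged where x > 0 and is scaled by negative_slope (zero by default) where x <= 0. Again, relu_backward and the vector-based interface are assumptions made for this standalone example.

// relu_backward_sketch.cpp -- illustrative only, not Caffe's implementation.
#include <cstddef>
#include <cstdio>
#include <vector>

// dE/dx = dE/dy where x > 0, and negative_slope * dE/dy where x <= 0,
// matching the Backward_cpu documentation above.
std::vector<float> relu_backward(const std::vector<float>& bottom,
                                 const std::vector<float>& top_diff,
                                 float negative_slope) {
  std::vector<float> bottom_diff(bottom.size());
  for (std::size_t i = 0; i < bottom.size(); ++i) {
    bottom_diff[i] =
        top_diff[i] * ((bottom[i] > 0.0f) ? 1.0f : negative_slope);
  }
  return bottom_diff;
}

int main() {
  const std::vector<float> x = {-2.0f, 0.0f, 1.5f};
  const std::vector<float> dy = {1.0f, 1.0f, 1.0f};
  for (float g : relu_backward(x, dy, 0.1f)) {
    std::printf("%g ", g);  // prints: 0.1 0.1 1
  }
  std::printf("\n");
  return 0;
}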