/usr/include/shogun/loss/SmoothHingeLoss.h is in libshogun-dev 3.1.1-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2011 Shashwat Lal Das
 * Written (W) 2012 Fernando José Iglesias García
 * Copyright (c) 2011 Berlin Institute of Technology and Max-Planck-Society.
 */
#ifndef _SMOOTHHINGELOSS_H__
#define _SMOOTHHINGELOSS_H__

#include <shogun/loss/LossFunction.h>

namespace shogun
{
/** @brief CSmoothHingeLoss implements
 * the smooth hinge loss function.
 */
class CSmoothHingeLoss: public CLossFunction
{
public:
	/**
	 * Constructor
	 */
	CSmoothHingeLoss(): CLossFunction() {};

	/**
	 * Destructor
	 */
	~CSmoothHingeLoss() {};

	/**
	 * Get loss for an example
	 *
	 * @param z where to evaluate the loss
	 *
	 * @return loss
	 */
	float64_t loss(float64_t z);
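
	/* The smooth hinge (cf. Rennie & Srebro, 2005) is commonly defined
	 * piecewise in the margin z = label * prediction:
	 *
	 *     l(z) = 1/2 - z         if z <= 0
	 *     l(z) = (1 - z)^2 / 2   if 0 < z < 1
	 *     l(z) = 0               if z >= 1
	 *
	 * so, for example, loss(0.25) = (0.75)^2 / 2 = 0.28125. The values
	 * actually returned are fixed by the implementation in
	 * SmoothHingeLoss.cpp.
	 */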

	/**
	 * Get first derivative of the loss function
	 *
	 * @param z where to evaluate the derivative of the loss
	 *
	 * @return first derivative
	 */
	float64_t first_derivative(float64_t z);
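
	/* Under the definition above, the first derivative is
	 *
	 *     l'(z) = -1      if z <= 0
	 *     l'(z) = z - 1   if 0 < z < 1
	 *     l'(z) = 0       if z >= 1
	 *
	 * and is continuous everywhere, unlike the derivative of the plain
	 * hinge loss.
	 */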

	/**
	 * Get second derivative of the loss function
	 *
	 * @param z where to evaluate the second derivative of the loss
	 *
	 * @return second derivative
	 */
	float64_t second_derivative(float64_t z);
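
	/* Correspondingly, the second derivative is 1 on 0 < z < 1 and 0
	 * elsewhere: the linear, quadratic and flat pieces join with matching
	 * slopes at z = 0 and z = 1, which is what makes this hinge "smooth".
	 */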

	/**
	 * Get importance aware weight update for this loss function
	 *
	 * @param prediction prediction
	 * @param label label
	 * @param eta_t learning rate at update number t
	 * @param norm scale value
	 *
	 * @return update
	 */
	virtual float64_t get_update(float64_t prediction, float64_t label, float64_t eta_t, float64_t norm);
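
	/* With importance-weighted examples, multiplying one gradient step by
	 * the weight can overshoot; importance-aware updates (Karampatziakis &
	 * Langford, 2011) instead integrate the gradient along the step. This
	 * signature mirrors the update interface of Vowpal Wabbit's loss
	 * functions.
	 */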

	/**
	 * Get square of gradient, used for adaptive learning
	 *
	 * @param prediction prediction
	 * @param label label
	 *
	 * @return square of gradient
	 */
	virtual float64_t get_square_grad(float64_t prediction, float64_t label);
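
	/* For labels in {-1, +1} with z = label * prediction, the gradient of
	 * the loss w.r.t. the prediction is label * l'(z), so its square is
	 * simply l'(z)^2; adaptive (per-coordinate) learning-rate schemes such
	 * as AdaGrad accumulate these squared gradients.
	 */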

	/**
	 * Return loss type
	 *
	 * @return L_SMOOTHHINGELOSS
	 */
	virtual ELossType get_loss_type() { return L_SMOOTHHINGELOSS; }

	virtual const char* get_name() const { return "SmoothHingeLoss"; }
};
}
#endif
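
A minimal usage sketch, assuming the Shogun 3.x initialisation API (init_shogun_with_defaults/exit_shogun from shogun/base/init.h) and the common smooth hinge definition noted in the comments above:

#include <cstdio>

#include <shogun/base/init.h>
#include <shogun/loss/SmoothHingeLoss.h>

using namespace shogun;

int main()
{
	init_shogun_with_defaults();

	CSmoothHingeLoss* smooth_hinge = new CSmoothHingeLoss();
	SG_REF(smooth_hinge);

	// z = label * prediction (the margin); 0 < 0.25 < 1 falls on the
	// quadratic piece, so the expected loss is (1 - 0.25)^2 / 2 = 0.28125
	printf("loss(0.25) = %f\n", smooth_hinge->loss(0.25));
	// ...and the expected slope there is 0.25 - 1 = -0.75
	printf("first_derivative(0.25) = %f\n", smooth_hinge->first_derivative(0.25));

	SG_UNREF(smooth_hinge);
	exit_shogun();
	return 0;
}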