/usr/include/ql/experimental/credit/gaussianlhplossmodel.hpp is in libquantlib0-dev 1.7.1-1.
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
Copyright (C) 2008 Roland Lichters
Copyright (C) 2009, 2014 Jose Aparicio
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<quantlib-dev@lists.sf.net>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
*/
#ifndef quantlib_gaussian_lhp_lossmodel_hpp
#define quantlib_gaussian_lhp_lossmodel_hpp
#include <ql/math/distributions/bivariatenormaldistribution.hpp>
#include <ql/experimental/credit/recoveryratequote.hpp>
#include <ql/quotes/simplequote.hpp>
#include <ql/experimental/credit/defaultlossmodel.hpp>
#include <ql/experimental/credit/basket.hpp>
#include <ql/experimental/math/latentmodel.hpp>
#include <boost/bind.hpp>
#include <boost/function.hpp>
#include <numeric>
/* Intended to replace GaussianLHPCDOEngine in
ql/experimental/credit/syntheticcdoengines.hpp
Moved from an engine to a loss model; CDO engines might refer to it.
*/
namespace QuantLib {
/*!
Portfolio loss model with analytical expected tranche loss for a large
homogeneous pool with Gaussian one-factor copula. See for example
"The Normal Inverse Gaussian Distribution for Synthetic CDO pricing.",
Anna Kalemanova, Bernd Schmid, Ralf Werner,
Journal of Derivatives, Vol. 14, No. 3, (Spring 2007), pp. 80-93.
http://www.defaultrisk.com/pp_crdrv_91.htm
It can be used to price a credit derivative or to provide risk metrics of
a portfolio.
\todo It should check that basket exposures are deterministic (fixed
or programmed amortizing); otherwise the model is not fit for the basket.
\todo Possible bug on tranched baskets with an upper limit above the
maximum attainable loss?
*/
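/* A minimal usage sketch (hypothetical values throughout; assumes a
previously built boost::shared_ptr<Basket> named 'basket'):

Handle<Quote> correl(boost::shared_ptr<Quote>(new SimpleQuote(0.3)));
std::vector<Real> recoveries(3, 0.4); // flat 40% recovery, three names
boost::shared_ptr<GaussianLHPLossModel> model(
    new GaussianLHPLossModel(correl, recoveries));
basket->setLossModel(model);
Real etl = basket->expectedTrancheLoss(Date(20, December, 2016));
*/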
class GaussianLHPLossModel : public DefaultLossModel,
public LatentModel<GaussianCopulaPolicy> {
public:
typedef GaussianCopulaPolicy copulaType;
GaussianLHPLossModel(
const Handle<Quote>& correlQuote,
const std::vector<Handle<RecoveryRateQuote> >& quotes);
GaussianLHPLossModel(
Real correlation,
const std::vector<Real>& recoveries);
GaussianLHPLossModel(
const Handle<Quote>& correlQuote,
const std::vector<Real>& recoveries);
void update() {
sqrt1minuscorrel_ = std::sqrt(1.-correl_->value());
beta_ = std::sqrt(correl_->value());
biphi_ = BivariateCumulativeNormalDistribution(
-beta_);
// tell basket to notify instruments, etc, we are invalid
if(!basket_.empty()) basket_->notifyObservers();
}
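/* For intuition (the standard one-factor Gaussian copula set-up, stated
as a reminder rather than as a transcription of the LatentModel
internals): each name's latent variable is
    X_i = beta * M + sqrt(1 - beta^2) * Z_i,
with M and Z_i independent standard normals, so the pairwise asset
correlation is beta^2 = correl. Hence update() caches
beta_ = sqrt(correl) and sqrt1minuscorrel_ = sqrt(1 - correl).
*/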
private:
void resetModel() { }
/*! @param attachLimit as a fraction of the underlying live portfolio
notional
*/
Real expectedTrancheLossImpl(
Real remainingNot, // << at the given date 'd'
Real prob, // << at the given date 'd'
Real averageRR, // << at the given date 'd'
Real attachLimit, Real detachLimit) const;
public:
Real expectedTrancheLoss(const Date& d) const {
// TODO: can calls to Basket::remainingNotional(d) be cached?
const Real remainingfullNot = basket_->remainingNotional(d);
Real averageRR = averageRecovery(d);
Probability prob = averageProb(d);
Real remainingAttachAmount = basket_->remainingAttachmentAmount();
Real remainingDetachAmount = basket_->remainingDetachmentAmount();
//const Real attach = std::min(remainingAttachAmount
// / remainingfullNot, 1.);
//const Real detach = std::min(remainingDetachAmount
// / remainingfullNot, 1.);
const Real attach = remainingAttachAmount / remainingfullNot;
const Real detach = remainingDetachAmount / remainingfullNot;
return expectedTrancheLossImpl(remainingfullNot, prob, averageRR,
attach, detach);
}
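/* By definition the quantity computed above is
    ETL(d) = E[ min(max(L(d) - A, 0), D - A) ],
where L(d) is the remaining portfolio loss at date d and A, D are the
remaining attachment and detachment amounts. Under the LHP assumption
this expectation has a closed form involving the bivariate normal
distribution (presumably why biphi_ is cached in update()); see the
reference in the class description.
*/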
/*! @param remainingLossFraction loss fraction expressed in live
(remaining) tranche units, not portfolio units; i.e. the remaining
attachment is 0% and the remaining detachment is 100%.
*/
Real probOverLoss(const Date& d, Real remainingLossFraction) const;
//! Returns the ESF as an absolute amount (rather than a fraction)
/* The implementation here, a transformation from ETL to ESF, is a
generic algorithm not specific to this model, so it should be moved
to the Basket/DefaultLossModel class.
\todo Implement the inverse transformation.
*/
Real expectedShortfall(const Date& d, Probability perctl) const;
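/* For reference (textbook definition, assuming a continuous loss
distribution): the expected shortfall at confidence level p is
    ESF(d, p) = E[ L | L > VaR_p(d) ],
the expected tranche loss conditional on losses exceeding the
percentile returned by percentile(d, p).
*/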
protected:
// This is wrong: it is not accounting for the current defaults ....
// Returns the loss of the underlying (untranched) portfolio, in
// actual loss units.
Real percentilePortfolioLossFraction(const Date& d, Real perctl) const;
Real expectedRecovery(const Date& d, Size iName,
const DefaultProbKey& ik) const {
return rrQuotes_[iName].currentLink()->value();
}
public:
// Same as percentilePortfolioLossFraction, but tranched.
Real percentile(const Date& d, Real perctl) const {
const Real remainingNot = basket_->remainingNotional(d);
Real remainingAttachAmount = basket_->remainingAttachmentAmount();
Real remainingDetachAmount = basket_->remainingDetachmentAmount();
const Real attach =
std::min(remainingAttachAmount / remainingNot, 1.);
const Real detach =
std::min(remainingDetachAmount / remainingNot, 1.);
return remainingNot *
std::min(std::max(percentilePortfolioLossFraction(d, perctl)
- attach, 0.), detach - attach);
}
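/* Worked example with hypothetical numbers: with remaining attach and
detach fractions of 3% and 7%, and a portfolio loss fraction of 5% at
the requested percentile, the tranched figure is
    remainingNot * min(max(0.05 - 0.03, 0.), 0.07 - 0.03)
        = remainingNot * 0.02
*/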
// TODO: should this be an overload of DefaultLossModel?
Probability averageProb(const Date& d) const {
// weighted average by programmed exposure.
const std::vector<Probability> probs =
basket_->remainingProbabilities(d);//use remaining basket
const std::vector<Real> remainingNots =
basket_->remainingNotionals(d);
return std::inner_product(probs.begin(), probs.end(),
remainingNots.begin(), 0.) / basket_->remainingNotional(d);
}
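/* Worked example with hypothetical numbers: two remaining names with
notionals {100, 300} and default probabilities {2%, 4%} give
    (100 * 0.02 + 300 * 0.04) / 400 = 3.5%
*/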
/* One could define the average recovery without the probability
factor, weighting by notional only, but then the expected loss of the
averaged/aggregated portfolio and that of the original portfolio would
not coincide. The probability weighting, however, introduces a time
dependence in the recovery value.
Weighting by notional alone is also time dependent, since the basket
might amortize.
*/
Real averageRecovery(
const Date& d) const //no explicit time dependence in this model
{
const std::vector<Probability> probs =
basket_->remainingProbabilities(d);
std::vector<Real> recoveries;
for(Size i=0; i<basket_->remainingSize(); i++)
recoveries.push_back(rrQuotes_[i]->value());
std::vector<Real> notionals = basket_->remainingNotionals(d);
Real denominator = std::inner_product(notionals.begin(),
notionals.end(), probs.begin(), 0.);
if(denominator == 0.) return 0.;
std::transform(notionals.begin(), notionals.end(), probs.begin(),
notionals.begin(), std::multiplies<Real>());
return std::inner_product(recoveries.begin(), recoveries.end(),
notionals.begin(), 0.) / denominator;
}
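/* Worked example with hypothetical numbers: recoveries {40%, 20%},
notionals {100, 300} and default probabilities {2%, 4%} give
    (0.4 * 100 * 0.02 + 0.2 * 300 * 0.04) / (100 * 0.02 + 300 * 0.04)
        = 3.2 / 14 = ~22.9%,
the probability-and-notional weighted average described in the
comment block below.
*/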
private:
// cached
mutable Real sqrt1minuscorrel_;
Handle<Quote> correl_;
std::vector<Handle<RecoveryRateQuote> > rrQuotes_;
// calculation buffers
/* The problem with defining a fixed average recovery on a portfolio
with uneven exposures is that it does not preserve portfolio
moments like the expected loss. To achieve that, one should define the
average recovery with a time dependence:
$\hat{R}(t) = \frac{\sum_i R_i N_i P_i(t)}{\sum_i N_i P_i(t)}$
But the date dependence significantly increases the calculation cost.
Notice that this problem disappears if the recoveries are all equal.
*/
Real beta_;
BivariateCumulativeNormalDistribution biphi_;
static CumulativeNormalDistribution const phi_;
};
}
#endif