QuantLib: a free/open-source library for quantitative finance
fully annotated source code - version 1.38
pseudosqrt.cpp
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/*
 Copyright (C) 2003, 2004, 2007 Ferdinando Ametrano
 Copyright (C) 2006 Yiping Chen
 Copyright (C) 2007 Neil Firth

 This file is part of QuantLib, a free-software/open-source library
 for financial quantitative analysts and developers - http://quantlib.org/

 QuantLib is free software: you can redistribute it and/or modify it
 under the terms of the QuantLib license. You should have received a
 copy of the license along with this program; if not, please email
 <quantlib-dev@lists.sf.net>. The license is also available online at
 <http://quantlib.org/license.shtml>.

 This program is distributed in the hope that it will be useful, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE. See the license for more details.
*/

#include <algorithm>
#include <ql/math/comparison.hpp>
#include <ql/math/matrixutilities/choleskydecomposition.hpp>
#include <ql/math/matrixutilities/pseudosqrt.hpp>
#include <ql/math/matrixutilities/symmetricschurdecomposition.hpp>
#include <ql/math/optimization/conjugategradient.hpp>
#include <ql/math/optimization/constraint.hpp>
#include <ql/math/optimization/problem.hpp>
#include <utility>

namespace QuantLib {

    namespace {

        #if defined(QL_EXTRA_SAFETY_CHECKS)
        void checkSymmetry(const Matrix& matrix) {
            Size size = matrix.rows();
            QL_REQUIRE(size == matrix.columns(),
                       "non square matrix: " << size << " rows, " <<
                       matrix.columns() << " columns");
            for (Size i=0; i<size; ++i)
                for (Size j=0; j<i; ++j)
                    QL_REQUIRE(close(matrix[i][j], matrix[j][i]),
                               "non symmetric matrix: " <<
                               "[" << i << "][" << j << "]=" << matrix[i][j] <<
                               ", [" << j << "][" << i << "]=" << matrix[j][i]);
        }
        #endif

        // Rescale the rows of pseudo so that the diagonal of
        // pseudo*transpose(pseudo) matches the diagonal of matrix.
        void normalizePseudoRoot(const Matrix& matrix,
                                 Matrix& pseudo) {
            Size size = matrix.rows();
            QL_REQUIRE(size == pseudo.rows(),
                       "matrix/pseudo mismatch: matrix rows are " << size <<
                       " while pseudo rows are " << pseudo.rows());
            Size pseudoCols = pseudo.columns();

            // row normalization
            for (Size i=0; i<size; ++i) {
                Real norm = 0.0;
                for (Size j=0; j<pseudoCols; ++j)
                    norm += pseudo[i][j]*pseudo[i][j];
                if (norm>0.0) {
                    Real normAdj = std::sqrt(matrix[i][i]/norm);
                    for (Size j=0; j<pseudoCols; ++j)
                        pseudo[i][j] *= normAdj;
                }
            }
        }
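
        // The cost function below follows the hypersphere decomposition of
        // Rebonato and Jaeckel (1999): each row of a candidate pseudo root
        // is written in spherical coordinates, i.e. as products of sines
        // and cosines of a set of angles. This keeps every row at unit norm
        // for any angle values, so the angles can be optimized without
        // constraints while the reconstructed matrix keeps a valid diagonal.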
74 //cost function for hypersphere and lower-diagonal algorithm
75 class HypersphereCostFunction : public CostFunction {
76 private:
82 public:
83 HypersphereCostFunction(const Matrix& targetMatrix,
84 Array targetVariance,
85 bool lowerDiagonal)
86 : size_(targetMatrix.rows()), lowerDiagonal_(lowerDiagonal),
87 targetMatrix_(targetMatrix), targetVariance_(std::move(targetVariance)),
89 Array values(const Array&) const override {
90 QL_FAIL("values method not implemented");
91 }
92 Real value(const Array& x) const override {
93 Size i,j,k;
94 std::fill(currentRoot_.begin(), currentRoot_.end(), 1.0);
95 if (lowerDiagonal_) {
96 for (i=0; i<size_; i++) {
97 for (k=0; k<size_; k++) {
98 if (k>i) {
99 currentRoot_[i][k]=0;
100 } else {
101 for (j=0; j<=k; j++) {
102 if (j == k && k!=i)
103 currentRoot_[i][k] *=
104 std::cos(x[i*(i-1)/2+j]);
105 else if (j!=i)
106 currentRoot_[i][k] *=
107 std::sin(x[i*(i-1)/2+j]);
108 }
109 }
110 }
111 }
112 } else {
113 for (i=0; i<size_; i++) {
114 for (k=0; k<size_; k++) {
115 for (j=0; j<=k; j++) {
116 if (j == k && k!=size_-1)
117 currentRoot_[i][k] *=
118 std::cos(x[j*size_+i]);
119 else if (j!=size_-1)
120 currentRoot_[i][k] *=
121 std::sin(x[j*size_+i]);
122 }
123 }
124 }
125 }
126 Real temp, error=0;
127 tempMatrix_ = transpose(currentRoot_);
129 for (i=0;i<size_;i++) {
130 for (j=0;j<size_;j++) {
131 temp = currentMatrix_[i][j]*targetVariance_[i]
133 error += temp*temp;
134 }
135 }
136 return error;
137 }
138 };
139
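
        // Note on indexing: in the lower-diagonal case the angles for row i
        // are stored contiguously at x[i*(i-1)/2 .. i*(i-1)/2+i-1] (row i
        // has i free angles); in the full case the angle grid is laid out
        // as x[j*size+i], with size-1 angles per row.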
        // Optimization function for hypersphere and lower-diagonal algorithm
        Matrix hypersphereOptimize(const Matrix& targetMatrix,
                                   const Matrix& currentRoot,
                                   const bool lowerDiagonal) {
            Size i,j,k,size = targetMatrix.rows();
            Matrix result(currentRoot);
            Array variance(size, 0);
            for (i=0; i<size; i++){
                variance[i]=std::sqrt(targetMatrix[i][i]);
            }
            if (lowerDiagonal) {
                Matrix approxMatrix(result*transpose(result));
                result = CholeskyDecomposition(approxMatrix, true);
                for (i=0; i<size; i++) {
                    for (j=0; j<size; j++) {
                        result[i][j]/=std::sqrt(approxMatrix[i][i]);
                    }
                }
            } else {
                for (i=0; i<size; i++) {
                    for (j=0; j<size; j++) {
                        result[i][j]/=variance[i];
                    }
                }
            }

            ConjugateGradient optimize;
            EndCriteria endCriteria(100, 10, 1e-8, 1e-8, 1e-8);
            HypersphereCostFunction costFunction(targetMatrix, variance,
                                                 lowerDiagonal);
            NoConstraint constraint;
            // hypersphere vector optimization

            if (lowerDiagonal) {
                Array theta(size * (size-1)/2);
                const Real eps=1e-16;
                for (i=1; i<size; i++) {
                    for (j=0; j<i; j++) {
                        theta[i*(i-1)/2+j]=result[i][j];
                        if (theta[i*(i-1)/2+j]>1-eps)
                            theta[i*(i-1)/2+j]=1-eps;
                        if (theta[i*(i-1)/2+j]<-1+eps)
                            theta[i*(i-1)/2+j]=-1+eps;
                        for (k=0; k<j; k++) {
                            theta[i*(i-1)/2+j] /= std::sin(theta[i*(i-1)/2+k]);
                            if (theta[i*(i-1)/2+j]>1-eps)
                                theta[i*(i-1)/2+j]=1-eps;
                            if (theta[i*(i-1)/2+j]<-1+eps)
                                theta[i*(i-1)/2+j]=-1+eps;
                        }
                        theta[i*(i-1)/2+j] = std::acos(theta[i*(i-1)/2+j]);
                        if (j==i-1) {
                            if (result[i][i]<0)
                                theta[i*(i-1)/2+j]=-theta[i*(i-1)/2+j];
                        }
                    }
                }
                Problem p(costFunction, constraint, theta);
                optimize.minimize(p, endCriteria);
                theta = p.currentValue();
                std::fill(result.begin(),result.end(),1.0);
                for (i=0; i<size; i++) {
                    for (k=0; k<size; k++) {
                        if (k>i) {
                            result[i][k]=0;
                        } else {
                            for (j=0; j<=k; j++) {
                                if (j == k && k!=i)
                                    result[i][k] *=
                                        std::cos(theta[i*(i-1)/2+j]);
                                else if (j!=i)
                                    result[i][k] *=
                                        std::sin(theta[i*(i-1)/2+j]);
                            }
                        }
                    }
                }
            } else {
                Array theta(size * (size-1));
                const Real eps=1e-16;
                for (i=0; i<size; i++) {
                    for (j=0; j<size-1; j++) {
                        theta[j*size+i]=result[i][j];
                        if (theta[j*size+i]>1-eps)
                            theta[j*size+i]=1-eps;
                        if (theta[j*size+i]<-1+eps)
                            theta[j*size+i]=-1+eps;
                        for (k=0;k<j;k++) {
                            theta[j*size+i] /= std::sin(theta[k*size+i]);
                            if (theta[j*size+i]>1-eps)
                                theta[j*size+i]=1-eps;
                            if (theta[j*size+i]<-1+eps)
                                theta[j*size+i]=-1+eps;
                        }
                        theta[j*size+i] = std::acos(theta[j*size+i]);
                        if (j==size-2) {
                            if (result[i][j+1]<0)
                                theta[j*size+i]=-theta[j*size+i];
                        }
                    }
                }
                Problem p(costFunction, constraint, theta);
                optimize.minimize(p, endCriteria);
                theta=p.currentValue();
                std::fill(result.begin(),result.end(),1.0);
                for (i=0; i<size; i++) {
                    for (k=0; k<size; k++) {
                        for (j=0; j<=k; j++) {
                            if (j == k && k!=size-1)
                                result[i][k] *= std::cos(theta[j*size+i]);
                            else if (j!=size-1)
                                result[i][k] *= std::sin(theta[j*size+i]);
                        }
                    }
                }
            }

            // scale back by the target standard deviations so that the
            // diagonal of result*transpose(result) matches the target
            for (i=0; i<size; i++) {
                for (j=0; j<size; j++) {
                    result[i][j]*=variance[i];
                }
            }
            return result;
        }

        // Matrix infinity norm (maximum absolute row sum). See Golub and
        // van Loan (2.3.10) or <http://en.wikipedia.org/wiki/Matrix_norm>
        Real normInf(const Matrix& M) {
            Size rows = M.rows();
            Size cols = M.columns();
            Real norm = 0.0;
            for (Size i=0; i<rows; ++i) {
                Real rowSum = 0.0;
                for (Size j=0; j<cols; ++j)
                    rowSum += std::fabs(M[i][j]);
                norm = std::max(norm, rowSum);
            }
            return norm;
        }

        // Take a matrix and make all the diagonal entries 1.
        Matrix projectToUnitDiagonalMatrix(const Matrix& M) {
            Size size = M.rows();
            QL_REQUIRE(size == M.columns(),
                       "matrix not square");

            Matrix result(M);
            for (Size i=0; i<size; ++i)
                result[i][i] = 1.0;

            return result;
        }

        // Take a matrix and make all the eigenvalues non-negative
        Matrix projectToPositiveSemidefiniteMatrix(Matrix& M) {
            Size size = M.rows();
            QL_REQUIRE(size == M.columns(),
                       "matrix not square");

            Matrix diagonal(size, size, 0.0);
            SymmetricSchurDecomposition jd(M);
            for (Size i=0; i<size; ++i)
                diagonal[i][i] = std::max<Real>(jd.eigenvalues()[i], 0.0);

            Matrix result =
                jd.eigenvectors()*diagonal*transpose(jd.eigenvectors());
            return result;
        }
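
        // Higham (2002) finds the nearest correlation matrix by alternating
        // projections onto the positive-semidefinite cone and onto the set
        // of matrices with unit diagonal; deltaS below is Dykstra's
        // correction, which steers the iteration toward the nearest point
        // in the intersection rather than merely some point in it.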
        // implementation of the Higham algorithm to find the nearest
        // correlation matrix.
        Matrix highamImplementation(const Matrix& A, const Size maxIterations, const Real& tolerance) {

            Size size = A.rows();
            Matrix R, Y(A), X(A), deltaS(size, size, 0.0);

            Matrix lastX(X);
            Matrix lastY(Y);

            for (Size i=0; i<maxIterations; ++i) {
                R = Y - deltaS;
                X = projectToPositiveSemidefiniteMatrix(R);
                deltaS = X - R;
                Y = projectToUnitDiagonalMatrix(X);

                // convergence test
                if (std::max({
                        normInf(X-lastX)/normInf(X),
                        normInf(Y-lastY)/normInf(Y),
                        normInf(Y-X)/normInf(Y)
                    })
                    <= tolerance)
                {
                    break;
                }
                lastX = X;
                lastY = Y;
            }

            // ensure we return a symmetric matrix
            for (Size i=0; i<size; ++i)
                for (Size j=0; j<i; ++j)
                    Y[i][j] = Y[j][i];

            return Y;
        }
    }

    Matrix pseudoSqrt(const Matrix& matrix, SalvagingAlgorithm::Type sa) {
        Size size = matrix.rows();

        #if defined(QL_EXTRA_SAFETY_CHECKS)
        checkSymmetry(matrix);
        #else
        QL_REQUIRE(size == matrix.columns(),
                   "non square matrix: " << size << " rows, " <<
                   matrix.columns() << " columns");
        #endif

        // spectral (a.k.a Principal Component) analysis
        SymmetricSchurDecomposition jd(matrix);
        Matrix diagonal(size, size, 0.0);

        // salvaging algorithm
        Matrix result(size, size);
        bool negative;
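        // Each case below restores positive semidefiniteness differently:
        // None requires it up front; Spectral zeroes negative eigenvalues;
        // Hypersphere and LowerDiagonal refine the spectral root by angle
        // optimization; Higham moves to the nearest correlation matrix;
        // Principal builds the symmetric root E*sqrt(Lambda)*transpose(E).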
        switch (sa) {
          case SalvagingAlgorithm::None:
            // eigenvalues are sorted in decreasing order
            QL_REQUIRE(jd.eigenvalues()[size-1]>=-1e-16,
                       "negative eigenvalue(s) ("
                       << std::scientific << jd.eigenvalues()[size-1]
                       << ")");
            result = CholeskyDecomposition(matrix, true);
            break;
          case SalvagingAlgorithm::Spectral:
            // negative eigenvalues set to zero
            for (Size i=0; i<size; i++)
                diagonal[i][i] =
                    std::sqrt(std::max<Real>(jd.eigenvalues()[i], 0.0));

            result = jd.eigenvectors() * diagonal;
            normalizePseudoRoot(matrix, result);
            break;
          case SalvagingAlgorithm::Hypersphere:
            // negative eigenvalues set to zero
            negative=false;
            for (Size i=0; i<size; ++i){
                diagonal[i][i] =
                    std::sqrt(std::max<Real>(jd.eigenvalues()[i], 0.0));
                if (jd.eigenvalues()[i]<0.0) negative=true;
            }
            result = jd.eigenvectors() * diagonal;
            normalizePseudoRoot(matrix, result);

            if (negative)
                result = hypersphereOptimize(matrix, result, false);
            break;
          case SalvagingAlgorithm::LowerDiagonal:
            // negative eigenvalues set to zero
            negative=false;
            for (Size i=0; i<size; ++i){
                diagonal[i][i] =
                    std::sqrt(std::max<Real>(jd.eigenvalues()[i], 0.0));
                if (jd.eigenvalues()[i]<0.0) negative=true;
            }
            result = jd.eigenvectors() * diagonal;

            normalizePseudoRoot(matrix, result);

            if (negative)
                result = hypersphereOptimize(matrix, result, true);
            break;
          case SalvagingAlgorithm::Higham: {
              int maxIterations = 40;
              Real tol = 1e-6;
              result = highamImplementation(matrix, maxIterations, tol);
              result = CholeskyDecomposition(result, true);
          }
            break;
          case SalvagingAlgorithm::Principal: {
              QL_REQUIRE(jd.eigenvalues().back() >= -1e-16,
                         "negative eigenvalue(s) ("
                         << std::scientific << jd.eigenvalues().back()
                         << ")");

              Array sqrtEigenvalues(size);
              std::transform(
                  jd.eigenvalues().begin(), jd.eigenvalues().end(),
                  sqrtEigenvalues.begin(),
                  [](Real lambda) -> Real {
                      return std::sqrt(std::max<Real>(lambda, 0.0));
                  }
              );

              // diagonal ends up holding sqrt(Lambda)*transpose(E), so
              // that result = E*sqrt(Lambda)*transpose(E)
              for (Size i=0; i < size; ++i)
                  std::transform(
                      sqrtEigenvalues.begin(), sqrtEigenvalues.end(),
                      jd.eigenvectors().row_begin(i),
                      diagonal.column_begin(i),
                      std::multiplies<>()
                  );

              result = jd.eigenvectors()*diagonal;
              result = 0.5*(result + transpose(result));
          }
            break;
          default:
            QL_FAIL("unknown salvaging algorithm");
        }

        return result;
    }

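    // Illustrative sketch: given a symmetric 2x2 correlation matrix with
    // off-diagonal entry 0.9, a usable pseudo square root is obtained as
    //
    //     Matrix rho(2, 2);
    //     rho[0][0] = 1.0; rho[0][1] = 0.9;
    //     rho[1][0] = 0.9; rho[1][1] = 1.0;
    //     Matrix b = pseudoSqrt(rho, SalvagingAlgorithm::Spectral);
    //     // b*transpose(b) reproduces rho up to numerical noise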

    Matrix rankReducedSqrt(const Matrix& matrix,
                           Size maxRank,
                           Real componentRetainedPercentage,
                           SalvagingAlgorithm::Type sa) {
        Size size = matrix.rows();

        #if defined(QL_EXTRA_SAFETY_CHECKS)
        checkSymmetry(matrix);
        #else
        QL_REQUIRE(size == matrix.columns(),
                   "non square matrix: " << size << " rows, " <<
                   matrix.columns() << " columns");
        #endif

        QL_REQUIRE(componentRetainedPercentage>0.0,
                   "no eigenvalues retained");

        QL_REQUIRE(componentRetainedPercentage<=1.0,
                   "percentage to be retained > 100%");

        QL_REQUIRE(maxRank>=1,
                   "max rank required < 1");

        // spectral (a.k.a Principal Component) analysis
        SymmetricSchurDecomposition jd(matrix);
        Array eigenValues = jd.eigenvalues();

        // salvaging algorithm
        switch (sa) {
          case SalvagingAlgorithm::None:
            // eigenvalues are sorted in decreasing order
            QL_REQUIRE(eigenValues[size-1]>=-1e-16,
                       "negative eigenvalue(s) ("
                       << std::scientific << eigenValues[size-1]
                       << ")");
            break;
          case SalvagingAlgorithm::Spectral:
            // negative eigenvalues set to zero
            for (Size i=0; i<size; ++i)
                eigenValues[i] = std::max<Real>(eigenValues[i], 0.0);
            break;
          case SalvagingAlgorithm::Higham:
            {
                int maxIterations = 40;
                Real tolerance = 1e-6;
                Matrix adjustedMatrix = highamImplementation(matrix, maxIterations, tolerance);
                jd = SymmetricSchurDecomposition(adjustedMatrix);
                eigenValues = jd.eigenvalues();
            }
            break;
          default:
            QL_FAIL("unknown or invalid salvaging algorithm");
        }

        // factor reduction
        Real enough = componentRetainedPercentage *
                      std::accumulate(eigenValues.begin(),
                                      eigenValues.end(), Real(0.0));
        if (componentRetainedPercentage == 1.0) {
            // numerical glitches might cause some factors to be discarded
            enough *= 1.1;
        }
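        // e.g., with eigenvalues {0.7, 0.25, 0.05} and a 90% retention
        // target, enough = 0.9 and two factors are kept below
        // (0.7 < 0.9, then 0.7+0.25 >= 0.9).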
        // retain at least one factor
        Real components = eigenValues[0];
        Size retainedFactors = 1;
        for (Size i=1; components<enough && i<size; ++i) {
            components += eigenValues[i];
            retainedFactors++;
        }
        // output is guaranteed to have rank <= maxRank
        retainedFactors=std::min(retainedFactors, maxRank);

        Matrix diagonal(size, retainedFactors, 0.0);
        for (Size i=0; i<retainedFactors; ++i)
            diagonal[i][i] = std::sqrt(eigenValues[i]);
        Matrix result = jd.eigenvectors() * diagonal;

        normalizePseudoRoot(matrix, result);
        return result;
    }
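
    // Illustrative sketch: reducing the same kind of matrix to at most
    // two driving factors while retaining 99% of total variance:
    //
    //     Matrix b = rankReducedSqrt(rho, 2, 0.99,
    //                                SalvagingAlgorithm::Spectral);
    //     // b has as many rows as rho and at most 2 columns;
    //     // b*transpose(b) approximates rho with rank <= 2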
}