/**
\page sCramerIsBad Speed, but not at the expense of accuracy.

TooN aims to be a fast library, and may choose one of several algorithms
depending on the size of the arguments. However, TooN will never substitute a
fast but numerically inferior algorithm. For example, LU decomposition, Gaussian
elimination and Gauss-Jordan reduction all have similar numerical properties for
computing a matrix inverse. Direct inversion using Cramer's rule is
significantly less stable, even for 3x3 matrices.

The following code computes the inverse of the ill-conditioned matrix:
\f[
\begin{bmatrix}
1 & 2            & 3 \\
1 & 2 + \epsilon & 3 \\
1 & 2            & 3 + \epsilon
\end{bmatrix},
\f]
using LU decomposition, Gauss-Jordan reduction, Gaussian elimination, a
pseudo-inverse computed with singular value decomposition, and Cramer's rule.
The error is computed as:
\f[
\|A^{-1} A - I\|_{\text{fro}}
\f]
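Row reduction makes the conditioning explicit: subtracting the first row from
each of the other two leaves
\f[
\begin{bmatrix}
1 & 2        & 3 \\
0 & \epsilon & 0 \\
0 & 0        & \epsilon
\end{bmatrix},
\f]
which has the same determinant, so \f$\det A = \epsilon^2\f$ and the matrix
approaches the rank one matrix \c singular in the code below as
\f$\epsilon \to 0\f$.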
The code is:

@code
#include <TooN/TooN.h>
#include <TooN/helpers.h>
#include <TooN/LU.h>
#include <TooN/GR_SVD.h>
#include <TooN/gauss_jordan.h>
#include <TooN/gaussian_elimination.h>
#include <cmath>
#include <iomanip>
#include <iostream>
using namespace TooN;
using namespace std;

//Invert a 3x3 matrix directly with Cramer's rule: each entry of the
//inverse is a signed 2x2 cofactor divided by the determinant.
Matrix<3> invert_cramer(const Matrix<3>& A)
{
    Matrix<3> i;

    //Determinant of A
    double t0 = A[0][0]*A[1][1]*A[2][2]-A[0][0]*A[1][2]*A[2][1]-A[1][0]*A[0][1]*A[2][2]+A[1][0]*A[0][2]*A[2][1]+A[2][0]*A[0][1]*A[1][2]-A[2][0]*A[0][2]*A[1][1];
    double idet = 1/t0;

    t0 = A[1][1]*A[2][2]-A[1][2]*A[2][1];
    i[0][0] = t0*idet;
    t0 = -A[0][1]*A[2][2]+A[0][2]*A[2][1];
    i[0][1] = t0*idet;
    t0 = A[0][1]*A[1][2]-A[0][2]*A[1][1];
    i[0][2] = t0*idet;
    t0 = -A[1][0]*A[2][2]+A[1][2]*A[2][0];
    i[1][0] = t0*idet;
    t0 = A[0][0]*A[2][2]-A[0][2]*A[2][0];
    i[1][1] = t0*idet;
    t0 = -A[0][0]*A[1][2]+A[0][2]*A[1][0];
    i[1][2] = t0*idet;
    t0 = A[1][0]*A[2][1]-A[1][1]*A[2][0];
    i[2][0] = t0*idet;
    t0 = -A[0][0]*A[2][1]+A[0][1]*A[2][0];
    i[2][1] = t0*idet;
    t0 = A[0][0]*A[1][1]-A[0][1]*A[1][0];
    i[2][2] = t0*idet;

    return i;
}

int main()
{
    Matrix<3> singular = Data(1, 2, 3,
                              1, 2, 3,
                              1, 2, 3);

    for(double i=0; i < 1000; i++)
    {
        double delta = pow(0.9, i);

        //Make a poorly conditioned matrix
        Matrix<3> bad = singular;
        bad[2][2] += delta;
        bad[1][1] += delta;

        //Compute the inverse with LU decomposition
        Matrix<3> linv;
        LU<3> blu(bad);
        linv = blu.get_inverse();

        //Compute the inverse with Gauss-Jordan reduction,
        //by reducing [bad | I] to [I | inverse]
        Matrix<3, 6> gj;
        Matrix<3> ginv;
        gj.slice<0,0,3,3>() = bad;
        gj.slice<0,3,3,3>() = Identity;
        gauss_jordan(gj);
        ginv = gj.slice<0,3,3,3>();

        //Compute the pseudo-inverse with singular value decomposition
        GR_SVD<3,3> bsvd(bad);
        Matrix<3> sinv = bsvd.get_pinv();

        //Compute the inverse with Gaussian elimination, by solving bad * x = I
        Matrix<3> I = Identity;
        Matrix<3> einv = gaussian_elimination(bad, I);

        //Compute the inverse with Cramer's rule
        Matrix<3> cinv = invert_cramer(bad);

        //Residual error ||A^{-1} A - I||_fro for each method
        double lerr = norm_fro(linv * bad + -1 * Identity);
        double gerr = norm_fro(ginv * bad + -1 * Identity);
        double serr = norm_fro(sinv * bad + -1 * Identity);
        double cerr = norm_fro(cinv * bad + -1 * Identity);
        double eerr = norm_fro(einv * bad + -1 * Identity);

        cout << setprecision(15) << scientific << delta << " "
             << lerr << " " << gerr << " " << serr << " "
             << eerr << " " << cerr << endl;
    }
}
@endcode

The direct inverse with Cramer's rule is about three times faster than the
built-in routines. However, its numerical stability is considerably worse,
giving errors 1e6 times higher in extreme cases:

\image html cramer.png "Comparison of errors in matrix inverse"
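The speed figure can be checked with a small timing harness along the following
lines. This is only a sketch: it assumes the \c invert_cramer routine from the
listing above is linked in, it uses \c std::chrono (so it requires C++11), and
the exact ratio will depend on the compiler, optimization flags and hardware:

@code
#include <TooN/TooN.h>
#include <TooN/LU.h>
#include <chrono>
#include <iostream>
using namespace TooN;
using namespace std;

Matrix<3> invert_cramer(const Matrix<3>& A); //as defined in the listing above

int main()
{
    //A well-conditioned matrix, so only speed is being compared
    Matrix<3> m = Data(4, 2, 1,
                       2, 5, 3,
                       1, 3, 6);

    const int N = 1000000;
    Matrix<3> sink = Zeros; //accumulate results so the loops cannot be optimized away

    auto start = chrono::steady_clock::now();
    for(int n=0; n < N; n++)
        sink += invert_cramer(m);
    auto middle = chrono::steady_clock::now();

    for(int n=0; n < N; n++)
    {
        LU<3> lu(m);
        sink += lu.get_inverse();
    }
    auto stop = chrono::steady_clock::now();

    cout << sink << endl;
    cout << "Cramer: " << chrono::duration<double>(middle - start).count() << " s, "
         << "LU: "     << chrono::duration<double>(stop - middle).count() << " s" << endl;
}
@endcode

*/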