TooN 2.1
// -*- c++ -*-

// Copyright (C) 2005,2009 Tom Drummond (twd20@cam.ac.uk)
//
// This file is part of the TooN Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

#ifndef __SYMEIGEN_H
#define __SYMEIGEN_H

#include <iostream>
#include <cassert>
#include <cmath>
#include <utility>
#include <complex>
#include <TooN/lapack.h>

#include <TooN/TooN.h>

namespace TooN {
static const double root3=1.73205080756887729352744634150587236694280525381038062805580;

namespace Internal{

using std::swap;

///Default condition number for SymEigen::backsub, SymEigen::get_pinv and SymEigen::get_inv_diag
static const double symeigen_condition_no=1e9;

///@internal
///@brief Compute eigensystems for sizes > 3
///Helper struct for computing eigensystems, to allow for specialization on
///2x2 and 3x3 matrices.
///@ingroup gInternal
template <int Size> struct ComputeSymEigen {

    ///@internal
    ///Compute an eigensystem.
    ///@param m Input matrix (assumed to be symmetric)
    ///@param evectors Eigen vector output
    ///@param evalues Eigen values output
    template<int Rows, int Cols, typename P, typename B>
    static inline void compute(const Matrix<Rows,Cols,P, B>& m, Matrix<Size,Size,P> & evectors, Vector<Size, P>& evalues) {

        SizeMismatch<Rows, Cols>::test(m.num_rows(), m.num_cols());   //m must be square
        SizeMismatch<Size, Rows>::test(m.num_rows(), evalues.size()); //m must be the size of the system

        evectors = m;
        FortranInteger N = evalues.size();
        FortranInteger lda = evalues.size();
        FortranInteger info;
        FortranInteger lwork=-1;
        P size;

        // find out how much space fortran needs
        syev_((char*)"V",(char*)"U",&N,&evectors[0][0],&lda,&evalues[0], &size,&lwork,&info);
        lwork = int(size);
        Vector<Dynamic, P> WORK(lwork);

        // now compute the decomposition
        syev_((char*)"V",(char*)"U",&N,&evectors[0][0],&lda,&evalues[0], &WORK[0],&lwork,&info);

        if(info!=0){
            std::cerr << "In SymEigen<"<<Size<<">: " << info
                      << " off-diagonal elements of an intermediate tridiagonal form did not converge to zero." << std::endl
                      << "M = " << m << std::endl;
        }
    }
};

///@internal
///@brief Compute 2x2 eigensystems
///Helper struct for computing eigensystems, specialized on 2x2 matrices.
///@ingroup gInternal
template <> struct ComputeSymEigen<2> {

    ///@internal
    ///Compute an eigensystem.
    ///@param m Input matrix (assumed to be symmetric)
    ///@param eig Eigen vector output
    ///@param ev Eigen values output
    template<typename P, typename B>
    static inline void compute(const Matrix<2,2,P,B>& m, Matrix<2,2,P>& eig, Vector<2, P>& ev) {
        double trace = m[0][0] + m[1][1];
        //Only use the upper triangular elements.
        double det = m[0][0]*m[1][1] - m[0][1]*m[0][1];
        double disc = trace*trace - 4 * det;

        //Mathematically, disc >= 0 always.
        //Numerically, it can drift very slightly below zero.
        if(disc < 0)
            disc = 0;

        using std::sqrt;
        double root_disc = sqrt(disc);
        ev[0] = 0.5 * (trace - root_disc);
        ev[1] = 0.5 * (trace + root_disc);
        double a = m[0][0] - ev[0];
        double b = m[0][1];
        double magsq = a*a + b*b;
        if (magsq == 0) {
            eig[0][0] = 1.0;
            eig[0][1] = 0;
        } else {
            eig[0][0] = -b;
            eig[0][1] = a;
            eig[0] *= 1.0/sqrt(magsq);
        }
        eig[1][0] = -eig[0][1];
        eig[1][1] = eig[0][0];
    }
};

///@internal
///@brief Compute 3x3 eigensystems
///Helper struct for computing eigensystems, specialized on 3x3 matrices.
///@ingroup gInternal
template <> struct ComputeSymEigen<3> {

    ///@internal
    ///Compute an eigensystem.
    ///@param m Input matrix (assumed to be symmetric)
    ///@param eig Eigen vector output
    ///@param ev Eigen values output
    template<typename P, typename B>
    static inline void compute(const Matrix<3,3,P,B>& m, Matrix<3,3,P>& eig, Vector<3, P>& ev) {
        //method uses closed form solution of cubic equation to obtain roots of characteristic equation.
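        // Sketch of the closed-form computation, using the same symbols a, b, c,
        // p, q, alpha and beta as the code below:
        //
        //   det(l*I - m) = l^3 + a*l^2 + b*l + c, where
        //     a = -(a11 + a22 + a33)                      (minus the trace)
        //     b = a11*a22 + a11*a33 + a22*a33
        //         - a12^2 - a13^2 - a23^2                 (sum of 2x2 principal minors)
        //     c = -det(m)
        //
        //   Substituting l = t - a/3 (Cardano's method) removes the quadratic term:
        //     t^3 + p*t + q = 0, with p = b - a^2/3 and q = c + (2*a^3 - 9*a*b)/27.
        //
        //   With alpha = -q/2 and beta = sqrt(-(q^2/4 + p^3/27)) (real, since a
        //   symmetric matrix has three real eigenvalues), the roots are recovered
        //   from the cube roots of alpha +- i*beta, taken in polar form.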
        using std::sqrt;
        using std::min;

        double a_norm = norm_1(m);
        double eps = 1e-7 * a_norm;

        if(a_norm == 0)
        {
            eig = TooN::Identity;
            return;
        }

        //Polynomial terms of |a - l * Identity|
        //l^3 + a*l^2 + b*l + c

        const double& a11 = m[0][0];
        const double& a12 = m[0][1];
        const double& a13 = m[0][2];

        const double& a22 = m[1][1];
        const double& a23 = m[1][2];

        const double& a33 = m[2][2];

        //From matlab:
        double a = -a11-a22-a33;
        double b = a11*a22+a11*a33+a22*a33-a12*a12-a13*a13-a23*a23;
        double c = a11*(a23*a23)+(a13*a13)*a22+(a12*a12)*a33-a12*a13*a23*2.0-a11*a22*a33;

        //Using Cardano's method:
        double p = b - a*a/3;
        double q = c + (2*a*a*a - 9*a*b)/27;

        double alpha = -q/2;

        //beta_discriminant <= 0 for real roots!
        //force to zero to avoid nasty rounding issues.
        double beta_discriminant = std::min(0.0, q*q/4 + p*p*p/27);

        double beta = sqrt(-beta_discriminant);
        double r2 = alpha*alpha - beta_discriminant;

        ///Need A,B = cube root(alpha +- beta)
        ///Turn into r, theta
        /// r^(1/3) * e^(i * theta/3)
        double cuberoot_r = pow(r2, 1./6);

        double theta3 = atan2(beta, alpha)/3;

        double A_plus_B = 2*cuberoot_r*cos(theta3);
        double A_minus_B = -2*cuberoot_r*sin(theta3);

        //calculate eigenvalues
        ev = makeVector(A_plus_B, -A_plus_B/2 + A_minus_B * sqrt(3)/2, -A_plus_B/2 - A_minus_B * sqrt(3)/2) - Ones * a/3;

        if(ev[0] > ev[1])
            swap(ev[0], ev[1]);
        if(ev[1] > ev[2])
            swap(ev[1], ev[2]);
        if(ev[0] > ev[1])
            swap(ev[0], ev[1]);

        // for the vector [x y z]
        // eliminate to compute the ratios x/z and y/z
        // in both cases, the denominator is the same, so in the absence of
        // any other scaling, choose the denominator to be z and
        // the numerators to be x and y.
        //
        // x/z and y/z, implies ax, ay, az which vanishes
        // if a=0
        //calculate the eigenvectors
        eig[0][0]=a12 * a23 - a13 * (a22 - ev[0]);
        eig[0][1]=a12 * a13 - a23 * (a11 - ev[0]);
        eig[0][2]=(a11-ev[0])*(a22-ev[0]) - a12*a12;
        eig[1][0]=a12 * a23 - a13 * (a22 - ev[1]);
        eig[1][1]=a12 * a13 - a23 * (a11 - ev[1]);
        eig[1][2]=(a11-ev[1])*(a22-ev[1]) - a12*a12;
        eig[2][0]=a12 * a23 - a13 * (a22 - ev[2]);
        eig[2][1]=a12 * a13 - a23 * (a11 - ev[2]);
        eig[2][2]=(a11-ev[2])*(a22-ev[2]) - a12*a12;

        //Check to see if we have any zero vectors
        double e0norm = norm_1(eig[0]);
        double e1norm = norm_1(eig[1]);
        double e2norm = norm_1(eig[2]);

        //Some of the vectors vanish: we're computing
        // x/z and y/z, which implies ax, ay, az which vanishes
        // if a=0;
        //
        // So compute the other two choices.
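        // (Each candidate eigenvector above is the cross product of the first two
        // rows of (m - ev[i]*I); if those rows are nearly parallel the product
        // vanishes, so the blocks below add in the cross products of the other
        // row pairs of (m - ev[i]*I) instead.)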
        if(e0norm < eps)
        {
            eig[0][0] += a12 * (ev[0] - a33) + a23 * a13;
            eig[0][1] += (ev[0]-a11)*(ev[0]-a33) - a13*a13;
            eig[0][2] += a23 * (ev[0] - a11) + a12 * a13;
            e0norm = norm_1(eig[0]);
        }

        if(e1norm < eps)
        {
            eig[1][0] += a12 * (ev[1] - a33) + a23 * a13;
            eig[1][1] += (ev[1]-a11)*(ev[1]-a33) - a13*a13;
            eig[1][2] += a23 * (ev[1] - a11) + a12 * a13;
            e1norm = norm_1(eig[1]);
        }

        if(e2norm < eps)
        {
            eig[2][0] += a12 * (ev[2] - a33) + a23 * a13;
            eig[2][1] += (ev[2]-a11)*(ev[2]-a33) - a13*a13;
            eig[2][2] += a23 * (ev[2] - a11) + a12 * a13;
            e2norm = norm_1(eig[2]);
        }

        //OK, a AND b might be 0
        //Check for vanishing and add in y/x, z/x which implies cx, cy, cz
        if(e0norm < eps)
        {
            eig[0][0] +=(ev[0]-a22)*(ev[0]-a33) - a23*a23;
            eig[0][1] +=a12 * (ev[0] - a33) + a23 * a13;
            eig[0][2] +=a13 * (ev[0] - a22) + a23 * a12;
            e0norm = norm_1(eig[0]);
        }

        if(e1norm < eps)
        {
            eig[1][0] +=(ev[1]-a22)*(ev[1]-a33) - a23*a23;
            eig[1][1] +=a12 * (ev[1] - a33) + a23 * a13;
            eig[1][2] +=a13 * (ev[1] - a22) + a23 * a12;
            e1norm = norm_1(eig[1]);
        }

        if(e2norm < eps)
        {
            eig[2][0] +=(ev[2]-a22)*(ev[2]-a33) - a23*a23;
            eig[2][1] +=a12 * (ev[2] - a33) + a23 * a13;
            eig[2][2] +=a13 * (ev[2] - a22) + a23 * a12;
            e2norm = norm_1(eig[2]);
        }

        //Oh, COME ON!!!
        if(e0norm < eps || e1norm < eps || e2norm < eps)
        {
            //This is blessedly rare

            double ns[] = {e0norm, e1norm, e2norm};
            int is[] = {0, 1, 2};

            //Sort them
            if(ns[0] > ns[1])
            {
                swap(ns[0], ns[1]);
                swap(is[0], is[1]);
            }
            if(ns[1] > ns[2])
            {
                swap(ns[1], ns[2]);
                swap(is[1], is[2]);
            }
            if(ns[0] > ns[1])
            {
                swap(ns[0], ns[1]);
                swap(is[0], is[1]);
            }

            if(ns[1] >= eps)
            {
                //one small one. Use the cross product of the other two
                normalize(eig[is[1]]);
                normalize(eig[is[2]]);
                eig[is[0]] = eig[is[1]]^eig[is[2]];
            }
            else if(ns[2] >= eps)
            {
                normalize(eig[is[2]]);

                //Permute vector to get a new vector with some orthogonal components.
                Vector<3> p = makeVector(eig[is[2]][1], eig[is[2]][2], eig[is[2]][0]);

                //Gram-Schmidt
                Vector<3> v1 = unit(p - eig[is[2]] * (p * eig[is[2]]));
                Vector<3> v2 = v1^eig[is[2]];

                eig[is[0]] = v1;
                eig[is[1]] = v2;
            }
            else
                eig = TooN::Identity;
        }
        else
        {
            normalize(eig[0]);
            normalize(eig[1]);
            normalize(eig[2]);
        }
    }
};

};

/**
Performs eigen decomposition of a matrix.
Real symmetric (and hence square) matrices can be decomposed into
\f[M = U \times \Lambda \times U^T\f]
where \f$U\f$ is an orthogonal matrix (and hence \f$U^T = U^{-1}\f$) whose columns
are the eigenvectors of \f$M\f$ and \f$\Lambda\f$ is a diagonal matrix whose entries
are the eigenvalues of \f$M\f$.
These quantities are often of use directly, and can
be obtained as follows:
@code
// construct M
Matrix<3> M(3,3);
M[0]=makeVector(4,0,2);
M[1]=makeVector(0,5,3);
M[2]=makeVector(2,3,6);

// create the eigen decomposition of M
SymEigen<3> eigM(M);
cout << "A=" << M << endl;
cout << "(E,v)=eig(A)" << endl;
// print the smallest eigenvalue
cout << "v[0]=" << eigM.get_evalues()[0] << endl;
// print the associated eigenvector
cout << "E[0]=" << eigM.get_evectors()[0] << endl;
@endcode

Further, provided the eigenvalues are nonnegative, the square root of
a matrix and its inverse can also be obtained,
@code
// print the square root of the matrix.
cout << "R=sqrtm(A)=" << eigM.get_sqrtm() << endl;
// print the square root of the matrix squared.
cout << "(should equal A), R^T*R="
     << eigM.get_sqrtm().T() * eigM.get_sqrtm() << endl;
// print the inverse of the matrix.
cout << "A^-1=" << eigM.get_pinv() << endl;
// print the inverse square root of the matrix.
cout << "C=isqrtm(A)=" << eigM.get_isqrtm() << endl;
// print the inverse square root of the matrix squared.
cout << "(should equal A^-1), C^T*C="
     << eigM.get_isqrtm().T() * eigM.get_isqrtm() << endl;
@endcode

This decomposition is very similar to the SVD (q.v.), and can be used to solve
equations using backsub() or get_pinv(), with the same treatment of condition numbers.

SymEigen<> (= SymEigen<-1>) can be used to create an eigen decomposition whose size is determined at run-time.
@ingroup gDecomps
**/
template <int Size=Dynamic, typename Precision = double>
class SymEigen {
public:
    inline SymEigen(){}

    /// Initialise this eigen decomposition but do not immediately
    /// perform a decomposition.
    ///
    /// @param m The size of the matrix to perform the eigen decomposition on.
    inline SymEigen(int m) : my_evectors(m,m), my_evalues(m) {}

    /// Construct the eigen decomposition of a matrix. This initialises the class, and
    /// performs the decomposition immediately.
    template<int R, int C, typename B>
    inline SymEigen(const Matrix<R, C, Precision, B>& m) : my_evectors(m.num_rows(), m.num_cols()), my_evalues(m.num_rows()) {
        compute(m);
    }

    /// Perform the eigen decomposition of a matrix.
    template<int R, int C, typename B>
    inline void compute(const Matrix<R,C,Precision,B>& m){
        SizeMismatch<R, C>::test(m.num_rows(), m.num_cols());
        SizeMismatch<R, Size>::test(m.num_rows(), my_evectors.num_rows());
        Internal::ComputeSymEigen<Size>::compute(m, my_evectors, my_evalues);
    }

    /// Calculate the result of multiplying the (pseudo-)inverse of M by a vector.
    /// For a vector \f$b\f$, this calculates \f$M^{\dagger}b\f$ by back substitution
    /// (i.e. without explicitly calculating the (pseudo-)inverse).
    /// See the SVD detailed description for a description of condition variables.
    template <int S, typename P, typename B>
    Vector<Size, Precision> backsub(const Vector<S,P,B>& rhs) const {
        return (my_evectors.T() * diagmult(get_inv_diag(Internal::symeigen_condition_no),(my_evectors * rhs)));
    }

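    /* Usage sketch (illustrative only): solving the symmetric system M x = b
       without forming the inverse explicitly. M is the matrix from the class
       documentation above; the right-hand side b is made up for illustration.

           Matrix<3> M(3,3);
           M[0] = makeVector(4,0,2);
           M[1] = makeVector(0,5,3);
           M[2] = makeVector(2,3,6);
           Vector<3> b = makeVector(1,2,3);

           SymEigen<3> eigM(M);
           Vector<3> x = eigM.backsub(b);   // x approximately satisfies M x = b
    */
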
    /// Calculate the result of multiplying the (pseudo-)inverse of M by another matrix.
    /// For a matrix \f$A\f$, this calculates \f$M^{\dagger}A\f$ by back substitution
    /// (i.e. without explicitly calculating the (pseudo-)inverse).
    /// See the SVD detailed description for a description of condition variables.
    template <int R, int C, typename P, typename B>
    Matrix<Size,C, Precision> backsub(const Matrix<R,C,P,B>& rhs) const {
        return (my_evectors.T() * diagmult(get_inv_diag(Internal::symeigen_condition_no),(my_evectors * rhs)));
    }

    /// Calculate the (pseudo-)inverse of the matrix. This is not usually needed:
    /// if you need the inverse just to multiply it by a matrix or a vector, use
    /// one of the backsub() functions, which will be faster.
    /// See the SVD detailed description for a description of the pseudo-inverse
    /// and condition variables.
    Matrix<Size, Size, Precision> get_pinv(const double condition=Internal::symeigen_condition_no) const {
        return my_evectors.T() * diagmult(get_inv_diag(condition),my_evectors);
    }

    /// Calculates the reciprocals of the eigenvalues of the matrix.
    /// The vector <code>invdiag</code> lists the reciprocals in the same order
    /// as the eigenvalues (see get_evalues()), i.e. from the smallest eigenvalue
    /// to the largest.
    /// These are also the diagonal values of the matrix \f$\Lambda^{-1}\f$.
    /// Any eigenvalues which are too small have their reciprocal set to zero
    /// (see the SVD detailed description for a description of the condition variables).
    Vector<Size, Precision> get_inv_diag(const double condition) const {
        Precision max_diag = -my_evalues[0] > my_evalues[my_evalues.size()-1] ? -my_evalues[0]:my_evalues[my_evalues.size()-1];
        Vector<Size, Precision> invdiag(my_evalues.size());
        for(int i=0; i<my_evalues.size(); i++){
            if(fabs(my_evalues[i]) * condition > max_diag) {
                invdiag[i] = 1/my_evalues[i];
            } else {
                invdiag[i]=0;
            }
        }
        return invdiag;
    }

    /// Returns the eigenvectors of the matrix.
    /// This returns \f$U^T\f$, so that the rows of the matrix are the eigenvectors,
    /// which can be extracted using the usual Matrix::operator[]() subscript operator.
    /// They are returned in order of the size of the corresponding eigenvalue, i.e.
    /// the vector with the smallest eigenvalue is first.
    Matrix<Size,Size,Precision>& get_evectors() {return my_evectors;}

    /**\overload
    */
    const Matrix<Size,Size,Precision>& get_evectors() const {return my_evectors;}


    /// Returns the eigenvalues of the matrix.
    /// The eigenvalues are listed in order, from the smallest to the largest.
    /// These are also the diagonal values of the matrix \f$\Lambda\f$.
    Vector<Size, Precision>& get_evalues() {return my_evalues;}
    /**\overload
    */
    const Vector<Size, Precision>& get_evalues() const {return my_evalues;}

    /// Is the matrix positive definite?
    bool is_posdef() const {
        for (int i = 0; i < my_evalues.size(); ++i) {
            if (my_evalues[i] <= 0.0)
                return false;
        }
        return true;
    }
    /// Is the matrix negative definite?
    bool is_negdef() const {
        for (int i = 0; i < my_evalues.size(); ++i) {
            if (my_evalues[i] >= 0.0)
                return false;
        }
        return true;
    }

    /// Get the determinant of the matrix
    Precision get_determinant () const {
        Precision det = 1.0;
        for (int i = 0; i < my_evalues.size(); ++i) {
            det *= my_evalues[i];
        }
        return det;
    }

    /// Calculate the square root of the matrix, i.e. a matrix M
    /// such that M.T*M=A.
    Matrix<Size, Size, Precision> get_sqrtm () const {
        Vector<Size, Precision> diag_sqrt(my_evalues.size());
        // In the future, maybe throw an exception if an
        // eigenvalue is negative?
        for (int i = 0; i < my_evalues.size(); ++i) {
            diag_sqrt[i] = std::sqrt(my_evalues[i]);
        }
        return my_evectors.T() * diagmult(diag_sqrt, my_evectors);
    }

    /// Calculate the inverse square root of the matrix, i.e. a
    /// matrix M such that M.T*M=A^-1.
    ///
    /// Any square-rooted eigenvalues which are too small are set
    /// to zero (see the SVD detailed description for a
    /// description of the condition variables).
    Matrix<Size, Size, Precision> get_isqrtm (const double condition=Internal::symeigen_condition_no) const {
        Vector<Size, Precision> diag_isqrt(my_evalues.size());

        // Because sqrt is order-preserving, the square-rooted eigenvalue of
        // largest magnitude comes from the eigenvalue of largest magnitude.
        Precision max_diag = -my_evalues[0] > my_evalues[my_evalues.size()-1] ? (-std::sqrt(my_evalues[0])):(std::sqrt(my_evalues[my_evalues.size()-1]));
        // In the future, maybe throw an exception if an
        // eigenvalue is negative?
        for (int i = 0; i < my_evalues.size(); ++i) {
            diag_isqrt[i] = std::sqrt(my_evalues[i]);
            if(fabs(diag_isqrt[i]) * condition > max_diag) {
                diag_isqrt[i] = 1/diag_isqrt[i];
            } else {
                diag_isqrt[i] = 0;
            }
        }
        return my_evectors.T() * diagmult(diag_isqrt, my_evectors);
    }

private:
    // eigenvectors laid out row-wise so evectors[i] is the ith evector
    Matrix<Size,Size,Precision> my_evectors;

    Vector<Size, Precision> my_evalues;
};

}

#endif