SNAP Library, Developer Reference  2013-01-07 14:03:36
SNAP, a general purpose, high performance system for analysis and manipulation of large networks
linalg.cpp
00001 
00002 // Sparse-Column-Matrix
00003 void TSparseColMatrix::PMultiply(const TFltVV& B, int ColId, TFltV& Result) const {
00004     Assert(B.GetRows() >= ColN && Result.Len() >= RowN);
00005     int i, j; TFlt *ResV = Result.BegI();
00006     for (i = 0; i < RowN; i++) ResV[i] = 0.0;
00007     for (j = 0; j < ColN; j++) {
00008         const TIntFltKdV& ColV = ColSpVV[j]; int len = ColV.Len();
00009         for (i = 0; i < len; i++) {
00010             ResV[ColV[i].Key] += ColV[i].Dat * B(j,ColId);
00011         }
00012     }
00013 }
00014 
00015 void TSparseColMatrix::PMultiply(const TFltV& Vec, TFltV& Result) const {
00016     Assert(Vec.Len() >= ColN && Result.Len() >= RowN);
00017     int i, j; TFlt *ResV = Result.BegI();
00018     for (i = 0; i < RowN; i++) ResV[i] = 0.0;
00019     for (j = 0; j < ColN; j++) {
00020         const TIntFltKdV& ColV = ColSpVV[j]; int len = ColV.Len();
00021         for (i = 0; i < len; i++) {
00022             ResV[ColV[i].Key] += ColV[i].Dat * Vec[j];
00023         }
00024     }
00025 }
00026 
00027 void TSparseColMatrix::PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const {
00028     Assert(B.GetRows() >= RowN && Result.Len() >= ColN);
00029     int i, j, len; TFlt *ResV = Result.BegI();
00030     for (j = 0; j < ColN; j++) {
00031         const TIntFltKdV& ColV = ColSpVV[j];
00032         len = ColV.Len(); ResV[j] = 0.0;
00033         for (i = 0; i < len; i++) {
00034             ResV[j] += ColV[i].Dat * B(ColV[i].Key, ColId);
00035         }
00036     }
00037 }
00038 
00039 void TSparseColMatrix::PMultiplyT(const TFltV& Vec, TFltV& Result) const {
00040     Assert(Vec.Len() >= RowN && Result.Len() >= ColN);
00041     int i, j, len; TFlt *VecV = Vec.BegI(), *ResV = Result.BegI();
00042     for (j = 0; j < ColN; j++) {
00043         const TIntFltKdV& ColV = ColSpVV[j];
00044         len = ColV.Len(); ResV[j] = 0.0;
00045         for (i = 0; i < len; i++) {
00046             ResV[j] += ColV[i].Dat * VecV[ColV[i].Key];
00047         }
00048     }
00049 }
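// Example (illustrative sketch, not part of the original file): building a small
// 3 x 2 sparse column matrix and multiplying it by a dense vector through the
// public TMatrix::Multiply wrapper, which dispatches to the PMultiply overloads
// above. It assumes the default constructor and the public RowN/ColN/ColSpVV
// members declared in linalg.h; the function name is hypothetical.
static void ExampleSparseColMultiply() {
    TSparseColMatrix ColMtx;
    ColMtx.RowN = 3; ColMtx.ColN = 2;
    ColMtx.ColSpVV.Gen(2);
    ColMtx.ColSpVV[0].Add(TIntFltKd(0, 1.0)); // A(0,0) = 1
    ColMtx.ColSpVV[0].Add(TIntFltKd(2, 2.0)); // A(2,0) = 2
    ColMtx.ColSpVV[1].Add(TIntFltKd(1, 3.0)); // A(1,1) = 3
    TFltV Vec(2); Vec[0] = 1.0; Vec[1] = 2.0;
    TFltV Result(3);
    ColMtx.Multiply(Vec, Result); // Result = (1, 6, 2)
}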
00050 
00052 // Sparse-Row-Matrix
00053 TSparseRowMatrix::TSparseRowMatrix(const TStr& MatlabMatrixFNm) {
00054    FILE *F = fopen(MatlabMatrixFNm.CStr(), "rt");  IAssert(F != NULL);
00055    TVec<TTriple<TInt, TInt, TSFlt> > MtxV;
00056    RowN = 0;  ColN = 0;
00057    while (! feof(F)) {
00058      int row=-1, col=-1; float val;
00059      if (fscanf(F, "%d %d %f\n", &row, &col, &val) == 3) {
00060        IAssert(row > 0 && col > 0);
00061        MtxV.Add(TTriple<TInt, TInt, TSFlt>(row, col, val));
00062        RowN = TMath::Mx(RowN, row);
00063        ColN = TMath::Mx(ColN, col);
00064      }
00065    }
00066    fclose(F);
00067    // create matrix
00068    MtxV.Sort();
00069    RowSpVV.Gen(RowN);
00070    int cnt = 0;
00071    for (int row = 1; row <= RowN; row++) {
00072      while (cnt < MtxV.Len() && MtxV[cnt].Val1 == row) {
00073        RowSpVV[row-1].Add(TIntFltKd(MtxV[cnt].Val2-1, MtxV[cnt].Val3()));
00074        cnt++;
00075      }
00076    }
00077 }
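// Example (illustrative, not part of the original file): the constructor above
// reads a plain-text file of whitespace-separated "row column value" triplets
// with 1-based indices, one entry per line, e.g.
//   1 1 0.5
//   2 3 1.7
//   3 2 -0.1
// and stores them as 0-based sparse rows. The file name below is hypothetical.
static void ExampleLoadSparseRowMatrix() {
    TSparseRowMatrix Mtx("triplet-matrix.txt"); // 3 x 3 matrix for the triplets above
    printf("loaded %d x %d matrix\n", Mtx.GetRows(), Mtx.GetCols());
}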
00078 
00079 void TSparseRowMatrix::PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const {
00080     Assert(B.GetRows() >= RowN && Result.Len() >= ColN);
00081     for (int i = 0; i < ColN; i++) Result[i] = 0.0;
00082     for (int j = 0; j < RowN; j++) {
00083         const TIntFltKdV& RowV = RowSpVV[j]; int len = RowV.Len();
00084         for (int i = 0; i < len; i++) {
00085             Result[RowV[i].Key] += RowV[i].Dat * B(j,ColId);
00086         }
00087     }
00088 }
00089 
00090 void TSparseRowMatrix::PMultiplyT(const TFltV& Vec, TFltV& Result) const {
00091     Assert(Vec.Len() >= RowN && Result.Len() >= ColN);
00092     for (int i = 0; i < ColN; i++) Result[i] = 0.0;
00093     for (int j = 0; j < RowN; j++) {
00094         const TIntFltKdV& RowV = RowSpVV[j]; int len = RowV.Len();
00095         for (int i = 0; i < len; i++) {
00096             Result[RowV[i].Key] += RowV[i].Dat * Vec[j];
00097         }
00098     }
00099 }
00100 
00101 void TSparseRowMatrix::PMultiply(const TFltVV& B, int ColId, TFltV& Result) const {
00102     Assert(B.GetRows() >= ColN && Result.Len() >= RowN);
00103     for (int j = 0; j < RowN; j++) {
00104         const TIntFltKdV& RowV = RowSpVV[j];
00105         int len = RowV.Len(); Result[j] = 0.0;
00106         for (int i = 0; i < len; i++) {
00107             Result[j] += RowV[i].Dat * B(RowV[i].Key, ColId);
00108         }
00109     }
00110 }
00111 
00112 void TSparseRowMatrix::PMultiply(const TFltV& Vec, TFltV& Result) const {
00113     Assert(Vec.Len() >= ColN && Result.Len() >= RowN);
00114     for (int j = 0; j < RowN; j++) {
00115         const TIntFltKdV& RowV = RowSpVV[j];
00116         int len = RowV.Len(); Result[j] = 0.0;
00117         for (int i = 0; i < len; i++) {
00118             Result[j] += RowV[i].Dat * Vec[RowV[i].Key];
00119         }
00120     }
00121 }
00122 
00124 // Full-Col-Matrix
00125 TFullColMatrix::TFullColMatrix(const TStr& MatlabMatrixFNm): TMatrix() {
00126     TLAMisc::LoadMatlabTFltVV(MatlabMatrixFNm, ColV);
00127     RowN=ColV[0].Len(); ColN=ColV.Len();
00128     for (int i = 0; i < ColN; i++) {
00129         IAssertR(ColV[i].Len() == RowN, TStr::Fmt("%d != %d", ColV[i].Len(), RowN));
00130     }
00131 }
00132 
00133 void TFullColMatrix::PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const {
00134     Assert(B.GetRows() >= RowN && Result.Len() >= ColN);
00135     for (int i = 0; i < ColN; i++) {
00136         Result[i] = TLinAlg::DotProduct(B, ColId, ColV[i]);
00137     }
00138 }
00139 
00140 void TFullColMatrix::PMultiplyT(const TFltV& Vec, TFltV& Result) const {
00141     Assert(Vec.Len() >= RowN && Result.Len() >= ColN);
00142     for (int i = 0; i < ColN; i++) {
00143         Result[i] = TLinAlg::DotProduct(Vec, ColV[i]);
00144     }
00145 }
00146 
00147 void TFullColMatrix::PMultiply(const TFltVV& B, int ColId, TFltV& Result) const {
00148     Assert(B.GetRows() >= ColN && Result.Len() >= RowN);
00149     for (int i = 0; i < RowN; i++) { Result[i] = 0.0; }
00150     for (int i = 0; i < ColN; i++) {
00151         TLinAlg::AddVec(B(i, ColId), ColV[i], Result, Result);
00152     }
00153 }
00154 
00155 void TFullColMatrix::PMultiply(const TFltV& Vec, TFltV& Result) const {
00156     Assert(Vec.Len() >= ColN && Result.Len() >= RowN);
00157     for (int i = 0; i < RowN; i++) { Result[i] = 0.0; }
00158     for (int i = 0; i < ColN; i++) {
00159         TLinAlg::AddVec(Vec[i], ColV[i], Result, Result);
00160     }
00161 }
00162 
00164 // Basic Linear Algebra Operations
00165 double TLinAlg::DotProduct(const TFltV& x, const TFltV& y) {
00166     IAssertR(x.Len() == y.Len(), TStr::Fmt("%d != %d", x.Len(), y.Len()));
00167     double result = 0.0; int Len = x.Len();
00168     for (int i = 0; i < Len; i++)
00169         result += x[i] * y[i];
00170     return result;
00171 }
00172 
00173 double TLinAlg::DotProduct(const TFltVV& X, int ColIdX, const TFltVV& Y, int ColIdY) {
00174     Assert(X.GetRows() == Y.GetRows());
00175     double result = 0.0; const int len = X.GetRows();
00176     for (int i = 0; i < len; i++)
00177         result = result + X(i,ColIdX) * Y(i,ColIdY);
00178     return result;
00179 }
00180 
00181 double TLinAlg::DotProduct(const TFltVV& X, int ColId, const TFltV& Vec) {
00182     Assert(X.GetRows() == Vec.Len());
00183     double result = 0.0; int Len = X.GetRows();
00184     for (int i = 0; i < Len; i++)
00185         result += X(i,ColId) * Vec[i];
00186     return result;
00187 }
00188 
00189 double TLinAlg::DotProduct(const TIntFltKdV& x, const TIntFltKdV& y) {
00190     const int xLen = x.Len(), yLen = y.Len();
00191     double Res = 0.0; int i1 = 0, i2 = 0;
00192     while (i1 < xLen && i2 < yLen) {
00193         if (x[i1].Key < y[i2].Key) i1++;
00194         else if (x[i1].Key > y[i2].Key) i2++;
00195         else { Res += x[i1].Dat * y[i2].Dat;  i1++;  i2++; }
00196     }
00197     return Res;
00198 }
00199 
00200 double TLinAlg::DotProduct(const TFltV& x, const TIntFltKdV& y) {
00201     double Res = 0.0; const int xLen = x.Len(), yLen = y.Len();
00202     for (int i = 0; i < yLen; i++) {
00203         const int key = y[i].Key;
00204         if (key < xLen) Res += y[i].Dat * x[key];
00205     }
00206     return Res;
00207 }
00208 
00209 double TLinAlg::DotProduct(const TFltVV& X, int ColId, const TIntFltKdV& y) {
00210     double Res = 0.0; const int n = X.GetRows(), yLen = y.Len();
00211     for (int i = 0; i < yLen; i++) {
00212         const int key = y[i].Key;
00213         if (key < n) Res += y[i].Dat * X(key,ColId);
00214     }
00215     return Res;
00216 }
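// Example (illustrative sketch, not part of the original file): the dense/dense,
// sparse/sparse and dense/sparse DotProduct overloads above all compute the same
// inner product; the sparse/sparse version walks two key-sorted TIntFltKdV
// vectors in merge fashion. Values and the function name are hypothetical.
static void ExampleDotProducts() {
    TFltV x(3); x[0] = 1.0; x[1] = 0.0; x[2] = 2.0;
    TFltV y(3); y[0] = 3.0; y[1] = 4.0; y[2] = 5.0;
    const double d1 = TLinAlg::DotProduct(x, y); // 1*3 + 0*4 + 2*5 = 13
    TIntFltKdV xs; xs.Add(TIntFltKd(0, 1.0)); xs.Add(TIntFltKd(2, 2.0)); // sparse form of x
    TIntFltKdV ys; ys.Add(TIntFltKd(0, 3.0)); ys.Add(TIntFltKd(1, 4.0)); ys.Add(TIntFltKd(2, 5.0));
    const double d2 = TLinAlg::DotProduct(xs, ys); // also 13
    const double d3 = TLinAlg::DotProduct(y, xs);  // dense vector with sparse vector, also 13
    printf("%g %g %g\n", d1, d2, d3);
}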
00217 
00218 void TLinAlg::LinComb(const double& p, const TFltV& x,
00219         const double& q, const TFltV& y, TFltV& z) {
00220 
00221     Assert(x.Len() == y.Len() && y.Len() == z.Len());
00222     const int Len = x.Len();
00223     for (int i = 0; i < Len; i++) {
00224         z[i] = p * x[i] + q * y[i]; }
00225 }
00226 
00227 void TLinAlg::ConvexComb(const double& p, const TFltV& x, const TFltV& y, TFltV& z) {
00228     AssertR(0.0 <= p && p <= 1.0, TFlt::GetStr(p));
00229     LinComb(p, x, 1.0 - p, y, z);
00230 }
00231 
00232 void TLinAlg::AddVec(const double& k, const TFltV& x, const TFltV& y, TFltV& z) {
00233     LinComb(k, x, 1.0, y, z);
00234 }
00235 
00236 void TLinAlg::AddVec(const double& k, const TIntFltKdV& x, const TFltV& y, TFltV& z) {
00237     Assert(y.Len() == z.Len());
00238     z = y; // first we set z to be y
00239     // and then we add x to z (==y)
00240     const int xLen = x.Len(), yLen = y.Len();
00241     for (int i = 0; i < xLen; i++) {
00242         const int ii = x[i].Key;
00243         if (ii < yLen) {
00244             z[ii] = k * x[i].Dat + y[ii];
00245         }
00246     }
00247 }
00248 
00249 void TLinAlg::AddVec(const double& k, const TIntFltKdV& x, TFltV& y) {
00250     const int xLen = x.Len(), yLen = y.Len();
00251     for (int i = 0; i < xLen; i++) {
00252         const int ii = x[i].Key;
00253         if (ii < yLen) {
00254             y[ii] += k * x[i].Dat;
00255         }
00256     }
00257 }
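// Example (illustrative sketch, not part of the original file): forming the
// linear combination z = p*x + q*y with LinComb and adding a scaled sparse
// vector to a dense one in place with AddVec. Values are hypothetical.
static void ExampleLinCombAddVec() {
    TFltV x(2); x[0] = 1.0; x[1] = 2.0;
    TFltV y(2); y[0] = 10.0; y[1] = 20.0;
    TFltV z(2);
    TLinAlg::LinComb(2.0, x, 1.0, y, z);    // z = (12, 24)
    TIntFltKdV s; s.Add(TIntFltKd(1, 5.0)); // sparse vector with a single entry at index 1
    TLinAlg::AddVec(3.0, s, z);             // z = (12, 39)
}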
00258 
00259 void TLinAlg::AddVec(double k, const TFltVV& X, int ColIdX, TFltVV& Y, int ColIdY){
00260     Assert(X.GetRows() == Y.GetRows());
00261     const int len = Y.GetRows();
00262     for (int i = 0; i < len; i++) {
00263         Y(i,ColIdY) = Y(i,ColIdY) + k * X(i, ColIdX);
00264     }
00265 }
00266 
00267 void TLinAlg::AddVec(double k, const TFltVV& X, int ColId, TFltV& Result){
00268     Assert(X.GetRows() == Result.Len());
00269     const int len = Result.Len();
00270     for (int i = 0; i < len; i++) {
00271         Result[i] = Result[i] + k * X(i, ColId);
00272     }
00273 }
00274 
00275 void TLinAlg::AddVec(const TIntFltKdV& x, const TIntFltKdV& y, TIntFltKdV& z) {
00276         TSparseOpsIntFlt::SparseMerge(x, y, z);
00277 }
00278 
00279 double TLinAlg::SumVec(const TFltV& x) {
00280     const int len = x.Len();
00281     double Res = 0.0;
00282     for (int i = 0; i < len; i++) {
00283         Res += x[i];
00284     }
00285     return Res;
00286 }
00287 
00288 double TLinAlg::SumVec(double k, const TFltV& x, const TFltV& y) {
00289     Assert(x.Len() == y.Len());
00290     const int len = x.Len();
00291     double Res = 0.0;
00292     for (int i = 0; i < len; i++) {
00293         Res += k * x[i] + y[i];
00294     }
00295     return Res;
00296 }
00297 
00298 double TLinAlg::EuclDist2(const TFltV& x, const TFltV& y) {
00299     Assert(x.Len() == y.Len());
00300     const int len = x.Len();
00301     double Res = 0.0;
00302     for (int i = 0; i < len; i++) {
00303         Res += TMath::Sqr(x[i] - y[i]);
00304     }
00305     return Res;
00306 }
00307 
00308 double TLinAlg::EuclDist(const TFltV& x, const TFltV& y) {
00309     return sqrt(EuclDist2(x, y));
00310 }
00311 
00312 double TLinAlg::Norm2(const TFltV& x) {
00313     return DotProduct(x, x);
00314 }
00315 
00316 double TLinAlg::Norm(const TFltV& x) {
00317     return sqrt(Norm2(x));
00318 }
00319 
00320 void TLinAlg::Normalize(TFltV& x) {
00321     const double xNorm = Norm(x);
00322     if (xNorm > 0.0) { MultiplyScalar(1/xNorm, x, x); }
00323 }
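// Example (illustrative sketch, not part of the original file): L2 norm and
// in-place normalization of a dense vector; Normalize leaves zero vectors
// untouched, as guarded above.
static void ExampleNormNormalize() {
    TFltV v(2); v[0] = 3.0; v[1] = 4.0;
    const double SqNorm = TLinAlg::Norm2(v); // 25
    const double VNorm = TLinAlg::Norm(v);   // 5
    TLinAlg::Normalize(v);                   // v = (0.6, 0.8)
    printf("%g %g %g\n", SqNorm, VNorm, TLinAlg::Norm(v)); // prints 25 5 1
}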
00324 
00325 double TLinAlg::Norm2(const TIntFltKdV& x) {
00326     double Result = 0;
00327     for (int i = 0; i < x.Len(); i++) {
00328         Result += TMath::Sqr(x[i].Dat);
00329     }
00330     return Result;
00331 }
00332 
00333 double TLinAlg::Norm(const TIntFltKdV& x) {
00334     return sqrt(Norm2(x));
00335 }
00336 
00337 void TLinAlg::Normalize(TIntFltKdV& x) {
00338     MultiplyScalar(1/Norm(x), x, x);
00339 }
00340 
00341 double TLinAlg::Norm2(const TFltVV& X, int ColId) {
00342     return DotProduct(X, ColId, X, ColId);
00343 }
00344 
00345 double TLinAlg::Norm(const TFltVV& X, int ColId) {
00346     return sqrt(Norm2(X, ColId));
00347 }
00348 
00349 double TLinAlg::NormL1(const TFltV& x) {
00350     double norm = 0.0; const int Len = x.Len();
00351     for (int i = 0; i < Len; i++)
00352         norm += TFlt::Abs(x[i]);
00353     return norm;
00354 }
00355 
00356 double TLinAlg::NormL1(double k, const TFltV& x, const TFltV& y) {
00357     Assert(x.Len() == y.Len());
00358     double norm = 0.0; const int len = x.Len();
00359     for (int i = 0; i < len; i++) {
00360         norm += TFlt::Abs(k * x[i] + y[i]);
00361     }
00362     return norm;
00363 }
00364 
00365 double TLinAlg::NormL1(const TIntFltKdV& x) {
00366     double norm = 0.0; const int Len = x.Len();
00367     for (int i = 0; i < Len; i++)
00368         norm += TFlt::Abs(x[i].Dat);
00369     return norm;
00370 }
00371 
00372 void TLinAlg::NormalizeL1(TFltV& x) {
00373     const double xNorm = NormL1(x);
00374     if (xNorm > 0.0) { MultiplyScalar(1/xNorm, x, x); }
00375 }
00376 
00377 void TLinAlg::NormalizeL1(TIntFltKdV& x) {
00378     const double xNorm = NormL1(x);
00379     if (xNorm > 0.0) { MultiplyScalar(1/xNorm, x, x); }
00380 }
00381 
00382 double TLinAlg::NormLinf(const TFltV& x) {
00383     double norm = 0.0; const int Len = x.Len();
00384     for (int i = 0; i < Len; i++)
00385         norm = TFlt::GetMx(TFlt::Abs(x[i]), norm);
00386     return norm;
00387 }
00388 
00389 double TLinAlg::NormLinf(const TIntFltKdV& x) {
00390     double norm = 0.0; const int Len = x.Len();
00391     for (int i = 0; i < Len; i++)
00392         norm = TFlt::GetMx(TFlt::Abs(x[i].Dat), norm);
00393     return norm;
00394 }
00395 
00396 void TLinAlg::NormalizeLinf(TFltV& x) {
00397     const double xNormLinf = NormLinf(x);
00398     if (xNormLinf > 0.0) { MultiplyScalar(1.0/xNormLinf, x, x); }
00399 }
00400 
00401 void TLinAlg::NormalizeLinf(TIntFltKdV& x) {
00402     const double xNormLInf = NormLinf(x);
00403     if (xNormLInf> 0.0) { MultiplyScalar(1.0/xNormLInf, x, x); }
00404 }
00405 
00406 void TLinAlg::MultiplyScalar(const double& k, const TFltV& x, TFltV& y) {
00407     Assert(x.Len() == y.Len());
00408     int Len = x.Len();
00409     for (int i = 0; i < Len; i++)
00410         y[i] = k * x[i];
00411 }
00412 
00413 void TLinAlg::MultiplyScalar(const double& k, const TIntFltKdV& x, TIntFltKdV& y) {
00414     Assert(x.Len() == y.Len());
00415     int Len = x.Len();
00416     for (int i = 0; i < Len; i++)
00417         y[i].Dat = k * x[i].Dat;
00418 }
00419 
00420 void TLinAlg::Multiply(const TFltVV& A, const TFltV& x, TFltV& y) {
00421     Assert(A.GetCols() == x.Len() && A.GetRows() == y.Len());
00422     int n = A.GetRows(), m = A.GetCols();
00423     for (int i = 0; i < n; i++) {
00424         y[i] = 0.0;
00425         for (int j = 0; j < m; j++)
00426             y[i] += A(i,j) * x[j];
00427     }
00428 }
00429 
00430 void TLinAlg::Multiply(const TFltVV& A, const TFltV& x, TFltVV& C, int ColId) {
00431     Assert(A.GetCols() == x.Len() && A.GetRows() == C.GetRows());
00432     int n = A.GetRows(), m = A.GetCols();
00433     for (int i = 0; i < n; i++) {
00434         C(i,ColId) = 0.0;
00435         for (int j = 0; j < m; j++)
00436             C(i,ColId) += A(i,j) * x[j];
00437     }
00438 }
00439 
00440 void TLinAlg::Multiply(const TFltVV& A, const TFltVV& B, int ColId, TFltV& y) {
00441     Assert(A.GetCols() == B.GetRows() && A.GetRows() == y.Len());
00442     int n = A.GetRows(), m = A.GetCols();
00443     for (int i = 0; i < n; i++) {
00444         y[i] = 0.0;
00445         for (int j = 0; j < m; j++)
00446             y[i] += A(i,j) * B(j,ColId);
00447     }
00448 }
00449 
00450 void TLinAlg::Multiply(const TFltVV& A, const TFltVV& B, int ColIdB, TFltVV& C, int ColIdC){
00451     Assert(A.GetCols() == B.GetRows() && A.GetRows() == C.GetRows());
00452     int n = A.GetRows(), m = A.GetCols();
00453     for (int i = 0; i < n; i++) {
00454         C(i,ColIdC) = 0.0;
00455         for (int j = 0; j < m; j++)
00456             C(i,ColIdC) += A(i,j) * B(j,ColIdB);
00457     }
00458 }
00459 
00460 void TLinAlg::MultiplyT(const TFltVV& A, const TFltV& x, TFltV& y) {
00461     Assert(A.GetRows() == x.Len() && A.GetCols() == y.Len());
00462     int n = A.GetCols(), m = A.GetRows();
00463     for (int i = 0; i < n; i++) {
00464         y[i] = 0.0;
00465         for (int j = 0; j < m; j++)
00466             y[i] += A(j,i) * x[j];
00467     }
00468 }
00469 
00470 void TLinAlg::Multiply(const TFltVV& A, const TFltVV& B, TFltVV& C) {
00471     Assert(A.GetRows() == C.GetRows() && B.GetCols() == C.GetCols() && A.GetCols() == B.GetRows());
00472 
00473     int n = C.GetRows(), m = C.GetCols(), l = A.GetCols();
00474     for (int i = 0; i < n; i++) {
00475         for (int j = 0; j < m; j++) {
00476             double sum = 0.0;
00477             for (int k = 0; k < l; k++)
00478                 sum += A(i,k)*B(k,j);
00479             C(i,j) = sum;
00480         }
00481     }
00482 }
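// Example (illustrative sketch, not part of the original file): dense
// matrix-vector and matrix-matrix products with the Multiply overloads above;
// all output arguments must be pre-allocated to the right dimensions.
static void ExampleDenseMultiply() {
    TFltVV A(2, 2); A(0,0) = 1.0; A(0,1) = 2.0; A(1,0) = 3.0; A(1,1) = 4.0;
    TFltV x(2); x[0] = 1.0; x[1] = 1.0;
    TFltV y(2);
    TLinAlg::Multiply(A, x, y); // y = (3, 7)
    TFltVV B(2, 2); B(0,0) = 1.0; B(0,1) = 0.0; B(1,0) = 0.0; B(1,1) = 1.0;
    TFltVV C(2, 2);
    TLinAlg::Multiply(A, B, C); // C = A * I = A
}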
00483 
00484 // general matrix multiplication (GEMM)
00485 void TLinAlg::Gemm(const double& Alpha, const TFltVV& A, const TFltVV& B, const double& Beta, 
00486                 const TFltVV& C, TFltVV& D, const int& TransposeFlags) {
00487 
00488         bool tA = (TransposeFlags & GEMM_A_T) == GEMM_A_T;
00489         bool tB = (TransposeFlags & GEMM_B_T) == GEMM_B_T;
00490         bool tC = (TransposeFlags & GEMM_C_T) == GEMM_C_T;
00491 
00492         // setting dimensions
00493         int a_i = tA ? A.GetRows() : A.GetCols();
00494         int a_j = tA ? A.GetCols() : A.GetRows();
00495 
00496         int b_i = tB ? B.GetRows() : B.GetCols();
00497         int b_j = tB ? B.GetCols() : B.GetRows();
00498 
00499         int c_i = tC ? C.GetRows() : C.GetCols();
00500         int c_j = tC ? C.GetCols() : C.GetRows();
00501 
00502         int d_i = D.GetCols();
00503         int d_j = D.GetRows();
00504         
00505         // assertions for dimensions
00506         Assert(a_i == c_j && b_i == c_i && a_i == b_j && c_i == d_i && c_j == d_j);
00507 
00508         double Aij, Bij, Cij;
00509 
00510         // rows
00511         for (int j = 0; j < a_j; j++) {
00512                 // cols
00513                 for (int i = 0; i < a_i; i++) {
00514                         // not optimized for speed (!)
00515                         double sum = 0;
00516                         for (int k = 0; k < a_i; k++) {
00517                                 Aij = tA ? A.At(j, k) : A.At(k, j);
00518                                 Bij = tB ? B.At(k, i) : B.At(i, k);
00519                                 sum += Alpha * Aij * Bij;
00520                         }
00521                         Cij = tC ? C.At(i, j) : C.At(j, i);
00522                         sum += Beta * Cij;
00523                         D.At(i, j) = sum;
00524                 }
00525         }
00526 }
00527 
00528 void TLinAlg::Transpose(const TFltVV& A, TFltVV& B) {
00529         Assert(B.GetRows() == A.GetCols() && B.GetCols() == A.GetRows());
00530         for (int j = 0; j < B.GetRows(); j++) {
00531                 for (int i = 0; i < B.GetCols(); i++) {
00532                         B.At(j, i) = A.At(i, j);
00533                 }
00534         }
00535 }
00536 
00537 void TLinAlg::Inverse(const TFltVV& A, TFltVV& B, const TLinAlgInverseType& DecompType) {
00538         switch (DecompType) {
00539                 case DECOMP_SVD:
00540                         InverseSVD(A, B);
00541         }
00542 }
00543 
00544 void TLinAlg::InverseSVD(const TFltVV& M, TFltVV& B) {
00545         // create temp matrices
00546         TFltVV U, V;
00547         TFltV E;
00548         TSvd SVD;
00549 
00550         U.Gen(M.GetRows(), M.GetRows());        
00551         V.Gen(M.GetCols(), M.GetCols());
00552 
00553         // do the SVD decomposition
00554         SVD.Svd(M, U, E, V);
00555 
00556         // calculate reciprocal values for diagonal matrix = inverse diagonal
00557         for (int i = 0; i < E.Len(); i++) {
00558                 E[i] = 1 / E[i];
00559         }
00560 
00561         // calculate pseudoinverse: M^(-1) = V * E^(-1) * U'
00562         for (int i = 0; i < U.GetCols(); i++) {
00563                 for (int j = 0; j < V.GetRows(); j++) {
00564                         double sum = 0;
00565                         for (int k = 0; k < U.GetCols(); k++) {
00566                                 sum += E[k] * V.At(i, k) * U.At(j, k);
00567                         }
00568                         B.At(i, j) = sum;
00569                 }
00570         }       
00571 }
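// Example (illustrative sketch, not part of the original file): pseudo-inverting
// a small invertible matrix with the SVD-based routine above. The output matrix
// must be pre-allocated by the caller; the function name is hypothetical.
static void ExampleInverseSVD() {
    TFltVV A(2, 2); A(0,0) = 2.0; A(0,1) = 0.0; A(1,0) = 0.0; A(1,1) = 4.0;
    TFltVV InvA(2, 2);
    TLinAlg::InverseSVD(A, InvA); // InvA ~ diag(0.5, 0.25)
}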
00572 
00573 void TLinAlg::GS(TVec<TFltV>& Q) {
00574     IAssert(Q.Len() > 0);
00575     int m = Q.Len(); // int n = Q[0].Len();
00576     for (int i = 0; i < m; i++) {
00577         printf("%d\r",i);
00578         for (int j = 0; j < i; j++) {
00579             double r = TLinAlg::DotProduct(Q[i], Q[j]);
00580             TLinAlg::AddVec(-r,Q[j],Q[i],Q[i]);
00581         }
00582         TLinAlg::Normalize(Q[i]);
00583     }
00584     printf("\n");
00585 }
00586 
00587 void TLinAlg::GS(TFltVV& Q) {
00588     int m = Q.GetCols(), n = Q.GetRows();
00589     for (int i = 0; i < m; i++) {
00590         for (int j = 0; j < i; j++) {
00591             double r = TLinAlg::DotProduct(Q, i, Q, j);
00592             TLinAlg::AddVec(-r,Q,j,Q,i);
00593         }
00594         double nr = TLinAlg::Norm(Q,i);
00595         for (int k = 0; k < n; k++)
00596             Q(k,i) = Q(k,i) / nr;
00597     }
00598 }
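// Example (illustrative sketch, not part of the original file): orthonormalizing
// the columns of a small dense matrix in place with the Gram-Schmidt routine
// above; AssertOrtogonality (below) reports any pair that violates the tolerance.
static void ExampleGramSchmidt() {
    TFltVV Q(3, 2);
    Q(0,0) = 1.0; Q(1,0) = 1.0; Q(2,0) = 0.0; // first column
    Q(0,1) = 1.0; Q(1,1) = 0.0; Q(2,1) = 1.0; // second column
    TLinAlg::GS(Q);                           // columns are now orthonormal
    TLinAlg::AssertOrtogonality(Q, 1e-6);     // prints offending pairs, if any
}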
00599 
00600 void TLinAlg::Rotate(const double& OldX, const double& OldY, const double& Angle, double& NewX, double& NewY) {
00601     NewX = OldX*cos(Angle) - OldY*sin(Angle);
00602     NewY = OldX*sin(Angle) + OldY*cos(Angle);
00603 }
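// Worked example (illustrative, not part of the original file): rotating the
// point (1, 0) by pi/2 radians counter-clockwise gives (0, 1) up to rounding.
static void ExampleRotate() {
    const double HalfPi = 1.5707963267948966;
    double NewX, NewY;
    TLinAlg::Rotate(1.0, 0.0, HalfPi, NewX, NewY); // NewX ~ 0, NewY ~ 1
}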
00604 
00605 void TLinAlg::AssertOrtogonality(const TVec<TFltV>& Vecs, const double& Threshold) {
00606     int m = Vecs.Len();
00607     for (int i = 0; i < m; i++) {
00608         for (int j = 0; j < i; j++) {
00609             double res = DotProduct(Vecs[i], Vecs[j]);
00610             if (TFlt::Abs(res) > Threshold)
00611                 printf("<%d,%d> = %.5f", i,j,res);
00612         }
00613         double norm = Norm2(Vecs[i]);
00614         if (TFlt::Abs(norm-1) > Threshold)
00615             printf("||%d|| = %.5f", i, norm);
00616     }
00617 }
00618 
00619 void TLinAlg::AssertOrtogonality(const TFltVV& Vecs, const double& Threshold) {
00620     int m = Vecs.GetCols();
00621     for (int i = 0; i < m; i++) {
00622         for (int j = 0; j < i; j++) {
00623             double res = DotProduct(Vecs, i, Vecs, j);
00624             if (TFlt::Abs(res) > Threshold)
00625                 printf("<%d,%d> = %.5f", i, j, res);
00626         }
00627         double norm = Norm2(Vecs, i);
00628         if (TFlt::Abs(norm-1) > Threshold)
00629             printf("||%d|| = %.5f", i, norm);
00630     }
00631     printf("\n");
00632 }
00633 
00635 // Numerical Linear Algebra
00636 double TNumericalStuff::sqr(double a) {
00637   return a == 0.0 ? 0.0 : a*a;
00638 }
00639 
00640 double TNumericalStuff::sign(double a, double b) {
00641   return b >= 0.0 ? fabs(a) : -fabs(a);
00642 }
00643 
00644 void TNumericalStuff::nrerror(const TStr& error_text) {
00645     printf("NR_ERROR: %s", error_text.CStr());
00646     throw new TNSException(error_text);
00647 }
00648 
00649 double TNumericalStuff::pythag(double a, double b) {
00650     double absa = fabs(a), absb = fabs(b);
00651     if (absa > absb)
00652         return absa*sqrt(1.0+sqr(absb/absa));
00653     else
00654         return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+sqr(absa/absb)));
00655 }
00656 
00657 void TNumericalStuff::SymetricToTridiag(TFltVV& a, int n, TFltV& d, TFltV& e) {
00658     int l,k,j,i;
00659     double scale,hh,h,g,f;
00660     for (i=n;i>=2;i--) {
00661         l=i-1;
00662         h=scale=0.0;
00663         if (l > 1) {
00664             for (k=1;k<=l;k++)
00665                 scale += fabs(a(i-1,k-1).Val);
00666             if (scale == 0.0) //Skip transformation.
00667                 e[i]=a(i-1,l-1);
00668             else {
00669                 for (k=1;k<=l;k++) {
00670                     a(i-1,k-1) /= scale; //Use scaled a's for transformation.
00671                     h += a(i-1,k-1)*a(i-1,k-1);
00672                 }
00673                 f=a(i-1,l-1);
00674                 g=(f >= 0.0 ? -sqrt(h) : sqrt(h));
00675                 IAssertR(_isnan(g) == 0, TFlt::GetStr(h));
00676                 e[i]=scale*g;
00677                 h -= f*g; //Now h is equation (11.2.4).
00678                 a(i-1,l-1)=f-g; //Store u in the ith row of a.
00679                 f=0.0;
00680                 for (j=1;j<=l;j++) {
00681                     // Next statement can be omitted if eigenvectors not wanted
00682                     a(j-1,i-1)=a(i-1,j-1)/h; //Store u/H in ith column of a.
00683                     g=0.0; //Form an element of A*u in g.
00684                     for (k=1;k<=j;k++)
00685                         g += a(j-1,k-1)*a(i-1,k-1);
00686                     for (k=j+1;k<=l;k++)
00687                         g += a(k-1,j-1)*a(i-1,k-1);
00688                     e[j]=g/h; //Form element of p in temporarily unused element of e.
00689                     f += e[j]*a(i-1,j-1);
00690                 }
00691                 hh=f/(h+h); //Form K, equation (11.2.11).
00692                 for (j=1;j<=l;j++) { //Form q and store in e overwriting p.
00693                     f=a(i-1,j-1);
00694                     e[j]=g=e[j]-hh*f;
00695                     for (k=1;k<=j;k++) { //Reduce a, equation (11.2.13).
00696                         a(j-1,k-1) -= (f*e[k]+g*a(i-1,k-1));
00697                         Assert(_isnan(a(j-1,k-1)) == 0);
00698                     }
00699                 }
00700             }
00701         } else
00702             e[i]=a(i-1,l-1);
00703         d[i]=h;
00704     }
00705     // Next statement can be omitted if eigenvectors not wanted
00706     d[1]=0.0;
00707     e[1]=0.0;
00708     // Contents of this loop can be omitted if eigenvectors not
00709     // wanted except for statement d[i]=a[i][i];
00710     for (i=1;i<=n;i++) { //Begin accumulation of transformation matrices.
00711         l=i-1;
00712         if (d[i]) { //This block skipped when i=1.
00713             for (j=1;j<=l;j++) {
00714                 g=0.0;
00715                 for (k=1;k<=l;k++) //Use u and u/H stored in a to form P*Q.
00716                     g += a(i-1,k-1)*a(k-1,j-1);
00717                 for (k=1;k<=l;k++) {
00718                     a(k-1,j-1) -= g*a(k-1,i-1);
00719                     Assert(_isnan(a(k-1,j-1)) == 0);
00720                 }
00721             }
00722         }
00723         d[i]=a(i-1,i-1); //This statement remains.
00724         a(i-1,i-1)=1.0; //Reset row and column of a to identity  matrix for next iteration.
00725         for (j=1;j<=l;j++) a(j-1,i-1)=a(i-1,j-1)=0.0;
00726     }
00727 }
00728 
00729 void TNumericalStuff::EigSymmetricTridiag(TFltV& d, TFltV& e, int n, TFltVV& z) {
00730     int m,l,iter,i,k; // N = n+1;
00731     double s,r,p,g,f,dd,c,b;
00732     // Convenient to renumber the elements of e
00733     for (i=2;i<=n;i++) e[i-1]=e[i];
00734     e[n]=0.0;
00735     for (l=1;l<=n;l++) {
00736         iter=0;
00737         do {
00738             // Look for a single small subdiagonal element to split the matrix.
00739             for (m=l;m<=n-1;m++) {
00740                 dd=TFlt::Abs(d[m])+TFlt::Abs(d[m+1]);
00741                 if ((double)(TFlt::Abs(e[m])+dd) == dd) break;
00742             }
00743             if (m != l) {
00744                 if (iter++ == 60) nrerror("Too many iterations in EigSymmetricTridiag");
00745                 //Form shift.
00746                 g=(d[l+1]-d[l])/(2.0*e[l]);
00747                 r=pythag(g,1.0);
00748                 //This is dm - ks.
00749                 g=d[m]-d[l]+e[l]/(g+sign(r,g));
00750                 s=c=1.0;
00751                 p=0.0;
00752                 // A plane rotation as in the original QL, followed by
00753                 // Givens rotations to restore tridiagonal form
00754                 for (i=m-1;i>=l;i--) {
00755                     f=s*e[i];
00756                     b=c*e[i];
00757                     e[i+1]=(r=pythag(f,g));
00758                     // Recover from underflow.
00759                     if (r == 0.0) {
00760                         d[i+1] -= p;
00761                         e[m]=0.0;
00762                         break;
00763                     }
00764                     s=f/r;
00765                     c=g/r;
00766                     g=d[i+1]-p;
00767                     r=(d[i]-g)*s+2.0*c*b;
00768                     d[i+1]=g+(p=s*r);
00769                     g=c*r-b;
00770                     // Next loop can be omitted if eigenvectors not wanted
00771                     for (k=0;k<n;k++) {
00772                         f=z(k,i);
00773                         z(k,i)=s*z(k,i-1)+c*f;
00774                         z(k,i-1)=c*z(k,i-1)-s*f;
00775                     }
00776                 }
00777                 if (r == 0.0 && i >= l) continue;
00778                 d[l] -= p;
00779                 e[l]=g;
00780                 e[m]=0.0;
00781             }
00782         } while (m != l);
00783     }
00784 }
00785 
00786 void TNumericalStuff::CholeskyDecomposition(TFltVV& A, TFltV& p) {
00787   Assert(A.GetRows() == A.GetCols());
00788   int n = A.GetRows(); p.Reserve(n,n);
00789 
00790   int i,j,k;
00791   double sum;
00792   for (i=1;i<=n;i++) {
00793     for (j=i;j<=n;j++) {
00794       for (sum=A(i-1,j-1),k=i-1;k>=1;k--) sum -= A(i-1,k-1)*A(j-1,k-1);
00795       if (i == j) {
00796         if (sum <= 0.0)
00797           nrerror("choldc failed");
00798         p[i-1]=sqrt(sum);
00799       } else A(j-1,i-1)=sum/p[i-1];
00800     }
00801   }
00802 }
00803 
00804 void TNumericalStuff::CholeskySolve(const TFltVV& A, const TFltV& p, const TFltV& b, TFltV& x) {
00805   IAssert(A.GetRows() == A.GetCols());
00806   int n = A.GetRows(); x.Reserve(n,n);
00807 
00808   int i,k;
00809   double sum;
00810 
00811   // Solve L * y = b, storing y in x
00812   for (i=1;i<=n;i++) {
00813     for (sum=b[i-1],k=i-1;k>=1;k--)
00814       sum -= A(i-1,k-1)*x[k-1];
00815     x[i-1]=sum/p[i-1];
00816   }
00817 
00818   // Solve L^T * x = y
00819   for (i=n;i>=1;i--) {
00820     for (sum=x[i-1],k=i+1;k<=n;k++)
00821       sum -= A(k-1,i-1)*x[k-1];
00822     x[i-1]=sum/p[i-1];
00823   }
00824 }
00825 
00826 void TNumericalStuff::SolveSymetricSystem(TFltVV& A, const TFltV& b, TFltV& x) {
00827   IAssert(A.GetRows() == A.GetCols());
00828   TFltV p; CholeskyDecomposition(A, p);
00829   CholeskySolve(A, p, b, x);
00830 }
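// Example (illustrative sketch, not part of the original file): solving A*x = b
// for a small symmetric positive-definite A with the Cholesky-based routine
// above. Note that A is overwritten by its Cholesky factor.
static void ExampleSolveSymmetricSystem() {
    TFltVV A(2, 2);
    A(0,0) = 4.0; A(0,1) = 1.0;
    A(1,0) = 1.0; A(1,1) = 3.0;
    TFltV b(2); b[0] = 1.0; b[1] = 2.0;
    TFltV x;
    TNumericalStuff::SolveSymetricSystem(A, b, x); // x ~ (1/11, 7/11) = (0.0909, 0.6364)
}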
00831 
00832 void TNumericalStuff::InverseSubstitute(TFltVV& A, const TFltV& p) {
00833   IAssert(A.GetRows() == A.GetCols());
00834   int n = A.GetRows(); TFltV x(n);
00835 
00836     int i, j, k; double sum;
00837     for (i = 0; i < n; i++) {
00838       // solve L * y = e_i, store in x
00839         // elements from 0 to i-1 are 0.0
00840         for (j = 0; j < i; j++) x[j] = 0.0;
00841         // solve l_ii * y_i = 1 => y_i = 1/l_ii
00842         x[i] = 1/p[i];
00843         // solve y_j for j > i
00844         for (j = i+1; j < n; j++) {
00845             for (sum = 0.0, k = i; k < j; k++)
00846                 sum -= A(j,k) * x[k];
00847             x[j] = sum / p[j];
00848         }
00849 
00850       // solve L'* x = y, store in upper triangle of A
00851         for (j = n-1; j >= i; j--) {
00852             for (sum = x[j], k = j+1; k < n; k++)
00853                 sum -= A(k,j)*x[k];
00854             x[j] = sum/p[j];
00855         }
00856         for (int j = i; j < n; j++) A(i,j) = x[j];
00857     }
00858 
00859 }
00860 
00861 void TNumericalStuff::InverseSymetric(TFltVV& A) {
00862     IAssert(A.GetRows() == A.GetCols());
00863     TFltV p;
00864     // first we calculate cholesky decomposition of A
00865     CholeskyDecomposition(A, p);
00866     // then we solve the system A x_i = e_i for i = 1..n
00867     InverseSubstitute(A, p);
00868 }
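// Example (illustrative sketch, not part of the original file): inverting a small
// symmetric positive-definite matrix in place; on return the upper triangle of A
// holds the inverse, while the lower triangle still contains the Cholesky factor.
static void ExampleInverseSymmetric() {
    TFltVV A(2, 2);
    A(0,0) = 4.0; A(0,1) = 1.0;
    A(1,0) = 1.0; A(1,1) = 3.0;
    TNumericalStuff::InverseSymetric(A); // A(0,0) ~ 3/11, A(0,1) ~ -1/11, A(1,1) ~ 4/11
}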
00869 
00870 void TNumericalStuff::InverseTriagonal(TFltVV& A) {
00871   IAssert(A.GetRows() == A.GetCols());
00872   int n = A.GetRows(); TFltV x(n), p(n);
00873 
00874     int i, j, k; double sum;
00875     // copy upper triangle to lower one as we'll overwrite upper one
00876     for (i = 0; i < n; i++) {
00877         p[i] = A(i,i);
00878         for (j = i+1; j < n; j++)
00879             A(j,i) = A(i,j);
00880     }
00881     // solve
00882     for (i = 0; i < n; i++) {
00883         // solve R * x = e_i, store in x
00884         // elements from 0 to i-1 are 0.0
00885         for (j = n-1; j > i; j--) x[j] = 0.0;
00886         // solve l_ii * y_i = 1 => y_i = 1/l_ii
00887         x[i] = 1/p[i];
00888         // solve y_j for j > i
00889         for (j = i-1; j >= 0; j--) {
00890             for (sum = 0.0, k = i; k > j; k--)
00891                 sum -= A(k,j) * x[k];
00892             x[j] = sum / p[j];
00893         }
00894         for (int j = 0; j <= i; j++) A(j,i) = x[j];
00895     }
00896 }
00897 
00898 void TNumericalStuff::LUDecomposition(TFltVV& A, TIntV& indx, double& d) {
00899   Assert(A.GetRows() == A.GetCols());
00900   int n = A.GetRows(); indx.Reserve(n,n);
00901 
00902     int i=0,imax=0,j=0,k=0;
00903     double big,dum,sum,temp;
00904     TFltV vv(n); // vv stores the implicit scaling of each row.
00905     d=1.0;       // No row interchanges yet.
00906 
00907     // Loop over rows to get the implicit scaling information.
00908     for (i=1;i<=n;i++) {
00909         big=0.0;
00910         for (j=1;j<=n;j++)
00911             if ((temp=TFlt::Abs(A(i-1,j-1))) > big) big=temp;
00912         if (big == 0.0) nrerror("Singular matrix in routine LUDecomposition");
00913         vv[i-1]=1.0/big;
00914     }
00915 
00916     for (j=1;j<=n;j++) {
00917         for (i=1;i<j;i++) {
00918             sum=A(i-1,j-1);
00919             for (k=1;k<i;k++) sum -= A(i-1,k-1)*A(k-1,j-1);
00920             A(i-1,j-1)=sum;
00921         }
00922         big=0.0; //Initialize for the search for largest pivot element.
00923         for (i=j;i<=n;i++) {
00924             sum=A(i-1,j-1);
00925             for (k=1;k<j;k++)
00926                 sum -= A(i-1,k-1)*A(k-1,j-1);
00927             A(i-1,j-1)=sum;
00928 
00929             //Is the figure of merit for the pivot better than the best so far?
00930             if ((dum=vv[i-1] * TFlt::Abs(sum)) >= big) {
00931                 big=dum;
00932                 imax=i;
00933             }
00934         }
00935 
00936         //Do we need to interchange rows?
00937         if (j != imax) {
00938             //Yes, do so...
00939             for (k=1;k<=n;k++) {
00940                 dum=A(imax-1,k-1);
00941             A(imax-1,k-1)=A(j-1,k-1); // Tadej: imax-1,k looks wrong
00942             A(j-1,k-1)=dum;
00943             }
00944             //...and change the parity of d.
00945             d = -d;
00946             vv[imax-1]=vv[j-1]; //Also interchange the scale factor.
00947         }
00948         indx[j-1]=imax;
00949 
00950         //If the pivot element is zero the matrix is singular (at least to the precision of the
00951         //algorithm). For some applications on singular matrices, it is desirable to substitute
00952         //TINY for zero.
00953         if (A(j-1,j-1) == 0.0) A(j-1,j-1)=1e-20;
00954 
00955          //Now, finally, divide by the pivot element.
00956         if (j != n) {
00957             dum=1.0/(A(j-1,j-1));
00958             for (i=j+1;i<=n;i++) A(i-1,j-1) *= dum;
00959         }
00960     } //Go back for the next column in the reduction.
00961 }
00962 
00963 void TNumericalStuff::LUSolve(const TFltVV& A, const TIntV& indx, TFltV& b) {
00964   Assert(A.GetRows() == A.GetCols());
00965   int n = A.GetRows();
00966     int i,ii=0,ip,j;
00967     double sum;
00968     for (i=1;i<=n;i++) {
00969         ip=indx[i-1];
00970         sum=b[ip-1];
00971         b[ip-1]=b[i-1];
00972         if (ii)
00973             for (j=ii;j<=i-1;j++) sum -= A(i-1,j-1)*b[j-1];
00974         else if (sum) { ii=i; }  b[i-1]=sum;
00975     }
00976     for (i=n;i>=1;i--) {
00977         sum=b[i-1];
00978         for (j=i+1;j<=n;j++) sum -= A(i-1,j-1)*b[j-1];
00979         b[i-1]=sum/A(i-1,i-1);
00980     }
00981 }
00982 
00983 void TNumericalStuff::SolveLinearSystem(TFltVV& A, const TFltV& b, TFltV& x) {
00984     TIntV indx; double d;
00985     LUDecomposition(A, indx, d);
00986     x = b;
00987     LUSolve(A, indx, x);
00988 }
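// Example (illustrative sketch, not part of the original file): solving a general
// (non-symmetric) system with the LU-based routine above; A is overwritten by its
// LU decomposition.
static void ExampleSolveLinearSystem() {
    TFltVV A(2, 2);
    A(0,0) = 1.0; A(0,1) = 2.0;
    A(1,0) = 3.0; A(1,1) = 4.0;
    TFltV b(2); b[0] = 5.0; b[1] = 6.0;
    TFltV x;
    TNumericalStuff::SolveLinearSystem(A, b, x); // x = (-4, 4.5)
}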
00989 
00991 // Sparse-SVD
00992 void TSparseSVD::MultiplyATA(const TMatrix& Matrix,
00993         const TFltVV& Vec, int ColId, TFltV& Result) {
00994     TFltV tmp(Matrix.GetRows());
00995     // tmp = A * Vec(:,ColId)
00996     Matrix.Multiply(Vec, ColId, tmp);
00997     // Vec = A' * tmp
00998     Matrix.MultiplyT(tmp, Result);
00999 }
01000 
01001 void TSparseSVD::MultiplyATA(const TMatrix& Matrix,
01002         const TFltV& Vec, TFltV& Result) {
01003     TFltV tmp(Matrix.GetRows());
01004     // tmp = A * Vec
01005     Matrix.Multiply(Vec, tmp);
01006     // Vec = A' * tmp
01007     Matrix.MultiplyT(tmp, Result);
01008 }
01009 
01010 void TSparseSVD::OrtoIterSVD(const TMatrix& Matrix,
01011         int NumSV, int IterN, TFltV& SgnValV) {
01012 
01013     int i, j, k;
01014     int N = Matrix.GetCols(), M = NumSV;
01015     TFltVV Q(N, M);
01016 
01017     // Q = rand(N,M)
01018     TRnd rnd;
01019     for (i = 0; i < N; i++) {
01020         for (j = 0; j < M; j++)
01021             Q(i,j) = rnd.GetUniDev();
01022     }
01023 
01024     TFltV tmp(N);
01025     for (int IterC = 0; IterC < IterN; IterC++) {
01026         printf("%d..", IterC);
01027         // Gram-Schmidt
01028         TLinAlg::GS(Q);
01029         // Q = A'*A*Q
01030         for (int ColId = 0; ColId < M; ColId++) {
01031             MultiplyATA(Matrix, Q, ColId, tmp);
01032             for (k = 0; k < N; k++) Q(k,ColId) = tmp[k];
01033         }
01034     }
01035 
01036     SgnValV.Reserve(NumSV,0);
01037     for (i = 0; i < NumSV; i++)
01038         SgnValV.Add(sqrt(TLinAlg::Norm(Q,i)));
01039     TLinAlg::GS(Q);
01040 }
01041 
01042 void TSparseSVD::SimpleLanczos(const TMatrix& Matrix,
01043         const int& NumEig, TFltV& EigValV,
01044         const bool& DoLocalReortoP, const bool& SvdMatrixProductP) {
01045 
01046     if (SvdMatrixProductP) {
01047         // if this fails, use transposed matrix
01048         IAssert(Matrix.GetRows() >= Matrix.GetCols());
01049     } else {
01050         IAssert(Matrix.GetRows() == Matrix.GetCols());
01051     }
01052 
01053     const int N = Matrix.GetCols(); // size of matrix
01054     TFltV r(N), v0(N), v1(N); // current vector and 2 previous ones
01055     TFltV alpha(NumEig, 0), beta(NumEig, 0); // diagonal and subdiagonal of T
01056 
01057     printf("Calculating %d eigen-values of %d x %d matrix\n", NumEig, N, N);
01058 
01059     // set starting vector
01060     //TRnd Rnd(0);
01061     for (int i = 0; i < N; i++) {
01062         r[i] = 1/sqrt((double)N); // Rnd.GetNrmDev();
01063         v0[i] = v1[i] = 0.0;
01064     }
01065     beta.Add(TLinAlg::Norm(r));
01066 
01067     for (int j = 0; j < NumEig; j++) {
01068         printf("%d\r", j+1);
01069         // v_j -> v_(j-1)
01070         v0 = v1;
01071         // v_j = (1/beta_(j-1)) * r
01072         TLinAlg::MultiplyScalar(1/beta[j], r, v1);
01073         // r = A*v_j
01074         if (SvdMatrixProductP) {
01075             // A = Matrix'*Matrix
01076             MultiplyATA(Matrix, v1, r);
01077         } else {
01078             // A = Matrix
01079             Matrix.Multiply(v1, r);
01080         }
01081         // r = r - beta_(j-1) * v_(j-1)
01082         TLinAlg::AddVec(-beta[j], v0, r, r);
01083         // alpha_j = vj'*r
01084         alpha.Add(TLinAlg::DotProduct(v1, r));
01085         // r = r - v_j * alpha_j
01086         TLinAlg::AddVec(-alpha[j], v1, r, r);
01087         // reorthogonalization if necessary
01088         if (DoLocalReortoP) { } //TODO
01089         // beta_j = ||r||_2
01090         beta.Add(TLinAlg::Norm(r));
01091         // compute approximate eigenvalues of T_j
01092         // test bounds for convergence
01093     }
01094     printf("\n");
01095 
01096     // prepare matrix T
01097     TFltV d(NumEig + 1), e(NumEig + 1);
01098     d[1] = alpha[0]; d[0] = e[0] = e[1] = 0.0;
01099     for (int i = 1; i < NumEig; i++) {
01100         d[i+1] = alpha[i]; e[i+1] = beta[i]; }
01101     // solve eigen problem for tridiagonal matrix with diag d and subdiag e
01102     TFltVV S(NumEig+1,NumEig+1); // eigen-vectors
01103     TLAMisc::FillIdentity(S); // make it identity
01104     TNumericalStuff::EigSymmetricTridiag(d, e, NumEig, S); // solve
01105     //TLAMisc::PrintTFltV(d, "AllEigV");
01106 
01107     // check convergence
01108     TFltKdV AllEigValV(NumEig, 0);
01109     for (int i = 1; i <= NumEig; i++) {
01110         const double ResidualNorm = TFlt::Abs(S(i-1, NumEig-1) * beta.Last());
01111         if (ResidualNorm < 1e-5)
01112             AllEigValV.Add(TFltKd(TFlt::Abs(d[i]), d[i]));
01113     }
01114 
01115     // prepare results
01116     AllEigValV.Sort(false); EigValV.Gen(NumEig, 0);
01117     for (int i = 0; i < AllEigValV.Len(); i++) {
01118         if (i == 0 || (TFlt::Abs(AllEigValV[i].Dat/AllEigValV[i-1].Dat) < 0.9999))
01119             EigValV.Add(AllEigValV[i].Dat);
01120     }
01121 }
01122 
01123 void TSparseSVD::Lanczos(const TMatrix& Matrix, int NumEig,
01124         int Iters, const TSpSVDReOrtoType& ReOrtoType,
01125         TFltV& EigValV, TFltVV& EigVecVV, const bool& SvdMatrixProductP) {
01126 
01127     if (SvdMatrixProductP) {
01128         // if this fails, use transposed matrix
01129         IAssert(Matrix.GetRows() >= Matrix.GetCols());
01130     } else {
01131         IAssert(Matrix.GetRows() == Matrix.GetCols());
01132     }
01133   IAssertR(NumEig <= Iters, TStr::Fmt("%d <= %d", NumEig, Iters));
01134 
01135     //if (ReOrtoType == ssotFull) printf("Full reortogonalization\n");
01136     int i, N = Matrix.GetCols(), K; // K - current dimension of T
01137     double t = 0.0, eps = 1e-6; // t - 1-norm of T
01138 
01139     //sequence of Ritz's vectors
01140     TFltVV Q(N, Iters);
01141     double tmp = 1/sqrt((double)N);
01142     for (i = 0; i < N; i++)
01143         Q(i,0) = tmp;
01144     //converged Ritz vectors
01145     TVec<TFltV> ConvgQV(Iters);
01146     TIntV CountConvgV(Iters);
01147     for (i = 0; i < Iters; i++) CountConvgV[i] = 0;
01148     // const int ConvgTreshold = 50;
01149 
01150     //diagonal and subdiagonal of T
01151     TFltV d(Iters+1), e(Iters+1);
01152     //eigenvectors of T
01153     //TFltVV V;
01154     TFltVV V(Iters, Iters);
01155 
01156     // z - current Lanczos's vector
01157     TFltV z(N), bb(Iters), aa(Iters), y(N);
01158     //printf("svd(%d,%d)...\n", NumEig, Iters);
01159 
01160     if (SvdMatrixProductP) {
01161         // A = Matrix'*Matrix
01162         MultiplyATA(Matrix, Q, 0, z);
01163     } else {
01164         // A = Matrix
01165         Matrix.Multiply(Q, 0, z);
01166     }
01167 
01168     for (int j = 0; j < (Iters-1); j++) {
01169         //printf("%d..\r",j+2);
01170 
01171         //calculates (j+1)-th Lanczos's vector
01172         // aa[j] = <Q(:,j), z>
01173         aa[j] = TLinAlg::DotProduct(Q, j, z);
01174         //printf(" %g -- ", aa[j].Val); //HACK
01175 
01176         TLinAlg::AddVec(-aa[j], Q, j, z);
01177         if (j > 0) {
01178             // z := -aa[j] * Q(:,j) + z
01179             TLinAlg::AddVec(-bb[j-1], Q, j-1, z);
01180 
01181             //reorthogonalization
01182             if (ReOrtoType == ssotSelective || ReOrtoType == ssotFull) {
01183                 for (i = 0; i <= j; i++) {
01184                     // if the i-th vector has converged, then we have to orthogonalize against it
01185                     if ((ReOrtoType == ssotFull) ||
01186                         (bb[j-1] * TFlt::Abs(V(K-1, i)) < eps * t)) {
01187 
01188                         ConvgQV[i].Reserve(N,N); CountConvgV[i]++;
01189                         TFltV& vec = ConvgQV[i];
01190                         //vec = Q * V(:,i)
01191                         for (int k = 0; k < N; k++) {
01192                             vec[k] = 0.0;
01193                             for (int l = 0; l < K; l++)
01194                                 vec[k] += Q(k,l) * V(l,i);
01195                         }
01196                         TLinAlg::AddVec(-TLinAlg::DotProduct(ConvgQV[i], z), ConvgQV[i], z ,z);
01197                     }
01198                 }
01199             }
01200         }
01201 
01202         //adds (j+1)-th Lanczos's vector to Q
01203         bb[j] = TLinAlg::Norm(z);
01204     if (!(bb[j] > 1e-10)) {
01205       printf("Rank of matrix is only %d\n", j+2);
01206       printf("Last singular value is %g\n", bb[j].Val);
01207       break;
01208     }
01209         for (i = 0; i < N; i++)
01210             Q(i, j+1) = z[i] / bb[j];
01211 
01212         //next Lanczos vector
01213         if (SvdMatrixProductP) {
01214             // A = Matrix'*Matrix
01215             MultiplyATA(Matrix, Q, j+1, z);
01216         } else {
01217             // A = Matrix
01218             Matrix.Multiply(Q, j+1, z);
01219         }
01220 
01221         //calculate T (K x K matrix)
01222         K = j + 2;
01223         // calculate diagonal
01224         for (i = 1; i < K; i++) d[i] = aa[i-1];
01225         d[K] = TLinAlg::DotProduct(Q, K-1, z);
01226         // calculate subdiagonal
01227         e[1] = 0.0;
01228         for (i = 2; i <= K; i++) e[i] = bb[i-2];
01229 
01230         //calculate 1-norm of T
01231         t = TFlt::GetMx(TFlt::Abs(d[1]) + TFlt::Abs(e[2]), TFlt::Abs(e[K]) + TFlt::Abs(d[K]));
01232         for (i = 2; i < K; i++)
01233             t = TFlt::GetMx(t, TFlt::Abs(e[i]) + TFlt::Abs(d[i]) + TFlt::Abs(e[i+1]));
01234 
01235         //set V to identity matrix
01236         //V.Gen(K,K);
01237         for (i = 0; i < K; i++) {
01238             for (int k = 0; k < K; k++)
01239                 V(i,k) = 0.0;
01240             V(i,i) = 1.0;
01241         }
01242 
01243         //eigenvectors of T
01244         TNumericalStuff::EigSymmetricTridiag(d, e, K, V);
01245     }//for
01246     //printf("\n");
01247 
01248     // Finds NumEig largest eigen values
01249     TFltIntKdV sv(K);
01250     for (i = 0; i < K; i++) {
01251         sv[i].Key = TFlt::Abs(d[i+1]);
01252         sv[i].Dat = i;
01253     }
01254     sv.Sort(false);
01255 
01256     TFltV uu(Matrix.GetRows());
01257     const int FinalNumEig = TInt::GetMn(NumEig, K);
01258     EigValV.Reserve(FinalNumEig,0);
01259     EigVecVV.Gen(Matrix.GetCols(), FinalNumEig);
01260     for (i = 0; i < FinalNumEig; i++) {
01261         //printf("s[%d] = %20.15f\r", i, sv[i].Key.Val);
01262         int ii = sv[i].Dat;
01263         double sigma = d[ii+1].Val;
01264         // calculate singular value
01265         EigValV.Add(sigma);
01266         // calculate i-th right singular vector ( V := Q * W )
01267         TLinAlg::Multiply(Q, V, ii, EigVecVV, i);
01268     }
01269     //printf("done                           \n");
01270 }
01271 
01272 void TSparseSVD::Lanczos2(const TMatrix& Matrix, int MaxNumEig,
01273     int MaxSecs, const TSpSVDReOrtoType& ReOrtoType,
01274     TFltV& EigValV, TFltVV& EigVecVV, const bool& SvdMatrixProductP) {
01275 
01276   if (SvdMatrixProductP) {
01277     // if this fails, use transposed matrix
01278     IAssert(Matrix.GetRows() >= Matrix.GetCols());
01279   } else {
01280     IAssert(Matrix.GetRows() == Matrix.GetCols());
01281   }
01282   //IAssertR(NumEig <= Iters, TStr::Fmt("%d <= %d", NumEig, Iters));
01283 
01284   //if (ReOrtoType == ssotFull) printf("Full reortogonalization\n");
01285   int i, N = Matrix.GetCols(), K; // K - current dimension of T
01286   double t = 0.0, eps = 1e-6; // t - 1-norm of T
01287 
01288   //sequence of Ritz's vectors
01289   TFltVV Q(N, MaxNumEig);
01290   double tmp = 1/sqrt((double)N);
01291   for (i = 0; i < N; i++)
01292       Q(i,0) = tmp;
01293   //converged Ritz vectors
01294   TVec<TFltV> ConvgQV(MaxNumEig);
01295   TIntV CountConvgV(MaxNumEig);
01296   for (i = 0; i < MaxNumEig; i++) CountConvgV[i] = 0;
01297   // const int ConvgTreshold = 50;
01298 
01299   //diagonal and subdiagonal of T
01300   TFltV d(MaxNumEig+1), e(MaxNumEig+1);
01301   //eigenvectors of T
01302   //TFltVV V;
01303   TFltVV V(MaxNumEig, MaxNumEig);
01304 
01305   // z - current Lanczos's vector
01306   TFltV z(N), bb(MaxNumEig), aa(MaxNumEig), y(N);
01307   //printf("svd(%d,%d)...\n", NumEig, Iters);
01308 
01309   if (SvdMatrixProductP) {
01310       // A = Matrix'*Matrix
01311       MultiplyATA(Matrix, Q, 0, z);
01312   } else {
01313       // A = Matrix
01314       Matrix.Multiply(Q, 0, z);
01315   }
01316   TExeTm ExeTm;
01317   for (int j = 0; j < (MaxNumEig-1); j++) {
01318     printf("%d [%s]..\r",j+2, ExeTm.GetStr());
01319     if (ExeTm.GetSecs() > MaxSecs) { break; }
01320 
01321     //calculates (j+1)-th Lanczos's vector
01322     // aa[j] = <Q(:,j), z>
01323     aa[j] = TLinAlg::DotProduct(Q, j, z);
01324     //printf(" %g -- ", aa[j].Val); //HACK
01325 
01326     TLinAlg::AddVec(-aa[j], Q, j, z);
01327     if (j > 0) {
01328         // z := -aa[j] * Q(:,j) + z
01329         TLinAlg::AddVec(-bb[j-1], Q, j-1, z);
01330 
01331         //reorthogonalization
01332         if (ReOrtoType == ssotSelective || ReOrtoType == ssotFull) {
01333             for (i = 0; i <= j; i++) {
01334                 // if the i-th vector has converged, then we have to orthogonalize against it
01335                 if ((ReOrtoType == ssotFull) ||
01336                     (bb[j-1] * TFlt::Abs(V(K-1, i)) < eps * t)) {
01337 
01338                     ConvgQV[i].Reserve(N,N); CountConvgV[i]++;
01339                     TFltV& vec = ConvgQV[i];
01340                     //vec = Q * V(:,i)
01341                     for (int k = 0; k < N; k++) {
01342                         vec[k] = 0.0;
01343                         for (int l = 0; l < K; l++)
01344                             vec[k] += Q(k,l) * V(l,i);
01345                     }
01346                     TLinAlg::AddVec(-TLinAlg::DotProduct(ConvgQV[i], z), ConvgQV[i], z ,z);
01347                 }
01348             }
01349         }
01350     }
01351 
01352     //adds (j+1)-th Lanczos's vector to Q
01353     bb[j] = TLinAlg::Norm(z);
01354     if (!(bb[j] > 1e-10)) {
01355       printf("Rank of matrix is only %d\n", j+2);
01356       printf("Last singular value is %g\n", bb[j].Val);
01357       break;
01358     }
01359     for (i = 0; i < N; i++)
01360         Q(i, j+1) = z[i] / bb[j];
01361 
01362     //next Lanczos vector
01363     if (SvdMatrixProductP) {
01364         // A = Matrix'*Matrix
01365         MultiplyATA(Matrix, Q, j+1, z);
01366     } else {
01367         // A = Matrix
01368         Matrix.Multiply(Q, j+1, z);
01369     }
01370 
01371     //calculate T (K x K matrix)
01372     K = j + 2;
01373     // calculate diagonal
01374     for (i = 1; i < K; i++) d[i] = aa[i-1];
01375     d[K] = TLinAlg::DotProduct(Q, K-1, z);
01376     // calculate subdiagonal
01377     e[1] = 0.0;
01378     for (i = 2; i <= K; i++) e[i] = bb[i-2];
01379 
01380     //calculate 1-norm of T
01381     t = TFlt::GetMx(TFlt::Abs(d[1]) + TFlt::Abs(e[2]), TFlt::Abs(e[K]) + TFlt::Abs(d[K]));
01382     for (i = 2; i < K; i++)
01383         t = TFlt::GetMx(t, TFlt::Abs(e[i]) + TFlt::Abs(d[i]) + TFlt::Abs(e[i+1]));
01384 
01385     //set V to identity matrix
01386     //V.Gen(K,K);
01387     for (i = 0; i < K; i++) {
01388         for (int k = 0; k < K; k++)
01389             V(i,k) = 0.0;
01390         V(i,i) = 1.0;
01391     }
01392 
01393     //eigenvectors of T
01394     TNumericalStuff::EigSymmetricTridiag(d, e, K, V);
01395   }//for
01396   printf("... calc %d.", K);
01397   // Finds NumEig largest eigen values
01398   TFltIntKdV sv(K);
01399   for (i = 0; i < K; i++) {
01400     sv[i].Key = TFlt::Abs(d[i+1]);
01401     sv[i].Dat = i;
01402   }
01403   sv.Sort(false);
01404 
01405   TFltV uu(Matrix.GetRows());
01406   const int FinalNumEig = K; //TInt::GetMn(NumEig, K);
01407   EigValV.Reserve(FinalNumEig,0);
01408   EigVecVV.Gen(Matrix.GetCols(), FinalNumEig);
01409   for (i = 0; i < FinalNumEig; i++) {
01410     //printf("s[%d] = %20.15f\r", i, sv[i].Key.Val);
01411     int ii = sv[i].Dat;
01412     double sigma = d[ii+1].Val;
01413     // calculate singular value
01414     EigValV.Add(sigma);
01415     // calculate i-th right singular vector ( V := Q * W )
01416     TLinAlg::Multiply(Q, V, ii, EigVecVV, i);
01417   }
01418   printf("  done\n");
01419 }
01420 
01421 
01422 void TSparseSVD::SimpleLanczosSVD(const TMatrix& Matrix,
01423         const int& CalcSV, TFltV& SngValV, const bool& DoLocalReorto) {
01424 
01425     SimpleLanczos(Matrix, CalcSV, SngValV, DoLocalReorto, true);
01426     for (int SngValN = 0; SngValN < SngValV.Len(); SngValN++) {
01427       //IAssert(SngValV[SngValN] >= 0.0);
01428       if (SngValV[SngValN] < 0.0) {
01429         printf("bad sng val: %d %g\n", SngValN, SngValV[SngValN]());
01430         SngValV[SngValN] = 0;
01431       }
01432       SngValV[SngValN] = sqrt(SngValV[SngValN].Val);
01433     }
01434 }
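// Example (illustrative sketch, not part of the original file): estimating the
// leading singular values of a sparse matrix loaded from a triplet text file
// (file name hypothetical) without local reorthogonalization. The matrix must
// have at least as many rows as columns, as asserted by SimpleLanczos.
static void ExampleSimpleLanczosSVD() {
    TSparseRowMatrix Mtx("term-doc-matrix.txt");
    TFltV SngValV;
    TSparseSVD::SimpleLanczosSVD(Mtx, 10, SngValV, false);
    for (int i = 0; i < SngValV.Len(); i++) {
        printf("sigma_%d = %g\n", i, SngValV[i].Val);
    }
}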
01435 
01436 void TSparseSVD::LanczosSVD(const TMatrix& Matrix, int NumSV,
01437         int Iters, const TSpSVDReOrtoType& ReOrtoType,
01438         TFltV& SgnValV, TFltVV& LeftSgnVecVV, TFltVV& RightSgnVecVV) {
01439 
01440     // solve eigen problem for Matrix'*Matrix
01441     Lanczos(Matrix, NumSV, Iters, ReOrtoType, SgnValV, RightSgnVecVV, true);
01442     // calculate left singular vectors and sqrt singular values
01443     const int FinalNumSV = SgnValV.Len();
01444     LeftSgnVecVV.Gen(Matrix.GetRows(), FinalNumSV);
01445     TFltV LeftSgnVecV(Matrix.GetRows());
01446     for (int i = 0; i < FinalNumSV; i++) {
01447         if (SgnValV[i].Val < 0.0) { SgnValV[i] = 0.0; }
01448         const double SgnVal = sqrt(SgnValV[i]);
01449         SgnValV[i] = SgnVal;
01450         // calculate i-th left singular vector ( U := A * V * S^(-1) )
01451         Matrix.Multiply(RightSgnVecVV, i, LeftSgnVecV);
01452         for (int j = 0; j < LeftSgnVecV.Len(); j++) {
01453             LeftSgnVecVV(j,i) = LeftSgnVecV[j] / SgnVal; }
01454     }
01455     //printf("done                           \n");
01456 }
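// Example (illustrative sketch, not part of the original file): computing the top
// 5 singular triplets of the same kind of matrix with full reorthogonalization;
// LeftSgnVecVV and RightSgnVecVV receive one column per singular vector, of
// dimension GetRows() and GetCols() respectively. The file name is hypothetical.
static void ExampleLanczosSVD() {
    TSparseRowMatrix Mtx("term-doc-matrix.txt");
    TFltV SgnValV; TFltVV LeftSgnVecVV, RightSgnVecVV;
    TSparseSVD::LanczosSVD(Mtx, 5, 20, ssotFull, SgnValV, LeftSgnVecVV, RightSgnVecVV);
}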
01457 
01458 void TSparseSVD::Project(const TIntFltKdV& Vec, const TFltVV& U, TFltV& ProjVec) {
01459     const int m = U.GetCols(); // number of columns
01460 
01461     ProjVec.Gen(m, 0);
01462     for (int j = 0; j < m; j++) {
01463         double x = 0.0;
01464         for (int i = 0; i < Vec.Len(); i++)
01465             x += U(Vec[i].Key, j) * Vec[i].Dat;
01466         ProjVec.Add(x);
01467     }
01468 }
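// Example (illustrative sketch, not part of the original file): projecting a
// sparse vector onto the columns of a dense basis matrix U, as produced for
// instance by LanczosSVD above. Values are hypothetical.
static void ExampleProject() {
    TFltVV U(3, 2); // 3-dimensional space, 2 basis vectors
    U(0,0) = 1.0; U(1,0) = 0.0; U(2,0) = 0.0;
    U(0,1) = 0.0; U(1,1) = 1.0; U(2,1) = 0.0;
    TIntFltKdV Vec; Vec.Add(TIntFltKd(0, 2.0)); Vec.Add(TIntFltKd(1, 3.0));
    TFltV ProjVec;
    TSparseSVD::Project(Vec, U, ProjVec); // ProjVec = (2, 3)
}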
01469 
01471 // Sigmoid
01472 double TSigmoid::EvaluateFit(const TFltIntKdV& data, const double A, const double B)
01473 {
01474   double J = 0.0;
01475   for (int i = 0; i < data.Len(); i++)
01476   {
01477     double zi = data[i].Key; int yi = data[i].Dat;
01478     double e = exp(-A * zi + B);
01479     double denum = 1.0 + e;
01480     double prob = (yi > 0) ? (1.0 / denum) : (e / denum);
01481     J -= log(prob < 1e-20 ? 1e-20 : prob);
01482   }
01483   return J;
01484 }
01485 
01486 void TSigmoid::EvaluateFit(const TFltIntKdV& data, const double A, const double B, double& J, double& JA, double& JB)
01487 {
01488   //               J(A, B) = \sum_{i : y_i = 1} ln [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} [ln [1 + e^{-Az_i + B}] - {-Az_i + B}]
01489   //                       = \sum_i ln [1 + e^{-Az_i + B}] - \sum_{i : y_i = -1} {-Az_i + B}.
01490   // partial J / partial A = \sum_i (-z_i) e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} z_i.
01491   // partial J / partial B = \sum_i        e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} (-1).
01492   J = 0.0; double sum_all_PyNeg = 0.0, sum_all_ziPyNeg = 0.0, sum_yNeg_zi = 0.0, sum_yNeg_1 = 0.0;
01493   for (int i = 0; i < data.Len(); i++)
01494   {
01495     double zi = data[i].Key; int yi = data[i].Dat;
01496     double e = exp(-A * zi + B);
01497     double denum = 1.0 + e;
01498     double prob = (yi > 0) ? (1.0 / denum) : (e / denum);
01499     J -= log(prob < 1e-20 ? 1e-20 : prob);
01500     sum_all_PyNeg += e / denum;
01501     sum_all_ziPyNeg += zi * e / denum;
01502     if (yi < 0) { sum_yNeg_zi += zi; sum_yNeg_1 += 1; }
01503   }
01504   JA = -sum_all_ziPyNeg +     sum_yNeg_zi;
01505   JB =  sum_all_PyNeg   -     sum_yNeg_1;
01506 }
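
// Example sketch (not part of the original source): checking the analytic
// gradient (JA, JB) computed above against central finite differences of the
// scalar EvaluateFit overload. If these overloads are private helpers of
// TSigmoid in the header, this check would have to live inside the class.
static void ExampleGradientCheck(const TFltIntKdV& data, const double A, const double B) {
    double J, JA, JB; TSigmoid::EvaluateFit(data, A, B, J, JA, JB);
    const double h = 1e-6;
    const double NumJA = (TSigmoid::EvaluateFit(data, A+h, B) - TSigmoid::EvaluateFit(data, A-h, B)) / (2*h);
    const double NumJB = (TSigmoid::EvaluateFit(data, A, B+h) - TSigmoid::EvaluateFit(data, A, B-h)) / (2*h);
    printf("dJ/dA: analytic %g, numeric %g\n", JA, NumJA);
    printf("dJ/dB: analytic %g, numeric %g\n", JB, NumJB);
}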
01507 
01508 void TSigmoid::EvaluateFit(const TFltIntKdV& data, const double A, const double B, const double U,
01509                            const double V, const double lambda, double& J, double& JJ, double& JJJ)
01510 {
01511   // Let E_i = e^{-(A + lambda U) z_i + (B + lambda V)}.  Then we have
01512   // J(lambda) = \sum_i ln [1 + E_i] - \sum_{i : y_i = -1} {-(A + lambda U)z_i + (B + lambda V)}.
01513   // J'(lambda) = \sum_i (V - U z_i) E_i / [1 + E_i] - \sum_{i : y_i = -1} (V - U z_i).
01514   //            = \sum_i (V - U z_i) [1 - 1 / [1 + E_i]] - \sum_{i : y_i = -1} (V - U z_i).
01515   // J"(lambda) = \sum_i (V - U z_i)^2 E_i / [1 + E_i]^2.
01516   J = 0.0; JJ = 0.0; JJJ = 0.0;
01517   for (int i = 0; i < data.Len(); i++)
01518   {
01519     double zi = data[i].Key; int yi = data[i].Dat;
01520     double e = exp(-A * zi + B);
01521     double denum = 1.0 + e;
01522     double prob = (yi > 0) ? (1.0 / denum) : (e / denum);
01523     J -= log(prob < 1e-20 ? 1e-20 : prob);
01524     double VU = V - U * zi;
01525     JJ += VU * (e / denum); if (yi < 0) JJ -= VU;
01526     JJJ += VU * VU * e / denum / denum;
01527   }
01528 }
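
// Example sketch (not part of the original source): the directional
// derivatives JJ = J'(0) and JJJ = J"(0) along (U, V) computed above can be
// used for a single Newton step lambda* = -J'(0) / J"(0). Note the routine
// evaluates the exponent at (A, B), i.e. at lambda = 0, so the lambda
// argument only matters for the derivation comment. Same accessibility
// caveat as in the previous example.
static void ExampleNewtonStep(const TFltIntKdV& data, double& A, double& B,
        const double U, const double V) {
    double J, JJ, JJJ;
    TSigmoid::EvaluateFit(data, A, B, U, V, 0.0, J, JJ, JJJ);
    if (JJJ > 1e-12) {                    // require positive curvature
        const double Lambda = -JJ / JJJ;
        A += Lambda * U; B += Lambda * V;
    }
}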
01529 
01530 TSigmoid::TSigmoid(const TFltIntKdV& data) {
01531   // Let z_i be the projection of the i'th training example, and y_i \in {-1, +1} be its class label.
01532   // Our sigmoid is: P(Y = y | Z = z) = 1 / [1 + e^{-Az + B}]
01533   // and we want to maximize \prod_i P(Y = y_i | Z = z_i)
01534   //                       = \prod_{i : y_i = 1} 1 / [1 + e^{-Az_i + B}]  \prod_{i : y_i = -1} e^{-Az_i + B} / [1 + e^{-Az_i + B}]
01535   // or minimize its negative logarithm,
01536   //               J(A, B) = \sum_{i : y_i = 1} ln [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} [ln [1 + e^{-Az_i + B}] - {-Az_i + B}]
01537   //                       = \sum_i ln [1 + e^{-Az_i + B}] - \sum_{i : y_i = -1} {-Az_i + B}.
01538   // partial J / partial A = \sum_i (-z_i) e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} z_i.
01539   // partial J / partial B = \sum_i        e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} (-1).
01540   double minProj = data[0].Key, maxProj = data[0].Key;
01541   {for (int i = 1; i < data.Len(); i++) {
01542     double zi = data[i].Key; if (zi < minProj) minProj = zi; if (zi > maxProj) maxProj = zi; }}
01543   //const bool dump = false;
01544   A = 1.0; B = 0.5 * (minProj + maxProj);
01545   double bestJ = 0.0, bestA = 0.0, bestB = 0.0, lambda = 1.0;
01546   for (int nIter = 0; nIter < 50; nIter++)
01547   {
01548     double J, JA, JB; TSigmoid::EvaluateFit(data, A, B, J, JA, JB);
01549     if (nIter == 0 || J < bestJ) { bestJ = J; bestA = A; bestB = B; }
01550     // How far should we move?
01551     //if (dump) printf("Iter %2d: A = %.5f, B = %.5f, J = %.5f, partial = (%.5f, %.5f)\n", nIter, A, B, J, JA, JB);
01552     double norm = TMath::Sqr(JA) + TMath::Sqr(JB);
01553     if (norm < 1e-10) break;
01554     const int cl = -1; // step against the gradient (descent direction)
01555 
01556     double Jc = TSigmoid::EvaluateFit(data, A + cl * lambda * JA / norm, B + cl * lambda * JB / norm);
01557     //if (dump) printf("  At lambda = %.5f, Jc = %.5f\n", lambda, Jc);
01558     if (Jc > J) {
01559       while (lambda > 1e-5) {
01560         lambda = 0.5 * lambda;
01561         Jc = TSigmoid::EvaluateFit(data, A + cl * lambda * JA / norm, B + cl * lambda * JB / norm);
01562         //if (dump) printf("  At lambda = %.5f, Jc = %.5f\n", lambda, Jc);
01563       } }
01564     else if (Jc < J) {
01565       while (lambda < 1e5) {
01566         double lambda2 = 2 * lambda;
01567         double Jc2 = TSigmoid::EvaluateFit(data, A + cl * lambda2 * JA / norm, B + cl * lambda2 * JB / norm);
01568         //if (dump) printf("  At lambda = %.5f, Jc = %.5f\n", lambda2, Jc2);
01569         if (Jc2 > Jc) break;
01570         lambda = lambda2; Jc = Jc2; } }
01571     if (Jc >= J) break;
01572     A += cl * lambda * JA / norm; B += cl * lambda * JB / norm;
01573     //if (dump) printf("   Lambda = %.5f, new A = %.5f, new B = %.5f, new J = %.5f\n", lambda, A, B, Jc);
01574   }
01575   A = bestA; B = bestB;
01576 }
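
// Example sketch (not part of the original source): fitting the sigmoid to a
// handful of (projection, label) pairs. The data values are made up; how the
// fitted A and B are read back (public members, operator(), or a getter)
// depends on the declaration in linalg.h, so only the model formula is shown.
static void ExampleSigmoidFit() {
    TFltIntKdV data;
    data.Add(TFltIntKd(-2.0, -1)); data.Add(TFltIntKd(-0.5, -1));
    data.Add(TFltIntKd( 0.7, +1)); data.Add(TFltIntKd( 2.1, +1));
    TSigmoid Sigmoid(data);   // runs the gradient descent defined above
    // With the fitted parameters the class models
    //   P(y = +1 | z) = 1 / (1 + exp(-A*z + B)).
}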
01577 
01579 // Useful stuff (hopefully)
01580 void TLAMisc::SaveCsvTFltV(const TFltV& Vec, TSOut& SOut) {
01581     for (int ValN = 0; ValN < Vec.Len(); ValN++) {
01582         SOut.PutFlt(Vec[ValN]); SOut.PutCh(',');
01583     }
01584     SOut.PutLn();
01585 }
01586 
01587 void TLAMisc::SaveMatlabTFltIntKdV(const TIntFltKdV& SpV, const int& ColN, TSOut& SOut) {
01588     const int Len = SpV.Len();
01589     for (int ValN = 0; ValN < Len; ValN++) {
01590         SOut.PutStrLn(TStr::Fmt("%d %d %g", SpV[ValN].Key+1, ColN+1, SpV[ValN].Dat()));
01591     }
01592 }
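
// Example sketch (not part of the original source): writing a sparse matrix,
// stored as a vector of sparse columns, in the 1-based "row col value"
// triplet format that Matlab's spconvert() accepts. ColSpVV and the output
// file name are illustrative assumptions.
static void ExampleSaveSparseMatrix(const TVec<TIntFltKdV>& ColSpVV) {
    PSOut SOut = TFOut::New("sparse-matrix.txt");
    for (int ColN = 0; ColN < ColSpVV.Len(); ColN++) {
        TLAMisc::SaveMatlabTFltIntKdV(ColSpVV[ColN], ColN, *SOut);
    }
    SOut->Flush();
}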
01593 
01594 void TLAMisc::SaveMatlabTFltV(const TFltV& m, const TStr& FName) {
01595     PSOut out = TFOut::New(FName);
01596     const int RowN = m.Len();
01597     for (int RowId = 0; RowId < RowN; RowId++) {
01598         out->PutStr(TFlt::GetStr(m[RowId], 20, 18));
01599         out->PutCh('\n');
01600     }
01601     out->Flush();
01602 }
01603 
01604 void TLAMisc::SaveMatlabTIntV(const TIntV& m, const TStr& FName) {
01605     PSOut out = TFOut::New(FName);
01606     const int RowN = m.Len();
01607     for (int RowId = 0; RowId < RowN; RowId++) {
01608         out->PutInt(m[RowId]);
01609         out->PutCh('\n');
01610     }
01611     out->Flush();
01612 }
01613 
01614 void TLAMisc::SaveMatlabTFltVVCol(const TFltVV& m, int ColId, const TStr& FName) {
01615     PSOut out = TFOut::New(FName);
01616     const int RowN = m.GetRows();
01617     for (int RowId = 0; RowId < RowN; RowId++) {
01618         out->PutStr(TFlt::GetStr(m(RowId,ColId), 20, 18));
01619         out->PutCh('\n');
01620     }
01621     out->Flush();
01622 }
01623 
01624 
01625 void TLAMisc::SaveMatlabTFltVV(const TFltVV& m, const TStr& FName) {
01626     PSOut out = TFOut::New(FName);
01627     const int RowN = m.GetRows();
01628     const int ColN = m.GetCols();
01629     for (int RowId = 0; RowId < RowN; RowId++) {
01630         for (int ColId = 0; ColId < ColN; ColId++) {
01631             out->PutStr(TFlt::GetStr(m(RowId,ColId), 20, 18));
01632             out->PutCh(' ');
01633         }
01634         out->PutCh('\n');
01635     }
01636     out->Flush();
01637 }
01638 
01639 void TLAMisc::SaveMatlabTFltVVMjrSubMtrx(const TFltVV& m,
01640         int RowN, int ColN, const TStr& FName) {
01641 
01642     PSOut out = TFOut::New(FName);
01643     for (int RowId = 0; RowId < RowN; RowId++) {
01644         for (int ColId = 0; ColId < ColN; ColId++) {
01645             out->PutStr(TFlt::GetStr(m(RowId,ColId), 20, 18)); out->PutCh(' ');
01646         }
01647         out->PutCh('\n');
01648     }
01649     out->Flush();
01650 }
01651 
01652 void TLAMisc::LoadMatlabTFltVV(const TStr& FNm, TVec<TFltV>& ColV) {
01653     PSIn SIn = TFIn::New(FNm);
01654     TILx Lx(SIn, TFSet()|iloRetEoln|iloSigNum|iloExcept);
01655     int Row = 0, Col = 0; ColV.Clr();
01656     Lx.GetSym(syFlt, syEof, syEoln);
01657     //printf("%d x %d\r", Row, ColV.Len());
01658     while (Lx.Sym != syEof) {
01659         if (Lx.Sym == syFlt) {
01660             if (ColV.Len() > Col) {
01661                 IAssert(ColV[Col].Len() == Row);
01662                 ColV[Col].Add(Lx.Flt);
01663             } else {
01664                 IAssert(Row == 0);
01665                 ColV.Add(TFltV::GetV(Lx.Flt));
01666             }
01667             Col++;
01668         } else if (Lx.Sym == syEoln) {
01669             IAssert(Col == ColV.Len());
01670             Col = 0; Row++;
01671             if (Row%100 == 0) {
01672                 //printf("%d x %d\r", Row, ColV.Len());
01673             }
01674         } else {
01675             Fail;
01676         }
01677         Lx.GetSym(syFlt, syEof, syEoln);
01678     }
01679     //printf("\n");
01680     IAssert(Col == ColV.Len() || Col == 0);
01681 }
01682 
01683 void TLAMisc::LoadMatlabTFltVV(const TStr& FNm, TFltVV& MatrixVV) {
01684     TVec<TFltV> ColV; LoadMatlabTFltVV(FNm, ColV);
01685     if (ColV.Empty()) { MatrixVV.Clr(); return; }
01686     const int Rows = ColV[0].Len(), Cols = ColV.Len();
01687     MatrixVV.Gen(Rows, Cols);
01688     for (int RowN = 0; RowN < Rows; RowN++) {
01689         for (int ColN = 0; ColN < Cols; ColN++) {
01690             MatrixVV(RowN, ColN) = ColV[ColN][RowN];
01691         }
01692     }
01693 }
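
// Example sketch (not part of the original source): round-tripping a dense
// matrix through the whitespace-separated text format understood by
// SaveMatlabTFltVV (above) and LoadMatlabTFltVV. The file name is an
// illustrative assumption.
static void ExampleMatrixRoundTrip(const TFltVV& M) {
    TLAMisc::SaveMatlabTFltVV(M, "matrix.dat");
    TFltVV M2; TLAMisc::LoadMatlabTFltVV("matrix.dat", M2);
    IAssert(M2.GetRows() == M.GetRows() && M2.GetCols() == M.GetCols());
}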
01694 
01695 
01696 void TLAMisc::PrintTFltV(const TFltV& Vec, const TStr& VecNm) {
01697     printf("%s = [", VecNm.CStr());
01698     for (int i = 0; i < Vec.Len(); i++) {
01699         printf("%.5f", Vec[i]());
01700         if (i < Vec.Len() - 1) { printf(", "); }
01701     }
01702     printf("]\n");
01703 }
01704 
01705 
01706 void TLAMisc::PrintTFltVV(const TFltVV& A, const TStr& MatrixNm) {
01707     printf("%s = [\n", MatrixNm.CStr());
01708     for (int j = 0; j < A.GetRows(); j++) {
01709         for (int i = 0; i < A.GetCols(); i++) {
01710             printf("%f\t", A.At(j, i).Val); // row j, column i; At(i, j) would transpose the output and can index out of bounds
01711         }
01712         printf("\n");
01713     }
01714     printf("]\n");
01715 }
01716 
01717 void TLAMisc::PrintTIntV(const TIntV& Vec, const TStr& VecNm) {
01718     printf("%s = [", VecNm.CStr());
01719     for (int i = 0; i < Vec.Len(); i++) {
01720         printf("%d", Vec[i]());
01721         if (i < Vec.Len() - 1) printf(", ");
01722     }
01723     printf("]\n");
01724 }
01725 
01726 
01727 void TLAMisc::FillRnd(TFltV& Vec, TRnd& Rnd) {
01728     int Len = Vec.Len();
01729     for (int i = 0; i < Len; i++)
01730         Vec[i] = Rnd.GetNrmDev();
01731 }
01732 
01733 void TLAMisc::FillIdentity(TFltVV& M) {
01734     IAssert(M.GetRows() == M.GetCols());
01735     int Len = M.GetRows();
01736     for (int i = 0; i < Len; i++) {
01737         for (int j = 0; j < Len; j++) M(i,j) = 0.0;
01738         M(i,i) = 1.0;
01739     }
01740 }
01741 
01742 void TLAMisc::FillIdentity(TFltVV& M, const double& Elt) {
01743     IAssert(M.GetRows() == M.GetCols());
01744     int Len = M.GetRows();
01745     for (int i = 0; i < Len; i++) {
01746         for (int j = 0; j < Len; j++) M(i,j) = 0.0;
01747         M(i,i) = Elt;
01748     }
01749 }
01750 
01751 int TLAMisc::SumVec(const TIntV& Vec) {
01752     const int Len = Vec.Len();
01753     int res = 0;
01754     for (int i = 0; i < Len; i++)
01755         res += Vec[i];
01756     return res;
01757 }
01758 
01759 double TLAMisc::SumVec(const TFltV& Vec) {
01760     const int Len = Vec.Len();
01761     double res = 0.0;
01762     for (int i = 0; i < Len; i++)
01763         res += Vec[i];
01764     return res;
01765 }
01766 
01767 void TLAMisc::ToSpVec(const TFltV& Vec, TIntFltKdV& SpVec,
01768         const double& CutSumPrc) {
01769 
01770     // determine minimal element value
01771     IAssert(0.0 <= CutSumPrc && CutSumPrc <= 1.0);
01772     const int Elts = Vec.Len();
01773     double EltSum = 0.0;
01774     for (int EltN = 0; EltN < Elts; EltN++) {
01775         EltSum += TFlt::Abs(Vec[EltN]); }
01776     const double MnEltVal = CutSumPrc * EltSum;
01777     // create sparse vector
01778     SpVec.Clr();
01779     for (int EltN = 0; EltN < Elts; EltN++) {
01780         if (TFlt::Abs(Vec[EltN]) > MnEltVal) {
01781             SpVec.Add(TIntFltKd(EltN, Vec[EltN]));
01782         }
01783     }
01784     SpVec.Pack();
01785 }
01786 
01787 void TLAMisc::ToVec(const TIntFltKdV& SpVec, TFltV& Vec, const int& VecLen) {
01788     Vec.Gen(VecLen); Vec.PutAll(0.0);
01789     int Elts = SpVec.Len();
01790     for (int EltN = 0; EltN < Elts; EltN++) {
01791         if (SpVec[EltN].Key < VecLen) {
01792             Vec[SpVec[EltN].Key] = SpVec[EltN].Dat;
01793         }
01794     }
01795 }
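
// Example sketch (not part of the original source): ToSpVec keeps only the
// elements whose absolute value exceeds CutSumPrc times the sum of absolute
// values. For the vector below and CutSumPrc = 0.1 the threshold is
// 0.1 * (5 + 0.2 + 3) = 0.82, so the 0.2 entry is dropped; ToVec then expands
// the sparse result back into a dense vector of the original length.
static void ExampleSparsify() {
    TFltV Vec; Vec.Add(5.0); Vec.Add(0.2); Vec.Add(3.0);
    TIntFltKdV SpVec;
    TLAMisc::ToSpVec(Vec, SpVec, 0.1);        // keeps indices 0 and 2
    TFltV Dense;
    TLAMisc::ToVec(SpVec, Dense, Vec.Len());  // Dense = [5, 0, 3]
}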