SNAP Library 2.0, User Reference  2013-05-13 16:33:57
SNAP, a general purpose, high performance system for analysis and manipulation of large networks
linalg.cpp
00001 
00002 // Sparse-Column-Matrix
00003 void TSparseColMatrix::PMultiply(const TFltVV& B, int ColId, TFltV& Result) const {
00004     Assert(B.GetRows() >= ColN && Result.Len() >= RowN);
00005     int i, j; TFlt *ResV = Result.BegI();
00006     for (i = 0; i < RowN; i++) ResV[i] = 0.0;
00007     for (j = 0; j < ColN; j++) {
00008         const TIntFltKdV& ColV = ColSpVV[j]; int len = ColV.Len();
00009         for (i = 0; i < len; i++) {
00010             ResV[ColV[i].Key] += ColV[i].Dat * B(j,ColId);
00011         }
00012     }
00013 }
00014 
00015 void TSparseColMatrix::PMultiply(const TFltV& Vec, TFltV& Result) const {
00016     Assert(Vec.Len() >= ColN && Result.Len() >= RowN);
00017     int i, j; TFlt *ResV = Result.BegI();
00018     for (i = 0; i < RowN; i++) ResV[i] = 0.0;
00019     for (j = 0; j < ColN; j++) {
00020         const TIntFltKdV& ColV = ColSpVV[j]; int len = ColV.Len();
00021         for (i = 0; i < len; i++) {
00022             ResV[ColV[i].Key] += ColV[i].Dat * Vec[j];
00023         }
00024     }
00025 }
00026 
00027 void TSparseColMatrix::PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const {
00028     Assert(B.GetRows() >= RowN && Result.Len() >= ColN);
00029     int i, j, len; TFlt *ResV = Result.BegI();
00030     for (j = 0; j < ColN; j++) {
00031         const TIntFltKdV& ColV = ColSpVV[j];
00032         len = ColV.Len(); ResV[j] = 0.0;
00033         for (i = 0; i < len; i++) {
00034             ResV[j] += ColV[i].Dat * B(ColV[i].Key, ColId);
00035         }
00036     }
00037 }
00038 
00039 void TSparseColMatrix::PMultiplyT(const TFltV& Vec, TFltV& Result) const {
00040     Assert(Vec.Len() >= RowN && Result.Len() >= ColN);
00041     int i, j, len; TFlt *VecV = Vec.BegI(), *ResV = Result.BegI();
00042     for (j = 0; j < ColN; j++) {
00043         const TIntFltKdV& ColV = ColSpVV[j];
00044         len = ColV.Len(); ResV[j] = 0.0;
00045         for (i = 0; i < len; i++) {
00046             ResV[j] += ColV[i].Dat * VecV[ColV[i].Key];
00047         }
00048     }
00049 }
00050 
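The four PMultiply/PMultiplyT routines above are the sparse column-matrix back-ends reached through the public TMatrix::Multiply and TMatrix::MultiplyT calls used elsewhere in this file (for example in TSparseSVD::MultiplyATA). A minimal usage sketch, assuming only that interface; the caller pre-sizes the result vectors and the helper name MultiplySketch is illustrative:

void MultiplySketch(const TMatrix& A, const TFltV& x) {
    // x must have Len() == A.GetCols()
    TFltV y(A.GetRows());   // y = A * x
    A.Multiply(x, y);
    TFltV z(A.GetCols());   // z = A' * y
    A.MultiplyT(y, z);
}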
00052 // Sparse-Row-Matrix
00053 TSparseRowMatrix::TSparseRowMatrix(const TStr& MatlabMatrixFNm) {
00054    FILE *F = fopen(MatlabMatrixFNm.CStr(), "rt");  IAssert(F != NULL);
00055    TVec<TTriple<TInt, TInt, TSFlt> > MtxV;
00056    RowN = 0;  ColN = 0;
00057    while (! feof(F)) {
00058      int row=-1, col=-1; float val;
00059      if (fscanf(F, "%d %d %f\n", &row, &col, &val) == 3) {
00060        IAssert(row > 0 && col > 0);
00061        MtxV.Add(TTriple<TInt, TInt, TSFlt>(row, col, val));
00062        RowN = TMath::Mx(RowN, row);
00063        ColN = TMath::Mx(ColN, col);
00064      }
00065    }
00066    fclose(F);
00067    // create matrix
00068    MtxV.Sort();
00069    RowSpVV.Gen(RowN);
00070    int cnt = 0;
00071    for (int row = 1; row <= RowN; row++) {
00072      while (cnt < MtxV.Len() && MtxV[cnt].Val1 == row) {
00073        RowSpVV[row-1].Add(TIntFltKd(MtxV[cnt].Val2-1, MtxV[cnt].Val3()));
00074        cnt++;
00075      }
00076    }
00077 }
00078 
00079 void TSparseRowMatrix::PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const {
00080     Assert(B.GetRows() >= RowN && Result.Len() >= ColN);
00081     for (int i = 0; i < ColN; i++) Result[i] = 0.0;
00082     for (int j = 0; j < RowN; j++) {
00083         const TIntFltKdV& RowV = RowSpVV[j]; int len = RowV.Len();
00084         for (int i = 0; i < len; i++) {
00085             Result[RowV[i].Key] += RowV[i].Dat * B(j,ColId);
00086         }
00087     }
00088 }
00089 
00090 void TSparseRowMatrix::PMultiplyT(const TFltV& Vec, TFltV& Result) const {
00091     Assert(Vec.Len() >= RowN && Result.Len() >= ColN);
00092     for (int i = 0; i < ColN; i++) Result[i] = 0.0;
00093     for (int j = 0; j < RowN; j++) {
00094         const TIntFltKdV& RowV = RowSpVV[j]; int len = RowV.Len();
00095         for (int i = 0; i < len; i++) {
00096             Result[RowV[i].Key] += RowV[i].Dat * Vec[j];
00097         }
00098     }
00099 }
00100 
00101 void TSparseRowMatrix::PMultiply(const TFltVV& B, int ColId, TFltV& Result) const {
00102     Assert(B.GetRows() >= ColN && Result.Len() >= RowN);
00103     for (int j = 0; j < RowN; j++) {
00104         const TIntFltKdV& RowV = RowSpVV[j];
00105         int len = RowV.Len(); Result[j] = 0.0;
00106         for (int i = 0; i < len; i++) {
00107             Result[j] += RowV[i].Dat * B(RowV[i].Key, ColId);
00108         }
00109     }
00110 }
00111 
00112 void TSparseRowMatrix::PMultiply(const TFltV& Vec, TFltV& Result) const {
00113     Assert(Vec.Len() >= ColN && Result.Len() >= RowN);
00114     for (int j = 0; j < RowN; j++) {
00115         const TIntFltKdV& RowV = RowSpVV[j];
00116         int len = RowV.Len(); Result[j] = 0.0;
00117         for (int i = 0; i < len; i++) {
00118             Result[j] += RowV[i].Dat * Vec[RowV[i].Key];
00119         }
00120     }
00121 }
00122 
00124 // Full-Col-Matrix
00125 TFullColMatrix::TFullColMatrix(const TStr& MatlabMatrixFNm): TMatrix() {
00126     TLAMisc::LoadMatlabTFltVV(MatlabMatrixFNm, ColV);
00127     RowN=ColV[0].Len(); ColN=ColV.Len();
00128     for (int i = 0; i < ColN; i++) {
00129         IAssertR(ColV[i].Len() == RowN, TStr::Fmt("%d != %d", ColV[i].Len(), RowN));
00130     }
00131 }
00132 
00133 void TFullColMatrix::PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const {
00134     Assert(B.GetRows() >= RowN && Result.Len() >= ColN);
00135     for (int i = 0; i < ColN; i++) {
00136         Result[i] = TLinAlg::DotProduct(B, ColId, ColV[i]);
00137     }
00138 }
00139 
00140 void TFullColMatrix::PMultiplyT(const TFltV& Vec, TFltV& Result) const {
00141     Assert(Vec.Len() >= RowN && Result.Len() >= ColN);
00142     for (int i = 0; i < ColN; i++) {
00143         Result[i] = TLinAlg::DotProduct(Vec, ColV[i]);
00144     }
00145 }
00146 
00147 void TFullColMatrix::PMultiply(const TFltVV& B, int ColId, TFltV& Result) const {
00148     Assert(B.GetRows() >= ColN && Result.Len() >= RowN);
00149     for (int i = 0; i < RowN; i++) { Result[i] = 0.0; }
00150     for (int i = 0; i < ColN; i++) {
00151         TLinAlg::AddVec(B(i, ColId), ColV[i], Result, Result);
00152     }
00153 }
00154 
00155 void TFullColMatrix::PMultiply(const TFltV& Vec, TFltV& Result) const {
00156     Assert(Vec.Len() >= ColN && Result.Len() >= RowN);
00157     for (int i = 0; i < RowN; i++) { Result[i] = 0.0; }
00158     for (int i = 0; i < ColN; i++) {
00159         TLinAlg::AddVec(Vec[i], ColV[i], Result, Result);
00160     }
00161 }
00162 
00164 // Basic Linear Algebra Operations
00165 double TLinAlg::DotProduct(const TFltV& x, const TFltV& y) {
00166     IAssertR(x.Len() == y.Len(), TStr::Fmt("%d != %d", x.Len(), y.Len()));
00167     double result = 0.0; int Len = x.Len();
00168     for (int i = 0; i < Len; i++)
00169         result += x[i] * y[i];
00170     return result;
00171 }
00172 
00173 double TLinAlg::DotProduct(const TFltVV& X, int ColIdX, const TFltVV& Y, int ColIdY) {
00174     Assert(X.GetRows() == Y.GetRows());
00175     double result = 0.0; const int len = X.GetRows();
00176     for (int i = 0; i < len; i++)
00177         result = result + X(i,ColIdX) * Y(i,ColIdY);
00178     return result;
00179 }
00180 
00181 double TLinAlg::DotProduct(const TFltVV& X, int ColId, const TFltV& Vec) {
00182     Assert(X.GetRows() == Vec.Len());
00183     double result = 0.0; int Len = X.GetRows();
00184     for (int i = 0; i < Len; i++)
00185         result += X(i,ColId) * Vec[i];
00186     return result;
00187 }
00188 
00189 double TLinAlg::DotProduct(const TIntFltKdV& x, const TIntFltKdV& y) {
00190     const int xLen = x.Len(), yLen = y.Len();
00191     double Res = 0.0; int i1 = 0, i2 = 0;
00192     while (i1 < xLen && i2 < yLen) {
00193         if (x[i1].Key < y[i2].Key) i1++;
00194         else if (x[i1].Key > y[i2].Key) i2++;
00195         else { Res += x[i1].Dat * y[i2].Dat;  i1++;  i2++; }
00196     }
00197     return Res;
00198 }
00199 
00200 double TLinAlg::DotProduct(const TFltV& x, const TIntFltKdV& y) {
00201     double Res = 0.0; const int xLen = x.Len(), yLen = y.Len();
00202     for (int i = 0; i < yLen; i++) {
00203         const int key = y[i].Key;
00204         if (key < xLen) Res += y[i].Dat * x[key];
00205     }
00206     return Res;
00207 }
00208 
00209 double TLinAlg::DotProduct(const TFltVV& X, int ColId, const TIntFltKdV& y) {
00210     double Res = 0.0; const int n = X.GetRows(), yLen = y.Len();
00211     for (int i = 0; i < yLen; i++) {
00212         const int key = y[i].Key;
00213         if (key < n) Res += y[i].Dat * X(key,ColId);
00214     }
00215     return Res;
00216 }
00217 
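A short worked example of the dot products defined above, one dense-dense and one dense-sparse; the numbers are illustrative:

TFltV x(3), y(3);
x[0]=1.0; x[1]=2.0; x[2]=3.0;
y[0]=4.0; y[1]=5.0; y[2]=6.0;
double d = TLinAlg::DotProduct(x, y);    // 1*4 + 2*5 + 3*6 = 32
TIntFltKdV sp;                           // sparse vector {0:1.0, 2:3.0}
sp.Add(TIntFltKd(0, 1.0)); sp.Add(TIntFltKd(2, 3.0));
double ds = TLinAlg::DotProduct(x, sp);  // 1*1.0 + 3*3.0 = 10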
00218 void TLinAlg::LinComb(const double& p, const TFltV& x,
00219         const double& q, const TFltV& y, TFltV& z) {
00220 
00221     Assert(x.Len() == y.Len() && y.Len() == z.Len());
00222     const int Len = x.Len();
00223     for (int i = 0; i < Len; i++) {
00224         z[i] = p * x[i] + q * y[i]; }
00225 }
00226 
00227 void TLinAlg::ConvexComb(const double& p, const TFltV& x, const TFltV& y, TFltV& z) {
00228     AssertR(0.0 <= p && p <= 1.0, TFlt::GetStr(p));
00229     LinComb(p, x, 1.0 - p, y, z);
00230 }
00231 
00232 void TLinAlg::AddVec(const double& k, const TFltV& x, const TFltV& y, TFltV& z) {
00233     LinComb(k, x, 1.0, y, z);
00234 }
00235 
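Example of the dense combinations above; note that LinComb asserts that x, y and the output z all have the same length, so z must be allocated by the caller:

TFltV x(2), y(2), z(2);
x[0]=1.0; x[1]=0.0;
y[0]=0.0; y[1]=1.0;
TLinAlg::LinComb(0.5, x, 0.5, y, z);   // z = 0.5*x + 0.5*y = (0.5, 0.5)
TLinAlg::AddVec(2.0, x, y, z);         // z = 2*x + y = (2, 1)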
00236 void TLinAlg::AddVec(const double& k, const TIntFltKdV& x, const TFltV& y, TFltV& z) {
00237     Assert(y.Len() == z.Len());
00238     z = y; // first we set z to be y
00239     // and then we add x to z (==y)
00240     const int xLen = x.Len(), yLen = y.Len();
00241     for (int i = 0; i < xLen; i++) {
00242         const int ii = x[i].Key;
00243         if (ii < yLen) {
00244             z[ii] = k * x[i].Dat + y[ii];
00245         }
00246     }
00247 }
00248 
00249 void TLinAlg::AddVec(const double& k, const TIntFltKdV& x, TFltV& y) {
00250     const int xLen = x.Len(), yLen = y.Len();
00251     for (int i = 0; i < xLen; i++) {
00252         const int ii = x[i].Key;
00253         if (ii < yLen) {
00254             y[ii] += k * x[i].Dat;
00255         }
00256     }
00257 }
00258 
00259 void TLinAlg::AddVec(double k, const TFltVV& X, int ColIdX, TFltVV& Y, int ColIdY){
00260     Assert(X.GetRows() == Y.GetRows());
00261     const int len = Y.GetRows();
00262     for (int i = 0; i < len; i++) {
00263         Y(i,ColIdY) = Y(i,ColIdY) + k * X(i, ColIdX);
00264     }
00265 }
00266 
00267 void TLinAlg::AddVec(double k, const TFltVV& X, int ColId, TFltV& Result){
00268     Assert(X.GetRows() == Result.Len());
00269     const int len = Result.Len();
00270     for (int i = 0; i < len; i++) {
00271         Result[i] = Result[i] + k * X(i, ColId);
00272     }
00273 }
00274 
00275 void TLinAlg::AddVec(const TIntFltKdV& x, const TIntFltKdV& y, TIntFltKdV& z) {
00276         TSparseOpsIntFlt::SparseMerge(x, y, z);
00277 }
00278 
00279 double TLinAlg::SumVec(const TFltV& x) {
00280     const int len = x.Len();
00281     double Res = 0.0;
00282     for (int i = 0; i < len; i++) {
00283         Res += x[i];
00284     }
00285     return Res;
00286 }
00287 
00288 double TLinAlg::SumVec(double k, const TFltV& x, const TFltV& y) {
00289     Assert(x.Len() == y.Len());
00290     const int len = x.Len();
00291     double Res = 0.0;
00292     for (int i = 0; i < len; i++) {
00293         Res += k * x[i] + y[i];
00294     }
00295     return Res;
00296 }
00297 
00298 double TLinAlg::EuclDist2(const TFltV& x, const TFltV& y) {
00299     Assert(x.Len() == y.Len());
00300     const int len = x.Len();
00301     double Res = 0.0;
00302     for (int i = 0; i < len; i++) {
00303         Res += TMath::Sqr(x[i] - y[i]);
00304     }
00305     return Res;
00306 }
00307 
00308 double TLinAlg::EuclDist2(const TFltPr& x, const TFltPr& y) {
00309     return TMath::Sqr(x.Val1 - y.Val1) + TMath::Sqr(x.Val2 - y.Val2);
00310 }
00311 
00312 double TLinAlg::EuclDist(const TFltV& x, const TFltV& y) {
00313     return sqrt(EuclDist2(x, y));
00314 }
00315 
00316 double TLinAlg::EuclDist(const TFltPr& x, const TFltPr& y) {
00317     return sqrt(EuclDist2(x, y));
00318 }
00319 
00320 double TLinAlg::Norm2(const TFltV& x) {
00321     return DotProduct(x, x);
00322 }
00323 
00324 double TLinAlg::Norm(const TFltV& x) {
00325     return sqrt(Norm2(x));
00326 }
00327 
00328 void TLinAlg::Normalize(TFltV& x) {
00329     const double xNorm = Norm(x);
00330     if (xNorm > 0.0) { MultiplyScalar(1/xNorm, x, x); }
00331 }
00332 
00333 double TLinAlg::Norm2(const TIntFltKdV& x) {
00334     double Result = 0;
00335     for (int i = 0; i < x.Len(); i++) {
00336         Result += TMath::Sqr(x[i].Dat);
00337     }
00338     return Result;
00339 }
00340 
00341 double TLinAlg::Norm(const TIntFltKdV& x) {
00342     return sqrt(Norm2(x));
00343 }
00344 
00345 void TLinAlg::Normalize(TIntFltKdV& x) {
00346     MultiplyScalar(1/Norm(x), x, x);
00347 }
00348 
00349 double TLinAlg::Norm2(const TFltVV& X, int ColId) {
00350     return DotProduct(X, ColId, X, ColId);
00351 }
00352 
00353 double TLinAlg::Norm(const TFltVV& X, int ColId) {
00354     return sqrt(Norm2(X, ColId));
00355 }
00356 
00357 double TLinAlg::NormL1(const TFltV& x) {
00358     double norm = 0.0; const int Len = x.Len();
00359     for (int i = 0; i < Len; i++)
00360         norm += TFlt::Abs(x[i]);
00361     return norm;
00362 }
00363 
00364 double TLinAlg::NormL1(double k, const TFltV& x, const TFltV& y) {
00365     Assert(x.Len() == y.Len());
00366     double norm = 0.0; const int len = x.Len();
00367     for (int i = 0; i < len; i++) {
00368         norm += TFlt::Abs(k * x[i] + y[i]);
00369     }
00370     return norm;
00371 }
00372 
00373 double TLinAlg::NormL1(const TIntFltKdV& x) {
00374     double norm = 0.0; const int Len = x.Len();
00375     for (int i = 0; i < Len; i++)
00376         norm += TFlt::Abs(x[i].Dat);
00377     return norm;
00378 }
00379 
00380 void TLinAlg::NormalizeL1(TFltV& x) {
00381     const double xNorm = NormL1(x);
00382     if (xNorm > 0.0) { MultiplyScalar(1/xNorm, x, x); }
00383 }
00384 
00385 void TLinAlg::NormalizeL1(TIntFltKdV& x) {
00386     const double xNorm = NormL1(x);
00387     if (xNorm > 0.0) { MultiplyScalar(1/xNorm, x, x); }
00388 }
00389 
00390 double TLinAlg::NormLinf(const TFltV& x) {
00391     double norm = 0.0; const int Len = x.Len();
00392     for (int i = 0; i < Len; i++)
00393         norm = TFlt::GetMx(TFlt::Abs(x[i]), norm);
00394     return norm;
00395 }
00396 
00397 double TLinAlg::NormLinf(const TIntFltKdV& x) {
00398     double norm = 0.0; const int Len = x.Len();
00399     for (int i = 0; i < Len; i++)
00400         norm = TFlt::GetMx(TFlt::Abs(x[i].Dat), norm);
00401     return norm;
00402 }
00403 
00404 void TLinAlg::NormalizeLinf(TFltV& x) {
00405     const double xNormLinf = NormLinf(x);
00406     if (xNormLinf > 0.0) { MultiplyScalar(1.0/xNormLinf, x, x); }
00407 }
00408 
00409 void TLinAlg::NormalizeLinf(TIntFltKdV& x) {
00410     const double xNormLInf = NormLinf(x);
00411     if (xNormLInf > 0.0) { MultiplyScalar(1.0/xNormLInf, x, x); }
00412 }
00413 
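The norm and normalization helpers above in one short example:

TFltV v(3);
v[0]=3.0; v[1]=0.0; v[2]=4.0;
double n2 = TLinAlg::Norm2(v);     // 25 (squared Euclidean norm)
double n  = TLinAlg::Norm(v);      // 5
double l1 = TLinAlg::NormL1(v);    // 7
double li = TLinAlg::NormLinf(v);  // 4
TLinAlg::Normalize(v);             // v becomes (0.6, 0.0, 0.8)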
00414 void TLinAlg::MultiplyScalar(const double& k, const TFltV& x, TFltV& y) {
00415     Assert(x.Len() == y.Len());
00416     int Len = x.Len();
00417     for (int i = 0; i < Len; i++)
00418         y[i] = k * x[i];
00419 }
00420 
00421 void TLinAlg::MultiplyScalar(const double& k, const TIntFltKdV& x, TIntFltKdV& y) {
00422     Assert(x.Len() == y.Len());
00423     int Len = x.Len();
00424     for (int i = 0; i < Len; i++)
00425         y[i].Dat = k * x[i].Dat;
00426 }
00427 
00428 void TLinAlg::Multiply(const TFltVV& A, const TFltV& x, TFltV& y) {
00429     Assert(A.GetCols() == x.Len() && A.GetRows() == y.Len());
00430     int n = A.GetRows(), m = A.GetCols();
00431     for (int i = 0; i < n; i++) {
00432         y[i] = 0.0;
00433         for (int j = 0; j < m; j++)
00434             y[i] += A(i,j) * x[j];
00435     }
00436 }
00437 
00438 void TLinAlg::Multiply(const TFltVV& A, const TFltV& x, TFltVV& C, int ColId) {
00439     Assert(A.GetCols() == x.Len() && A.GetRows() == C.GetRows());
00440     int n = A.GetRows(), m = A.GetCols();
00441     for (int i = 0; i < n; i++) {
00442         C(i,ColId) = 0.0;
00443         for (int j = 0; j < m; j++)
00444             C(i,ColId) += A(i,j) * x[j];
00445     }
00446 }
00447 
00448 void TLinAlg::Multiply(const TFltVV& A, const TFltVV& B, int ColId, TFltV& y) {
00449     Assert(A.GetCols() == B.GetRows() && A.GetRows() == y.Len());
00450     int n = A.GetRows(), m = A.GetCols();
00451     for (int i = 0; i < n; i++) {
00452         y[i] = 0.0;
00453         for (int j = 0; j < m; j++)
00454             y[i] += A(i,j) * B(j,ColId);
00455     }
00456 }
00457 
00458 void TLinAlg::Multiply(const TFltVV& A, const TFltVV& B, int ColIdB, TFltVV& C, int ColIdC){
00459     Assert(A.GetCols() == B.GetRows() && A.GetRows() == C.GetRows());
00460     int n = A.GetRows(), m = A.GetCols();
00461     for (int i = 0; i < n; i++) {
00462         C(i,ColIdC) = 0.0;
00463         for (int j = 0; j < m; j++)
00464             C(i,ColIdC) += A(i,j) * B(j,ColIdB);
00465     }
00466 }
00467 
00468 void TLinAlg::MultiplyT(const TFltVV& A, const TFltV& x, TFltV& y) {
00469     Assert(A.GetRows() == x.Len() && A.GetCols() == y.Len());
00470     int n = A.GetCols(), m = A.GetRows();
00471     for (int i = 0; i < n; i++) {
00472         y[i] = 0.0;
00473         for (int j = 0; j < m; j++)
00474             y[i] += A(j,i) * x[j];
00475     }
00476 }
00477 
00478 void TLinAlg::Multiply(const TFltVV& A, const TFltVV& B, TFltVV& C) {
00479     Assert(A.GetRows() == C.GetRows() && B.GetCols() == C.GetCols() && A.GetCols() == B.GetRows());
00480 
00481     int n = C.GetRows(), m = C.GetCols(), l = A.GetCols();
00482     for (int i = 0; i < n; i++) {
00483         for (int j = 0; j < m; j++) {
00484             double sum = 0.0;
00485             for (int k = 0; k < l; k++)
00486                 sum += A(i,k)*B(k,j);
00487             C(i,j) = sum;
00488         }
00489     }
00490 }
00491 
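Dense matrix products with the routines above; all output vectors and matrices must be pre-sized by the caller, as the asserts show:

TFltVV A(2,2), B(2,2), C(2,2);
A(0,0)=1.0; A(0,1)=2.0; A(1,0)=3.0; A(1,1)=4.0;
B(0,0)=1.0; B(1,1)=1.0;              // B = identity (entries default to 0)
TLinAlg::Multiply(A, B, C);          // C = A * B = A
TFltV x(2), y(2), z(2);
x[0]=1.0; x[1]=1.0;
TLinAlg::Multiply(A, x, y);          // y = A * x = (3, 7)
TLinAlg::MultiplyT(A, y, z);         // z = A' * y = (24, 34)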
00492 // general matrix multiplication (GEMM)
00493 void TLinAlg::Gemm(const double& Alpha, const TFltVV& A, const TFltVV& B, const double& Beta, 
00494                 const TFltVV& C, TFltVV& D, const int& TransposeFlags) {
00495 
00496         bool tA = (TransposeFlags & GEMM_A_T) == GEMM_A_T;
00497         bool tB = (TransposeFlags & GEMM_B_T) == GEMM_B_T;
00498         bool tC = (TransposeFlags & GEMM_C_T) == GEMM_C_T;
00499 
00500         // setting dimensions
00501         int a_i = tA ? A.GetRows() : A.GetCols();
00502         int a_j = tA ? A.GetCols() : A.GetRows();
00503 
00504         int b_i = tB ? B.GetRows() : B.GetCols();
00505         int b_j = tB ? B.GetCols() : B.GetRows();
00506 
00507         int c_i = tC ? C.GetRows() : C.GetCols();
00508         int c_j = tC ? C.GetCols() : C.GetRows();
00509 
00510         int d_i = D.GetCols();
00511         int d_j = D.GetRows();
00512         
00513         // assertions for dimensions
00514         Assert(a_i == c_j && b_i == c_i && a_i == b_j && c_i == d_i && c_j == d_j);
00515 
00516         double Aij, Bij, Cij;
00517 
00518         // rows
00519         for (int j = 0; j < a_j; j++) {
00520                 // cols
00521                 for (int i = 0; i < a_i; i++) {
00522                         // not optimized for speed (!)
00523                         double sum = 0;
00524                         for (int k = 0; k < a_i; k++) {
00525                                 Aij = tA ? A.At(j, k) : A.At(k, j);
00526                                 Bij = tB ? B.At(k, i) : B.At(i, k);
00527                                 sum += Alpha * Aij * Bij;
00528                         }
00529                         Cij = tC ? C.At(i, j) : C.At(j, i);
00530                         sum += Beta * Cij;
00531                         D.At(i, j) = sum;
00532                 }
00533         }
00534 }
00535 
00536 void TLinAlg::Transpose(const TFltVV& A, TFltVV& B) {
00537         Assert(B.GetRows() == A.GetCols() && B.GetCols() == A.GetRows());
00538         for (int i = 0; i < A.GetCols(); i++) {
00539                 for (int j = 0; j < A.GetRows(); j++) {
00540                         B.At(i, j) = A.At(j, i);
00541                 }
00542         }
00543 }
00544 
00545 void TLinAlg::Inverse(const TFltVV& A, TFltVV& B, const TLinAlgInverseType& DecompType) {
00546         switch (DecompType) {
00547                 case DECOMP_SVD:
00548                         InverseSVD(A, B);
00549         }
00550 }
00551 
00552 void TLinAlg::InverseSVD(const TFltVV& M, TFltVV& B) {
00553         // create temp matrices
00554         TFltVV U, V;
00555         TFltV E;
00556         TSvd SVD;
00557 
00558         U.Gen(M.GetRows(), M.GetRows());        
00559         V.Gen(M.GetCols(), M.GetCols());
00560 
00561         // do the SVD decomposition
00562         SVD.Svd(M, U, E, V);
00563 
00564         // calculate reciprocal values for diagonal matrix = inverse diagonal
00565         for (int i = 0; i < E.Len(); i++) {
00566                 E[i] = 1 / E[i];
00567         }
00568 
00569         // calculate pseudoinverse: M^(-1) = V * E^(-1) * U'
00570         for (int i = 0; i < U.GetCols(); i++) {
00571                 for (int j = 0; j < V.GetRows(); j++) {
00572                         double sum = 0;
00573                         for (int k = 0; k < U.GetCols(); k++) {
00574                                 sum += E[k] * V.At(i, k) * U.At(j, k);
00575                         }
00576                         B.At(i, j) = sum;
00577                 }
00578         }       
00579 }
00580 
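A sketch of the pseudo-inverse computed by InverseSVD above. The output matrix is written in place, so the caller allocates it first (here to the same 3 x 3 shape); a diagonal input keeps the expected result easy to check:

TFltVV M(3,3), MInv(3,3);              // MInv must be pre-allocated
M(0,0)=2.0; M(1,1)=4.0; M(2,2)=8.0;    // diagonal matrix, other entries are 0
TLinAlg::InverseSVD(M, MInv);          // MInv is approximately diag(0.5, 0.25, 0.125)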
00581 void TLinAlg::GS(TVec<TFltV>& Q) {
00582     IAssert(Q.Len() > 0);
00583     int m = Q.Len(); // int n = Q[0].Len();
00584     for (int i = 0; i < m; i++) {
00585         printf("%d\r",i);
00586         for (int j = 0; j < i; j++) {
00587             double r = TLinAlg::DotProduct(Q[i], Q[j]);
00588             TLinAlg::AddVec(-r,Q[j],Q[i],Q[i]);
00589         }
00590         TLinAlg::Normalize(Q[i]);
00591     }
00592     printf("\n");
00593 }
00594 
00595 void TLinAlg::GS(TFltVV& Q) {
00596     int m = Q.GetCols(), n = Q.GetRows();
00597     for (int i = 0; i < m; i++) {
00598         for (int j = 0; j < i; j++) {
00599             double r = TLinAlg::DotProduct(Q, i, Q, j);
00600             TLinAlg::AddVec(-r,Q,j,Q,i);
00601         }
00602         double nr = TLinAlg::Norm(Q,i);
00603         for (int k = 0; k < n; k++)
00604             Q(k,i) = Q(k,i) / nr;
00605     }
00606 }
00607 
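Gram-Schmidt orthonormalization of the columns of a matrix, using GS and the orthogonality check defined above:

TFltVV Q(3, 2);                        // two 3-dimensional columns
Q(0,0)=1.0; Q(1,0)=1.0; Q(2,0)=0.0;
Q(0,1)=1.0; Q(1,1)=0.0; Q(2,1)=1.0;
TLinAlg::GS(Q);                        // columns of Q are now orthonormal
TLinAlg::AssertOrtogonality(Q, 1e-8);  // prints any pair that fails the check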
00608 void TLinAlg::Rotate(const double& OldX, const double& OldY, const double& Angle, double& NewX, double& NewY) {
00609     NewX = OldX*cos(Angle) - OldY*sin(Angle);
00610     NewY = OldX*sin(Angle) + OldY*cos(Angle);
00611 }
00612 
00613 void TLinAlg::AssertOrtogonality(const TVec<TFltV>& Vecs, const double& Threshold) {
00614     int m = Vecs.Len();
00615     for (int i = 0; i < m; i++) {
00616         for (int j = 0; j < i; j++) {
00617             double res = DotProduct(Vecs[i], Vecs[j]);
00618             if (TFlt::Abs(res) > Threshold)
00619                 printf("<%d,%d> = %.5f", i,j,res);
00620         }
00621         double norm = Norm2(Vecs[i]);
00622         if (TFlt::Abs(norm-1) > Threshold)
00623             printf("||%d|| = %.5f", i, norm);
00624     }
00625 }
00626 
00627 void TLinAlg::AssertOrtogonality(const TFltVV& Vecs, const double& Threshold) {
00628     int m = Vecs.GetCols();
00629     for (int i = 0; i < m; i++) {
00630         for (int j = 0; j < i; j++) {
00631             double res = DotProduct(Vecs, i, Vecs, j);
00632             if (TFlt::Abs(res) > Threshold)
00633                 printf("<%d,%d> = %.5f", i, j, res);
00634         }
00635         double norm = Norm2(Vecs, i);
00636         if (TFlt::Abs(norm-1) > Threshold)
00637             printf("||%d|| = %.5f", i, norm);
00638     }
00639     printf("\n");
00640 }
00641 
00643 // Numerical Linear Algebra
00644 double TNumericalStuff::sqr(double a) {
00645   return a == 0.0 ? 0.0 : a*a;
00646 }
00647 
00648 double TNumericalStuff::sign(double a, double b) {
00649   return b >= 0.0 ? fabs(a) : -fabs(a);
00650 }
00651 
00652 void TNumericalStuff::nrerror(const TStr& error_text) {
00653     printf("NR_ERROR: %s", error_text.CStr());
00654     throw new TNSException(error_text);
00655 }
00656 
00657 double TNumericalStuff::pythag(double a, double b) {
00658     double absa = fabs(a), absb = fabs(b);
00659     if (absa > absb)
00660         return absa*sqrt(1.0+sqr(absb/absa));
00661     else
00662         return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+sqr(absa/absb)));
00663 }
00664 
00665 void TNumericalStuff::SymetricToTridiag(TFltVV& a, int n, TFltV& d, TFltV& e) {
00666     int l,k,j,i;
00667     double scale,hh,h,g,f;
00668     for (i=n;i>=2;i--) {
00669         l=i-1;
00670         h=scale=0.0;
00671         if (l > 1) {
00672             for (k=1;k<=l;k++)
00673                 scale += fabs(a(i-1,k-1).Val);
00674             if (scale == 0.0) //Skip transformation.
00675                 e[i]=a(i-1,l-1);
00676             else {
00677                 for (k=1;k<=l;k++) {
00678                     a(i-1,k-1) /= scale; //Use scaled a's for transformation.
00679                     h += a(i-1,k-1)*a(i-1,k-1);
00680                 }
00681                 f=a(i-1,l-1);
00682                 g=(f >= 0.0 ? -sqrt(h) : sqrt(h));
00683                 IAssertR(_isnan(g) == 0, TFlt::GetStr(h));
00684                 e[i]=scale*g;
00685                 h -= f*g; //Now h is equation (11.2.4).
00686                 a(i-1,l-1)=f-g; //Store u in the ith row of a.
00687                 f=0.0;
00688                 for (j=1;j<=l;j++) {
00689                     // Next statement can be omitted if eigenvectors not wanted
00690                     a(j-1,i-1)=a(i-1,j-1)/h; //Store u/H in ith column of a.
00691                     g=0.0; //Form an element of A*u in g.
00692                     for (k=1;k<=j;k++)
00693                         g += a(j-1,k-1)*a(i-1,k-1);
00694                     for (k=j+1;k<=l;k++)
00695                         g += a(k-1,j-1)*a(i-1,k-1);
00696                     e[j]=g/h; //Form element of p in temporarily unused element of e.
00697                     f += e[j]*a(i-1,j-1);
00698                 }
00699                 hh=f/(h+h); //Form K, equation (11.2.11).
00700                 for (j=1;j<=l;j++) { //Form q and store in e overwriting p.
00701                     f=a(i-1,j-1);
00702                     e[j]=g=e[j]-hh*f;
00703                     for (k=1;k<=j;k++) { //Reduce a, equation (11.2.13).
00704                         a(j-1,k-1) -= (f*e[k]+g*a(i-1,k-1));
00705                         Assert(_isnan(a(j-1,k-1)) == 0);
00706                     }
00707                 }
00708             }
00709         } else
00710             e[i]=a(i-1,l-1);
00711         d[i]=h;
00712     }
00713     // Next statement can be omitted if eigenvectors not wanted
00714     d[1]=0.0;
00715     e[1]=0.0;
00716     // Contents of this loop can be omitted if eigenvectors not
00717     // wanted except for statement d[i]=a[i][i];
00718     for (i=1;i<=n;i++) { //Begin accumulation of transformation matrices.
00719         l=i-1;
00720         if (d[i]) { //This block skipped when i=1.
00721             for (j=1;j<=l;j++) {
00722                 g=0.0;
00723                 for (k=1;k<=l;k++) //Use u and u/H stored in a to form P*Q.
00724                     g += a(i-1,k-1)*a(k-1,j-1);
00725                 for (k=1;k<=l;k++) {
00726                     a(k-1,j-1) -= g*a(k-1,i-1);
00727                     Assert(_isnan(a(k-1,j-1)) == 0);
00728                 }
00729             }
00730         }
00731         d[i]=a(i-1,i-1); //This statement remains.
00732         a(i-1,i-1)=1.0; //Reset row and column of a to identity  matrix for next iteration.
00733         for (j=1;j<=l;j++) a(j-1,i-1)=a(i-1,j-1)=0.0;
00734     }
00735 }
00736 
00737 void TNumericalStuff::EigSymmetricTridiag(TFltV& d, TFltV& e, int n, TFltVV& z) {
00738     int m,l,iter,i,k; // N = n+1;
00739     double s,r,p,g,f,dd,c,b;
00740     // Convenient to renumber the elements of e
00741     for (i=2;i<=n;i++) e[i-1]=e[i];
00742     e[n]=0.0;
00743     for (l=1;l<=n;l++) {
00744         iter=0;
00745         do {
00746             // Look for a single small subdiagonal element to split the matrix.
00747             for (m=l;m<=n-1;m++) {
00748         dd=TFlt::Abs(d[m])+TFlt::Abs(d[m+1]);
00749                 if ((double)(TFlt::Abs(e[m])+dd) == dd) break;
00750             }
00751             if (m != l) {
00752                 if (iter++ == 60) nrerror("Too many iterations in EigSymmetricTridiag");
00753                 //Form shift.
00754                 g=(d[l+1]-d[l])/(2.0*e[l]);
00755                 r=pythag(g,1.0);
00756                 //This is dm - ks.
00757                 g=d[m]-d[l]+e[l]/(g+sign(r,g));
00758                 s=c=1.0;
00759                 p=0.0;
00760                 // A plane rotation as in the original QL, followed by
00761                 // Givens rotations to restore tridiagonal form
00762                 for (i=m-1;i>=l;i--) {
00763                     f=s*e[i];
00764                     b=c*e[i];
00765                     e[i+1]=(r=pythag(f,g));
00766                     // Recover from underflow.
00767                     if (r == 0.0) {
00768                         d[i+1] -= p;
00769                         e[m]=0.0;
00770                         break;
00771                     }
00772                     s=f/r;
00773                     c=g/r;
00774                     g=d[i+1]-p;
00775                     r=(d[i]-g)*s+2.0*c*b;
00776                     d[i+1]=g+(p=s*r);
00777                     g=c*r-b;
00778                     // Next loop can be omitted if eigenvectors not wanted
00779                     for (k=0;k<n;k++) {
00780                         f=z(k,i);
00781                         z(k,i)=s*z(k,i-1)+c*f;
00782                         z(k,i-1)=c*z(k,i-1)-s*f;
00783                     }
00784                 }
00785                 if (r == 0.0 && i >= l) continue;
00786                 d[l] -= p;
00787                 e[l]=g;
00788                 e[m]=0.0;
00789             }
00790         } while (m != l);
00791     }
00792 }
00793 
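A minimal example of EigSymmetricTridiag above, for the 2 x 2 symmetric tridiagonal matrix [[2,1],[1,2]] whose eigenvalues are 1 and 3. Note the one-based convention for d and e, matching the way SimpleLanczos below calls this routine; the eigenvector layout described in the trailing comment follows the convention used in Lanczos further down:

const int n = 2;
TFltV d(n+1), e(n+1);
d[1] = 2.0; d[2] = 2.0;      // diagonal in d[1..n]
e[1] = 0.0; e[2] = 1.0;      // subdiagonal in e[2..n]
TFltVV z(n, n);
TLAMisc::FillIdentity(z);    // accumulate eigenvectors starting from the identity
TNumericalStuff::EigSymmetricTridiag(d, e, n, z);
// d[1..n] now holds the eigenvalues (1 and 3, in some order);
// column i-1 of z belongs to the eigenvalue d[i].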
00794 void TNumericalStuff::CholeskyDecomposition(TFltVV& A, TFltV& p) {
00795   Assert(A.GetRows() == A.GetCols());
00796   int n = A.GetRows(); p.Reserve(n,n);
00797 
00798   int i,j,k;
00799   double sum;
00800   for (i=1;i<=n;i++) {
00801     for (j=i;j<=n;j++) {
00802       for (sum=A(i-1,j-1),k=i-1;k>=1;k--) sum -= A(i-1,k-1)*A(j-1,k-1);
00803       if (i == j) {
00804         if (sum <= 0.0)
00805           nrerror("choldc failed");
00806         p[i-1]=sqrt(sum);
00807       } else A(j-1,i-1)=sum/p[i-1];
00808     }
00809   }
00810 }
00811 
00812 void TNumericalStuff::CholeskySolve(const TFltVV& A, const TFltV& p, const TFltV& b, TFltV& x) {
00813   IAssert(A.GetRows() == A.GetCols());
00814   int n = A.GetRows(); x.Reserve(n,n);
00815 
00816   int i,k;
00817   double sum;
00818 
00819   // Solve L * y = b, storing y in x
00820   for (i=1;i<=n;i++) {
00821     for (sum=b[i-1],k=i-1;k>=1;k--)
00822       sum -= A(i-1,k-1)*x[k-1];
00823     x[i-1]=sum/p[i-1];
00824   }
00825 
00826   // Solve L^T * x = y
00827   for (i=n;i>=1;i--) {
00828     for (sum=x[i-1],k=i+1;k<=n;k++)
00829       sum -= A(k-1,i-1)*x[k-1];
00830     x[i-1]=sum/p[i-1];
00831   }
00832 }
00833 
00834 void TNumericalStuff::SolveSymetricSystem(TFltVV& A, const TFltV& b, TFltV& x) {
00835   IAssert(A.GetRows() == A.GetCols());
00836   TFltV p; CholeskyDecomposition(A, p);
00837   CholeskySolve(A, p, b, x);
00838 }
00839 
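Solving a symmetric positive-definite system with SolveSymetricSystem above. The matrix is overwritten by its Cholesky factors, so pass a copy if the original is still needed; the solution vector is sized by the routine itself:

TFltVV A(2,2); TFltV b(2), x;
A(0,0)=4.0; A(0,1)=1.0;
A(1,0)=1.0; A(1,1)=3.0;
b[0]=1.0; b[1]=2.0;
TNumericalStuff::SolveSymetricSystem(A, b, x);   // x now solves A*x = b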
00840 void TNumericalStuff::InverseSubstitute(TFltVV& A, const TFltV& p) {
00841   IAssert(A.GetRows() == A.GetCols());
00842   int n = A.GetRows(); TFltV x(n);
00843 
00844     int i, j, k; double sum;
00845     for (i = 0; i < n; i++) {
00846       // solve L * y = e_i, store in x
00847         // elements from 0 to i-1 are 0.0
00848         for (j = 0; j < i; j++) x[j] = 0.0;
00849         // solve l_ii * y_i = 1 => y_i = 1/l_ii
00850         x[i] = 1/p[i];
00851         // solve y_j for j > i
00852         for (j = i+1; j < n; j++) {
00853             for (sum = 0.0, k = i; k < j; k++)
00854                 sum -= A(j,k) * x[k];
00855             x[j] = sum / p[j];
00856         }
00857 
00858       // solve L'* x = y, store in upper triangle of A
00859         for (j = n-1; j >= i; j--) {
00860             for (sum = x[j], k = j+1; k < n; k++)
00861                 sum -= A(k,j)*x[k];
00862             x[j] = sum/p[j];
00863         }
00864         for (int j = i; j < n; j++) A(i,j) = x[j];
00865     }
00866 
00867 }
00868 
00869 void TNumericalStuff::InverseSymetric(TFltVV& A) {
00870     IAssert(A.GetRows() == A.GetCols());
00871     TFltV p;
00872     // first we calculate the Cholesky decomposition of A
00873     CholeskyDecomposition(A, p);
00874     // then we solve the system A x_i = e_i for i = 1..n
00875     InverseSubstitute(A, p);
00876 }
00877 
00878 void TNumericalStuff::InverseTriagonal(TFltVV& A) {
00879   IAssert(A.GetRows() == A.GetCols());
00880   int n = A.GetRows(); TFltV x(n), p(n);
00881 
00882     int i, j, k; double sum;
00883     // copy upper triangle to lower one as we'll overwrite upper one
00884     for (i = 0; i < n; i++) {
00885         p[i] = A(i,i);
00886         for (j = i+1; j < n; j++)
00887             A(j,i) = A(i,j);
00888     }
00889     // solve
00890     for (i = 0; i < n; i++) {
00891         // solve R * x = e_i, store in x
00892         // elements from i+1 to n-1 are 0.0
00893         for (j = n-1; j > i; j--) x[j] = 0.0;
00894         // solve r_ii * y_i = 1 => y_i = 1/r_ii
00895         x[i] = 1/p[i];
00896         // solve y_j for j < i
00897         for (j = i-1; j >= 0; j--) {
00898             for (sum = 0.0, k = i; k > j; k--)
00899                 sum -= A(k,j) * x[k];
00900             x[j] = sum / p[j];
00901         }
00902         for (int j = 0; j <= i; j++) A(j,i) = x[j];
00903     }
00904 }
00905 
00906 void TNumericalStuff::LUDecomposition(TFltVV& A, TIntV& indx, double& d) {
00907   Assert(A.GetRows() == A.GetCols());
00908   int n = A.GetRows(); indx.Reserve(n,n);
00909 
00910     int i=0,imax=0,j=0,k=0;
00911     double big,dum,sum,temp;
00912     TFltV vv(n); // vv stores the implicit scaling of each row.
00913     d=1.0;       // No row interchanges yet.
00914 
00915     // Loop over rows to get the implicit scaling information.
00916     for (i=1;i<=n;i++) {
00917         big=0.0;
00918         for (j=1;j<=n;j++)
00919             if ((temp=TFlt::Abs(A(i-1,j-1))) > big) big=temp;
00920         if (big == 0.0) nrerror("Singular matrix in routine LUDecomposition");
00921         vv[i-1]=1.0/big;
00922     }
00923 
00924     for (j=1;j<=n;j++) {
00925         for (i=1;i<j;i++) {
00926             sum=A(i-1,j-1);
00927             for (k=1;k<i;k++) sum -= A(i-1,k-1)*A(k-1,j-1);
00928             A(i-1,j-1)=sum;
00929         }
00930         big=0.0; //Initialize for the search for largest pivot element.
00931         for (i=j;i<=n;i++) {
00932             sum=A(i-1,j-1);
00933             for (k=1;k<j;k++)
00934                 sum -= A(i-1,k-1)*A(k-1,j-1);
00935             A(i-1,j-1)=sum;
00936 
00937             //Is the figure of merit for the pivot better than the best so far?
00938             if ((dum=vv[i-1] * TFlt::Abs(sum)) >= big) {
00939                 big=dum;
00940                 imax=i;
00941             }
00942         }
00943 
00944         //Do we need to interchange rows?
00945         if (j != imax) {
00946             //Yes, do so...
00947             for (k=1;k<=n;k++) {
00948                 dum=A(imax-1,k-1);
00949                 A(imax-1,k-1)=A(j-1,k-1); // Tadej: imax-1,k looks wrong
00950                 A(j-1,k-1)=dum;
00951             }
00952             //...and change the parity of d.
00953             d = -d;
00954             vv[imax-1]=vv[j-1]; //Also interchange the scale factor.
00955         }
00956         indx[j-1]=imax;
00957 
00958         //If the pivot element is zero the matrix is singular (at least to the precision of the
00959         //algorithm). For some applications on singular matrices, it is desirable to substitute
00960         //TINY for zero.
00961         if (A(j-1,j-1) == 0.0) A(j-1,j-1)=1e-20;
00962 
00963          //Now, finally, divide by the pivot element.
00964         if (j != n) {
00965             dum=1.0/(A(j-1,j-1));
00966             for (i=j+1;i<=n;i++) A(i-1,j-1) *= dum;
00967         }
00968     } //Go back for the next column in the reduction.
00969 }
00970 
00971 void TNumericalStuff::LUSolve(const TFltVV& A, const TIntV& indx, TFltV& b) {
00972   Assert(A.GetRows() == A.GetCols());
00973   int n = A.GetRows();
00974     int i,ii=0,ip,j;
00975     double sum;
00976     for (i=1;i<=n;i++) {
00977         ip=indx[i-1];
00978         sum=b[ip-1];
00979         b[ip-1]=b[i-1];
00980         if (ii)
00981             for (j=ii;j<=i-1;j++) sum -= A(i-1,j-1)*b[j-1];
00982         else if (sum) { ii=i; }  b[i-1]=sum;
00983     }
00984     for (i=n;i>=1;i--) {
00985         sum=b[i-1];
00986         for (j=i+1;j<=n;j++) sum -= A(i-1,j-1)*b[j-1];
00987         b[i-1]=sum/A(i-1,i-1);
00988     }
00989 }
00990 
00991 void TNumericalStuff::SolveLinearSystem(TFltVV& A, const TFltV& b, TFltV& x) {
00992     TIntV indx; double d;
00993     LUDecomposition(A, indx, d);
00994     x = b;
00995     LUSolve(A, indx, x);
00996 }
00997 
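The same pattern for a general (non-symmetric) system via the LU routines above; again the matrix is factored in place:

TFltVV A(2,2); TFltV b(2), x;
A(0,0)=0.0; A(0,1)=2.0;    // zero pivot at (0,0) exercises the row pivoting
A(1,0)=1.0; A(1,1)=1.0;
b[0]=2.0; b[1]=2.0;
TNumericalStuff::SolveLinearSystem(A, b, x);   // x = (1, 1)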
00999 // Sparse-SVD
01000 void TSparseSVD::MultiplyATA(const TMatrix& Matrix,
01001         const TFltVV& Vec, int ColId, TFltV& Result) {
01002     TFltV tmp(Matrix.GetRows());
01003     // tmp = A * Vec(:,ColId)
01004     Matrix.Multiply(Vec, ColId, tmp);
01005     // Vec = A' * tmp
01006     Matrix.MultiplyT(tmp, Result);
01007 }
01008 
01009 void TSparseSVD::MultiplyATA(const TMatrix& Matrix,
01010         const TFltV& Vec, TFltV& Result) {
01011     TFltV tmp(Matrix.GetRows());
01012     // tmp = A * Vec
01013     Matrix.Multiply(Vec, tmp);
01014     // Vec = A' * tmp
01015     Matrix.MultiplyT(tmp, Result);
01016 }
01017 
01018 void TSparseSVD::OrtoIterSVD(const TMatrix& Matrix,
01019         int NumSV, int IterN, TFltV& SgnValV) {
01020 
01021     int i, j, k;
01022     int N = Matrix.GetCols(), M = NumSV;
01023     TFltVV Q(N, M);
01024 
01025     // Q = rand(N,M)
01026     TRnd rnd;
01027     for (i = 0; i < N; i++) {
01028         for (j = 0; j < M; j++)
01029             Q(i,j) = rnd.GetUniDev();
01030     }
01031 
01032     TFltV tmp(N);
01033     for (int IterC = 0; IterC < IterN; IterC++) {
01034         printf("%d..", IterC);
01035         // Gram-Schmidt
01036         TLinAlg::GS(Q);
01037         // Q = A'*A*Q
01038         for (int ColId = 0; ColId < M; ColId++) {
01039             MultiplyATA(Matrix, Q, ColId, tmp);
01040             for (k = 0; k < N; k++) Q(k,ColId) = tmp[k];
01041         }
01042     }
01043 
01044     SgnValV.Reserve(NumSV,0);
01045     for (i = 0; i < NumSV; i++)
01046         SgnValV.Add(sqrt(TLinAlg::Norm(Q,i)));
01047     TLinAlg::GS(Q);
01048 }
01049 
01050 void TSparseSVD::SimpleLanczos(const TMatrix& Matrix,
01051         const int& NumEig, TFltV& EigValV,
01052         const bool& DoLocalReortoP, const bool& SvdMatrixProductP) {
01053 
01054     if (SvdMatrixProductP) {
01055         // if this fails, use transposed matrix
01056         IAssert(Matrix.GetRows() >= Matrix.GetCols());
01057     } else {
01058         IAssert(Matrix.GetRows() == Matrix.GetCols());
01059     }
01060 
01061     const int N = Matrix.GetCols(); // size of matrix
01062     TFltV r(N), v0(N), v1(N); // current vector and 2 previous ones
01063     TFltV alpha(NumEig, 0), beta(NumEig, 0); // diagonal and subdiagonal of T
01064 
01065     printf("Calculating %d eigen-values of %d x %d matrix\n", NumEig, N, N);
01066 
01067     // set starting vector
01068     //TRnd Rnd(0);
01069     for (int i = 0; i < N; i++) {
01070         r[i] = 1/sqrt((double)N); // Rnd.GetNrmDev();
01071         v0[i] = v1[i] = 0.0;
01072     }
01073     beta.Add(TLinAlg::Norm(r));
01074 
01075     for (int j = 0; j < NumEig; j++) {
01076         printf("%d\r", j+1);
01077         // v_j -> v_(j-1)
01078         v0 = v1;
01079         // v_j = (1/beta_(j-1)) * r
01080         TLinAlg::MultiplyScalar(1/beta[j], r, v1);
01081         // r = A*v_j
01082         if (SvdMatrixProductP) {
01083             // A = Matrix'*Matrix
01084             MultiplyATA(Matrix, v1, r);
01085         } else {
01086             // A = Matrix
01087             Matrix.Multiply(v1, r);
01088         }
01089         // r = r - beta_(j-1) * v_(j-1)
01090         TLinAlg::AddVec(-beta[j], v0, r, r);
01091         // alpha_j = vj'*r
01092         alpha.Add(TLinAlg::DotProduct(v1, r));
01093         // r = r - v_j * alpha_j
01094         TLinAlg::AddVec(-alpha[j], v1, r, r);
01095         // reorthogonalization if necessary
01096         if (DoLocalReortoP) { } //TODO
01097         // beta_j = ||r||_2
01098         beta.Add(TLinAlg::Norm(r));
01099         // compute approximate eigenvalues of T_j
01100         // test bounds for convergence
01101     }
01102     printf("\n");
01103 
01104     // prepare matrix T
01105     TFltV d(NumEig + 1), e(NumEig + 1);
01106     d[1] = alpha[0]; d[0] = e[0] = e[1] = 0.0;
01107     for (int i = 1; i < NumEig; i++) {
01108         d[i+1] = alpha[i]; e[i+1] = beta[i]; }
01109     // solve eigen problem for the tridiagonal matrix with diag d and subdiag e
01110     TFltVV S(NumEig+1,NumEig+1); // eigen-vectors
01111     TLAMisc::FillIdentity(S); // make it identity
01112     TNumericalStuff::EigSymmetricTridiag(d, e, NumEig, S); // solve
01113     //TLAMisc::PrintTFltV(d, "AllEigV");
01114 
01115     // check convergence
01116     TFltKdV AllEigValV(NumEig, 0);
01117     for (int i = 1; i <= NumEig; i++) {
01118         const double ResidualNorm = TFlt::Abs(S(i-1, NumEig-1) * beta.Last());
01119         if (ResidualNorm < 1e-5)
01120             AllEigValV.Add(TFltKd(TFlt::Abs(d[i]), d[i]));
01121     }
01122 
01123     // prepare results
01124     AllEigValV.Sort(false); EigValV.Gen(NumEig, 0);
01125     for (int i = 0; i < AllEigValV.Len(); i++) {
01126         if (i == 0 || (TFlt::Abs(AllEigValV[i].Dat/AllEigValV[i-1].Dat) < 0.9999))
01127             EigValV.Add(AllEigValV[i].Dat);
01128     }
01129 }
01130 
01131 void TSparseSVD::Lanczos(const TMatrix& Matrix, int NumEig,
01132         int Iters, const TSpSVDReOrtoType& ReOrtoType,
01133         TFltV& EigValV, TFltVV& EigVecVV, const bool& SvdMatrixProductP) {
01134 
01135     if (SvdMatrixProductP) {
01136         // if this fails, use transposed matrix
01137         IAssert(Matrix.GetRows() >= Matrix.GetCols());
01138     } else {
01139         IAssert(Matrix.GetRows() == Matrix.GetCols());
01140     }
01141   IAssertR(NumEig <= Iters, TStr::Fmt("%d <= %d", NumEig, Iters));
01142 
01143     //if (ReOrtoType == ssotFull) printf("Full reortogonalization\n");
01144     int i, N = Matrix.GetCols(), K; // K - current dimension of T
01145     double t = 0.0, eps = 1e-6; // t - 1-norm of T
01146 
01147     // sequence of Ritz vectors
01148     TFltVV Q(N, Iters);
01149     double tmp = 1/sqrt((double)N);
01150     for (i = 0; i < N; i++)
01151         Q(i,0) = tmp;
01152     // converged Ritz vectors
01153     TVec<TFltV> ConvgQV(Iters);
01154     TIntV CountConvgV(Iters);
01155     for (i = 0; i < Iters; i++) CountConvgV[i] = 0;
01156     // const int ConvgTreshold = 50;
01157 
01158     //diagonal and subdiagonal of T
01159     TFltV d(Iters+1), e(Iters+1);
01160     //eigenvectors of T
01161     //TFltVV V;
01162     TFltVV V(Iters, Iters);
01163 
01164     // z - current Lanczos's vector
01165     TFltV z(N), bb(Iters), aa(Iters), y(N);
01166     //printf("svd(%d,%d)...\n", NumEig, Iters);
01167 
01168     if (SvdMatrixProductP) {
01169         // A = Matrix'*Matrix
01170         MultiplyATA(Matrix, Q, 0, z);
01171     } else {
01172         // A = Matrix
01173         Matrix.Multiply(Q, 0, z);
01174     }
01175 
01176     for (int j = 0; j < (Iters-1); j++) {
01177         //printf("%d..\r",j+2);
01178 
01179         //calculates (j+1)-th Lanczos's vector
01180         // aa[j] = <Q(:,j), z>
01181         aa[j] = TLinAlg::DotProduct(Q, j, z);
01182         //printf(" %g -- ", aa[j].Val); //HACK
01183 
01184         TLinAlg::AddVec(-aa[j], Q, j, z);
01185         if (j > 0) {
01186             // z := -aa[j] * Q(:,j) + z
01187             TLinAlg::AddVec(-bb[j-1], Q, j-1, z);
01188 
01189             // reorthogonalization
01190             if (ReOrtoType == ssotSelective || ReOrtoType == ssotFull) {
01191                 for (i = 0; i <= j; i++) {
01192                     // if the i-th vector has converged, then we have to orthogonalize against it
01193                     if ((ReOrtoType == ssotFull) ||
01194                         (bb[j-1] * TFlt::Abs(V(K-1, i)) < eps * t)) {
01195 
01196                         ConvgQV[i].Reserve(N,N); CountConvgV[i]++;
01197                         TFltV& vec = ConvgQV[i];
01198                         //vec = Q * V(:,i)
01199                         for (int k = 0; k < N; k++) {
01200                             vec[k] = 0.0;
01201                             for (int l = 0; l < K; l++)
01202                                 vec[k] += Q(k,l) * V(l,i);
01203                         }
01204                         TLinAlg::AddVec(-TLinAlg::DotProduct(ConvgQV[i], z), ConvgQV[i], z ,z);
01205                     }
01206                 }
01207             }
01208         }
01209 
01210         //adds (j+1)-th Lanczos's vector to Q
01211         bb[j] = TLinAlg::Norm(z);
01212     if (!(bb[j] > 1e-10)) {
01213       printf("Rank of matrix is only %d\n", j+2);
01214       printf("Last singular value is %g\n", bb[j].Val);
01215       break;
01216     }
01217         for (i = 0; i < N; i++)
01218             Q(i, j+1) = z[i] / bb[j];
01219 
01220         // next Lanczos vector
01221         if (SvdMatrixProductP) {
01222             // A = Matrix'*Matrix
01223             MultiplyATA(Matrix, Q, j+1, z);
01224         } else {
01225             // A = Matrix
01226             Matrix.Multiply(Q, j+1, z);
01227         }
01228 
01229         //calculate T (K x K matrix)
01230         K = j + 2;
01231         // calculate diagonal
01232         for (i = 1; i < K; i++) d[i] = aa[i-1];
01233         d[K] = TLinAlg::DotProduct(Q, K-1, z);
01234         // calculate subdiagonal
01235         e[1] = 0.0;
01236         for (i = 2; i <= K; i++) e[i] = bb[i-2];
01237 
01238         //calculate 1-norm of T
01239         t = TFlt::GetMx(TFlt::Abs(d[1]) + TFlt::Abs(e[2]), TFlt::Abs(e[K]) + TFlt::Abs(d[K]));
01240         for (i = 2; i < K; i++)
01241             t = TFlt::GetMx(t, TFlt::Abs(e[i]) + TFlt::Abs(d[i]) + TFlt::Abs(e[i+1]));
01242 
01243         //set V to identity matrix
01244         //V.Gen(K,K);
01245         for (i = 0; i < K; i++) {
01246             for (int k = 0; k < K; k++)
01247                 V(i,k) = 0.0;
01248             V(i,i) = 1.0;
01249         }
01250 
01251         //eigenvectors of T
01252         TNumericalStuff::EigSymmetricTridiag(d, e, K, V);
01253     }//for
01254     //printf("\n");
01255 
01256     // Finds NumEig largest eigen values
01257     TFltIntKdV sv(K);
01258     for (i = 0; i < K; i++) {
01259         sv[i].Key = TFlt::Abs(d[i+1]);
01260         sv[i].Dat = i;
01261     }
01262     sv.Sort(false);
01263 
01264     TFltV uu(Matrix.GetRows());
01265     const int FinalNumEig = TInt::GetMn(NumEig, K);
01266     EigValV.Reserve(FinalNumEig,0);
01267     EigVecVV.Gen(Matrix.GetCols(), FinalNumEig);
01268     for (i = 0; i < FinalNumEig; i++) {
01269         //printf("s[%d] = %20.15f\r", i, sv[i].Key.Val);
01270         int ii = sv[i].Dat;
01271         double sigma = d[ii+1].Val;
01272         // calculate singular value
01273         EigValV.Add(sigma);
01274         // calculate i-th right singular vector ( V := Q * W )
01275         TLinAlg::Multiply(Q, V, ii, EigVecVV, i);
01276     }
01277     //printf("done                           \n");
01278 }
01279 
01280 void TSparseSVD::Lanczos2(const TMatrix& Matrix, int MaxNumEig,
01281     int MaxSecs, const TSpSVDReOrtoType& ReOrtoType,
01282     TFltV& EigValV, TFltVV& EigVecVV, const bool& SvdMatrixProductP) {
01283 
01284   if (SvdMatrixProductP) {
01285     // if this fails, use transposed matrix
01286     IAssert(Matrix.GetRows() >= Matrix.GetCols());
01287   } else {
01288     IAssert(Matrix.GetRows() == Matrix.GetCols());
01289   }
01290   //IAssertR(NumEig <= Iters, TStr::Fmt("%d <= %d", NumEig, Iters));
01291 
01292   //if (ReOrtoType == ssotFull) printf("Full reortogonalization\n");
01293   int i, N = Matrix.GetCols(), K; // K - current dimension of T
01294   double t = 0.0, eps = 1e-6; // t - 1-norm of T
01295 
01296   // sequence of Ritz vectors
01297   TFltVV Q(N, MaxNumEig);
01298   double tmp = 1/sqrt((double)N);
01299   for (i = 0; i < N; i++)
01300       Q(i,0) = tmp;
01301   // converged Ritz vectors
01302   TVec<TFltV> ConvgQV(MaxNumEig);
01303   TIntV CountConvgV(MaxNumEig);
01304   for (i = 0; i < MaxNumEig; i++) CountConvgV[i] = 0;
01305   // const int ConvgTreshold = 50;
01306 
01307   //diagonal and subdiagonal of T
01308   TFltV d(MaxNumEig+1), e(MaxNumEig+1);
01309   //eigenvectors of T
01310   //TFltVV V;
01311   TFltVV V(MaxNumEig, MaxNumEig);
01312 
01313   // z - current Lanczos's vector
01314   TFltV z(N), bb(MaxNumEig), aa(MaxNumEig), y(N);
01315   //printf("svd(%d,%d)...\n", NumEig, Iters);
01316 
01317   if (SvdMatrixProductP) {
01318       // A = Matrix'*Matrix
01319       MultiplyATA(Matrix, Q, 0, z);
01320   } else {
01321       // A = Matrix
01322       Matrix.Multiply(Q, 0, z);
01323   }
01324   TExeTm ExeTm;
01325   for (int j = 0; j < (MaxNumEig-1); j++) {
01326     printf("%d [%s]..\r",j+2, ExeTm.GetStr());
01327     if (ExeTm.GetSecs() > MaxSecs) { break; }
01328 
01329     //calculates (j+1)-th Lanczos's vector
01330     // aa[j] = <Q(:,j), z>
01331     aa[j] = TLinAlg::DotProduct(Q, j, z);
01332     //printf(" %g -- ", aa[j].Val); //HACK
01333 
01334     TLinAlg::AddVec(-aa[j], Q, j, z);
01335     if (j > 0) {
01336         // z := -aa[j] * Q(:,j) + z
01337         TLinAlg::AddVec(-bb[j-1], Q, j-1, z);
01338 
01339         // reorthogonalization
01340         if (ReOrtoType == ssotSelective || ReOrtoType == ssotFull) {
01341             for (i = 0; i <= j; i++) {
01342                 // if the i-th vector has converged, then we have to orthogonalize against it
01343                 if ((ReOrtoType == ssotFull) ||
01344                     (bb[j-1] * TFlt::Abs(V(K-1, i)) < eps * t)) {
01345 
01346                     ConvgQV[i].Reserve(N,N); CountConvgV[i]++;
01347                     TFltV& vec = ConvgQV[i];
01348                     //vec = Q * V(:,i)
01349                     for (int k = 0; k < N; k++) {
01350                         vec[k] = 0.0;
01351                         for (int l = 0; l < K; l++)
01352                             vec[k] += Q(k,l) * V(l,i);
01353                     }
01354                     TLinAlg::AddVec(-TLinAlg::DotProduct(ConvgQV[i], z), ConvgQV[i], z ,z);
01355                 }
01356             }
01357         }
01358     }
01359 
01360     //adds (j+1)-th Lanczos's vector to Q
01361     bb[j] = TLinAlg::Norm(z);
01362     if (!(bb[j] > 1e-10)) {
01363       printf("Rank of matrix is only %d\n", j+2);
01364       printf("Last singular value is %g\n", bb[j].Val);
01365       break;
01366     }
01367     for (i = 0; i < N; i++)
01368         Q(i, j+1) = z[i] / bb[j];
01369 
01370     // next Lanczos vector
01371     if (SvdMatrixProductP) {
01372         // A = Matrix'*Matrix
01373         MultiplyATA(Matrix, Q, j+1, z);
01374     } else {
01375         // A = Matrix
01376         Matrix.Multiply(Q, j+1, z);
01377     }
01378 
01379     //calculate T (K x K matrix)
01380     K = j + 2;
01381     // calculate diagonal
01382     for (i = 1; i < K; i++) d[i] = aa[i-1];
01383     d[K] = TLinAlg::DotProduct(Q, K-1, z);
01384     // calculate subdiagonal
01385     e[1] = 0.0;
01386     for (i = 2; i <= K; i++) e[i] = bb[i-2];
01387 
01388     //calculate 1-norm of T
01389     t = TFlt::GetMx(TFlt::Abs(d[1]) + TFlt::Abs(e[2]), TFlt::Abs(e[K]) + TFlt::Abs(d[K]));
01390     for (i = 2; i < K; i++)
01391         t = TFlt::GetMx(t, TFlt::Abs(e[i]) + TFlt::Abs(d[i]) + TFlt::Abs(e[i+1]));
01392 
01393     //set V to identity matrix
01394     //V.Gen(K,K);
01395     for (i = 0; i < K; i++) {
01396         for (int k = 0; k < K; k++)
01397             V(i,k) = 0.0;
01398         V(i,i) = 1.0;
01399     }
01400 
01401     //eigenvectors of T
01402     TNumericalStuff::EigSymmetricTridiag(d, e, K, V);
01403   }//for
01404   printf("... calc %d.", K);
01405   // Finds NumEig largest eigen values
01406   TFltIntKdV sv(K);
01407   for (i = 0; i < K; i++) {
01408     sv[i].Key = TFlt::Abs(d[i+1]);
01409     sv[i].Dat = i;
01410   }
01411   sv.Sort(false);
01412 
01413   TFltV uu(Matrix.GetRows());
01414   const int FinalNumEig = K; //TInt::GetMn(NumEig, K);
01415   EigValV.Reserve(FinalNumEig,0);
01416   EigVecVV.Gen(Matrix.GetCols(), FinalNumEig);
01417   for (i = 0; i < FinalNumEig; i++) {
01418     //printf("s[%d] = %20.15f\r", i, sv[i].Key.Val);
01419     int ii = sv[i].Dat;
01420     double sigma = d[ii+1].Val;
01421     // calculate singular value
01422     EigValV.Add(sigma);
01423     // calculate i-th right singular vector ( V := Q * W )
01424     TLinAlg::Multiply(Q, V, ii, EigVecVV, i);
01425   }
01426   printf("  done\n");
01427 }
01428 
01429 
01430 void TSparseSVD::SimpleLanczosSVD(const TMatrix& Matrix,
01431         const int& CalcSV, TFltV& SngValV, const bool& DoLocalReorto) {
01432 
01433     SimpleLanczos(Matrix, CalcSV, SngValV, DoLocalReorto, true);
01434     for (int SngValN = 0; SngValN < SngValV.Len(); SngValN++) {
01435       //IAssert(SngValV[SngValN] >= 0.0);
01436       if (SngValV[SngValN] < 0.0) {
01437         printf("bad sng val: %d %g\n", SngValN, SngValV[SngValN]());
01438         SngValV[SngValN] = 0;
01439       }
01440       SngValV[SngValN] = sqrt(SngValV[SngValN].Val);
01441     }
01442 }
01443 
01444 void TSparseSVD::LanczosSVD(const TMatrix& Matrix, int NumSV,
01445         int Iters, const TSpSVDReOrtoType& ReOrtoType,
01446         TFltV& SgnValV, TFltVV& LeftSgnVecVV, TFltVV& RightSgnVecVV) {
01447 
01448     // solve eigen problem for Matrix'*Matrix
01449     Lanczos(Matrix, NumSV, Iters, ReOrtoType, SgnValV, RightSgnVecVV, true);
01450     // calculate left singular vectors and sqrt singular values
01451     const int FinalNumSV = SgnValV.Len();
01452     LeftSgnVecVV.Gen(Matrix.GetRows(), FinalNumSV);
01453     TFltV LeftSgnVecV(Matrix.GetRows());
01454     for (int i = 0; i < FinalNumSV; i++) {
01455         if (SgnValV[i].Val < 0.0) { SgnValV[i] = 0.0; }
01456         const double SgnVal = sqrt(SgnValV[i]);
01457         SgnValV[i] = SgnVal;
01458         // calculate i-th left singular vector ( U := A * V * S^(-1) )
01459         Matrix.Multiply(RightSgnVecVV, i, LeftSgnVecV);
01460         for (int j = 0; j < LeftSgnVecV.Len(); j++) {
01461             LeftSgnVecVV(j,i) = LeftSgnVecV[j] / SgnVal; }
01462     }
01463     //printf("done                           \n");
01464 }
01465 
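A sketch of computing a truncated SVD with LanczosSVD above. Mtx can be any TMatrix implementation (for example a TSparseColMatrix); its construction is not shown here, and because this path multiplies by Matrix'*Matrix it must have at least as many rows as columns. The helper name TopSingularSketch is illustrative only:

void TopSingularSketch(const TMatrix& Mtx) {
    TFltV SngValV; TFltVV LeftVV, RightVV;
    // 5 singular triplets, 20 Lanczos iterations, full reorthogonalization
    TSparseSVD::LanczosSVD(Mtx, 5, 20, ssotFull, SngValV, LeftVV, RightVV);
    for (int i = 0; i < SngValV.Len(); i++) {
        printf("sigma[%d] = %g\n", i, SngValV[i].Val);
    }
}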
01466 void TSparseSVD::Project(const TIntFltKdV& Vec, const TFltVV& U, TFltV& ProjVec) {
01467     const int m = U.GetCols(); // number of columns
01468 
01469     ProjVec.Gen(m, 0);
01470     for (int j = 0; j < m; j++) {
01471         double x = 0.0;
01472         for (int i = 0; i < Vec.Len(); i++)
01473             x += U(Vec[i].Key, j) * Vec[i].Dat;
01474         ProjVec.Add(x);
01475     }
01476 }
01477 
01479 // Sigmoid
01480 double TSigmoid::EvaluateFit(const TFltIntKdV& data, const double A, const double B)
01481 {
01482   double J = 0.0;
01483   for (int i = 0; i < data.Len(); i++)
01484   {
01485     double zi = data[i].Key; int yi = data[i].Dat;
01486     double e = exp(-A * zi + B);
01487     double denum = 1.0 + e;
01488     double prob = (yi > 0) ? (1.0 / denum) : (e / denum);
01489     J -= log(prob < 1e-20 ? 1e-20 : prob);
01490   }
01491   return J;
01492 }
01493 
01494 void TSigmoid::EvaluateFit(const TFltIntKdV& data, const double A, const double B, double& J, double& JA, double& JB)
01495 {
01496   //               J(A, B) = \sum_{i : y_i = 1} ln [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} [ln [1 + e^{-Az_i + B}] - {-Az_i + B}]
01497   //                       = \sum_i ln [1 + e^{-Az_i + B}] - \sum_{i : y_i = -1} {-Az_i + B}.
01498   // partial J / partial A = \sum_i (-z_i) e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} z_i.
01499   // partial J / partial B = \sum_i        e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} (-1).
01500   J = 0.0; double sum_all_PyNeg = 0.0, sum_all_ziPyNeg = 0.0, sum_yNeg_zi = 0.0, sum_yNeg_1 = 0.0;
01501   for (int i = 0; i < data.Len(); i++)
01502   {
01503     double zi = data[i].Key; int yi = data[i].Dat;
01504     double e = exp(-A * zi + B);
01505     double denum = 1.0 + e;
01506     double prob = (yi > 0) ? (1.0 / denum) : (e / denum);
01507     J -= log(prob < 1e-20 ? 1e-20 : prob);
01508     sum_all_PyNeg += e / denum;
01509     sum_all_ziPyNeg += zi * e / denum;
01510     if (yi < 0) { sum_yNeg_zi += zi; sum_yNeg_1 += 1; }
01511   }
01512   JA = -sum_all_ziPyNeg +     sum_yNeg_zi;
01513   JB =  sum_all_PyNeg   -     sum_yNeg_1;
01514 }
01515 
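The closed-form gradient above follows directly from the expression for J. A standalone sketch in plain C++ (no SNAP types; the helper SigJ is hypothetical) that checks (JA, JB) against central finite differences of J on a toy data set:

    #include <math.h>
    #include <stdio.h>
    // SigJ re-implements J(A,B) exactly as in the comment above.
    static double SigJ(const double* z, const int* y, int n, double A, double B) {
      double J = 0.0;
      for (int i = 0; i < n; i++) {
        double e = exp(-A * z[i] + B), denum = 1.0 + e;
        double prob = (y[i] > 0) ? (1.0 / denum) : (e / denum);
        J -= log(prob < 1e-20 ? 1e-20 : prob);
      }
      return J;
    }
    int main() {
      double z[3] = {-1.0, 0.5, 2.0}; int y[3] = {-1, 1, 1};
      double A = 1.0, B = 0.2, h = 1e-6;
      // analytic gradient, mirroring the sums accumulated by EvaluateFit
      double PyNeg = 0, ziPyNeg = 0, negZi = 0, negCnt = 0;
      for (int i = 0; i < 3; i++) {
        double e = exp(-A * z[i] + B), denum = 1.0 + e;
        PyNeg += e / denum; ziPyNeg += z[i] * e / denum;
        if (y[i] < 0) { negZi += z[i]; negCnt += 1; }
      }
      double JA = -ziPyNeg + negZi, JB = PyNeg - negCnt;
      // central finite differences of J
      double NumJA = (SigJ(z, y, 3, A + h, B) - SigJ(z, y, 3, A - h, B)) / (2 * h);
      double NumJB = (SigJ(z, y, 3, A, B + h) - SigJ(z, y, 3, A, B - h)) / (2 * h);
      printf("JA=%g NumJA=%g  JB=%g NumJB=%g\n", JA, NumJA, JB, NumJB);
      return 0;
    }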
01516 void TSigmoid::EvaluateFit(const TFltIntKdV& data, const double A, const double B, const double U,
01517                            const double V, const double lambda, double& J, double& JJ, double& JJJ)
01518 {
01519   // Let E_i = e^{-(A + lambda U) z_i + (B + lambda V)}.  Then we have
01520   // J(lambda) = \sum_i ln [1 + E_i] - \sum_{i : y_i = -1} {-(A + lambda U)z_i + (B + lambda V)}.
01521   // J'(lambda) = \sum_i (V - U z_i) E_i / [1 + E_i] - \sum_{i : y_i = -1} (V - U z_i).
01522   //            = \sum_i (V - U z_i) [1 - 1 / [1 + E_i]] - \sum_{i : y_i = -1} (V - U z_i).
01523   // J"(lambda) = \sum_i (V - U z_i)^2 E_i / [1 + E_i]^2.
01524   J = 0.0; JJ = 0.0; JJJ = 0.0;
01525   for (int i = 0; i < data.Len(); i++)
01526   {
01527     double zi = data[i].Key; int yi = data[i].Dat;
01528     double e = exp(-A * zi + B);
01529     double denum = 1.0 + e;
01530     double prob = (yi > 0) ? (1.0 / denum) : (e / denum);
01531     J -= log(prob < 1e-20 ? 1e-20 : prob);
01532     double VU = V - U * zi;
01533     JJ += VU * (e / denum); if (yi < 0) JJ -= VU;
01534     JJJ += VU * VU * e / denum / denum;
01535   }
01536 }
01537 
01538 TSigmoid::TSigmoid(const TFltIntKdV& data) {
01539   // Let z_i be the projection of the i'th training example, and y_i \in {-1, +1} be its class label.
01540   // Our sigmoid is: P(Y = y | Z = z) = 1 / [1 + e^{-Az + B}]
01541   // and we want to maximize \prod_i P(Y = y_i | Z = z_i)
01542   //                       = \prod_{i : y_i = 1} 1 / [1 + e^{-Az_i + B}]  \prod_{i : y_i = -1} e^{-Az_i + B} / [1 + e^{-Az_i + B}]
01543   // or minimize its negative logarithm,
01544   //               J(A, B) = \sum_{i : y_i = 1} ln [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} [ln [1 + e^{-Az_i + B}] - {-Az_i + B}]
01545   //                       = \sum_i ln [1 + e^{-Az_i + B}] - \sum_{i : y_i = -1} {-Az_i + B}.
01546   // partial J / partial A = \sum_i (-z_i) e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} z_i.
01547   // partial J / partial B = \sum_i        e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} (-1).
01548   double minProj = data[0].Key, maxProj = data[0].Key;
01549   {for (int i = 1; i < data.Len(); i++) {
01550     double zi = data[i].Key; if (zi < minProj) minProj = zi; if (zi > maxProj) maxProj = zi; }}
01551   //const bool dump = false;
01552   A = 1.0; B = 0.5 * (minProj + maxProj);
01553   double bestJ = 0.0, bestA = 0.0, bestB = 0.0, lambda = 1.0;
01554   for (int nIter = 0; nIter < 50; nIter++)
01555   {
01556     double J, JA, JB; TSigmoid::EvaluateFit(data, A, B, J, JA, JB);
01557     if (nIter == 0 || J < bestJ) { bestJ = J; bestA = A; bestB = B; }
01558     // How far should we move?
01559     //if (dump) printf("Iter %2d: A = %.5f, B = %.5f, J = %.5f, partial = (%.5f, %.5f)\n", nIter, A, B, J, JA, JB);
01560     double norm = TMath::Sqr(JA) + TMath::Sqr(JB);
01561     if (norm < 1e-10) break;
01562     const int cl = -1; // descent direction: step against the gradient (JA, JB)
01563 
01564     double Jc = TSigmoid::EvaluateFit(data, A + cl * lambda * JA / norm, B + cl * lambda * JB / norm);
01565     //if (dump) printf("  At lambda = %.5f, Jc = %.5f\n", lambda, Jc);
01566     if (Jc > J) {
01567       while (lambda > 1e-5) {
01568         lambda = 0.5 * lambda;
01569         Jc = TSigmoid::EvaluateFit(data, A + cl * lambda * JA / norm, B + cl * lambda * JB / norm);
01570         //if (dump) printf("  At lambda = %.5f, Jc = %.5f\n", lambda, Jc);
01571       } }
01572     else if (Jc < J) {
01573       while (lambda < 1e5) {
01574         double lambda2 = 2 * lambda;
01575         double Jc2 = TSigmoid::EvaluateFit(data, A + cl * lambda2 * JA / norm, B + cl * lambda2 * JB / norm);
01576         //if (dump) printf("  At lambda = %.5f, Jc = %.5f\n", lambda2, Jc2);
01577         if (Jc2 > Jc) break;
01578         lambda = lambda2; Jc = Jc2; } }
01579     if (Jc >= J) break;
01580     A += cl * lambda * JA / norm; B += cl * lambda * JB / norm;
01581     //if (dump) printf("   Lambda = %.5f, new A = %.5f, new B = %.5f, new J = %.5f\n", lambda, A, B, Jc);
01582   }
01583   A = bestA; B = bestB;
01584 }
01585 
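The constructor minimizes J(A, B) by gradient descent with an adaptive step length: the step lambda is shrunk when the trial point worsens J and grown while it keeps improving, and the best (A, B) seen over the 50 iterations is kept. A minimal usage sketch with illustrative (projection, label) pairs:

    // Sketch: fit a sigmoid to projections z_i with class labels y_i in {-1, +1}.
    TFltIntKdV Data;
    Data.Add(TFltIntKd(-2.0, -1)); Data.Add(TFltIntKd(-0.4, -1));
    Data.Add(TFltIntKd( 0.7,  1)); Data.Add(TFltIntKd( 2.1,  1));
    TSigmoid Sigmoid(Data);
    // the fitted model is P(Y = +1 | z) = 1 / (1 + e^{-A z + B})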
01587 // Useful stuff (hopefully)
01588 void TLAMisc::SaveCsvTFltV(const TFltV& Vec, TSOut& SOut) {
01589     for (int ValN = 0; ValN < Vec.Len(); ValN++) {
01590         SOut.PutFlt(Vec[ValN]); SOut.PutCh(',');
01591     }
01592     SOut.PutLn();
01593 }
01594 
01595 void TLAMisc::SaveMatlabTFltIntKdV(const TIntFltKdV& SpV, const int& ColN, TSOut& SOut) {
01596     const int Len = SpV.Len();
01597     for (int ValN = 0; ValN < Len; ValN++) {
01598         SOut.PutStrLn(TStr::Fmt("%d %d %g", SpV[ValN].Key+1, ColN+1, SpV[ValN].Dat()));
01599     }
01600 }
01601 
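SaveMatlabTFltIntKdV writes one sparse column as 1-based "row column value" triplets, one per line, the triplet format that Matlab's spconvert expects. A sketch writing two sparse columns into one stream; the file name is illustrative:

    // Sketch: dump two sparse columns in triplet form.
    PSOut SOut = TFOut::New("sparse.txt");
    TIntFltKdV Col0, Col1;
    Col0.Add(TIntFltKd(0, 1.5)); Col0.Add(TIntFltKd(3, -2.0));
    Col1.Add(TIntFltKd(2, 0.25));
    TLAMisc::SaveMatlabTFltIntKdV(Col0, 0, *SOut);  // writes "1 1 1.5" and "4 1 -2"
    TLAMisc::SaveMatlabTFltIntKdV(Col1, 1, *SOut);  // writes "3 2 0.25"
    SOut->Flush();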
01602 void TLAMisc::SaveMatlabTFltV(const TFltV& m, const TStr& FName) {
01603     PSOut out = TFOut::New(FName);
01604     const int RowN = m.Len();
01605     for (int RowId = 0; RowId < RowN; RowId++) {
01606         out->PutStr(TFlt::GetStr(m[RowId], 20, 18));
01607         out->PutCh('\n');
01608     }
01609     out->Flush();
01610 }
01611 
01612 void TLAMisc::SaveMatlabTIntV(const TIntV& m, const TStr& FName) {
01613     PSOut out = TFOut::New(FName);
01614     const int RowN = m.Len();
01615     for (int RowId = 0; RowId < RowN; RowId++) {
01616         out->PutInt(m[RowId]);
01617         out->PutCh('\n');
01618     }
01619     out->Flush();
01620 }
01621 
01622 void TLAMisc::SaveMatlabTFltVVCol(const TFltVV& m, int ColId, const TStr& FName) {
01623     PSOut out = TFOut::New(FName);
01624     const int RowN = m.GetRows();
01625     for (int RowId = 0; RowId < RowN; RowId++) {
01626         out->PutStr(TFlt::GetStr(m(RowId,ColId), 20, 18));
01627         out->PutCh('\n');
01628     }
01629     out->Flush();
01630 }
01631 
01632 
01633 void TLAMisc::SaveMatlabTFltVV(const TFltVV& m, const TStr& FName) {
01634     PSOut out = TFOut::New(FName);
01635     const int RowN = m.GetRows();
01636     const int ColN = m.GetCols();
01637     for (int RowId = 0; RowId < RowN; RowId++) {
01638         for (int ColId = 0; ColId < ColN; ColId++) {
01639             out->PutStr(TFlt::GetStr(m(RowId,ColId), 20, 18));
01640             out->PutCh(' ');
01641         }
01642         out->PutCh('\n');
01643     }
01644     out->Flush();
01645 }
01646 
01647 void TLAMisc::SaveMatlabTFltVVMjrSubMtrx(const TFltVV& m,
01648         int RowN, int ColN, const TStr& FName) {
01649 
01650     PSOut out = TFOut::New(FName);
01651     for (int RowId = 0; RowId < RowN; RowId++) {
01652         for (int ColId = 0; ColId < ColN; ColId++) {
01653             out->PutStr(TFlt::GetStr(m(RowId,ColId), 20, 18)); out->PutCh(' ');
01654         }
01655         out->PutCh('\n');
01656     }
01657     out->Flush();
01658 }
01659 
01660 void TLAMisc::LoadMatlabTFltVV(const TStr& FNm, TVec<TFltV>& ColV) {
01661     PSIn SIn = TFIn::New(FNm);
01662     TILx Lx(SIn, TFSet()|iloRetEoln|iloSigNum|iloExcept);
01663     int Row = 0, Col = 0; ColV.Clr();
01664     Lx.GetSym(syFlt, syEof, syEoln);
01665     //printf("%d x %d\r", Row, ColV.Len());
01666     while (Lx.Sym != syEof) {
01667         if (Lx.Sym == syFlt) {
01668             if (ColV.Len() > Col) {
01669                 IAssert(ColV[Col].Len() == Row);
01670                 ColV[Col].Add(Lx.Flt);
01671             } else {
01672                 IAssert(Row == 0);
01673                 ColV.Add(TFltV::GetV(Lx.Flt));
01674             }
01675             Col++;
01676         } else if (Lx.Sym == syEoln) {
01677             IAssert(Col == ColV.Len());
01678             Col = 0; Row++;
01679             if (Row%100 == 0) {
01680                 //printf("%d x %d\r", Row, ColV.Len());
01681             }
01682         } else {
01683             Fail;
01684         }
01685         Lx.GetSym(syFlt, syEof, syEoln);
01686     }
01687     //printf("\n");
01688     IAssert(Col == ColV.Len() || Col == 0);
01689 }
01690 
01691 void TLAMisc::LoadMatlabTFltVV(const TStr& FNm, TFltVV& MatrixVV) {
01692     TVec<TFltV> ColV; LoadMatlabTFltVV(FNm, ColV);
01693     if (ColV.Empty()) { MatrixVV.Clr(); return; }
01694     const int Rows = ColV[0].Len(), Cols = ColV.Len();
01695     MatrixVV.Gen(Rows, Cols);
01696     for (int RowN = 0; RowN < Rows; RowN++) {
01697         for (int ColN = 0; ColN < Cols; ColN++) {
01698             MatrixVV(RowN, ColN) = ColV[ColN][RowN];
01699         }
01700     }
01701 }
01702 
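LoadMatlabTFltVV parses whitespace-separated ASCII with one matrix row per input line, which is exactly the layout produced by SaveMatlabTFltVV, so the two functions round-trip a dense matrix through a plain text file that Matlab/Octave can also read. A sketch; the file name and matrix contents are illustrative:

    // Sketch: dense-matrix round trip through the Matlab ASCII format.
    TFltVV M(3, 2);
    for (int r = 0; r < 3; r++) {
        for (int c = 0; c < 2; c++) { M(r, c) = r + 0.1 * c; }
    }
    TLAMisc::SaveMatlabTFltVV(M, "m.txt");
    TFltVV M2;
    TLAMisc::LoadMatlabTFltVV("m.txt", M2);
    // M2 has 3 rows and 2 columns, and M2(r, c) matches M(r, c)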
01703 
01704 void TLAMisc::PrintTFltV(const TFltV& Vec, const TStr& VecNm) {
01705     printf("%s = [", VecNm.CStr());
01706     for (int i = 0; i < Vec.Len(); i++) {
01707         printf("%.5f", Vec[i]());
01708         if (i < Vec.Len() - 1) { printf(", "); }
01709     }
01710     printf("]\n");
01711 }
01712 
01713 
01714 void TLAMisc::PrintTFltVV(const TFltVV& A, const TStr& MatrixNm) {
01715     printf("%s = [\n", MatrixNm.CStr());
01716     for (int j = 0; j < A.GetRows(); j++) {
01717         for (int i = 0; i < A.GetCols(); i++) {
01718             printf("%f\t", A.At(j, i).Val);
01719         }
01720         printf("\n");
01721     }
01722     printf("]\n");
01723 }
01724 
01725 void TLAMisc::PrintTIntV(const TIntV& Vec, const TStr& VecNm) {
01726     printf("%s = [", VecNm.CStr());
01727     for (int i = 0; i < Vec.Len(); i++) {
01728         printf("%d", Vec[i]());
01729         if (i < Vec.Len() - 1) printf(", ");
01730     }
01731     printf("]\n");
01732 }
01733 
01734 
01735 void TLAMisc::FillRnd(TFltV& Vec, TRnd& Rnd) {
01736     int Len = Vec.Len();
01737     for (int i = 0; i < Len; i++)
01738         Vec[i] = Rnd.GetNrmDev();
01739 }
01740 
01741 void TLAMisc::FillIdentity(TFltVV& M) {
01742     IAssert(M.GetRows() == M.GetCols());
01743     int Len = M.GetRows();
01744     for (int i = 0; i < Len; i++) {
01745         for (int j = 0; j < Len; j++) M(i,j) = 0.0;
01746         M(i,i) = 1.0;
01747     }
01748 }
01749 
01750 void TLAMisc::FillIdentity(TFltVV& M, const double& Elt) {
01751     IAssert(M.GetRows() == M.GetCols());
01752     int Len = M.GetRows();
01753     for (int i = 0; i < Len; i++) {
01754         for (int j = 0; j < Len; j++) M(i,j) = 0.0;
01755         M(i,i) = Elt;
01756     }
01757 }
01758 
01759 int TLAMisc::SumVec(const TIntV& Vec) {
01760     const int Len = Vec.Len();
01761     int res = 0;
01762     for (int i = 0; i < Len; i++)
01763         res += Vec[i];
01764     return res;
01765 }
01766 
01767 double TLAMisc::SumVec(const TFltV& Vec) {
01768     const int Len = Vec.Len();
01769     double res = 0.0;
01770     for (int i = 0; i < Len; i++)
01771         res += Vec[i];
01772     return res;
01773 }
01774 
01775 void TLAMisc::ToSpVec(const TFltV& Vec, TIntFltKdV& SpVec,
01776         const double& CutSumPrc) {
01777 
01778     // determine minimal element value
01779     IAssert(0.0 <= CutSumPrc && CutSumPrc <= 1.0);
01780     const int Elts = Vec.Len();
01781     double EltSum = 0.0;
01782     for (int EltN = 0; EltN < Elts; EltN++) {
01783         EltSum += TFlt::Abs(Vec[EltN]); }
01784     const double MnEltVal = CutSumPrc * EltSum;
01785     // create sparse vector
01786     SpVec.Clr();
01787     for (int EltN = 0; EltN < Elts; EltN++) {
01788         if (TFlt::Abs(Vec[EltN]) > MnEltVal) {
01789             SpVec.Add(TIntFltKd(EltN, Vec[EltN]));
01790         }
01791     }
01792     SpVec.Pack();
01793 }
01794 
01795 void TLAMisc::ToVec(const TIntFltKdV& SpVec, TFltV& Vec, const int& VecLen) {
01796     Vec.Gen(VecLen); Vec.PutAll(0.0);
01797     int Elts = SpVec.Len();
01798     for (int EltN = 0; EltN < Elts; EltN++) {
01799         if (SpVec[EltN].Key < VecLen) {
01800             Vec[SpVec[EltN].Key] = SpVec[EltN].Dat;
01801         }
01802     }
01803 }
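ToSpVec keeps only the components whose absolute value exceeds CutSumPrc times the L1 norm of the input and stores them as (index, value) pairs, while ToVec scatters a sparse vector back into a dense vector of the requested length, so the pair gives a lossy dense-to-sparse-to-dense round trip. A sketch with illustrative values:

    // Sketch: sparsify a dense vector, then expand it back.
    TFltV Dense;
    Dense.Add(0.05); Dense.Add(4.0); Dense.Add(0.0); Dense.Add(-3.0); Dense.Add(0.2);
    TIntFltKdV Sparse;
    TLAMisc::ToSpVec(Dense, Sparse, 0.1);       // L1 norm = 7.25, threshold = 0.725
    // Sparse now holds (1, 4.0) and (3, -3.0); the small components were dropped
    TFltV Back;
    TLAMisc::ToVec(Sparse, Back, Dense.Len());  // dense again, dropped entries are 0.0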