SNAP Library 2.2, Developer Reference  2014-03-11 19:15:55
SNAP, a general purpose, high performance system for analysis and manipulation of large networks
linalg.cpp
00001 
00002 // Sparse-Column-Matrix
00003 void TSparseColMatrix::PMultiply(const TFltVV& B, int ColId, TFltV& Result) const {
00004     Assert(B.GetRows() >= ColN && Result.Len() >= RowN);
00005     int i, j; TFlt *ResV = Result.BegI();
00006     for (i = 0; i < RowN; i++) ResV[i] = 0.0;
00007     for (j = 0; j < ColN; j++) {
00008         const TIntFltKdV& ColV = ColSpVV[j]; int len = ColV.Len();
00009         for (i = 0; i < len; i++) {
00010             ResV[ColV[i].Key] += ColV[i].Dat * B(j,ColId);
00011         }
00012     }
00013 }
00014 
00015 void TSparseColMatrix::PMultiply(const TFltV& Vec, TFltV& Result) const {
00016     Assert(Vec.Len() >= ColN && Result.Len() >= RowN);
00017     int i, j; TFlt *ResV = Result.BegI();
00018     for (i = 0; i < RowN; i++) ResV[i] = 0.0;
00019     for (j = 0; j < ColN; j++) {
00020         const TIntFltKdV& ColV = ColSpVV[j]; int len = ColV.Len();
00021         for (i = 0; i < len; i++) {
00022             ResV[ColV[i].Key] += ColV[i].Dat * Vec[j];
00023         }
00024     }
00025 }
00026 
00027 void TSparseColMatrix::PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const {
00028     Assert(B.GetRows() >= RowN && Result.Len() >= ColN);
00029     int i, j, len; TFlt *ResV = Result.BegI();
00030     for (j = 0; j < ColN; j++) {
00031         const TIntFltKdV& ColV = ColSpVV[j];
00032         len = ColV.Len(); ResV[j] = 0.0;
00033         for (i = 0; i < len; i++) {
00034             ResV[j] += ColV[i].Dat * B(ColV[i].Key, ColId);
00035         }
00036     }
00037 }
00038 
00039 void TSparseColMatrix::PMultiplyT(const TFltV& Vec, TFltV& Result) const {
00040     Assert(Vec.Len() >= RowN && Result.Len() >= ColN);
00041     int i, j, len; TFlt *VecV = Vec.BegI(), *ResV = Result.BegI();
00042     for (j = 0; j < ColN; j++) {
00043         const TIntFltKdV& ColV = ColSpVV[j];
00044         len = ColV.Len(); ResV[j] = 0.0;
00045         for (i = 0; i < len; i++) {
00046             ResV[j] += ColV[i].Dat * VecV[ColV[i].Key];
00047         }
00048     }
00049 }
00050 
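// Usage sketch for the sparse column-matrix multiplies above (illustrative, not part
// of the SNAP sources): it assumes the public RowN/ColN/ColSpVV members and the
// TMatrix::Multiply/MultiplyT wrappers declared in linalg.h. Each column stores
// sorted (row-index, value) pairs, so Multiply computes Result = A*Vec and
// MultiplyT computes Result = A'*Vec.
static void ExampleSparseColMatrix() {
    TSparseColMatrix A;                        // 2 x 2 matrix [[1 0], [2 3]]
    A.RowN = 2; A.ColN = 2; A.ColSpVV.Gen(2);
    A.ColSpVV[0].Add(TIntFltKd(0, 1.0));       // A(0,0) = 1
    A.ColSpVV[0].Add(TIntFltKd(1, 2.0));       // A(1,0) = 2
    A.ColSpVV[1].Add(TIntFltKd(1, 3.0));       // A(1,1) = 3
    TFltV x(2), y(2); x[0] = 1.0; x[1] = 1.0;
    A.Multiply(x, y);                          // y = A*x  = (1, 5)
    A.MultiplyT(x, y);                         // y = A'*x = (3, 3)
}
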
00052 // Sparse-Row-Matrix
00053 TSparseRowMatrix::TSparseRowMatrix(const TStr& MatlabMatrixFNm) {
00054    FILE *F = fopen(MatlabMatrixFNm.CStr(), "rt");  IAssert(F != NULL);
00055    TVec<TTriple<TInt, TInt, TSFlt> > MtxV;
00056    RowN = 0;  ColN = 0;
00057    while (! feof(F)) {
00058      int row=-1, col=-1; float val;
00059      if (fscanf(F, "%d %d %f\n", &row, &col, &val) == 3) {
00060        IAssert(row > 0 && col > 0);
00061        MtxV.Add(TTriple<TInt, TInt, TSFlt>(row, col, val));
00062        RowN = TMath::Mx(RowN, row);
00063        ColN = TMath::Mx(ColN, col);
00064      }
00065    }
00066    fclose(F);
00067    // create matrix
00068    MtxV.Sort();
00069    RowSpVV.Gen(RowN);
00070    int cnt = 0;
00071    for (int row = 1; row <= RowN; row++) {
00072      while (cnt < MtxV.Len() && MtxV[cnt].Val1 == row) {
00073        RowSpVV[row-1].Add(TIntFltKd(MtxV[cnt].Val2-1, MtxV[cnt].Val3()));
00074        cnt++;
00075      }
00076    }
00077 }
00078 
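// Usage sketch for the Matlab-style constructor above (illustrative, not part of the
// SNAP sources): the file is expected to contain one "row col value" triplet per line
// with 1-based indices; the file name and helper below are hypothetical.
//
//   graph.mtx:
//     1 2 0.5
//     2 1 1.0
//     3 3 2.5
//
static void ExampleLoadSparseRowMatrix() {
    TSparseRowMatrix A("graph.mtx");     // RowN = 3, ColN = 3, rows stored 0-based
    TFltV x(A.GetCols()), y(A.GetRows());
    for (int i = 0; i < x.Len(); i++) { x[i] = 1.0; }
    A.Multiply(x, y);                    // y = A*x = (0.5, 1.0, 2.5)
}
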
00079 void TSparseRowMatrix::PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const {
00080     Assert(B.GetRows() >= RowN && Result.Len() >= ColN);
00081     for (int i = 0; i < ColN; i++) Result[i] = 0.0;
00082     for (int j = 0; j < RowN; j++) {
00083         const TIntFltKdV& RowV = RowSpVV[j]; int len = RowV.Len();
00084         for (int i = 0; i < len; i++) {
00085             Result[RowV[i].Key] += RowV[i].Dat * B(j,ColId);
00086         }
00087     }
00088 }
00089 
00090 void TSparseRowMatrix::PMultiplyT(const TFltV& Vec, TFltV& Result) const {
00091     Assert(Vec.Len() >= RowN && Result.Len() >= ColN);
00092     for (int i = 0; i < ColN; i++) Result[i] = 0.0;
00093     for (int j = 0; j < RowN; j++) {
00094         const TIntFltKdV& RowV = RowSpVV[j]; int len = RowV.Len();
00095         for (int i = 0; i < len; i++) {
00096             Result[RowV[i].Key] += RowV[i].Dat * Vec[j];
00097         }
00098     }
00099 }
00100 
00101 void TSparseRowMatrix::PMultiply(const TFltVV& B, int ColId, TFltV& Result) const {
00102     Assert(B.GetRows() >= ColN && Result.Len() >= RowN);
00103     for (int j = 0; j < RowN; j++) {
00104         const TIntFltKdV& RowV = RowSpVV[j];
00105         int len = RowV.Len(); Result[j] = 0.0;
00106         for (int i = 0; i < len; i++) {
00107             Result[j] += RowV[i].Dat * B(RowV[i].Key, ColId);
00108         }
00109     }
00110 }
00111 
00112 void TSparseRowMatrix::PMultiply(const TFltV& Vec, TFltV& Result) const {
00113     Assert(Vec.Len() >= ColN && Result.Len() >= RowN);
00114     for (int j = 0; j < RowN; j++) {
00115         const TIntFltKdV& RowV = RowSpVV[j];
00116         int len = RowV.Len(); Result[j] = 0.0;
00117         for (int i = 0; i < len; i++) {
00118             Result[j] += RowV[i].Dat * Vec[RowV[i].Key];
00119         }
00120     }
00121 }
00122 
00124 // Full-Col-Matrix
00125 TFullColMatrix::TFullColMatrix(const TStr& MatlabMatrixFNm): TMatrix() {
00126     TLAMisc::LoadMatlabTFltVV(MatlabMatrixFNm, ColV);
00127     RowN=ColV[0].Len(); ColN=ColV.Len();
00128     for (int i = 0; i < ColN; i++) {
00129         IAssertR(ColV[i].Len() == RowN, TStr::Fmt("%d != %d", ColV[i].Len(), RowN));
00130     }
00131 }
00132 
00133 void TFullColMatrix::PMultiplyT(const TFltVV& B, int ColId, TFltV& Result) const {
00134     Assert(B.GetRows() >= RowN && Result.Len() >= ColN);
00135     for (int i = 0; i < ColN; i++) {
00136         Result[i] = TLinAlg::DotProduct(B, ColId, ColV[i]);
00137     }
00138 }
00139 
00140 void TFullColMatrix::PMultiplyT(const TFltV& Vec, TFltV& Result) const {
00141     Assert(Vec.Len() >= RowN && Result.Len() >= ColN);
00142     for (int i = 0; i < ColN; i++) {
00143         Result[i] = TLinAlg::DotProduct(Vec, ColV[i]);
00144     }
00145 }
00146 
00147 void TFullColMatrix::PMultiply(const TFltVV& B, int ColId, TFltV& Result) const {
00148     Assert(B.GetRows() >= ColN && Result.Len() >= RowN);
00149     for (int i = 0; i < RowN; i++) { Result[i] = 0.0; }
00150     for (int i = 0; i < ColN; i++) {
00151         TLinAlg::AddVec(B(i, ColId), ColV[i], Result, Result);
00152     }
00153 }
00154 
00155 void TFullColMatrix::PMultiply(const TFltV& Vec, TFltV& Result) const {
00156     Assert(Vec.Len() >= ColN && Result.Len() >= RowN);
00157     for (int i = 0; i < RowN; i++) { Result[i] = 0.0; }
00158     for (int i = 0; i < ColN; i++) {
00159         TLinAlg::AddVec(Vec[i], ColV[i], Result, Result);
00160     }
00161 }
00162 
00164 // Basic Linear Algebra Operations
00165 double TLinAlg::DotProduct(const TFltV& x, const TFltV& y) {
00166     IAssertR(x.Len() == y.Len(), TStr::Fmt("%d != %d", x.Len(), y.Len()));
00167     double result = 0.0; int Len = x.Len();
00168     for (int i = 0; i < Len; i++)
00169         result += x[i] * y[i];
00170     return result;
00171 }
00172 
00173 double TLinAlg::DotProduct(const TFltVV& X, int ColIdX, const TFltVV& Y, int ColIdY) {
00174     Assert(X.GetRows() == Y.GetRows());
00175     double result = 0.0; const int len = X.GetRows();
00176     for (int i = 0; i < len; i++)
00177         result = result + X(i,ColIdX) * Y(i,ColIdY);
00178     return result;
00179 }
00180 
00181 double TLinAlg::DotProduct(const TFltVV& X, int ColId, const TFltV& Vec) {
00182     Assert(X.GetRows() == Vec.Len());
00183     double result = 0.0; int Len = X.GetRows();
00184     for (int i = 0; i < Len; i++)
00185         result += X(i,ColId) * Vec[i];
00186     return result;
00187 }
00188 
00189 double TLinAlg::DotProduct(const TIntFltKdV& x, const TIntFltKdV& y) {
00190     const int xLen = x.Len(), yLen = y.Len();
00191     double Res = 0.0; int i1 = 0, i2 = 0;
00192     while (i1 < xLen && i2 < yLen) {
00193         if (x[i1].Key < y[i2].Key) i1++;
00194         else if (x[i1].Key > y[i2].Key) i2++;
00195         else { Res += x[i1].Dat * y[i2].Dat;  i1++;  i2++; }
00196     }
00197     return Res;
00198 }
00199 
00200 double TLinAlg::DotProduct(const TFltV& x, const TIntFltKdV& y) {
00201     double Res = 0.0; const int xLen = x.Len(), yLen = y.Len();
00202     for (int i = 0; i < yLen; i++) {
00203         const int key = y[i].Key;
00204         if (key < xLen) Res += y[i].Dat * x[key];
00205     }
00206     return Res;
00207 }
00208 
00209 double TLinAlg::DotProduct(const TFltVV& X, int ColId, const TIntFltKdV& y) {
00210     double Res = 0.0; const int n = X.GetRows(), yLen = y.Len();
00211     for (int i = 0; i < yLen; i++) {
00212         const int key = y[i].Key;
00213         if (key < n) Res += y[i].Dat * X(key,ColId);
00214     }
00215     return Res;
00216 }
00217 
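// Usage sketch for the DotProduct overloads above (illustrative helper, not part of
// the SNAP sources). Note that the sparse-sparse overload assumes both TIntFltKdV
// operands are sorted by key, which is how the sparse matrices in this file store
// their entries.
static void ExampleDotProducts() {
    TFltV x(3), y(3);
    x[0] = 1.0; x[1] = 2.0; x[2] = 3.0;
    y[0] = 4.0; y[1] = 5.0; y[2] = 6.0;
    const double DenseDot = TLinAlg::DotProduct(x, y);    // 1*4 + 2*5 + 3*6 = 32
    TIntFltKdV SpV;
    SpV.Add(TIntFltKd(0, 1.0)); SpV.Add(TIntFltKd(2, -1.0));
    const double MixedDot = TLinAlg::DotProduct(y, SpV);  // 4*1 + 6*(-1) = -2
    printf("dense: %g  mixed: %g\n", DenseDot, MixedDot);
}
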
00218 void TLinAlg::LinComb(const double& p, const TFltV& x,
00219         const double& q, const TFltV& y, TFltV& z) {
00220 
00221     Assert(x.Len() == y.Len() && y.Len() == z.Len());
00222     const int Len = x.Len();
00223     for (int i = 0; i < Len; i++) {
00224         z[i] = p * x[i] + q * y[i]; }
00225 }
00226 
00227 void TLinAlg::ConvexComb(const double& p, const TFltV& x, const TFltV& y, TFltV& z) {
00228     AssertR(0.0 <= p && p <= 1.0, TFlt::GetStr(p));
00229     LinComb(p, x, 1.0 - p, y, z);
00230 }
00231 
00232 void TLinAlg::AddVec(const double& k, const TFltV& x, const TFltV& y, TFltV& z) {
00233     LinComb(k, x, 1.0, y, z);
00234 }
00235 
00236 void TLinAlg::AddVec(const double& k, const TIntFltKdV& x, const TFltV& y, TFltV& z) {
00237     Assert(y.Len() == z.Len());
00238     z = y; // first we set z to be y
00239     // and then we add x to z (==y)
00240     const int xLen = x.Len(), yLen = y.Len();
00241     for (int i = 0; i < xLen; i++) {
00242         const int ii = x[i].Key;
00243         if (ii < yLen) {
00244             z[ii] = k * x[i].Dat + y[ii];
00245         }
00246     }
00247 }
00248 
00249 void TLinAlg::AddVec(const double& k, const TIntFltKdV& x, TFltV& y) {
00250     const int xLen = x.Len(), yLen = y.Len();
00251     for (int i = 0; i < xLen; i++) {
00252         const int ii = x[i].Key;
00253         if (ii < yLen) {
00254             y[ii] += k * x[i].Dat;
00255         }
00256     }
00257 }
00258 
00259 void TLinAlg::AddVec(double k, const TFltVV& X, int ColIdX, TFltVV& Y, int ColIdY){
00260     Assert(X.GetRows() == Y.GetRows());
00261     const int len = Y.GetRows();
00262     for (int i = 0; i < len; i++) {
00263         Y(i,ColIdY) = Y(i,ColIdY) + k * X(i, ColIdX);
00264     }
00265 }
00266 
00267 void TLinAlg::AddVec(double k, const TFltVV& X, int ColId, TFltV& Result){
00268     Assert(X.GetRows() == Result.Len());
00269     const int len = Result.Len();
00270     for (int i = 0; i < len; i++) {
00271         Result[i] = Result[i] + k * X(i, ColId);
00272     }
00273 }
00274 
00275 void TLinAlg::AddVec(const TIntFltKdV& x, const TIntFltKdV& y, TIntFltKdV& z) {
00276         TSparseOpsIntFlt::SparseMerge(x, y, z);
00277 }
00278 
00279 double TLinAlg::SumVec(const TFltV& x) {
00280     const int len = x.Len();
00281     double Res = 0.0;
00282     for (int i = 0; i < len; i++) {
00283         Res += x[i];
00284     }
00285     return Res;
00286 }
00287 
00288 double TLinAlg::SumVec(double k, const TFltV& x, const TFltV& y) {
00289     Assert(x.Len() == y.Len());
00290     const int len = x.Len();
00291     double Res = 0.0;
00292     for (int i = 0; i < len; i++) {
00293         Res += k * x[i] + y[i];
00294     }
00295     return Res;
00296 }
00297 
00298 double TLinAlg::EuclDist2(const TFltV& x, const TFltV& y) {
00299     Assert(x.Len() == y.Len());
00300     const int len = x.Len();
00301     double Res = 0.0;
00302     for (int i = 0; i < len; i++) {
00303         Res += TMath::Sqr(x[i] - y[i]);
00304     }
00305     return Res;
00306 }
00307 
00308 double TLinAlg::EuclDist2(const TFltPr& x, const TFltPr& y) {
00309     return TMath::Sqr(x.Val1 - y.Val1) + TMath::Sqr(x.Val2 - y.Val2);
00310 }
00311 
00312 double TLinAlg::EuclDist(const TFltV& x, const TFltV& y) {
00313     return sqrt(EuclDist2(x, y));
00314 }
00315 
00316 double TLinAlg::EuclDist(const TFltPr& x, const TFltPr& y) {
00317     return sqrt(EuclDist2(x, y));
00318 }
00319 
00320 double TLinAlg::Norm2(const TFltV& x) {
00321     return DotProduct(x, x);
00322 }
00323 
00324 double TLinAlg::Norm(const TFltV& x) {
00325     return sqrt(Norm2(x));
00326 }
00327 
00328 void TLinAlg::Normalize(TFltV& x) {
00329     const double xNorm = Norm(x);
00330     if (xNorm > 0.0) { MultiplyScalar(1/xNorm, x, x); }
00331 }
00332 
00333 double TLinAlg::Norm2(const TIntFltKdV& x) {
00334     double Result = 0;
00335     for (int i = 0; i < x.Len(); i++) {
00336         Result += TMath::Sqr(x[i].Dat);
00337     }
00338     return Result;
00339 }
00340 
00341 double TLinAlg::Norm(const TIntFltKdV& x) {
00342     return sqrt(Norm2(x));
00343 }
00344 
00345 void TLinAlg::Normalize(TIntFltKdV& x) {
00346     MultiplyScalar(1/Norm(x), x, x);
00347 }
00348 
00349 double TLinAlg::Norm2(const TFltVV& X, int ColId) {
00350     return DotProduct(X, ColId, X, ColId);
00351 }
00352 
00353 double TLinAlg::Norm(const TFltVV& X, int ColId) {
00354     return sqrt(Norm2(X, ColId));
00355 }
00356 
00357 double TLinAlg::NormL1(const TFltV& x) {
00358     double norm = 0.0; const int Len = x.Len();
00359     for (int i = 0; i < Len; i++)
00360         norm += TFlt::Abs(x[i]);
00361     return norm;
00362 }
00363 
00364 double TLinAlg::NormL1(double k, const TFltV& x, const TFltV& y) {
00365     Assert(x.Len() == y.Len());
00366     double norm = 0.0; const int len = x.Len();
00367     for (int i = 0; i < len; i++) {
00368         norm += TFlt::Abs(k * x[i] + y[i]);
00369     }
00370     return norm;
00371 }
00372 
00373 double TLinAlg::NormL1(const TIntFltKdV& x) {
00374     double norm = 0.0; const int Len = x.Len();
00375     for (int i = 0; i < Len; i++)
00376         norm += TFlt::Abs(x[i].Dat);
00377     return norm;
00378 }
00379 
00380 void TLinAlg::NormalizeL1(TFltV& x) {
00381     const double xNorm = NormL1(x);
00382     if (xNorm > 0.0) { MultiplyScalar(1/xNorm, x, x); }
00383 }
00384 
00385 void TLinAlg::NormalizeL1(TIntFltKdV& x) {
00386     const double xNorm = NormL1(x);
00387     if (xNorm > 0.0) { MultiplyScalar(1/xNorm, x, x); }
00388 }
00389 
00390 double TLinAlg::NormLinf(const TFltV& x) {
00391     double norm = 0.0; const int Len = x.Len();
00392     for (int i = 0; i < Len; i++)
00393         norm = TFlt::GetMx(TFlt::Abs(x[i]), norm);
00394     return norm;
00395 }
00396 
00397 double TLinAlg::NormLinf(const TIntFltKdV& x) {
00398     double norm = 0.0; const int Len = x.Len();
00399     for (int i = 0; i < Len; i++)
00400         norm = TFlt::GetMx(TFlt::Abs(x[i].Dat), norm);
00401     return norm;
00402 }
00403 
00404 void TLinAlg::NormalizeLinf(TFltV& x) {
00405     const double xNormLinf = NormLinf(x);
00406     if (xNormLinf > 0.0) { MultiplyScalar(1.0/xNormLinf, x, x); }
00407 }
00408 
00409 void TLinAlg::NormalizeLinf(TIntFltKdV& x) {
00410     const double xNormLInf = NormLinf(x);
00411     if (xNormLInf> 0.0) { MultiplyScalar(1.0/xNormLInf, x, x); }
00412 }
00413 
00414 void TLinAlg::MultiplyScalar(const double& k, const TFltV& x, TFltV& y) {
00415     Assert(x.Len() == y.Len());
00416     int Len = x.Len();
00417     for (int i = 0; i < Len; i++)
00418         y[i] = k * x[i];
00419 }
00420 
00421 void TLinAlg::MultiplyScalar(const double& k, const TIntFltKdV& x, TIntFltKdV& y) {
00422     Assert(x.Len() == y.Len());
00423     int Len = x.Len();
00424     for (int i = 0; i < Len; i++)
00425         y[i].Dat = k * x[i].Dat;
00426 }
00427 
00428 void TLinAlg::Multiply(const TFltVV& A, const TFltV& x, TFltV& y) {
00429     Assert(A.GetCols() == x.Len() && A.GetRows() == y.Len());
00430     int n = A.GetRows(), m = A.GetCols();
00431     for (int i = 0; i < n; i++) {
00432         y[i] = 0.0;
00433         for (int j = 0; j < m; j++)
00434             y[i] += A(i,j) * x[j];
00435     }
00436 }
00437 
00438 void TLinAlg::Multiply(const TFltVV& A, const TFltV& x, TFltVV& C, int ColId) {
00439     Assert(A.GetCols() == x.Len() && A.GetRows() == C.GetRows());
00440     int n = A.GetRows(), m = A.GetCols();
00441     for (int i = 0; i < n; i++) {
00442         C(i,ColId) = 0.0;
00443         for (int j = 0; j < m; j++)
00444             C(i,ColId) += A(i,j) * x[j];
00445     }
00446 }
00447 
00448 void TLinAlg::Multiply(const TFltVV& A, const TFltVV& B, int ColId, TFltV& y) {
00449     Assert(A.GetCols() == B.GetRows() && A.GetRows() == y.Len());
00450     int n = A.GetRows(), m = A.GetCols();
00451     for (int i = 0; i < n; i++) {
00452         y[i] = 0.0;
00453         for (int j = 0; j < m; j++)
00454             y[i] += A(i,j) * B(j,ColId);
00455     }
00456 }
00457 
00458 void TLinAlg::Multiply(const TFltVV& A, const TFltVV& B, int ColIdB, TFltVV& C, int ColIdC){
00459     Assert(A.GetCols() == B.GetRows() && A.GetRows() == C.GetRows());
00460     int n = A.GetRows(), m = A.GetCols();
00461     for (int i = 0; i < n; i++) {
00462         C(i,ColIdC) = 0.0;
00463         for (int j = 0; j < m; j++)
00464             C(i,ColIdC) += A(i,j) * B(j,ColIdB);
00465     }
00466 }
00467 
00468 void TLinAlg::MultiplyT(const TFltVV& A, const TFltV& x, TFltV& y) {
00469     Assert(A.GetRows() == x.Len() && A.GetCols() == y.Len());
00470     int n = A.GetCols(), m = A.GetRows();
00471     for (int i = 0; i < n; i++) {
00472         y[i] = 0.0;
00473         for (int j = 0; j < m; j++)
00474             y[i] += A(j,i) * x[j];
00475     }
00476 }
00477 
00478 void TLinAlg::Multiply(const TFltVV& A, const TFltVV& B, TFltVV& C) {
00479     Assert(A.GetRows() == C.GetRows() && B.GetCols() == C.GetCols() && A.GetCols() == B.GetRows());
00480 
00481     int n = C.GetRows(), m = C.GetCols(), l = A.GetCols();
00482     for (int i = 0; i < n; i++) {
00483         for (int j = 0; j < m; j++) {
00484             double sum = 0.0;
00485             for (int k = 0; k < l; k++)
00486                 sum += A(i,k)*B(k,j);
00487             C(i,j) = sum;
00488         }
00489     }
00490 }
00491 
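// Usage sketch for the dense matrix product Multiply(A, B, C) above (illustrative
// helper, not part of the SNAP sources): C must be pre-allocated to
// A.GetRows() x B.GetCols().
static void ExampleDenseMultiply() {
    TFltVV A(2, 3), B(3, 2), C(2, 2);
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 3; j++) { A(i, j) = i + j; B(j, i) = 1.0; }
    }
    TLinAlg::Multiply(A, B, C);   // C(i,j) = sum_k A(i,k)*B(k,j); here C = [[3 3], [6 6]]
}
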
00492 // general matrix multiplication (GEMM)
00493 void TLinAlg::Gemm(const double& Alpha, const TFltVV& A, const TFltVV& B, const double& Beta, 
00494                 const TFltVV& C, TFltVV& D, const int& TransposeFlags) {
00495 
00496         bool tA = (TransposeFlags & GEMM_A_T) == GEMM_A_T;
00497         bool tB = (TransposeFlags & GEMM_B_T) == GEMM_B_T;
00498         bool tC = (TransposeFlags & GEMM_C_T) == GEMM_C_T;
00499 
00500         // setting dimensions
00501         int a_i = tA ? A.GetRows() : A.GetCols();
00502         int a_j = tA ? A.GetCols() : A.GetRows();
00503 
00504         int b_i = tB ? B.GetRows() : B.GetCols();
00505         int b_j = tB ? B.GetCols() : B.GetRows();
00506 
00507         int c_i = tC ? C.GetRows() : C.GetCols();
00508         int c_j = tC ? C.GetCols() : C.GetRows();
00509 
00510         int d_i = D.GetCols();
00511         int d_j = D.GetRows();
00512         
00513         // assertions for dimensions
00514         bool Cnd = a_i == c_j && b_i == c_i && a_i == b_j && c_i == d_i && c_j == d_j;
00515         Assert(Cnd);
00518 
00519         double Aij, Bij, Cij;
00520 
00521         // rows
00522         for (int j = 0; j < a_j; j++) {
00523                 // cols
00524                 for (int i = 0; i < a_i; i++) {
00525                         // not optimized for speed (!)
00526                         double sum = 0;
00527                         for (int k = 0; k < a_i; k++) {
00528                                 Aij = tA ? A.At(j, k) : A.At(k, j);
00529                                 Bij = tB ? B.At(k, i) : B.At(i, k);
00530                                 sum += Alpha * Aij * Bij;
00531                         }
00532                         Cij = tC ? C.At(i, j) : C.At(j, i);
00533                         sum += Beta * Cij;
00534                         D.At(i, j) = sum;
00535                 }
00536         }
00537 }
00538 
00539 void TLinAlg::Transpose(const TFltVV& A, TFltVV& B) {
00540         Assert(B.GetRows() == A.GetCols() && B.GetCols() == A.GetRows());
00541         for (int i = 0; i < A.GetCols(); i++) {
00542                 for (int j = 0; j < A.GetRows(); j++) {
00543                         B.At(i, j) = A.At(j, i);
00544                 }
00545         }
00546 }
00547 
00548 void TLinAlg::Inverse(const TFltVV& A, TFltVV& B, const TLinAlgInverseType& DecompType) {
00549         switch (DecompType) {
00550                 case DECOMP_SVD:
00551                         InverseSVD(A, B);
00552         }
00553 }
00554 
00555 void TLinAlg::InverseSVD(const TFltVV& M, TFltVV& B) {
00556         // create temp matrices
00557         TFltVV U, V;
00558         TFltV E;
00559         TSvd SVD;
00560 
00561         U.Gen(M.GetRows(), M.GetRows());        
00562         V.Gen(M.GetCols(), M.GetCols());
00563 
00564         // do the SVD decomposition
00565         SVD.Svd(M, U, E, V);
00566 
00567         // calculate reciprocal values for diagonal matrix = inverse diagonal
00568         for (int i = 0; i < E.Len(); i++) {
00569                 E[i] = 1 / E[i];
00570         }
00571 
00572         // calculate pseudoinverse: M^(-1) = V * E^(-1) * U'
00573         for (int i = 0; i < U.GetCols(); i++) {
00574                 for (int j = 0; j < V.GetRows(); j++) {
00575                         double sum = 0;
00576                         for (int k = 0; k < U.GetCols(); k++) {
00577                                 sum += E[k] * V.At(i, k) * U.At(j, k);
00578                         }
00579                         B.At(i, j) = sum;
00580                 }
00581         }       
00582 }
00583 
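// Usage sketch for InverseSVD above (illustrative helper, not part of the SNAP
// sources): the routine writes into B without resizing it, so B must be
// pre-allocated to match A; singular values close to zero lead to huge reciprocals,
// so near-singular inputs should be handled by the caller.
static void ExampleInverseSVD() {
    TFltVV A(2, 2), AInv(2, 2);
    A(0, 0) = 2.0; A(0, 1) = 0.0;
    A(1, 0) = 0.0; A(1, 1) = 4.0;
    TLinAlg::InverseSVD(A, AInv);   // AInv is approximately [[0.5 0], [0 0.25]]
}
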
00584 void TLinAlg::GS(TVec<TFltV>& Q) {
00585     IAssert(Q.Len() > 0);
00586     int m = Q.Len(); // int n = Q[0].Len();
00587     for (int i = 0; i < m; i++) {
00588         printf("%d\r",i);
00589         for (int j = 0; j < i; j++) {
00590             double r = TLinAlg::DotProduct(Q[i], Q[j]);
00591             TLinAlg::AddVec(-r,Q[j],Q[i],Q[i]);
00592         }
00593         TLinAlg::Normalize(Q[i]);
00594     }
00595     printf("\n");
00596 }
00597 
00598 void TLinAlg::GS(TFltVV& Q) {
00599     int m = Q.GetCols(), n = Q.GetRows();
00600     for (int i = 0; i < m; i++) {
00601         for (int j = 0; j < i; j++) {
00602             double r = TLinAlg::DotProduct(Q, i, Q, j);
00603             TLinAlg::AddVec(-r,Q,j,Q,i);
00604         }
00605         double nr = TLinAlg::Norm(Q,i);
00606         for (int k = 0; k < n; k++)
00607             Q(k,i) = Q(k,i) / nr;
00608     }
00609 }
00610 
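// Usage sketch for the Gram-Schmidt routine GS(TFltVV&) above (illustrative helper,
// not part of the SNAP sources): after the call the columns of Q are orthonormal,
// which AssertOrtogonality below can be used to double-check.
static void ExampleGramSchmidt() {
    TFltVV Q(3, 2);
    Q(0, 0) = 1.0; Q(1, 0) = 1.0; Q(2, 0) = 0.0;   // first column
    Q(0, 1) = 1.0; Q(1, 1) = 0.0; Q(2, 1) = 1.0;   // second column
    TLinAlg::GS(Q);                                // columns are now orthonormal
    TLinAlg::AssertOrtogonality(Q, 1e-8);          // prints any violations it finds
}
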
00611 void TLinAlg::Rotate(const double& OldX, const double& OldY, const double& Angle, double& NewX, double& NewY) {
00612     NewX = OldX*cos(Angle) - OldY*sin(Angle);
00613     NewY = OldX*sin(Angle) + OldY*cos(Angle);
00614 }
00615 
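// Usage sketch for Rotate above (illustrative helper, not part of the SNAP sources):
// the angle is given in radians and the rotation is counter-clockwise.
static void ExampleRotate() {
    double NewX, NewY;
    const double HalfPi = asin(1.0);                  // pi/2
    TLinAlg::Rotate(1.0, 0.0, HalfPi, NewX, NewY);    // (1,0) -> approximately (0,1)
}
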
00616 void TLinAlg::AssertOrtogonality(const TVec<TFltV>& Vecs, const double& Threshold) {
00617     int m = Vecs.Len();
00618     for (int i = 0; i < m; i++) {
00619         for (int j = 0; j < i; j++) {
00620             double res = DotProduct(Vecs[i], Vecs[j]);
00621             if (TFlt::Abs(res) > Threshold)
00622                 printf("<%d,%d> = %.5f", i,j,res);
00623         }
00624         double norm = Norm2(Vecs[i]);
00625         if (TFlt::Abs(norm-1) > Threshold)
00626             printf("||%d|| = %.5f", i, norm);
00627     }
00628 }
00629 
00630 void TLinAlg::AssertOrtogonality(const TFltVV& Vecs, const double& Threshold) {
00631     int m = Vecs.GetCols();
00632     for (int i = 0; i < m; i++) {
00633         for (int j = 0; j < i; j++) {
00634             double res = DotProduct(Vecs, i, Vecs, j);
00635             if (TFlt::Abs(res) > Threshold)
00636                 printf("<%d,%d> = %.5f", i, j, res);
00637         }
00638         double norm = Norm2(Vecs, i);
00639         if (TFlt::Abs(norm-1) > Threshold)
00640             printf("||%d|| = %.5f", i, norm);
00641     }
00642     printf("\n");
00643 }
00644 
00646 // Numerical Linear Algebra
00647 double TNumericalStuff::sqr(double a) {
00648   return a == 0.0 ? 0.0 : a*a;
00649 }
00650 
00651 double TNumericalStuff::sign(double a, double b) {
00652   return b >= 0.0 ? fabs(a) : -fabs(a);
00653 }
00654 
00655 void TNumericalStuff::nrerror(const TStr& error_text) {
00656     printf("NR_ERROR: %s", error_text.CStr());
00657     throw new TNSException(error_text);
00658 }
00659 
00660 double TNumericalStuff::pythag(double a, double b) {
00661     double absa = fabs(a), absb = fabs(b);
00662     if (absa > absb)
00663         return absa*sqrt(1.0+sqr(absb/absa));
00664     else
00665         return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+sqr(absa/absb)));
00666 }
00667 
00668 void TNumericalStuff::SymetricToTridiag(TFltVV& a, int n, TFltV& d, TFltV& e) {
00669     int l,k,j,i;
00670     double scale,hh,h,g,f;
00671     for (i=n;i>=2;i--) {
00672         l=i-1;
00673         h=scale=0.0;
00674         if (l > 1) {
00675             for (k=1;k<=l;k++)
00676                 scale += fabs(a(i-1,k-1).Val);
00677             if (scale == 0.0) //Skip transformation.
00678                 e[i]=a(i-1,l-1);
00679             else {
00680                 for (k=1;k<=l;k++) {
00681                     a(i-1,k-1) /= scale; //Use scaled a's for transformation.
00682                     h += a(i-1,k-1)*a(i-1,k-1);
00683                 }
00684                 f=a(i-1,l-1);
00685                 g=(f >= 0.0 ? -sqrt(h) : sqrt(h));
00686                 IAssertR(_isnan(g) == 0, TFlt::GetStr(h));
00687                 e[i]=scale*g;
00688                 h -= f*g; //Now h is equation (11.2.4).
00689                 a(i-1,l-1)=f-g; //Store u in the ith row of a.
00690                 f=0.0;
00691                 for (j=1;j<=l;j++) {
00692                     // Next statement can be omitted if eigenvectors not wanted
00693                     a(j-1,i-1)=a(i-1,j-1)/h; //Store u=H in ith column of a.
00694                     g=0.0; //Form an element of A*u in g.
00695                     for (k=1;k<=j;k++)
00696                         g += a(j-1,k-1)*a(i-1,k-1);
00697                     for (k=j+1;k<=l;k++)
00698                         g += a(k-1,j-1)*a(i-1,k-1);
00699                     e[j]=g/h; //Form element of p in temporarily unused element of e.
00700                     f += e[j]*a(i-1,j-1);
00701                 }
00702                 hh=f/(h+h); //Form K, equation (11.2.11).
00703                 for (j=1;j<=l;j++) { //Form q and store in e overwriting p.
00704                     f=a(i-1,j-1);
00705                     e[j]=g=e[j]-hh*f;
00706                     for (k=1;k<=j;k++) { //Reduce a, equation (11.2.13).
00707                         a(j-1,k-1) -= (f*e[k]+g*a(i-1,k-1));
00708                         Assert(_isnan(a(j-1,k-1)) == 0);
00709                     }
00710                 }
00711             }
00712         } else
00713             e[i]=a(i-1,l-1);
00714         d[i]=h;
00715     }
00716     // Next statement can be omitted if eigenvectors not wanted
00717     d[1]=0.0;
00718     e[1]=0.0;
00719     // Contents of this loop can be omitted if eigenvectors not
00720     // wanted except for statement d[i]=a[i][i];
00721     for (i=1;i<=n;i++) { //Begin accumulation of transformation matrices.
00722         l=i-1;
00723         if (d[i]) { //This block skipped when i=1.
00724             for (j=1;j<=l;j++) {
00725                 g=0.0;
00726                 for (k=1;k<=l;k++) //Use u and u=H stored in a to form PQ.
00727                     g += a(i-1,k-1)*a(k-1,j-1);
00728                 for (k=1;k<=l;k++) {
00729                     a(k-1,j-1) -= g*a(k-1,i-1);
00730                     Assert(_isnan(a(k-1,j-1)) == 0);
00731                 }
00732             }
00733         }
00734         d[i]=a(i-1,i-1); //This statement remains.
00735         a(i-1,i-1)=1.0; //Reset row and column of a to identity  matrix for next iteration.
00736         for (j=1;j<=l;j++) a(j-1,i-1)=a(i-1,j-1)=0.0;
00737     }
00738 }
00739 
00740 void TNumericalStuff::EigSymmetricTridiag(TFltV& d, TFltV& e, int n, TFltVV& z) {
00741     int m,l,iter,i,k; // N = n+1;
00742     double s,r,p,g,f,dd,c,b;
00743     // Convenient to renumber the elements of e
00744     for (i=2;i<=n;i++) e[i-1]=e[i];
00745     e[n]=0.0;
00746     for (l=1;l<=n;l++) {
00747         iter=0;
00748         do {
00749             // Look for a single small subdiagonal element to split the matrix.
00750             for (m=l;m<=n-1;m++) {
00751         dd=TFlt::Abs(d[m])+TFlt::Abs(d[m+1]);
00752                 if ((double)(TFlt::Abs(e[m])+dd) == dd) break;
00753             }
00754             if (m != l) {
00755                 if (iter++ == 60) nrerror("Too many iterations in EigSymmetricTridiag");
00756                 //Form shift.
00757                 g=(d[l+1]-d[l])/(2.0*e[l]);
00758                 r=pythag(g,1.0);
00759                 //This is dm - ks.
00760                 g=d[m]-d[l]+e[l]/(g+sign(r,g));
00761                 s=c=1.0;
00762                 p=0.0;
00763                 // A plane rotation as in the original QL, followed by
00764                 // Givens rotations to restore tridiagonal form
00765                 for (i=m-1;i>=l;i--) {
00766                     f=s*e[i];
00767                     b=c*e[i];
00768                     e[i+1]=(r=pythag(f,g));
00769                     // Recover from underflow.
00770                     if (r == 0.0) {
00771                         d[i+1] -= p;
00772                         e[m]=0.0;
00773                         break;
00774                     }
00775                     s=f/r;
00776                     c=g/r;
00777                     g=d[i+1]-p;
00778                     r=(d[i]-g)*s+2.0*c*b;
00779                     d[i+1]=g+(p=s*r);
00780                     g=c*r-b;
00781                     // Next loop can be omitted if eigenvectors not wanted
00782                     for (k=0;k<n;k++) {
00783                         f=z(k,i);
00784                         z(k,i)=s*z(k,i-1)+c*f;
00785                         z(k,i-1)=c*z(k,i-1)-s*f;
00786                     }
00787                 }
00788                 if (r == 0.0 && i >= l) continue;
00789                 d[l] -= p;
00790                 e[l]=g;
00791                 e[m]=0.0;
00792             }
00793         } while (m != l);
00794     }
00795 }
00796 
00797 void TNumericalStuff::CholeskyDecomposition(TFltVV& A, TFltV& p) {
00798   Assert(A.GetRows() == A.GetCols());
00799   int n = A.GetRows(); p.Reserve(n,n);
00800 
00801   int i,j,k;
00802   double sum;
00803   for (i=1;i<=n;i++) {
00804     for (j=i;j<=n;j++) {
00805       for (sum=A(i-1,j-1),k=i-1;k>=1;k--) sum -= A(i-1,k-1)*A(j-1,k-1);
00806       if (i == j) {
00807         if (sum <= 0.0)
00808           nrerror("choldc failed");
00809         p[i-1]=sqrt(sum);
00810       } else A(j-1,i-1)=sum/p[i-1];
00811     }
00812   }
00813 }
00814 
00815 void TNumericalStuff::CholeskySolve(const TFltVV& A, const TFltV& p, const TFltV& b, TFltV& x) {
00816   IAssert(A.GetRows() == A.GetCols());
00817   int n = A.GetRows(); x.Reserve(n,n);
00818 
00819   int i,k;
00820   double sum;
00821 
00822   // Solve L * y = b, storing y in x
00823   for (i=1;i<=n;i++) {
00824     for (sum=b[i-1],k=i-1;k>=1;k--)
00825       sum -= A(i-1,k-1)*x[k-1];
00826     x[i-1]=sum/p[i-1];
00827   }
00828 
00829   // Solve L^T * x = y
00830   for (i=n;i>=1;i--) {
00831     for (sum=x[i-1],k=i+1;k<=n;k++)
00832       sum -= A(k-1,i-1)*x[k-1];
00833     x[i-1]=sum/p[i-1];
00834   }
00835 }
00836 
00837 void TNumericalStuff::SolveSymetricSystem(TFltVV& A, const TFltV& b, TFltV& x) {
00838   IAssert(A.GetRows() == A.GetCols());
00839   TFltV p; CholeskyDecomposition(A, p);
00840   CholeskySolve(A, p, b, x);
00841 }
00842 
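// Usage sketch for SolveSymetricSystem above (illustrative helper, not part of the
// SNAP sources): A must be symmetric positive definite, and the call overwrites its
// lower triangle with the Cholesky factor, so pass a copy if A is still needed.
static void ExampleCholeskySolve() {
    TFltVV A(2, 2); TFltV b(2), x;
    A(0, 0) = 4.0; A(0, 1) = 1.0;
    A(1, 0) = 1.0; A(1, 1) = 3.0;
    b[0] = 1.0; b[1] = 2.0;
    TNumericalStuff::SolveSymetricSystem(A, b, x);  // x is approximately (1/11, 7/11)
}
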
00843 void TNumericalStuff::InverseSubstitute(TFltVV& A, const TFltV& p) {
00844   IAssert(A.GetRows() == A.GetCols());
00845   int n = A.GetRows(); TFltV x(n);
00846 
00847     int i, j, k; double sum;
00848     for (i = 0; i < n; i++) {
00849       // solve L * y = e_i, store in x
00850         // elements from 0 to i-1 are 0.0
00851         for (j = 0; j < i; j++) x[j] = 0.0;
00852         // solve l_ii * y_i = 1 => y_i = 1/l_ii
00853         x[i] = 1/p[i];
00854         // solve y_j for j > i
00855         for (j = i+1; j < n; j++) {
00856             for (sum = 0.0, k = i; k < j; k++)
00857                 sum -= A(j,k) * x[k];
00858             x[j] = sum / p[j];
00859         }
00860 
00861       // solve L' * x = y, store in upper triangle of A
00862         for (j = n-1; j >= i; j--) {
00863             for (sum = x[j], k = j+1; k < n; k++)
00864                 sum -= A(k,j)*x[k];
00865             x[j] = sum/p[j];
00866         }
00867         for (int j = i; j < n; j++) A(i,j) = x[j];
00868     }
00869 
00870 }
00871 
00872 void TNumericalStuff::InverseSymetric(TFltVV& A) {
00873     IAssert(A.GetRows() == A.GetCols());
00874     TFltV p;
00875     // first we calculate cholesky decomposition of A
00876     CholeskyDecomposition(A, p);
00877     // then we solve the system A x_i = e_i for i = 1..n
00878     InverseSubstitute(A, p);
00879 }
00880 
00881 void TNumericalStuff::InverseTriagonal(TFltVV& A) {
00882   IAssert(A.GetRows() == A.GetCols());
00883   int n = A.GetRows(); TFltV x(n), p(n);
00884 
00885     int i, j, k; double sum;
00886     // copy upper triangle to lower one as we'll overwrite upper one
00887     for (i = 0; i < n; i++) {
00888         p[i] = A(i,i);
00889         for (j = i+1; j < n; j++)
00890             A(j,i) = A(i,j);
00891     }
00892     // solve
00893     for (i = 0; i < n; i++) {
00894         // solve R * x = e_i, store in x
00895         // elements from i+1 to n-1 are 0.0
00896         for (j = n-1; j > i; j--) x[j] = 0.0;
00897         // solve l_ii * y_i = 1 => y_i = 1/l_ii
00898         x[i] = 1/p[i];
00899         // solve x_j for j < i
00900         for (j = i-1; j >= 0; j--) {
00901             for (sum = 0.0, k = i; k > j; k--)
00902                 sum -= A(k,j) * x[k];
00903             x[j] = sum / p[j];
00904         }
00905         for (int j = 0; j <= i; j++) A(j,i) = x[j];
00906     }
00907 }
00908 
00909 void TNumericalStuff::LUDecomposition(TFltVV& A, TIntV& indx, double& d) {
00910   Assert(A.GetRows() == A.GetCols());
00911   int n = A.GetRows(); indx.Reserve(n,n);
00912 
00913     int i=0,imax=0,j=0,k=0;
00914     double big,dum,sum,temp;
00915     TFltV vv(n); // vv stores the implicit scaling of each row.
00916     d=1.0;       // No row interchanges yet.
00917 
00918     // Loop over rows to get the implicit scaling information.
00919     for (i=1;i<=n;i++) {
00920         big=0.0;
00921         for (j=1;j<=n;j++)
00922             if ((temp=TFlt::Abs(A(i-1,j-1))) > big) big=temp;
00923         if (big == 0.0) nrerror("Singular matrix in routine LUDecomposition");
00924         vv[i-1]=1.0/big;
00925     }
00926 
00927     for (j=1;j<=n;j++) {
00928         for (i=1;i<j;i++) {
00929             sum=A(i-1,j-1);
00930             for (k=1;k<i;k++) sum -= A(i-1,k-1)*A(k-1,j-1);
00931             A(i-1,j-1)=sum;
00932         }
00933         big=0.0; //Initialize for the search for largest pivot element.
00934         for (i=j;i<=n;i++) {
00935             sum=A(i-1,j-1);
00936             for (k=1;k<j;k++)
00937                 sum -= A(i-1,k-1)*A(k-1,j-1);
00938             A(i-1,j-1)=sum;
00939 
00940             //Is the figure of merit for the pivot better than the best so far?
00941             if ((dum=vv[i-1] * TFlt::Abs(sum)) >= big) {
00942                 big=dum;
00943                 imax=i;
00944             }
00945         }
00946 
00947         //Do we need to interchange rows?
00948         if (j != imax) {
00949             //Yes, do so...
00950             for (k=1;k<=n;k++) {
00951                 dum=A(imax-1,k-1);
00952                 A(imax-1,k-1)=A(j-1,k-1); // Tadej: imax-1,k looks wrong
00953                 A(j-1,k-1)=dum;
00954             }
00955             //...and change the parity of d.
00956             d = -d;
00957             vv[imax-1]=vv[j-1]; //Also interchange the scale factor.
00958         }
00959         indx[j-1]=imax;
00960 
00961         //If the pivot element is zero the matrix is singular (at least to the precision of the
00962         //algorithm). For some applications on singular matrices, it is desirable to substitute
00963         //TINY for zero.
00964         if (A(j-1,j-1) == 0.0) A(j-1,j-1)=1e-20;
00965 
00966          //Now, finally, divide by the pivot element.
00967         if (j != n) {
00968             dum=1.0/(A(j-1,j-1));
00969             for (i=j+1;i<=n;i++) A(i-1,j-1) *= dum;
00970         }
00971     } //Go back for the next column in the reduction.
00972 }
00973 
00974 void TNumericalStuff::LUSolve(const TFltVV& A, const TIntV& indx, TFltV& b) {
00975   Assert(A.GetRows() == A.GetCols());
00976   int n = A.GetRows();
00977     int i,ii=0,ip,j;
00978     double sum;
00979     for (i=1;i<=n;i++) {
00980         ip=indx[i-1];
00981         sum=b[ip-1];
00982         b[ip-1]=b[i-1];
00983         if (ii)
00984             for (j=ii;j<=i-1;j++) sum -= A(i-1,j-1)*b[j-1];
00985         else if (sum) { ii=i; }  b[i-1]=sum;
00986     }
00987     for (i=n;i>=1;i--) {
00988         sum=b[i-1];
00989         for (j=i+1;j<=n;j++) sum -= A(i-1,j-1)*b[j-1];
00990         b[i-1]=sum/A(i-1,i-1);
00991     }
00992 }
00993 
00994 void TNumericalStuff::SolveLinearSystem(TFltVV& A, const TFltV& b, TFltV& x) {
00995     TIntV indx; double d;
00996     LUDecomposition(A, indx, d);
00997     x = b;
00998     LUSolve(A, indx, x);
00999 }
01000 
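// Usage sketch for SolveLinearSystem above (illustrative helper, not part of the
// SNAP sources): the matrix may be non-symmetric, and it is LU-decomposed in place
// with partial pivoting, so pass a copy if A is still needed.
static void ExampleSolveLinearSystem() {
    TFltVV A(2, 2); TFltV b(2), x;
    A(0, 0) = 0.0; A(0, 1) = 2.0;   // zero pivot handled by the row interchanges
    A(1, 0) = 3.0; A(1, 1) = 1.0;
    b[0] = 4.0; b[1] = 5.0;
    TNumericalStuff::SolveLinearSystem(A, b, x);  // x is approximately (1, 2)
}
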
01002 // Sparse-SVD
01003 void TSparseSVD::MultiplyATA(const TMatrix& Matrix,
01004         const TFltVV& Vec, int ColId, TFltV& Result) {
01005     TFltV tmp(Matrix.GetRows());
01006     // tmp = A * Vec(:,ColId)
01007     Matrix.Multiply(Vec, ColId, tmp);
01008     // Vec = A' * tmp
01009     Matrix.MultiplyT(tmp, Result);
01010 }
01011 
01012 void TSparseSVD::MultiplyATA(const TMatrix& Matrix,
01013         const TFltV& Vec, TFltV& Result) {
01014     TFltV tmp(Matrix.GetRows());
01015     // tmp = A * Vec
01016     Matrix.Multiply(Vec, tmp);
01017     // Vec = A' * tmp
01018     Matrix.MultiplyT(tmp, Result);
01019 }
01020 
01021 void TSparseSVD::OrtoIterSVD(const TMatrix& Matrix,
01022         int NumSV, int IterN, TFltV& SgnValV) {
01023 
01024     int i, j, k;
01025     int N = Matrix.GetCols(), M = NumSV;
01026     TFltVV Q(N, M);
01027 
01028     // Q = rand(N,M)
01029     TRnd rnd;
01030     for (i = 0; i < N; i++) {
01031         for (j = 0; j < M; j++)
01032             Q(i,j) = rnd.GetUniDev();
01033     }
01034 
01035     TFltV tmp(N);
01036     for (int IterC = 0; IterC < IterN; IterC++) {
01037         printf("%d..", IterC);
01038         // Gram-Schmidt
01039         TLinAlg::GS(Q);
01040         // Q = A'*A*Q
01041         for (int ColId = 0; ColId < M; ColId++) {
01042             MultiplyATA(Matrix, Q, ColId, tmp);
01043             for (k = 0; k < N; k++) Q(k,ColId) = tmp[k];
01044         }
01045     }
01046 
01047     SgnValV.Reserve(NumSV,0);
01048     for (i = 0; i < NumSV; i++)
01049         SgnValV.Add(sqrt(TLinAlg::Norm(Q,i)));
01050     TLinAlg::GS(Q);
01051 }
01052 
01053 void TSparseSVD::SimpleLanczos(const TMatrix& Matrix,
01054         const int& NumEig, TFltV& EigValV,
01055         const bool& DoLocalReortoP, const bool& SvdMatrixProductP) {
01056 
01057     if (SvdMatrixProductP) {
01058         // if this fails, use transposed matrix
01059         IAssert(Matrix.GetRows() >= Matrix.GetCols());
01060     } else {
01061         IAssert(Matrix.GetRows() == Matrix.GetCols());
01062     }
01063 
01064     const int N = Matrix.GetCols(); // size of matrix
01065     TFltV r(N), v0(N), v1(N); // current vector and 2 previous ones
01066     TFltV alpha(NumEig, 0), beta(NumEig, 0); // diagonal and subdiagonal of T
01067 
01068     printf("Calculating %d eigen-values of %d x %d matrix\n", NumEig, N, N);
01069 
01070     // set starting vector
01071     //TRnd Rnd(0);
01072     for (int i = 0; i < N; i++) {
01073         r[i] = 1/sqrt((double)N); // Rnd.GetNrmDev();
01074         v0[i] = v1[i] = 0.0;
01075     }
01076     beta.Add(TLinAlg::Norm(r));
01077 
01078     for (int j = 0; j < NumEig; j++) {
01079         printf("%d\r", j+1);
01080         // v_j -> v_(j-1)
01081         v0 = v1;
01082         // v_j = (1/beta_(j-1)) * r
01083         TLinAlg::MultiplyScalar(1/beta[j], r, v1);
01084         // r = A*v_j
01085         if (SvdMatrixProductP) {
01086             // A = Matrix'*Matrix
01087             MultiplyATA(Matrix, v1, r);
01088         } else {
01089             // A = Matrix
01090             Matrix.Multiply(v1, r);
01091         }
01092         // r = r - beta_(j-1) * v_(j-1)
01093         TLinAlg::AddVec(-beta[j], v0, r, r);
01094         // alpha_j = vj'*r
01095         alpha.Add(TLinAlg::DotProduct(v1, r));
01096         // r = r - v_j * alpha_j
01097         TLinAlg::AddVec(-alpha[j], v1, r, r);
01098         // reorthogonalization if necessary
01099         if (DoLocalReortoP) { } //TODO
01100         // beta_j = ||r||_2
01101         beta.Add(TLinAlg::Norm(r));
01102         // compute approximate eigenvalues of T_j
01103         // test bounds for convergence
01104     }
01105     printf("\n");
01106 
01107     // prepare matrix T
01108     TFltV d(NumEig + 1), e(NumEig + 1);
01109     d[1] = alpha[0]; d[0] = e[0] = e[1] = 0.0;
01110     for (int i = 1; i < NumEig; i++) {
01111         d[i+1] = alpha[i]; e[i+1] = beta[i]; }
01112     // solve eigen problem for tridiagonal matrix with diag d and subdiag e
01113     TFltVV S(NumEig+1,NumEig+1); // eigen-vectors
01114     TLAMisc::FillIdentity(S); // make it identity
01115     TNumericalStuff::EigSymmetricTridiag(d, e, NumEig, S); // solve
01116     //TLAMisc::PrintTFltV(d, "AllEigV");
01117 
01118     // check convergence
01119     TFltKdV AllEigValV(NumEig, 0);
01120     for (int i = 1; i <= NumEig; i++) {
01121         const double ResidualNorm = TFlt::Abs(S(i-1, NumEig-1) * beta.Last());
01122         if (ResidualNorm < 1e-5)
01123             AllEigValV.Add(TFltKd(TFlt::Abs(d[i]), d[i]));
01124     }
01125 
01126     // prepare results
01127     AllEigValV.Sort(false); EigValV.Gen(NumEig, 0);
01128     for (int i = 0; i < AllEigValV.Len(); i++) {
01129         if (i == 0 || (TFlt::Abs(AllEigValV[i].Dat/AllEigValV[i-1].Dat) < 0.9999))
01130             EigValV.Add(AllEigValV[i].Dat);
01131     }
01132 }
01133 
01134 void TSparseSVD::Lanczos(const TMatrix& Matrix, int NumEig,
01135         int Iters, const TSpSVDReOrtoType& ReOrtoType,
01136         TFltV& EigValV, TFltVV& EigVecVV, const bool& SvdMatrixProductP) {
01137 
01138   if (SvdMatrixProductP) {
01139     // if this fails, use transposed matrix
01140     IAssert(Matrix.GetRows() >= Matrix.GetCols());
01141   } else {
01142     IAssert(Matrix.GetRows() == Matrix.GetCols());
01143   }
01144   IAssertR(NumEig <= Iters, TStr::Fmt("%d <= %d", NumEig, Iters));
01145 
01146   //if (ReOrtoType == ssotFull) printf("Full reortogonalization\n");
01147   int i, N = Matrix.GetCols(), K = 0; // K - current dimension of T
01148   double t = 0.0, eps = 1e-6; // t - 1-norm of T
01149 
01150   //sequence of Ritz's vectors
01151   TFltVV Q(N, Iters);
01152   double tmp = 1/sqrt((double)N);
01153   for (i = 0; i < N; i++) {
01154     Q(i,0) = tmp;
01155   }
01156   //converged Ritz vectors
01157   TVec<TFltV> ConvgQV(Iters);
01158   TIntV CountConvgV(Iters);
01159   for (i = 0; i < Iters; i++) CountConvgV[i] = 0;
01160   // const int ConvgTreshold = 50;
01161 
01162   //diagonal and subdiagonal of T
01163   TFltV d(Iters+1), e(Iters+1);
01164   //eigenvectors of T
01165   //TFltVV V;
01166   TFltVV V(Iters, Iters);
01167 
01168   // z - current Lanczos's vector
01169   TFltV z(N), bb(Iters), aa(Iters), y(N);
01170   //printf("svd(%d,%d)...\n", NumEig, Iters);
01171 
01172   if (SvdMatrixProductP) {
01173     // A = Matrix'*Matrix
01174     MultiplyATA(Matrix, Q, 0, z);
01175   } else {
01176     // A = Matrix
01177     Matrix.Multiply(Q, 0, z);
01178   }
01179 
01180   for (int j = 0; j < (Iters-1); j++) {
01181     //printf("%d..\r",j+2);
01182 
01183     //calculates (j+1)-th Lanczos's vector
01184     // aa[j] = <Q(:,j), z>
01185     aa[j] = TLinAlg::DotProduct(Q, j, z);
01186     //printf(" %g -- ", aa[j].Val); //HACK
01187 
01188     TLinAlg::AddVec(-aa[j], Q, j, z);
01189     if (j > 0) {
01190       // z := -aa[j] * Q(:,j) + z
01191       TLinAlg::AddVec(-bb[j-1], Q, j-1, z);
01192 
01193       //reortogonalization
01194       if (ReOrtoType == ssotSelective || ReOrtoType == ssotFull) {
01195         for (i = 0; i <= j; i++) {
01196         // if the i-th vector has converged, then we have to orthogonalize against it
01197           if ((ReOrtoType == ssotFull) ||
01198             (bb[j-1] * TFlt::Abs(V(K-1, i)) < eps * t)) {
01199 
01200             ConvgQV[i].Reserve(N,N); CountConvgV[i]++;
01201             TFltV& vec = ConvgQV[i];
01202             //vec = Q * V(:,i)
01203             for (int k = 0; k < N; k++) {
01204               vec[k] = 0.0;
01205               for (int l = 0; l < K; l++) {
01206                 vec[k] += Q(k,l) * V(l,i);
01207               }
01208             }
01209             TLinAlg::AddVec(-TLinAlg::DotProduct(ConvgQV[i], z), ConvgQV[i], z ,z);
01210           }
01211         }
01212       }
01213     }
01214 
01215     //adds (j+1)-th Lanczos's vector to Q
01216     bb[j] = TLinAlg::Norm(z);
01217     if (!(bb[j] > 1e-10)) {
01218       printf("Rank of matrix is only %d\n", j+2);
01219       printf("Last singular value is %g\n", bb[j].Val);
01220       break;
01221     }
01222 
01223     for (i = 0; i < N; i++) {
01224       Q(i, j+1) = z[i] / bb[j];
01225     }
01226 
01227     //next Lanczos vector
01228     if (SvdMatrixProductP) {
01229       // A = Matrix'*Matrix
01230       MultiplyATA(Matrix, Q, j+1, z);
01231     } else {
01232       // A = Matrix
01233       Matrix.Multiply(Q, j+1, z);
01234     }
01235 
01236     //calculate T (K x K matrix)
01237     K = j + 2;
01238     // calculate diagonal
01239     for (i = 1; i < K; i++) d[i] = aa[i-1];
01240     d[K] = TLinAlg::DotProduct(Q, K-1, z);
01241     // calculate subdiagonal
01242     e[1] = 0.0;
01243     for (i = 2; i <= K; i++) e[i] = bb[i-2];
01244 
01245     //calculate 1-norm of T
01246     t = TFlt::GetMx(TFlt::Abs(d[1]) + TFlt::Abs(e[2]), TFlt::Abs(e[K]) + TFlt::Abs(d[K]));
01247     for (i = 2; i < K; i++) {
01248       t = TFlt::GetMx(t, TFlt::Abs(e[i]) + TFlt::Abs(d[i]) + TFlt::Abs(e[i+1]));
01249     }
01250 
01251     //set V to identity matrix
01252     //V.Gen(K,K);
01253     for (i = 0; i < K; i++) {
01254       for (int k = 0; k < K; k++) {
01255         V(i,k) = 0.0;
01256       }
01257       V(i,i) = 1.0;
01258     }
01259 
01260     //eigenvectors of T
01261     TNumericalStuff::EigSymmetricTridiag(d, e, K, V);
01262   }//for
01263 
01264   //printf("\n");
01265 
01266   // Finds NumEig largest eigen values
01267   TFltIntKdV sv(K);
01268   for (i = 0; i < K; i++) {
01269     sv[i].Key = TFlt::Abs(d[i+1]);
01270     sv[i].Dat = i;
01271   }
01272   sv.Sort(false);
01273 
01274   TFltV uu(Matrix.GetRows());
01275   const int FinalNumEig = TInt::GetMn(NumEig, K);
01276   EigValV.Reserve(FinalNumEig,0);
01277   EigVecVV.Gen(Matrix.GetCols(), FinalNumEig);
01278   for (i = 0; i < FinalNumEig; i++) {
01279     //printf("s[%d] = %20.15f\r", i, sv[i].Key.Val);
01280     int ii = sv[i].Dat;
01281     double sigma = d[ii+1].Val;
01282     // calculate singular value
01283     EigValV.Add(sigma);
01284     // calculate i-th right singular vector ( V := Q * W )
01285     TLinAlg::Multiply(Q, V, ii, EigVecVV, i);
01286   }
01287   //printf("done                           \n");
01288 }
01289 
01290 void TSparseSVD::Lanczos2(const TMatrix& Matrix, int MaxNumEig,
01291     int MaxSecs, const TSpSVDReOrtoType& ReOrtoType,
01292     TFltV& EigValV, TFltVV& EigVecVV, const bool& SvdMatrixProductP) {
01293 
01294   if (SvdMatrixProductP) {
01295     // if this fails, use transposed matrix
01296     IAssert(Matrix.GetRows() >= Matrix.GetCols());
01297   } else {
01298     IAssert(Matrix.GetRows() == Matrix.GetCols());
01299   }
01300   //IAssertR(NumEig <= Iters, TStr::Fmt("%d <= %d", NumEig, Iters));
01301 
01302   //if (ReOrtoType == ssotFull) printf("Full reortogonalization\n");
01303   int i, N = Matrix.GetCols(), K = 0; // K - current dimension of T
01304   double t = 0.0, eps = 1e-6; // t - 1-norm of T
01305 
01306   //sequence of Ritz's vectors
01307   TFltVV Q(N, MaxNumEig);
01308   double tmp = 1/sqrt((double)N);
01309   for (i = 0; i < N; i++)
01310       Q(i,0) = tmp;
01311   //converged Ritz vectors
01312   TVec<TFltV> ConvgQV(MaxNumEig);
01313   TIntV CountConvgV(MaxNumEig);
01314   for (i = 0; i < MaxNumEig; i++) CountConvgV[i] = 0;
01315   // const int ConvgTreshold = 50;
01316 
01317   //diagonal and subdiagonal of T
01318   TFltV d(MaxNumEig+1), e(MaxNumEig+1);
01319   //eigenvectors of T
01320   //TFltVV V;
01321   TFltVV V(MaxNumEig, MaxNumEig);
01322 
01323   // z - current Lanczos's vector
01324   TFltV z(N), bb(MaxNumEig), aa(MaxNumEig), y(N);
01325   //printf("svd(%d,%d)...\n", NumEig, Iters);
01326 
01327   if (SvdMatrixProductP) {
01328       // A = Matrix'*Matrix
01329       MultiplyATA(Matrix, Q, 0, z);
01330   } else {
01331       // A = Matrix
01332       Matrix.Multiply(Q, 0, z);
01333   }
01334   TExeTm ExeTm;
01335   for (int j = 0; j < (MaxNumEig-1); j++) {
01336     printf("%d [%s]..\r",j+2, ExeTm.GetStr());
01337     if (ExeTm.GetSecs() > MaxSecs) { break; }
01338 
01339     //calculates (j+1)-th Lanczos's vector
01340     // aa[j] = <Q(:,j), z>
01341     aa[j] = TLinAlg::DotProduct(Q, j, z);
01342     //printf(" %g -- ", aa[j].Val); //HACK
01343 
01344     TLinAlg::AddVec(-aa[j], Q, j, z);
01345     if (j > 0) {
01346         // z := -aa[j] * Q(:,j) + z
01347         TLinAlg::AddVec(-bb[j-1], Q, j-1, z);
01348 
01349         //reortogonalization
01350         if (ReOrtoType == ssotSelective || ReOrtoType == ssotFull) {
01351             for (i = 0; i <= j; i++) {
01352                 // if the i-th vector has converged, then we have to orthogonalize against it
01353                 if ((ReOrtoType == ssotFull) ||
01354                     (bb[j-1] * TFlt::Abs(V(K-1, i)) < eps * t)) {
01355 
01356                     ConvgQV[i].Reserve(N,N); CountConvgV[i]++;
01357                     TFltV& vec = ConvgQV[i];
01358                     //vec = Q * V(:,i)
01359                     for (int k = 0; k < N; k++) {
01360                         vec[k] = 0.0;
01361                         for (int l = 0; l < K; l++)
01362                             vec[k] += Q(k,l) * V(l,i);
01363                     }
01364                     TLinAlg::AddVec(-TLinAlg::DotProduct(ConvgQV[i], z), ConvgQV[i], z ,z);
01365                 }
01366             }
01367         }
01368     }
01369 
01370     //adds (j+1)-th Lanczos's vector to Q
01371     bb[j] = TLinAlg::Norm(z);
01372     if (!(bb[j] > 1e-10)) {
01373       printf("Rank of matrix is only %d\n", j+2);
01374       printf("Last singular value is %g\n", bb[j].Val);
01375       break;
01376     }
01377     for (i = 0; i < N; i++)
01378         Q(i, j+1) = z[i] / bb[j];
01379 
01380     //next Lanczos vector
01381     if (SvdMatrixProductP) {
01382         // A = Matrix'*Matrix
01383         MultiplyATA(Matrix, Q, j+1, z);
01384     } else {
01385         // A = Matrix
01386         Matrix.Multiply(Q, j+1, z);
01387     }
01388 
01389     //calculate T (K x K matrix)
01390     K = j + 2;
01391     // calculate diagonal
01392     for (i = 1; i < K; i++) d[i] = aa[i-1];
01393     d[K] = TLinAlg::DotProduct(Q, K-1, z);
01394     // calculate subdiagonal
01395     e[1] = 0.0;
01396     for (i = 2; i <= K; i++) e[i] = bb[i-2];
01397 
01398     //calculate 1-norm of T
01399     t = TFlt::GetMx(TFlt::Abs(d[1]) + TFlt::Abs(e[2]), TFlt::Abs(e[K]) + TFlt::Abs(d[K]));
01400     for (i = 2; i < K; i++)
01401         t = TFlt::GetMx(t, TFlt::Abs(e[i]) + TFlt::Abs(d[i]) + TFlt::Abs(e[i+1]));
01402 
01403     //set V to identity matrix
01404     //V.Gen(K,K);
01405     for (i = 0; i < K; i++) {
01406         for (int k = 0; k < K; k++)
01407             V(i,k) = 0.0;
01408         V(i,i) = 1.0;
01409     }
01410 
01411     //eigenvectors of T
01412     TNumericalStuff::EigSymmetricTridiag(d, e, K, V);
01413   }//for
01414   printf("... calc %d.", K);
01415   // Finds NumEig largest eigen values
01416   TFltIntKdV sv(K);
01417   for (i = 0; i < K; i++) {
01418     sv[i].Key = TFlt::Abs(d[i+1]);
01419     sv[i].Dat = i;
01420   }
01421   sv.Sort(false);
01422 
01423   TFltV uu(Matrix.GetRows());
01424   const int FinalNumEig = K; //TInt::GetMn(NumEig, K);
01425   EigValV.Reserve(FinalNumEig,0);
01426   EigVecVV.Gen(Matrix.GetCols(), FinalNumEig);
01427   for (i = 0; i < FinalNumEig; i++) {
01428     //printf("s[%d] = %20.15f\r", i, sv[i].Key.Val);
01429     int ii = sv[i].Dat;
01430     double sigma = d[ii+1].Val;
01431     // calculate singular value
01432     EigValV.Add(sigma);
01433     // calculate i-th right singular vector ( V := Q * W )
01434     TLinAlg::Multiply(Q, V, ii, EigVecVV, i);
01435   }
01436   printf("  done\n");
01437 }
01438 
01439 
01440 void TSparseSVD::SimpleLanczosSVD(const TMatrix& Matrix,
01441         const int& CalcSV, TFltV& SngValV, const bool& DoLocalReorto) {
01442 
01443     SimpleLanczos(Matrix, CalcSV, SngValV, DoLocalReorto, true);
01444     for (int SngValN = 0; SngValN < SngValV.Len(); SngValN++) {
01445       //IAssert(SngValV[SngValN] >= 0.0);
01446       if (SngValV[SngValN] < 0.0) {
01447         printf("bad sng val: %d %g\n", SngValN, SngValV[SngValN]());
01448         SngValV[SngValN] = 0;
01449       }
01450       SngValV[SngValN] = sqrt(SngValV[SngValN].Val);
01451     }
01452 }
01453 
01454 void TSparseSVD::LanczosSVD(const TMatrix& Matrix, int NumSV,
01455         int Iters, const TSpSVDReOrtoType& ReOrtoType,
01456         TFltV& SgnValV, TFltVV& LeftSgnVecVV, TFltVV& RightSgnVecVV) {
01457 
01458     // solve eigen problem for Matrix'*Matrix
01459     Lanczos(Matrix, NumSV, Iters, ReOrtoType, SgnValV, RightSgnVecVV, true);
01460     // calculate left singular vectors and sqrt singular values
01461     const int FinalNumSV = SgnValV.Len();
01462     LeftSgnVecVV.Gen(Matrix.GetRows(), FinalNumSV);
01463     TFltV LeftSgnVecV(Matrix.GetRows());
01464     for (int i = 0; i < FinalNumSV; i++) {
01465         if (SgnValV[i].Val < 0.0) { SgnValV[i] = 0.0; }
01466         const double SgnVal = sqrt(SgnValV[i]);
01467         SgnValV[i] = SgnVal;
01468         // calculate i-th left singular vector ( U := A * V * S^(-1) )
01469         Matrix.Multiply(RightSgnVecVV, i, LeftSgnVecV);
01470         for (int j = 0; j < LeftSgnVecV.Len(); j++) {
01471             LeftSgnVecVV(j,i) = LeftSgnVecV[j] / SgnVal; }
01472     }
01473     //printf("done                           \n");
01474 }
01475 
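// Usage sketch for LanczosSVD above (illustrative; the file name and helper are
// hypothetical, not part of the SNAP sources): for an m x n sparse matrix with
// m >= n it returns the leading singular values and the corresponding left and
// right singular vectors.
static void ExampleLanczosSVD() {
    TSparseRowMatrix A("graph.mtx");   // any TMatrix with GetRows() >= GetCols()
    TFltV SgnValV; TFltVV LeftSgnVecVV, RightSgnVecVV;
    TSparseSVD::LanczosSVD(A, 2, 20, ssotFull, SgnValV, LeftSgnVecVV, RightSgnVecVV);
    for (int i = 0; i < SgnValV.Len(); i++) {
        printf("sigma_%d = %g\n", i, SgnValV[i].Val);
    }
}
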
01476 void TSparseSVD::Project(const TIntFltKdV& Vec, const TFltVV& U, TFltV& ProjVec) {
01477     const int m = U.GetCols(); // number of columns
01478 
01479     ProjVec.Gen(m, 0);
01480     for (int j = 0; j < m; j++) {
01481         double x = 0.0;
01482         for (int i = 0; i < Vec.Len(); i++)
01483             x += U(Vec[i].Key, j) * Vec[i].Dat;
01484         ProjVec.Add(x);
01485     }
01486 }
01487 
01489 // Sigmoid
01490 double TSigmoid::EvaluateFit(const TFltIntKdV& data, const double A, const double B)
01491 {
01492   double J = 0.0;
01493   for (int i = 0; i < data.Len(); i++)
01494   {
01495     double zi = data[i].Key; int yi = data[i].Dat;
01496     double e = exp(-A * zi + B);
01497     double denum = 1.0 + e;
01498     double prob = (yi > 0) ? (1.0 / denum) : (e / denum);
01499     J -= log(prob < 1e-20 ? 1e-20 : prob);
01500   }
01501   return J;
01502 }
01503 
01504 void TSigmoid::EvaluateFit(const TFltIntKdV& data, const double A, const double B, double& J, double& JA, double& JB)
01505 {
01506   //               J(A, B) = \sum_{i : y_i = 1} ln [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} [ln [1 + e^{-Az_i + B}] - {-Az_i + B}]
01507   //                       = \sum_i ln [1 + e^{-Az_i + B}] - \sum_{i : y_i = -1} {-Az_i + B}.
01508   // partial J / partial A = \sum_i (-z_i) e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} z_i.
01509   // partial J / partial B = \sum_i        e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} (-1).
01510   J = 0.0; double sum_all_PyNeg = 0.0, sum_all_ziPyNeg = 0.0, sum_yNeg_zi = 0.0, sum_yNeg_1 = 0.0;
01511   for (int i = 0; i < data.Len(); i++)
01512   {
01513     double zi = data[i].Key; int yi = data[i].Dat;
01514     double e = exp(-A * zi + B);
01515     double denum = 1.0 + e;
01516     double prob = (yi > 0) ? (1.0 / denum) : (e / denum);
01517     J -= log(prob < 1e-20 ? 1e-20 : prob);
01518     sum_all_PyNeg += e / denum;
01519     sum_all_ziPyNeg += zi * e / denum;
01520     if (yi < 0) { sum_yNeg_zi += zi; sum_yNeg_1 += 1; }
01521   }
01522   JA = -sum_all_ziPyNeg +     sum_yNeg_zi;
01523   JB =  sum_all_PyNeg   -     sum_yNeg_1;
01524 }
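
// Sketch of a finite-difference check for the gradient formulas derived above.
// SigmoidNegLogLik re-states J(A, B) locally (it is not a TSigmoid member);
// the central differences should agree, up to truncation error, with the JA and
// JB computed by EvaluateFit. Names and the step size h are illustrative.
static double SigmoidNegLogLik(const TFltIntKdV& data, const double A, const double B) {
    double J = 0.0;
    for (int i = 0; i < data.Len(); i++) {
        const double zi = data[i].Key; const int yi = data[i].Dat;
        const double e = exp(-A * zi + B);
        const double prob = (yi > 0) ? (1.0 / (1.0 + e)) : (e / (1.0 + e));
        J -= log(prob < 1e-20 ? 1e-20 : prob);
    }
    return J;
}

static void ExampleGradientCheck(const TFltIntKdV& data, const double A, const double B) {
    const double h = 1e-6;
    const double dJdA = (SigmoidNegLogLik(data, A + h, B) - SigmoidNegLogLik(data, A - h, B)) / (2 * h);
    const double dJdB = (SigmoidNegLogLik(data, A, B + h) - SigmoidNegLogLik(data, A, B - h)) / (2 * h);
    // compare against the analytic gradient (JA, JB) from EvaluateFit
    printf("numeric gradient: dJ/dA = %g, dJ/dB = %g\n", dJdA, dJdB);
}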
01525 
01526 void TSigmoid::EvaluateFit(const TFltIntKdV& data, const double A, const double B, const double U,
01527                            const double V, const double lambda, double& J, double& JJ, double& JJJ)
01528 {
01529   // Let E_i = e^{-(A + lambda U) z_i + (B + lambda V)}.  Then we have
01530   // J(lambda) = \sum_i ln [1 + E_i] - \sum_{i : y_i = -1} {-(A + lambda U)z_i + (B + lambda V)}.
01531   // J'(lambda) = \sum_i (V - U z_i) E_i / [1 + E_i] - \sum_{i : y_i = -1} {V - U z_i}.
01532   //            = \sum_i (V - U z_i) [1 - 1 / [1 + E_i]] - \sum_{i : y_i = -1} {V - U z_i}.
01533   // J"(lambda) = \sum_i (V - U z_i)^2 E_i / [1 + E_i]^2.
01534   J = 0.0; JJ = 0.0; JJJ = 0.0;
01535   for (int i = 0; i < data.Len(); i++)
01536   {
01537     double zi = data[i].Key; int yi = data[i].Dat;
01538     double e = exp(-A * zi + B);
01539     double denum = 1.0 + e;
01540     double prob = (yi > 0) ? (1.0 / denum) : (e / denum);
01541     J -= log(prob < 1e-20 ? 1e-20 : prob);
01542     double VU = V - U * zi;
01543     JJ += VU * (e / denum); if (yi < 0) JJ -= VU;
01544     JJJ += VU * VU * e / denum / denum;
01545   }
01546 }
01547 
01548 TSigmoid::TSigmoid(const TFltIntKdV& data) {
01549   // Let z_i be the projection of the i'th training example, and y_i \in {-1, +1} be its class label.
01550   // Our sigmoid is: P(Y = 1 | Z = z) = 1 / [1 + e^{-Az + B}]
01551   // and we want to maximize \prod_i P(Y = y_i | Z = z_i)
01552   //                       = \prod_{i : y_i = 1} 1 / [1 + e^{-Az_i + B}]  \prod_{i : y_i = -1} e^{-Az_i + B} / [1 + e^{-Az_i + B}]
01553   // or minimize its negative logarithm,
01554   //               J(A, B) = \sum_{i : y_i = 1} ln [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} [ln [1 + e^{-Az_i + B}] - {-Az_i + B}]
01555   //                       = \sum_i ln [1 + e^{-Az_i + B}] - \sum_{i : y_i = -1} {-Az_i + B}.
01556   // partial J / partial A = \sum_i (-z_i) e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} z_i.
01557   // partial J / partial B = \sum_i        e^{-Az_i + B} / [1 + e^{-Az_i + B}] + \sum_{i : y_i = -1} (-1).
01558   double minProj = data[0].Key, maxProj = data[0].Key;
01559   {for (int i = 1; i < data.Len(); i++) {
01560     double zi = data[i].Key; if (zi < minProj) minProj = zi; if (zi > maxProj) maxProj = zi; }}
01561   //const bool dump = false;
01562   A = 1.0; B = 0.5 * (minProj + maxProj);
01563   double bestJ = 0.0, bestA = 0.0, bestB = 0.0, lambda = 1.0;
01564   for (int nIter = 0; nIter < 50; nIter++)
01565   {
01566     double J, JA, JB; TSigmoid::EvaluateFit(data, A, B, J, JA, JB);
01567     if (nIter == 0 || J < bestJ) { bestJ = J; bestA = A; bestB = B; }
01568     // How far should we move?
01569     //if (dump) printf("Iter %2d: A = %.5f, B = %.5f, J = %.5f, partial = (%.5f, %.5f)\n", nIter, A, B, J, JA, JB);
01570     double norm = TMath::Sqr(JA) + TMath::Sqr(JB);
01571     if (norm < 1e-10) break;
01572     const int cl = -1; // should be -1
01573 
01574     double Jc = TSigmoid::EvaluateFit(data, A + cl * lambda * JA / norm, B + cl * lambda * JB / norm);
01575     //if (dump) printf("  At lambda = %.5f, Jc = %.5f\n", lambda, Jc);
01576     if (Jc > J) {
01577       while (lambda > 1e-5) {
01578         lambda = 0.5 * lambda;
01579         Jc = TSigmoid::EvaluateFit(data, A + cl * lambda * JA / norm, B + cl * lambda * JB / norm);
01580         //if (dump) printf("  At lambda = %.5f, Jc = %.5f\n", lambda, Jc);
01581       } }
01582     else if (Jc < J) {
01583       while (lambda < 1e5) {
01584         double lambda2 = 2 * lambda;
01585         double Jc2 = TSigmoid::EvaluateFit(data, A + cl * lambda2 * JA / norm, B + cl * lambda2 * JB / norm);
01586         //if (dump) printf("  At lambda = %.5f, Jc = %.5f\n", lambda2, Jc2);
01587         if (Jc2 > Jc) break;
01588         lambda = lambda2; Jc = Jc2; } }
01589     if (Jc >= J) break;
01590     A += cl * lambda * JA / norm; B += cl * lambda * JB / norm;
01591     //if (dump) printf("   Lambda = %.5f, new A = %.5f, new B = %.5f, new J = %.5f\n", lambda, A, B, Jc);
01592   }
01593   A = bestA; B = bestB;
01594 }
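
// Usage sketch: fitting a sigmoid to classifier scores. Each training item pairs
// a real-valued score z_i (Key) with a label y_i in {-1, +1} (Dat); the
// constructor runs the gradient search above and stores the fitted A and B
// internally (how they are read back depends on the TSigmoid declaration in
// linalg.h). The scores and labels below are illustrative.
void ExampleFitSigmoid() {
    TFltIntKdV data;
    data.Add(TFltIntKd(-2.0, -1));   // low score, negative class
    data.Add(TFltIntKd(-0.5, -1));
    data.Add(TFltIntKd( 0.7, +1));
    data.Add(TFltIntKd( 2.1, +1));   // high score, positive class
    TSigmoid Sigmoid(data);   // maximum-likelihood fit of P(Y = 1 | Z = z) = 1 / [1 + e^{-Az + B}]
}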
01595 
01597 // Useful stuff (hopefully)
01598 void TLAMisc::SaveCsvTFltV(const TFltV& Vec, TSOut& SOut) {
01599     for (int ValN = 0; ValN < Vec.Len(); ValN++) {
01600         SOut.PutFlt(Vec[ValN]); SOut.PutCh(',');
01601     }
01602     SOut.PutLn();
01603 }
01604 
01605 void TLAMisc::SaveMatlabTFltIntKdV(const TIntFltKdV& SpV, const int& ColN, TSOut& SOut) {
01606     const int Len = SpV.Len();
01607     for (int ValN = 0; ValN < Len; ValN++) {
01608         SOut.PutStrLn(TStr::Fmt("%d %d %g", SpV[ValN].Key+1, ColN+1, SpV[ValN].Dat()));
01609     }
01610 }
01611 
01612 void TLAMisc::SaveMatlabTFltV(const TFltV& m, const TStr& FName) {
01613     PSOut out = TFOut::New(FName);
01614     const int RowN = m.Len();
01615     for (int RowId = 0; RowId < RowN; RowId++) {
01616         out->PutStr(TFlt::GetStr(m[RowId], 20, 18));
01617         out->PutCh('\n');
01618     }
01619     out->Flush();
01620 }
01621 
01622 void TLAMisc::SaveMatlabTIntV(const TIntV& m, const TStr& FName) {
01623     PSOut out = TFOut::New(FName);
01624     const int RowN = m.Len();
01625     for (int RowId = 0; RowId < RowN; RowId++) {
01626         out->PutInt(m[RowId]);
01627         out->PutCh('\n');
01628     }
01629     out->Flush();
01630 }
01631 
01632 void TLAMisc::SaveMatlabTFltVVCol(const TFltVV& m, int ColId, const TStr& FName) {
01633     PSOut out = TFOut::New(FName);
01634     const int RowN = m.GetRows();
01635     for (int RowId = 0; RowId < RowN; RowId++) {
01636         out->PutStr(TFlt::GetStr(m(RowId,ColId), 20, 18));
01637         out->PutCh('\n');
01638     }
01639     out->Flush();
01640 }
01641 
01642 
01643 void TLAMisc::SaveMatlabTFltVV(const TFltVV& m, const TStr& FName) {
01644     PSOut out = TFOut::New(FName);
01645     const int RowN = m.GetRows();
01646     const int ColN = m.GetCols();
01647     for (int RowId = 0; RowId < RowN; RowId++) {
01648         for (int ColId = 0; ColId < ColN; ColId++) {
01649             out->PutStr(TFlt::GetStr(m(RowId,ColId), 20, 18));
01650             out->PutCh(' ');
01651         }
01652         out->PutCh('\n');
01653     }
01654     out->Flush();
01655 }
01656 
01657 void TLAMisc::SaveMatlabTFltVVMjrSubMtrx(const TFltVV& m,
01658         int RowN, int ColN, const TStr& FName) {
01659 
01660     PSOut out = TFOut::New(FName);
01661     for (int RowId = 0; RowId < RowN; RowId++) {
01662         for (int ColId = 0; ColId < ColN; ColId++) {
01663             out->PutStr(TFlt::GetStr(m(RowId,ColId), 20, 18)); out->PutCh(' ');
01664         }
01665         out->PutCh('\n');
01666     }
01667     out->Flush();
01668 }
01669 
01670 void TLAMisc::LoadMatlabTFltVV(const TStr& FNm, TVec<TFltV>& ColV) {
01671     PSIn SIn = TFIn::New(FNm);
01672     TILx Lx(SIn, TFSet()|iloRetEoln|iloSigNum|iloExcept);
01673     int Row = 0, Col = 0; ColV.Clr();
01674     Lx.GetSym(syFlt, syEof, syEoln);
01675     //printf("%d x %d\r", Row, ColV.Len());
01676     while (Lx.Sym != syEof) {
01677         if (Lx.Sym == syFlt) {
01678             if (ColV.Len() > Col) {
01679                 IAssert(ColV[Col].Len() == Row);
01680                 ColV[Col].Add(Lx.Flt);
01681             } else {
01682                 IAssert(Row == 0);
01683                 ColV.Add(TFltV::GetV(Lx.Flt));
01684             }
01685             Col++;
01686         } else if (Lx.Sym == syEoln) {
01687             IAssert(Col == ColV.Len());
01688             Col = 0; Row++;
01689             if (Row%100 == 0) {
01690                 //printf("%d x %d\r", Row, ColV.Len());
01691             }
01692         } else {
01693             Fail;
01694         }
01695         Lx.GetSym(syFlt, syEof, syEoln);
01696     }
01697     //printf("\n");
01698     IAssert(Col == ColV.Len() || Col == 0);
01699 }
01700 
01701 void TLAMisc::LoadMatlabTFltVV(const TStr& FNm, TFltVV& MatrixVV) {
01702     TVec<TFltV> ColV; LoadMatlabTFltVV(FNm, ColV);
01703     if (ColV.Empty()) { MatrixVV.Clr(); return; }
01704     const int Rows = ColV[0].Len(), Cols = ColV.Len();
01705     MatrixVV.Gen(Rows, Cols);
01706     for (int RowN = 0; RowN < Rows; RowN++) {
01707         for (int ColN = 0; ColN < Cols; ColN++) {
01708             MatrixVV(RowN, ColN) = ColV[ColN][RowN];
01709         }
01710     }
01711 }
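
// Usage sketch: a save/load round trip through the whitespace-separated text
// format produced by SaveMatlabTFltVV (one matrix row per line, columns
// separated by spaces, readable by Matlab's load). The file name is illustrative.
void ExampleMatlabRoundTrip() {
    TFltVV M; M.Gen(2, 3);
    for (int r = 0; r < 2; r++) {
        for (int c = 0; c < 3; c++) { M(r, c) = r + 0.1 * c; }
    }
    TLAMisc::SaveMatlabTFltVV(M, "matrix.txt");
    TFltVV M2;
    TLAMisc::LoadMatlabTFltVV("matrix.txt", M2);
    // M2 now holds the same 2 x 3 matrix as M
}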
01712 
01713 
01714 void TLAMisc::PrintTFltV(const TFltV& Vec, const TStr& VecNm) {
01715     printf("%s = [", VecNm.CStr());
01716     for (int i = 0; i < Vec.Len(); i++) {
01717         printf("%.5f", Vec[i]());
01718         if (i < Vec.Len() - 1) { printf(", "); }
01719     }
01720     printf("]\n");
01721 }
01722 
01723 
01724 void TLAMisc::PrintTFltVV(const TFltVV& A, const TStr& MatrixNm) {
01725     printf("%s = [\n", MatrixNm.CStr());
01726     for (int j = 0; j < A.GetRows(); j++) {
01727         for (int i = 0; i < A.GetCols(); i++) {
01728             printf("%f\t", A.At(j, i).Val);
01729         }
01730         printf("\n");
01731     }
01732     printf("]\n");
01733 }
01734 
01735 void TLAMisc::PrintTIntV(const TIntV& Vec, const TStr& VecNm) {
01736     printf("%s = [", VecNm.CStr());
01737     for (int i = 0; i < Vec.Len(); i++) {
01738         printf("%d", Vec[i]());
01739         if (i < Vec.Len() - 1) printf(", ");
01740     }
01741     printf("]\n");
01742 }
01743 
01744 
01745 void TLAMisc::FillRnd(TFltV& Vec, TRnd& Rnd) {
01746     int Len = Vec.Len();
01747     for (int i = 0; i < Len; i++)
01748         Vec[i] = Rnd.GetNrmDev();
01749 }
01750 
01751 void TLAMisc::FillIdentity(TFltVV& M) {
01752     IAssert(M.GetRows() == M.GetCols());
01753     int Len = M.GetRows();
01754     for (int i = 0; i < Len; i++) {
01755         for (int j = 0; j < Len; j++) M(i,j) = 0.0;
01756         M(i,i) = 1.0;
01757     }
01758 }
01759 
01760 void TLAMisc::FillIdentity(TFltVV& M, const double& Elt) {
01761     IAssert(M.GetRows() == M.GetCols());
01762     int Len = M.GetRows();
01763     for (int i = 0; i < Len; i++) {
01764         for (int j = 0; j < Len; j++) M(i,j) = 0.0;
01765         M(i,i) = Elt;
01766     }
01767 }
01768 
01769 int TLAMisc::SumVec(const TIntV& Vec) {
01770     const int Len = Vec.Len();
01771     int res = 0;
01772     for (int i = 0; i < Len; i++)
01773         res += Vec[i];
01774     return res;
01775 }
01776 
01777 double TLAMisc::SumVec(const TFltV& Vec) {
01778     const int Len = Vec.Len();
01779     double res = 0.0;
01780     for (int i = 0; i < Len; i++)
01781         res += Vec[i];
01782     return res;
01783 }
01784 
01785 void TLAMisc::ToSpVec(const TFltV& Vec, TIntFltKdV& SpVec,
01786         const double& CutSumPrc) {
01787 
01788     // determine minimal element value
01789     IAssert(0.0 <= CutSumPrc && CutSumPrc <= 1.0);
01790     const int Elts = Vec.Len();
01791     double EltSum = 0.0;
01792     for (int EltN = 0; EltN < Elts; EltN++) {
01793         EltSum += TFlt::Abs(Vec[EltN]); }
01794     const double MnEltVal = CutSumPrc * EltSum;
01795     // create sparse vector
01796     SpVec.Clr();
01797     for (int EltN = 0; EltN < Elts; EltN++) {
01798         if (TFlt::Abs(Vec[EltN]) > MnEltVal) {
01799             SpVec.Add(TIntFltKd(EltN, Vec[EltN]));
01800         }
01801     }
01802     SpVec.Pack();
01803 }
01804 
01805 void TLAMisc::ToVec(const TIntFltKdV& SpVec, TFltV& Vec, const int& VecLen) {
01806     Vec.Gen(VecLen); Vec.PutAll(0.0);
01807     int Elts = SpVec.Len();
01808     for (int EltN = 0; EltN < Elts; EltN++) {
01809         if (SpVec[EltN].Key < VecLen) {
01810             Vec[SpVec[EltN].Key] = SpVec[EltN].Dat;
01811         }
01812     }
01813 }
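
// Usage sketch: a dense -> sparse -> dense round trip. ToSpVec keeps entries
// whose absolute value exceeds CutSumPrc times the sum of absolute values
// (CutSumPrc = 0 keeps every nonzero entry), and ToVec scatters the sparse
// entries back into a zero-initialized dense vector. The values are illustrative.
void ExampleSparseDenseRoundTrip() {
    TFltV Dense; Dense.Gen(4);
    Dense[0] = 0.0; Dense[1] = 2.5; Dense[2] = -1.0; Dense[3] = 0.001;
    TIntFltKdV Sparse;
    TLAMisc::ToSpVec(Dense, Sparse, 0.01);   // drop entries below 1% of the L1 mass
    TFltV Restored;
    TLAMisc::ToVec(Sparse, Restored, Dense.Len());
    // Restored matches Dense except for the entries pruned by the cutoff
}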