error("matrix-vector sizes not match");
BandMtx<T> tx(nrows, bwleft,
min(nrows-1, bwleft+bwrit));
for (int i = 0; i < nrows; i++)
for (int j = - bwleft; j <= bwrit; j++)
tx[i][j] = bdmx[i][j];
Vcr<int> pvt(nrows); // store pivoting info
// LU decomposition with column partial pivoting
const int nrowsmone = tx.nrows - 1;
for (int k = 0; k < nrowsmone; k++) {
int kbrow = min(nrowsmone - k, tx.bwleft);
int kbcol = min(nrowsmone - k, tx.bwrit);
// find the pivot in the k-th column
int pc = k;
double aet = abs(tx[k][0]);
for (int i = 1; i <= kbrow; i++) {
if (abs(tx[k+i][-i]) > aet) {
aet = abs(tx[k+i][-i]);
pc = k + i;
}
}
if (!aet) error("pivot is zero in GaussElimPP");
pvt[k] = pc;
// interchange pivot row and k-th row in U, not in L
if (pc != k) {
for (int j = 0; j <= kbcol; j++)
swap(tx[pc][k+j-pc], tx[k][j]);
}
// now eliminate column entries
for (int i = 1; i <= kbrow; i++) {
int kpi = k + i;
if (tx[kpi][-i] != 0) {
T dmul = tx[kpi][-i]/tx[k][0];
tx[kpi][-i] = dmul;
for (int j = 1; j <= kbcol; j++)
tx[kpi][j-i] -= dmul*tx[k][j];
}
}
}
pvt[nrowsmone] = nrowsmone;
// Forward substitution LY = b
for (int k = 0; k < nrowsmone; k++) {
int kbrow = min(nrowsmone - k, tx.bwleft);
int pvtk = pvt[k];
T sb = bb[pvtk];
if (k != pvtk) swap(bb[k], bb[pvtk]);
for (int j = 1; j <= kbrow; j++)
bb[k+j] -= tx[k+j][-j]*sb;
}
// Backward substitution U x = y
for (int k = nrowsmone; k>= 0; k--) {
int kb = min(nrowsmone -k, tx.bwrit);
for (int j = 1; j <= kb; j++) bb[k] -= tx[k][j]*bb[k+j];
bb[k] /= tx[k][0];
}
} // end GaussElimPP()
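// Usage sketch (illustration only, not part of the header's interface; it
// assumes the BandMtx<T>(nrows, bwleft, bwrit) and Vcr<T>(n) constructors
// and the diagonal-offset indexing A[i][j] used above):
//
//   BandMtx<double> A(4, 1, 1);             // 4x4 tridiagonal matrix
//   for (int i = 0; i < 4; i++) {
//     A[i][0] = 2.0;                        // main diagonal
//     if (i > 0) A[i][-1] = -1.0;           // subdiagonal
//     if (i < 3) A[i][1]  = -1.0;           // superdiagonal
//   }
//   Vcr<double> b(4);
//   for (int i = 0; i < 4; i++) b[i] = 1.0;
//   A.GaussElimPP(b);                       // on return b holds x with A x = b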
// BandGauss elimination with partial pivoting
template<class T>
void BandMtx<T>::GaussElimPP2(Vcr<T>& bb) const {
if (nrows != bb.size())
error("matrix-vector sizes not match");
// Gauss elimination with partial pivoting
// for band matrices. This version interchanges rows
// in both L and U and has no advantage, since
// it requires more storage than the full matrix
// storage format. Straightforward programming without
// further thought leads to this version.
// It should never be used; use
// BandMtx<T>::GaussElimPP() instead, which requires more
// thought to understand.
BandMtx<T> tx(nrows, nrows-1,
min(nrows-1, bwleft+bwrit));
for (int i = 0; i < nrows; i++)
for (int j = - bwleft; j <= bwrit; j++)
tx[i][j] = bdmx[i][j];
Vcr<int> pvt(nrows); // store pivoting info
for (int k = 0; k < nrows; k++) pvt[k] = k;
// LU decomposition with column partial pivoting
for (int k = 0; k < tx.nrows - 1; k++) {
int kbrow = min(nrows - 1 - k, tx.bwleft);
int kbcol = min(nrows - 1 - k, tx.bwrit);
// find the pivot in the k-th column
int pc = k;
double aet = abs(tx[k][0]);
for (int i = 1; i <= kbrow; i++) {
if (abs(tx[k+i][-i]) > aet) {
aet = abs(tx[k+i][-i]);
pc = k + i;
}
}
if (!aet) error("pivot is zero in GaussElimPP");
// interchange pivot row and k-th row
if (pc != k) {
swap(pvt[k], pvt[pc]);
for (int j = - min(k, tx.bwleft); j <= kbcol; j++)
swap(tx[pc][j+k-pc], tx[k][j]);
}
// now eliminate column entries below main
// diagonal entry at row k
for (int i = 1; i <= kbrow; i++) {
int kpi = k + i;
if (tx[kpi][-i] != 0) {
T dmul = tx[kpi][-i]/tx[k][0];
tx[kpi][-i] = dmul;
for (int j = 1; j <= kbcol; j++)
tx[kpi][j-i] -= dmul*tx[k][j];
}
}
}
Vcr<T> rhv(nrows);
for (int k = 0; k < nrows; k++) rhv[k] = bb[pvt[k]];
bb = rhv;
// Forward substitution LY = Pb
for (int k = 1; k < tx.nrows; k++) {
int kb = min(k, tx.bwleft);
for (int j = 1; j <= kb; j++)
bb[k] -= tx[k][-j]*bb[k-j];
}
// Backward substitution
for (int k = tx.nrows-1; k>= 0; k--) {
int kb = min(tx.nrows -1-k, tx.bwrit);
for (int j = 1; j <= kb; j++) bb[k] -= tx[k][j]*bb[k+j];
bb[k] /= tx[k][0];
}
} // end GaussElimPP2()
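// Storage note (a worked example, assuming the usual bwleft+bwrit+1 stored
// entries per row of a band matrix): for a 1000 x 1000 tridiagonal matrix
// (bwleft = bwrit = 1), GaussElimPP() copies the matrix into a band with
// bandwidths (1, 2), i.e. about 1000*(1+2+1) = 4000 stored entries, while
// this version allocates bandwidths (999, 2), i.e. about
// 1000*(999+2+1) = 1,002,000 entries -- essentially full-matrix storage,
// which is why it is discouraged above.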
// *** definitions for members of class SSMtx
template<class T>
SSMtx<T>::SSMtx(int n, int m, T* ent, int* con, int* dia) {
nrows = n;
lenth = m;
sra = new T [lenth];
clm = new int [lenth];
dgn = new int [nrows];
for (int i =0; i< lenth; i++) sra[i] = ent[i];
for (int i =0; i< nrows; i++) dgn[i] = dia[i];
for (int i =0; i< lenth; i++) clm[i] = con[i];
}
template<class T>
SSMtx<T>::SSMtx(int n, int m) {
nrows = n;
lenth = m;
sra = new T [lenth];
clm = new int [lenth];
dgn = new int [nrows];
for (int i =0; i< lenth; i++) sra[i] = 0;
for (int i =0; i< nrows; i++) dgn[i] = 0;
for (int i =0; i< lenth; i++) clm[i] = 0;
}
template<class T>
SSMtx<T>::SSMtx(const SSMtx & mat) { // copy constructor
nrows = mat.nrows;
lenth = mat.lenth;
sra = new T [lenth];
clm = new int [lenth];
dgn = new int [nrows];
for (int i = 0; i< nrows; i++) dgn[i] = mat.dgn[i];
for (int i = 0; i < lenth; i++) {
sra[i] = mat[i];
clm[i] = mat.clm[i];
}
}
template<class T>
SSMtx<T>& SSMtx<T>::operator=(const SSMtx & ssm) {
if(nrows !=ssm.nrows || lenth != ssm.lenth) error("bad matrix sizes");
for (int i = 0; i < lenth; i++) {
sra[i] = ssm[i];
clm[i] = ssm.clm[i];
}
for (int i = 0; i < nrows; i++) dgn[i] = ssm.dgn[i];
return *this;
}
template<class T>
Vcr<T> SSMtx<T>::operator*(const Vcr<T> & vec) const {
if (nrows != vec.size()) error("matrix and vector sizes do not match");
Vcr<T> tm(nrows);
tm[0] += sra[0]*vec[0];
for (int j = 1; j < nrows; j++) {
int m = dgn[j-1]+1;
if (clm[m] == 0) tm[0] += sra[m]*vec[j];
}
for (int i = 1; i < nrows; i++) {
for (int j = dgn[i-1] + 1; j <= dgn[i]; j++)
tm[i] += sra[j]*vec[clm[j]];
for (int j = i+1; j < nrows; j++) {
for (int m = dgn[j-1]+1; m < dgn[j]; m++)
if (clm[m] == i) tm[i] += sra[m]*vec[j];
}
}
return tm;
}
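// Storage sketch (illustration only; inferred from the loops above): SSMtx
// stores only the lower triangle of a symmetric matrix row by row, with
// dgn[i] holding the index of the diagonal entry of row i. For example, the
// 3 x 3 symmetric matrix
//   [ 4 -1  0 ]
//   [-1  4 -1 ]
//   [ 0 -1  4 ]
// would be stored as
//   double ent[] = { 4,  -1, 4,  -1, 4 };  // lower-triangular entries, row by row
//   int    con[] = { 0,   0, 1,   1, 2 };  // column index of each stored entry
//   int    dia[] = { 0, 2, 4 };            // position of each row's diagonal
// and could be built as  SSMtx<double> A(3, 5, ent, con, dia);
// operator* then uses the stored row for the lower part and scans later
// rows for the transposed (upper) part, as coded above.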
template<class T>
Vcr<T> SSMtx<T>::preconding(const Vcr<T>& r, int prec) const {
// solve preconditioning matrix: Pz = r;
// prec: 0 if no preconditioning, 1 if diag prec, 2 if SSOR prec
if (prec == 0) { // no preconditioning
return r;
} else if (prec == 1 ){ // diagonal preconditioning
Vcr<T> z(nrows);
for (int i = 0; i < nrows; i++) z[i] = r[i]/sra[dgn[i]];
return z;
} else if (prec == 2) { // symmetric SOR preconditioning
const T omega = 1; // SSOR parameter for preconditioning
Vcr<T> z(nrows);
Vcr<T> tm(nrows;
tm[0] = r[0]/sra[0];
for (int i = 0; i < sz; i++) {
if (abs(sra[dgn[i]]) <= SMALL) {
error("SSOR preconditioner did not work");
break;
}
T sum = 0;
for (int j = dgn[i-1]+1; j < dgn[i]; j++)
sum += omega*sra[j]*tm[clm[j]];
tm[i] = (r[i] - sum)/sra[dgn[i]];
}
for (int i = sz -1; i >= 0; i--) {
T sum = 0;
for (int j = i+1; j < sz; j++) {
for (int m = dgn[j-1]+1; m < dgn[j]; m++)
if (clm[m] == i) sum += omega*sra[m]*z[j];
}
z[i] = tm[i] - sum/sra[dgn[i]];
}
return z;
} else {
error("specified preconditioner in SSMtx::preconding not implemented");
}
}
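// Note on the SSOR branch above (a sketch of the algebra, writing the
// symmetric matrix as A = L + D + L^T with D its diagonal): the forward
// sweep solves (D + omega*L) tm = r and the backward sweep solves
// (I + omega*D^{-1}*L^T) z = tm, so the preconditioner being applied is
// P = (D + omega*L) D^{-1} (D + omega*L^T), up to the usual scalar factor;
// with omega = 1 this is the symmetric Gauss-Seidel preconditioner.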
// *** definitions for members of class SparseMtx
template<class T>
SparseMtx<T>::SparseMtx(int n, int m, T* et, int* cn, int* da) {
nrows = n;
lenth = m;
sra = new T [lenth];
clm = new int [lenth];
fnz = new int [nrows +1];
for (int i =0; i< lenth; i++) {
sra[i] = et[i];
clm[i] = cn[i];
}
for (int i = 0; i <= nrows; i++) fnz[i] = da[i];
}
template<class T>
SparseMtx<T>::SparseMtx(int n, int m) {
nrows = n;
lenth = m;
sra = new T [lenth];
clm = new int [lenth];
fnz = new int [nrows +1];
for (int i =0; i< lenth; i++) {
sra[i] = 0;
clm[i] = 0;
}
for (int i =0; i <= nrows; i++) fnz[i] = 0;
}
template<class T>
SparseMtx<T>::SparseMtx(const SparseMtx & mat) { // copy constructor
nrows = mat.nrows;
lenth = mat.lenth;
sra = new T [lenth];
clm = new int [lenth];
fnz = new int [nrows +1];
for (int i = 0; i < lenth; i++) {
sra[i] = mat[i];
clm[i] = mat.clm[i];
}
for (int i = 0; i <= nrows; i++) fnz[i] = mat.fnz[i];
}
template<class T>
SparseMtx<T>& SparseMtx<T>::operator=(const SparseMtx & ssm) {
if(nrows != ssm.nrows || lenth != ssm.lenth)
error("bad matrix sizes in SparseMtx::operator=()");
for (int i = 0; i < lenth; i++) {
sra[i] = ssm[i];
clm[i] = ssm.clm[i];
}
for (int i = 0; i <= nrows; i++) fnz[i] = ssm.fnz[i];
return *this;
}
template<class T>
Vcr<T> SparseMtx<T>::operator*(const Vcr<T>& vec) const {
if (nrows != vec.size())
error("matrix and vector sizes do not match in SparseMtx::operator*()");
Vcr<T> tm(nrows);
for (int i = 0; i < nrows; i++)
for (int j = fnz[i]; j < fnz[i +1]; j++) tm[i] += sra[j]*vec[clm[j]];
return tm;
}
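// Storage sketch (illustration only; it assumes the SparseMtx(n, m, et, cn, da)
// constructor defined above): SparseMtx uses compressed sparse row storage,
// with fnz[i] the index of the first nonzero of row i and fnz[nrows] = lenth.
// For example, the 3 x 3 matrix
//   [ 4 -1  0 ]
//   [-1  4 -1 ]
//   [ 0 -1  4 ]
// is stored and applied as
//   double et[] = { 4, -1,   -1, 4, -1,   -1, 4 };  // nonzeros, row by row
//   int    cn[] = { 0,  1,    0, 1,  2,    1, 2 };  // their column indices
//   int    da[] = { 0, 2, 5, 7 };                   // first nonzero of each row
//   SparseMtx<double> A(3, 7, et, cn, da);
//   Vcr<double> x(3);
//   for (int i = 0; i < 3; i++) x[i] = 1.0;
//   Vcr<double> y = A*x;                            // y = (3, 2, 3)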
template<class T> // solve Pz = r, return z. (P: preconditioner)
Vcr<T> SparseMtx<T>::preconding(const Vcr<T>& r, int precn) const {
if (precn == 0) { // no preconditioning is used
return r;
} else if (precn == 1) { // diagonal preconditioning
Vcr<T> diag(nrows); // find diagonal entries
for (int i = 0; i < nrows; i++) { // geometrical average, more stable
for (int j = fnz[i]; j < fnz[i+1]; j++) diag[i] += sra[j]*sra[j];
diag[i] = sqrt(diag[i]);
}
Vcr<T> z(nrows);
for (int i = 0; i < nrows; i++) z[i] = r[i]/diag[i];
return z;
} else if (precn == 2) { // SSOR preconditioning
const T omega = 1; // SOR parameter for preconditioning
Vcr<T> diag(nrows); // find diagonal entries
for (int i = 0; i < nrows; i++) {
for (int j = fnz[i]; j < fnz[i+1]; j++) {
if (clm[j] == i) diag[i] += sra[j]*sra[j]; // averaging is used
else diag[i] += omega*omega*sra[j]*sra[j]; // it is more stable
}
diag[i] = sqrt(diag[i]);
}
Vcr<T> z(nrows);
for (int i = 0; i < nrows; i++) { // solve lower triangular system
T sum = 0;
for (int j = fnz[i]; j < fnz[i+1]; j++) {
if (clm[j] < i) sum += omega*sra[j]*z[clm[j]];
}
z[i] = (r[i] - sum)/diag[i];
}
for (int i = nrows -1; i >= 0; i--) { // solve upper triangular system
T sum = 0;
for (int j = fnz[i]; j < fnz[i+1]; j++) {
if (clm[j] > i) sum += omega*sra[j]*z[clm[j]];
}
z[i] -= sum/diag[i];
}
return z;
} else {
error("specified preconditioner in SparseMtx::preconding not implemented");
}
} // end SparseMtx::preconding()
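// Usage sketch (illustration only): inside a preconditioned conjugate
// gradient iteration one would apply the preconditioner to the current
// residual r, e.g.
//   Vcr<double> z = A.preconding(r, 2);   // SSOR-type preconditioning
// with precn = 0, 1, or 2 selecting no, diagonal, or SSOR-type
// preconditioning as coded above.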
#endif
//****** end matvec.h ***************/