📄 cal_value.c

📁 CFD solver usage and solving with Gmsh meshes
💻 C
📖 Page 1 of 5
    if (Current.NbrHar == 1) {
      SUB(0);
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k++) {
        CSUB(0);
      }
    }
    R->Type = SCALAR ;
  }
  else if ((V1->Type == VECTOR      && V2->Type == VECTOR) ||
           (V1->Type == TENSOR_DIAG && V2->Type == TENSOR_DIAG)) {
    if (Current.NbrHar == 1) {
      SUB(0); SUB(1); SUB(2);
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k++) {
        CSUB(0); CSUB(1); CSUB(2);
      }
    }
    R->Type = V1->Type ;
  }
  else if (V1->Type == TENSOR_SYM && V2->Type == TENSOR_SYM) {
    if (Current.NbrHar == 1) {
      SUB(0); SUB(1); SUB(2); SUB(3); SUB(4); SUB(5);
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k++) {
        CSUB(0); CSUB(1); CSUB(2); CSUB(3); CSUB(4); CSUB(5);
      }
    }
    R->Type = TENSOR_SYM;
  }
  else if (V1->Type == TENSOR && V2->Type == TENSOR) {
    if (Current.NbrHar == 1) {
      SUB(0); SUB(1); SUB(2); SUB(3); SUB(4); SUB(5); SUB(6); SUB(7); SUB(8);
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k++) {
        CSUB(0); CSUB(1); CSUB(2); CSUB(3); CSUB(4); CSUB(5); CSUB(6); CSUB(7); CSUB(8);
      }
    }
    R->Type = TENSOR;
  }
  else if ((V1->Type == TENSOR     && V2->Type == TENSOR_SYM) ||
           (V1->Type == TENSOR     && V2->Type == TENSOR_DIAG)||
           (V1->Type == TENSOR_SYM && V2->Type == TENSOR_DIAG)){
    A.Type = V1->Type;
    for (k = 0 ; k < Current.NbrHar ; k++) {
      for(i=0 ; i<9 ; i++){
        i1 = (V1->Type==TENSOR)?i:TENSOR_SYM_MAP[i];
        i2 = (V2->Type==TENSOR_SYM)?TENSOR_SYM_MAP[i]:TENSOR_DIAG_MAP[i];
        A.Val[MAX_DIM*k+i1] =
          V1->Val[MAX_DIM*k+i1] - ((i2<0)?0.0:V2->Val[MAX_DIM*k+i2]);
      }
    }
    Cal_CopyValue(&A,R);
  }

  else if ((V1->Type == TENSOR_SYM  && V2->Type == TENSOR)      ||
           (V1->Type == TENSOR_DIAG && V2->Type == TENSOR)      ||
           (V1->Type == TENSOR_DIAG && V2->Type == TENSOR_SYM)){
    A.Type = V2->Type;
    for (k = 0 ; k < Current.NbrHar ; k++) {
      for(i=0 ; i<9 ; i++){
        i1 = (V1->Type==TENSOR_SYM)?TENSOR_SYM_MAP[i]:TENSOR_DIAG_MAP[i];
        i2 = (V2->Type==TENSOR)?i:TENSOR_SYM_MAP[i];
        A.Val[MAX_DIM*k+i2] =
          ((i1<0)?0.0:V1->Val[MAX_DIM*k+i1]) - V2->Val[MAX_DIM*k+i2];
      }
    }
    Cal_CopyValue(&A,R);
  }
  else {
    Msg(GERROR, "Substraction of different quantities: %s - %s",
        Get_StringForDefine(Field_Type, V1->Type),
        Get_StringForDefine(Field_Type, V2->Type));
  }

  GetDP_End ;
}

#undef SUB
#undef CSUB

/* ------------------------------------------------------------------------
   R <- V1 * V2
   ------------------------------------------------------------------------ */

#define CMULT(a,b,c) \
  Cal_ComplexProduct(&(V1->Val[MAX_DIM*k+a]), &(V2->Val[MAX_DIM*k+b]), tmp[c])

#define CPUT(a) \
  R->Val[MAX_DIM* k   +a] = tmp[a][0] ; \
  R->Val[MAX_DIM*(k+1)+a] = tmp[a][MAX_DIM]

#define CPUT3(a,b,c,d) \
  R->Val[MAX_DIM* k   +d] = tmp[a][0]      +tmp[b][0]      +tmp[c][0] ; \
  R->Val[MAX_DIM*(k+1)+d] = tmp[a][MAX_DIM]+tmp[b][MAX_DIM]+tmp[c][MAX_DIM]

void  Cal_ProductValue (struct Value * V1, struct Value * V2, struct Value * R) {
  int k;

  GetDP_Begin("Cal_ProductValue");

  if (V1->Type == SCALAR && V2->Type == SCALAR) {
    if (Current.NbrHar == 1) {
      R->Val[0] = V1->Val[0]*V2->Val[0];
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0);
        CPUT(0);
      }
    }
    R->Type = SCALAR ;
  }

  else if (V1->Type == SCALAR && (V2->Type == VECTOR || V2->Type == TENSOR_DIAG)) {
    if (Current.NbrHar == 1) {
      a0 = V1->Val[0] ;
      R->Val[0] = a0 * V2->Val[0] ;
      R->Val[1] = a0 * V2->Val[1] ;
      R->Val[2] = a0 * V2->Val[2] ;
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(0,1,1); CMULT(0,2,2);
        CPUT(0); CPUT(1); CPUT(2);
      }
    }
    R->Type = V2->Type ;
  }

  else if (V1->Type == SCALAR && V2->Type == TENSOR_SYM) {
    if (Current.NbrHar == 1) {
      a0 = V1->Val[0] ;
      R->Val[0] = a0 * V2->Val[0] ;
      R->Val[1] = a0 * V2->Val[1] ;
      R->Val[2] = a0 * V2->Val[2] ;
      R->Val[3] = a0 * V2->Val[3] ;
      R->Val[4] = a0 * V2->Val[4] ;
      R->Val[5] = a0 * V2->Val[5] ;
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(0,1,1); CMULT(0,2,2); CMULT(0,3,3); CMULT(0,4,4); CMULT(0,5,5);
        CPUT(0); CPUT(1); CPUT(2); CPUT(3); CPUT(4); CPUT(5);
      }
    }
    R->Type = TENSOR_SYM ;
  }

  else if (V1->Type == SCALAR && V2->Type == TENSOR) {
    if (Current.NbrHar == 1) {
      a0 = V1->Val[0] ;
      R->Val[0] = a0 * V2->Val[0] ;
      R->Val[1] = a0 * V2->Val[1] ;
      R->Val[2] = a0 * V2->Val[2] ;
      R->Val[3] = a0 * V2->Val[3] ;
      R->Val[4] = a0 * V2->Val[4] ;
      R->Val[5] = a0 * V2->Val[5] ;
      R->Val[6] = a0 * V2->Val[6] ;
      R->Val[7] = a0 * V2->Val[7] ;
      R->Val[8] = a0 * V2->Val[8] ;
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(0,1,1); CMULT(0,2,2); CMULT(0,3,3); CMULT(0,4,4); CMULT(0,5,5);
        CMULT(0,6,6); CMULT(0,7,7); CMULT(0,8,8);
        CPUT(0); CPUT(1); CPUT(2); CPUT(3); CPUT(4); CPUT(5);
        CPUT(6); CPUT(7); CPUT(8);
      }
    }
    R->Type = TENSOR ;
  }

  else if ((V1->Type == VECTOR || V1->Type == TENSOR_DIAG) && V2->Type == SCALAR) {
    if (Current.NbrHar == 1) {
      a0 = V2->Val[0] ;
      R->Val[0] = V1->Val[0] * a0 ;
      R->Val[1] = V1->Val[1] * a0 ;
      R->Val[2] = V1->Val[2] * a0 ;
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(1,0,1); CMULT(2,0,2);
        CPUT(0); CPUT(1); CPUT(2);
      }
    }
    R->Type = V1->Type ;
  }

  else if (V1->Type == TENSOR_SYM && V2->Type == SCALAR) {
    if (Current.NbrHar == 1) {
      a0 = V2->Val[0] ;
      R->Val[0] = V1->Val[0] * a0 ;
      R->Val[1] = V1->Val[1] * a0 ;
      R->Val[2] = V1->Val[2] * a0 ;
      R->Val[3] = V1->Val[3] * a0 ;
      R->Val[4] = V1->Val[4] * a0 ;
      R->Val[5] = V1->Val[5] * a0 ;
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(1,0,1); CMULT(2,0,2); CMULT(3,0,3); CMULT(4,0,4); CMULT(5,0,5);
        CPUT(0); CPUT(1); CPUT(2); CPUT(3); CPUT(4); CPUT(5);
      }
    }
    R->Type = TENSOR_SYM ;
  }

  else if (V1->Type == TENSOR && V2->Type == SCALAR) {
    if (Current.NbrHar == 1) {
      a0 = V2->Val[0] ;
      R->Val[0] = V1->Val[0] * a0 ;
      R->Val[1] = V1->Val[1] * a0 ;
      R->Val[2] = V1->Val[2] * a0 ;
      R->Val[3] = V1->Val[3] * a0 ;
      R->Val[4] = V1->Val[4] * a0 ;
      R->Val[5] = V1->Val[5] * a0 ;
      R->Val[6] = V1->Val[6] * a0 ;
      R->Val[7] = V1->Val[7] * a0 ;
      R->Val[8] = V1->Val[8] * a0 ;
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(1,0,1); CMULT(2,0,2); CMULT(3,0,3); CMULT(4,0,4); CMULT(5,0,5);
        CMULT(6,0,6); CMULT(7,0,7); CMULT(8,0,8);
        CPUT(0); CPUT(1); CPUT(2); CPUT(3); CPUT(4); CPUT(5);
        CPUT(6); CPUT(7); CPUT(8);
      }
    }
    R->Type = TENSOR ;
  }

  /* Scalar Product.
     See 'Cal_CrossProductValue' for the Vector Product */

  else if (V1->Type == VECTOR && V2->Type == VECTOR) {
    if (Current.NbrHar == 1) {
      R->Val[0] = V1->Val[0] * V2->Val[0] +
                  V1->Val[1] * V2->Val[1] +
                  V1->Val[2] * V2->Val[2] ;
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(1,1,1); CMULT(2,2,2);
        CPUT3(0,1,2,0);
      }
    }
    R->Type = SCALAR ;
  }

  else if ( (V1->Type == TENSOR_DIAG && V2->Type == VECTOR) ||
            (V2->Type == TENSOR_DIAG && V1->Type == VECTOR) ) { /* WARNING WARNING! */
    if (Current.NbrHar == 1) {
      R->Val[0] = V1->Val[0]*V2->Val[0];
      R->Val[1] = V1->Val[1]*V2->Val[1];
      R->Val[2] = V1->Val[2]*V2->Val[2];
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(1,1,1); CMULT(2,2,2);
        CPUT(0); CPUT(1); CPUT(2);
      }
    }
    R->Type = VECTOR ;
  }

  else if (V1->Type == TENSOR_SYM && V2->Type == VECTOR) {
    if (Current.NbrHar == 1) {
      a1[0] = V1->Val[0]*V2->Val[0] + V1->Val[1]*V2->Val[1] + V1->Val[2]*V2->Val[2];
      a1[1] = V1->Val[1]*V2->Val[0] + V1->Val[3]*V2->Val[1] + V1->Val[4]*V2->Val[2];
      a1[2] = V1->Val[2]*V2->Val[0] + V1->Val[4]*V2->Val[1] + V1->Val[5]*V2->Val[2];
      R->Val[0] = a1[0];
      R->Val[1] = a1[1];
      R->Val[2] = a1[2];
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(1,1,1); CMULT(2,2,2);
        CMULT(1,0,3); CMULT(3,1,4); CMULT(4,2,5);
        CMULT(2,0,6); CMULT(4,1,7); CMULT(5,2,8);
        CPUT3(0,1,2,0);
        CPUT3(3,4,5,1);
        CPUT3(6,7,8,2);
      }
    }
    R->Type = VECTOR ;
  }

  else if (V1->Type == TENSOR && V2->Type == VECTOR) {
    if (Current.NbrHar == 1) {
      a1[0] = V1->Val[0]*V2->Val[0] + V1->Val[1]*V2->Val[1] + V1->Val[2]*V2->Val[2];
      a1[1] = V1->Val[3]*V2->Val[0] + V1->Val[4]*V2->Val[1] + V1->Val[5]*V2->Val[2];
      a1[2] = V1->Val[6]*V2->Val[0] + V1->Val[7]*V2->Val[1] + V1->Val[8]*V2->Val[2];
      R->Val[0] = a1[0];
      R->Val[1] = a1[1];
      R->Val[2] = a1[2];
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(1,1,1); CMULT(2,2,2);
        CMULT(3,0,3); CMULT(4,1,4); CMULT(5,2,5);
        CMULT(6,0,6); CMULT(7,1,7); CMULT(8,2,8);
        CPUT3(0,1,2,0);
        CPUT3(3,4,5,1);
        CPUT3(6,7,8,2);
      }
    }
    R->Type = VECTOR ;
  }

  else if (V1->Type == TENSOR_DIAG && V2->Type == TENSOR_DIAG) {
    if (Current.NbrHar == 1) {
      R->Val[0] = V1->Val[0]*V2->Val[0];
      R->Val[1] = V1->Val[1]*V2->Val[1];
      R->Val[2] = V1->Val[2]*V2->Val[2];
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0); CMULT(1,1,1); CMULT(2,2,2);
        CPUT(0); CPUT(1); CPUT(2);
      }
    }
    R->Type = TENSOR_DIAG;
  }

  else if (V1->Type == TENSOR_SYM && V2->Type == TENSOR_SYM) {
    if (Current.NbrHar == 1) {
      a1[0] = V1->Val[0]*V2->Val[0] + V1->Val[1]*V2->Val[1] + V1->Val[2]*V2->Val[2];
      a1[1] = V1->Val[0]*V2->Val[1] + V1->Val[1]*V2->Val[2] + V1->Val[2]*V2->Val[4];
      a1[2] = V1->Val[0]*V2->Val[2] + V1->Val[1]*V2->Val[4] + V1->Val[2]*V2->Val[5];
      a1[3] = V1->Val[1]*V2->Val[0] + V1->Val[3]*V2->Val[1] + V1->Val[4]*V2->Val[2];
      a1[4] = V1->Val[1]*V2->Val[1] + V1->Val[3]*V2->Val[2] + V1->Val[4]*V2->Val[4];
      a1[5] = V1->Val[1]*V2->Val[2] + V1->Val[3]*V2->Val[4] + V1->Val[4]*V2->Val[5];
      a1[6] = V1->Val[2]*V2->Val[0] + V1->Val[4]*V2->Val[1] + V1->Val[5]*V2->Val[2];
      a1[7] = V1->Val[2]*V2->Val[1] + V1->Val[4]*V2->Val[2] + V1->Val[5]*V2->Val[4];
      a1[8] = V1->Val[2]*V2->Val[2] + V1->Val[4]*V2->Val[4] + V1->Val[5]*V2->Val[5];
      R->Val[0] = a1[0];  R->Val[1] = a1[1];  R->Val[2] = a1[2];
      R->Val[3] = a1[3];  R->Val[4] = a1[4];  R->Val[5] = a1[5];
      R->Val[6] = a1[6];  R->Val[7] = a1[7];  R->Val[8] = a1[8];
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0);  CMULT(1,1,1);  CMULT(2,2,2);
        CMULT(0,1,3);  CMULT(1,2,4);  CMULT(2,4,5);
        CMULT(0,2,6);  CMULT(1,4,7);  CMULT(2,5,8);
        CMULT(1,0,9);  CMULT(3,1,10); CMULT(4,2,11);
        CMULT(1,1,12); CMULT(3,2,13); CMULT(4,4,14);
        CMULT(1,2,15); CMULT(3,4,16); CMULT(4,5,17);
        CMULT(2,0,18); CMULT(4,1,19); CMULT(5,2,20);
        CMULT(2,1,21); CMULT(4,2,22); CMULT(5,4,23);
        CMULT(2,2,24); CMULT(4,4,25); CMULT(5,5,26);
        CPUT3(0,1,2,0);    CPUT3(3,4,5,1);    CPUT3(6,7,8,2);
        CPUT3(9,10,11,3);  CPUT3(12,13,14,4); CPUT3(15,16,17,5);
        CPUT3(18,19,20,6); CPUT3(21,22,23,7); CPUT3(24,25,26,8);
      }
    }
    R->Type = TENSOR;
  }

  else if (V1->Type == TENSOR && V2->Type == TENSOR) {
    if (Current.NbrHar == 1) {
      a1[0] = V1->Val[0]*V2->Val[0] + V1->Val[1]*V2->Val[3] + V1->Val[2]*V2->Val[6];
      a1[1] = V1->Val[0]*V2->Val[1] + V1->Val[1]*V2->Val[4] + V1->Val[2]*V2->Val[7];
      a1[2] = V1->Val[0]*V2->Val[2] + V1->Val[1]*V2->Val[5] + V1->Val[2]*V2->Val[8];
      a1[3] = V1->Val[3]*V2->Val[0] + V1->Val[4]*V2->Val[3] + V1->Val[5]*V2->Val[6];
      a1[4] = V1->Val[3]*V2->Val[1] + V1->Val[4]*V2->Val[4] + V1->Val[5]*V2->Val[7];
      a1[5] = V1->Val[3]*V2->Val[2] + V1->Val[4]*V2->Val[5] + V1->Val[5]*V2->Val[8];
      a1[6] = V1->Val[6]*V2->Val[0] + V1->Val[7]*V2->Val[3] + V1->Val[8]*V2->Val[6];
      a1[7] = V1->Val[6]*V2->Val[1] + V1->Val[7]*V2->Val[4] + V1->Val[8]*V2->Val[7];
      a1[8] = V1->Val[6]*V2->Val[2] + V1->Val[7]*V2->Val[5] + V1->Val[8]*V2->Val[8];
      R->Val[0] = a1[0];  R->Val[1] = a1[1];  R->Val[2] = a1[2];
      R->Val[3] = a1[3];  R->Val[4] = a1[4];  R->Val[5] = a1[5];
      R->Val[6] = a1[6];  R->Val[7] = a1[7];  R->Val[8] = a1[8];
    }
    else {
      for (k = 0 ; k < Current.NbrHar ; k += 2) {
        CMULT(0,0,0);  CMULT(1,3,1);  CMULT(2,6,2);
        CMULT(0,1,3);  CMULT(1,4,4);  CMULT(2,7,5);
        CMULT(0,2,6);  CMULT(1,5,7);  CMULT(2,8,8);
        CMULT(3,0,9);  CMULT(4,3,10); CMULT(5,6,11);
        CMULT(3,1,12); CMULT(4,4,13); CMULT(5,7,14);
        CMULT(3,2,15); CMULT(4,5,16); CMULT(5,8,17);
        CMULT(6,0,18); CMULT(7,3,19); CMULT(8,6,20);
        CMULT(6,1,21); CMULT(7,4,22); CMULT(8,7,23);
        CMULT(6,2,24); CMULT(7,5,25); CMULT(8,8,26);
        CPUT3(0,1,2,0);    CPUT3(3,4,5,1);    CPUT3(6,7,8,2);
        CPUT3(9,10,11,3);  CPUT3(12,13,14,4); CPUT3(15,16,17,5);
        CPUT3(18,19,20,6); CPUT3(21,22,23,7); CPUT3(24,25,26,8);
      }
    }
    R->Type = TENSOR;
  }

  /* a faire: differents tenseurs entre eux */

  else {
    Msg(GERROR, "Product of non adapted quantities: %s * %s",
        Get_StringForDefine(Field_Type, V1->Type),
        Get_StringForDefine(Field_Type, V2->Type));
  }

  GetDP_End ;
}

#undef CMULT
#undef CPUT
#undef CPUT3

/* ------------------------------------------------------------------------
   R <- V1 / V2
   ------------------------------------------------------------------------ */

#define CDIVI(a,b,c) \
  Cal_ComplexDivision(&(V1->Val[MAX_DIM*k+a]), &(V2->Val[MAX_DIM*k+b]), tmp[c])

#define CPUT(a) \
  R->Val[MAX_DIM* k   +a] = tmp[a][0] ; \
  R->Val[MAX_DIM*(k+1)+a] = tmp[a][MAX_DIM]

void  Cal_DivideValue (struct Value * V1, struct Value * V2, struct Value * R) {
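
In the multi-harmonic branches above, each pair of harmonics (k, k+1) carries one complex value: CMULT hands Cal_ComplexProduct pointers into V1->Val and V2->Val at offset MAX_DIM*k, and CPUT stores tmp[a][0] back at harmonic k and tmp[a][MAX_DIM] at harmonic k+1, presumably the real and imaginary parts of the complex product. The short program below is a minimal sketch of that storage convention only; it is not part of cal_value.c or of GetDP, and the value chosen for MAX_DIM, the helper name complex_product_sketch and the main driver are assumptions made so the example is self-contained and runnable.

#include <stdio.h>

#define MAX_DIM 9   /* assumed size; stands in for GetDP's per-harmonic stride */

/* Illustration only (not Cal_ComplexProduct): multiplies two complex numbers
   stored harmonic-pair style, i.e. real part at p[0] and imaginary part at
   p[MAX_DIM], mirroring the &V->Val[MAX_DIM*k + i] arguments of CMULT and
   the tmp[a][0] / tmp[a][MAX_DIM] stores of CPUT. */
static void complex_product_sketch(const double *a, const double *b, double *res)
{
  res[0]       = a[0]*b[0]       - a[MAX_DIM]*b[MAX_DIM];  /* real part      */
  res[MAX_DIM] = a[0]*b[MAX_DIM] + a[MAX_DIM]*b[0];        /* imaginary part */
}

int main(void)
{
  /* two complex scalars, harmonic-pair layout: [0] = Re, [MAX_DIM] = Im */
  double v1[2*MAX_DIM] = {0}, v2[2*MAX_DIM] = {0}, r[2*MAX_DIM] = {0};

  v1[0] = 1.0; v1[MAX_DIM] = 2.0;   /* 1 + 2i on harmonics (0,1) */
  v2[0] = 3.0; v2[MAX_DIM] = 4.0;   /* 3 + 4i on harmonics (0,1) */

  complex_product_sketch(v1, v2, r);
  printf("(1+2i)*(3+4i) = %g + %gi\n", r[0], r[MAX_DIM]);   /* prints -5 + 10i */
  return 0;
}

Running it prints (1+2i)*(3+4i) = -5 + 10i, the same arithmetic the SCALAR * SCALAR branch of Cal_ProductValue would perform for a two-harmonic field, one CMULT/CPUT pair per harmonic couple.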
