并行求大型方程组.txt (parallel solution of a large system of linear equations)
#include <mpi.h>
#include <iostream>
#include <stdlib.h>
#include <math.h>
using namespace std;
#define a 0.3           // damping factor: the system matrix is built as I - a*F and the right-hand side as (1-a)*g
#define matrixnum 44050 // dimension of the system (number of rows/columns)
#define Jtimes 10       // number of Jacobi sweeps (defined but unused; the sweep loop below is hard-coded to 5)
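//
// Overview: one master process (rank 0) and numprocs-1 slave processes solve a
// sparse linear system by Jacobi iteration. The system appears to be of the
// damped form (I - a*F) x = (1 - a) g, with F and g read from the two input
// files via MPI-IO; the slaves run the sweeps and send their x vectors to the
// master, which writes them to the output file.
//
// Inferred input-file layout (reconstructed from the offset arithmetic below,
// not documented in the original source): an 8-byte file header, then
// matrixnum row headers of two 4-byte integers (byte offset of the row's
// entries relative to the data area, number of nonzeros), then the data area
// starting at byte 8*matrixnum+8 holding (4-byte 1-based index, 4-byte float
// value) pairs.
//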
////////////////////
char processor_name[MPI_MAX_PROCESSOR_NAME]; // name of the machine this process runs on
int myid;                                    // rank of this process
int numprocs;                                // total number of processes taking part in the computation
int namelen;                                 // length of the machine name
MPI_Status status;
MPI_Info mpiInfo;
MPI_File matrixAFileHandle,bFileHandle,outputFileHandle;
float b[matrixnum];                          // right-hand-side vector, filled by the slave processes
// Jacobi iteration: compute the solution vector x
int JacobiXmatrix(MPI_File matrixAFileHandle)
{
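// Jacobi scheme implemented below (reconstructed from the code): for the
// system M x = b with M = I - a*F and D = diag(M),
//     x_i^(k+1) = f_i + sum_{j != i} B_ij * x_j^(k),
//     B_ij = -M_ij / M_ii  (i != j),   f_i = b_i / M_ii,
// which is what linebuffer[], matrixD[], matrixB[], f[] and the accumulation
// into x[i] compute for each row i.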
MPI_Offset offset2;
int buffer[2];             // row header from the matrix file: (byte offset of the row's entries, nonzero count), 4-byte fields assumed
float frequency;
int indexrow;
// the matrixnum-sized work arrays are static so that roughly 1.2 MB of buffers stays off a possibly small default stack
static float linebuffer[matrixnum];   // row i of the system matrix M = I - a*F
static float matrixD[matrixnum];      // reciprocals of the diagonal entries of M
static float f[matrixnum];            // f = D^{-1} b
int i,j=1,h,m;
static float matrixB[matrixnum];      // row i of the Jacobi iteration matrix B
static float x1[matrixnum];           // iterate from the previous sweep
static float x[matrixnum];            // iterate for the current sweep
for(int i=0;i<matrixnum;i++)
linebuffer[i]=matrixD[i]=x1[i]=x[i]=0;
for(int l=0;l<5;l++)   // run a fixed number of Jacobi sweeps (5 here)
{
for(i=0;i<matrixnum;i++)   // process each row: compute component i of x for this sweep
{ offset2=0;
for(int h=0;h<matrixnum;h++)
linebuffer[h]=matrixB[h]=0;   // clear the row buffers before processing the next row
//cout<<myid<<": reading row "<<i+1<<endl;
offset2=(i+1)*8;   // header record of row i (the first 8 bytes of the file hold the global header)
MPI_File_read_at(matrixAFileHandle,offset2,buffer,8,MPI_BYTE,&status);
//cout<<buffer[0]<<endl<<buffer[1]<<endl;
for(m=0;m<buffer[1];m++)   // read the row's nonzeros as (1-based column index, float value) pairs
{
offset2=8*matrixnum+8+buffer[0]+m*8;   // the data area starts at byte 8*matrixnum+8
MPI_File_read_at(matrixAFileHandle,offset2,&indexrow,4,MPI_BYTE,&status);
MPI_File_read_at(matrixAFileHandle,offset2+4,&frequency,4,MPI_BYTE,&status);
if(i==indexrow-1)
linebuffer[indexrow-1]=1-a*frequency;
else
linebuffer[indexrow-1]=-a*frequency;
}
matrixD[i]=1/linebuffer[i];   // reciprocal of the diagonal entry M[i][i] (assumed nonzero)
//cout<<matrixD[i]<<endl;
f[i]=matrixD[i]*b[i];         // f = D^{-1} b
// build row i of the iteration matrix B
//cout<<f[0]<<endl;
for(j=0;j<matrixnum;j++)
{
if(i!=j)
matrixB[j]=-linebuffer[j]*matrixD[i];
}
float temp=0;
for(int h=0;h<matrixnum;h++)
{
temp+=matrixB[h]*x1[h];
}
x[i]=temp+f[i];
}
for(int h=0;h<matrixnum;h++)   // copy the new iterate into x1 for the next sweep
x1[h]=x[h];
}
//for(int i=0;i<matrixnum;i++)
//cout<<"x["<<i<<"]="<<x1[i]<<endl;
if(myid!=0)   // slave ranks send the result vector to the master (rank 0)
MPI_Send(x,matrixnum,MPI_FLOAT,0,0,MPI_COMM_WORLD);
return 0;
}
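// Editorial sketch (not called anywhere in this program): a dense, single-process
// version of the same Jacobi sweep, kept small so the parallel code above can be
// checked against it on a toy 3x3 system. Names and the fixed size are illustrative only.
static void jacobi_sweep_dense(const float M[][3],const float rhs[3],const float xin[3],float xout[3])
{
for(int i=0;i<3;i++)
{
float sum=0;
for(int j=0;j<3;j++)
if(j!=i)
sum+=M[i][j]*xin[j];            // off-diagonal part: sum_{j != i} M_ij * x_j
xout[i]=(rhs[i]-sum)/M[i][i];       // equal to f_i + sum_j B_ij * x_j in the notation above
}
}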
// Master process: receive the results computed by the slaves and write them to the output file
int mpivector_pm(MPI_File outputFileHandle)
{
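// The output file starts with an 8-byte header (the dimension written twice as
// 4-byte integers at offsets 0 and 4); after that, one full x vector of matrixnum
// floats is written per message received from a slave: matrixnum/(numprocs-1)
// regular messages plus matrixnum%(numprocs-1) leftover messages from each slave.
// The per-row offsets below stride by 8*matrixnum bytes although the values are
// 4-byte floats, so the rows end up spaced apart; that arithmetic may need revisiting.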
int matrixnum1=matrixnum;   // dimension written (twice) as the output file header
float x[matrixnum];
int i,retag,j;
MPI_Offset outoffset,outoffset1;
MPI_File_write_at(outputFileHandle,0,&matrixnum1,1,MPI_INT,&status);
MPI_File_write_at(outputFileHandle,4,&matrixnum1,1,MPI_INT,&status);
for(retag=1;retag<numprocs;retag++)
{
for(int j=0;j<matrixnum/(numprocs-1);j++)
{
outoffset=8+(retag-1)*matrixnum*(matrixnum/(numprocs-1))+j*8*matrixnum;
MPI_Recv(x,matrixnum,MPI_FLOAT,retag,0,MPI_COMM_WORLD,&status);
MPI_File_write_at(outputFileHandle,outoffset,x,matrixnum,MPI_FLOAT,&status);
}
// handle the remaining rows
for(i=0;i<matrixnum%(numprocs-1);i++)
{
outoffset1=8+8*matrixnum*(matrixnum-matrixnum/(numprocs-1));
MPI_Recv(x,matrixnum,MPI_FLOAT,retag,0,MPI_COMM_WORLD,&status);
MPI_File_write_at(outputFileHandle,outoffset1,x,matrixnum,MPI_FLOAT,&status);
}
//for(int j=0;j<matrixnum;j++)
// cout<<x[j]<<endl;
}
return 0;
}
// Slave process: read its block of the right-hand side and compute x
int mpivector_ps(MPI_File matrixAFileHandle,MPI_File bFileHandle)
{
MPI_Offset offset=0,offset1,reoffset;
int buffer[2];    // header of this slave's current row in the b file
int buffer1[2];   // header used for the leftover rows
buffer[0]=buffer[1]=0;
int i,j,h,k,m;
int indexcol=0;
float frequency=0;
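// Block distribution of the rows among the numprocs-1 slave processes:
// slave 'myid' owns rows (myid-1)*chunk .. myid*chunk-1, where
// chunk = matrixnum/(numprocs-1); the matching 8-byte row headers in the
// b file start at byte 8 (see the layout note at the top of the file).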
offset=((matrixnum/(numprocs-1))*(myid-1))*8+8;   // header of the first row owned by this slave
for(i=0;i<matrixnum/(numprocs-1);i++)
{
offset=((matrixnum/(numprocs-1))*(myid-1)+i)*8+8;   // header of the i-th row of this slave's block
MPI_File_read_at(bFileHandle,offset,buffer,8,MPI_BYTE,&status);
for(h=0;h<buffer[1];h++)
{
offset1=matrixnum*8+8+buffer[0]+h*8;
MPI_File_read_at(bFileHandle,offset1,&indexcol,4,MPI_BYTE,&status);
MPI_File_read_at(bFileHandle,offset1+4,&frequency,4,MPI_BYTE,&status);
b[indexcol-1]=(1-a)*frequency;
}
JacobiXmatrix(matrixAFileHandle);   // run the Jacobi sweeps with the current right-hand side and send x to the master
}
// handle the remaining rows left over by the block split
for(k=0;k<matrixnum%(numprocs-1);k++)
{
reoffset=(matrixnum-matrixnum%(numprocs-1)+k)*8+8;   // header of the k-th leftover row
MPI_File_read_at(bFileHandle,reoffset,buffer1,8,MPI_BYTE,&status);
for(m=0;m<buffer1[1];m++)
{
reoffset=matrixnum*8+8+buffer1[0]+m*8;
MPI_File_read_at(bFileHandle,reoffset,&indexcol,4,MPI_BYTE,&status);
MPI_File_read_at(bFileHandle,reoffset+4,&frequency,4,MPI_BYTE,&status);
b[indexcol-1]=(1-a)*frequency;
}
JacobiXmatrix(matrixAFileHandle);
}
return 0;
}
int mpivector_p(const char matrixA[],const char matrixb[],const char outfile[])
{
int rc;   // FAILED is reached only if one of the collective file opens fails
// the (char*) casts keep pre-MPI-3 bindings happy, where the filename parameter is non-const
rc=MPI_File_open(MPI_COMM_WORLD,(char*)matrixA,MPI_MODE_RDONLY,MPI_INFO_NULL,&matrixAFileHandle);
if(rc!=MPI_SUCCESS) goto FAILED;
rc=MPI_File_open(MPI_COMM_WORLD,(char*)matrixb,MPI_MODE_RDONLY,MPI_INFO_NULL,&bFileHandle);
if(rc!=MPI_SUCCESS) goto FAILED;
rc=MPI_File_open(MPI_COMM_WORLD,(char*)outfile,MPI_MODE_CREATE|MPI_MODE_WRONLY,MPI_INFO_NULL,&outputFileHandle);
if(rc!=MPI_SUCCESS) goto FAILED;
if(myid==0)
mpivector_pm(outputFileHandle);
else
mpivector_ps(matrixAFileHandle,bFileHandle);
MPI_File_close(&matrixAFileHandle);
MPI_File_close(&bFileHandle);
MPI_File_close(&outputFileHandle);
return 0;
FAILED:
return 1;
}
int main(int argc, char *argv[])
{
MPI_Init(&argc, &argv);
// get the rank of this process
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
// get the total number of processes taking part in the computation
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
// get the name of the machine this process runs on
MPI_Get_processor_name(processor_name,&namelen);
// the number of processes must be at least two (one master plus at least one slave)
if(numprocs<2){ MPI_Finalize(); return 1; }
mpivector_p("c:/fre_r","c:/010","c:/myout");
MPI_Finalize();
return 0;
}
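// Typical build and launch (assumed; not given in the original source):
//   mpicxx parallel_jacobi.cpp -o parallel_jacobi
//   mpiexec -n 4 ./parallel_jacobi
// where parallel_jacobi.cpp is a placeholder name for this file; the data files
// are opened from the hard-coded paths passed to mpivector_p() in main().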