// 矩阵相乘.txt (matrix multiplication): parallel sparse matrix product over MPI with MPI-IO
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
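/* Work around the SEEK_SET/SEEK_END/SEEK_CUR macro clash between <stdio.h>
   and older MPICH2 <mpi.h> headers (a common, widely documented workaround). */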
#undef SEEK_SET
#undef SEEK_END
#undef SEEK_CUR
#include <mpi.h>
#include <string.h>
/////////////////////////////
long index1; // matrix dimension (main() sets index1 == index2 and the loops below mix the two, so a square matrix is effectively assumed)
long index2;
float sum; // one dot-product result
int sendnum,n;
char processor_name[MPI_MAX_PROCESSOR_NAME]; // name of the machine running this process
int myid; // rank of this process
int numprocs; // total number of participating processes
int namelen; // length of the machine name
MPI_Status status;
MPI_Info mpiInfo;
MPI_File m1FileHandle,m2FileHandle,outFileHandle;
long count; // progress counter: output columns finished so far
struct content // one stored non-zero entry: element index plus its value
{
long index;
float data;
};
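/* On-disk sparse format (inferred from the offsets used in the code below;
   every field is 4 bytes, so sizeof(long) == 4, i.e. a 32-bit build, is assumed):
     bytes 0..7        : matrix dimensions {rows, cols}
     record r (8 bytes): {byte offset into the data region, non-zero count}
                         for row/column r, r = 1..dimension
     data region       : starts at byte (dimension+1)*8; packed
                         {long index; float data} pairs in ascending index order */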
// shared buffers and bookkeeping
long bufPosition[2]; // one header record: {data byte offset, non-zero count}
int recvNum,sender,tag;
float flag; // zero-valued sentinel marking the end of a column
////////////////////////////////// master process routine
//int MatrixMultiply_PM(MPI_File m1FileHandle,MPI_File m2FileHandle,MPI_File outFileHandle)
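/* Master side of the protocol (as implemented below): workers send each
   non-zero dot product as a float tagged with its 1-based row position; a
   zero value with tag 0 ends the column. The master appends non-zeros to the
   data region and fills in each column's {offset, count} header record as
   the column completes. */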
int MatrixMultiply_PM(MPI_File outFileHandle)
{
int k,j;
MPI_Offset outOffset; // file offsets can exceed INT_MAX for large outputs
long outBlocks,outIndex,blockCount,move,m;
blockCount=0;
outBlocks=0;
m=0;
count=0;
// header: the two (equal) output dimensions at bytes 0 and 4, and the first
// column's data offset (0) at byte 8; MPI_LONG is 4 bytes on the assumed 32-bit build
MPI_File_write_at(outFileHandle,0,&index2,1,MPI_LONG,&status);
MPI_File_write_at(outFileHandle,4,&index2,1,MPI_LONG,&status);
MPI_File_write_at(outFileHandle,8,&m,1,MPI_LONG,&status);
// printf("\n################################\n");
// full rounds: each of the (numprocs-1) workers delivers one output column per round
for(j=0;j<=index2/(numprocs-1)-1;j++)
{
for(recvNum=1;recvNum<=(numprocs-1);recvNum++)
{
for(k=1;k<=index2;k++)
{
MPI_Recv(&sum,1,MPI_FLOAT,recvNum,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
sender=status.MPI_SOURCE;
tag=status.MPI_TAG;
outIndex=(long)tag; // the tag carries the 1-based row position of this non-zero
//printf("this is receive sum %f\n",sum);
// printf("receive %d %d \n",sender,tag);
if(sum!=0)
{
blockCount=blockCount+1;
outBlocks=outBlocks+1;
outOffset=(index2+1)*8+(outBlocks-1)*8;
MPI_File_write_at(outFileHandle,outOffset,&outIndex,1,MPI_LONG,&status);
outOffset=outOffset+4;
MPI_File_write_at(outFileHandle,outOffset,&sum,1,MPI_FLOAT,&status);
}
else
{
break; // zero flag: this worker's column is complete
}
}
// fill in this column's header record: non-zero count, then the next column's data offset
outOffset=8+j*(numprocs-1)*8+(recvNum-1)*8+4;
MPI_File_write_at(outFileHandle,outOffset,&blockCount,1,MPI_LONG,&status);
outOffset=outOffset+4;
move=outBlocks*8;
MPI_File_write_at(outFileHandle,outOffset,&move,1,MPI_LONG,&status);
blockCount=0;
count=count+1;
printf("finished column %ld\n",count);
}
}
////////////////////////////////////// handle the leftover columns (index1 % (numprocs-1) of them)
for(j=1;j<=index1%(numprocs-1);j++)
{
for(k=1;k<=index2;k++)
{
//outIndex=long(k);
MPI_Recv(&sum,1,MPI_FLOAT,j,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
sender=status.MPI_SOURCE;//MPI_ANY_SOURCE
tag=status.MPI_TAG;
outIndex=(long)tag;
// printf("receive sum %f\n",sum);
printf("receive %d %d \n",sender,tag);
if(sum!=0)
{
blockCount=blockCount+1;
outBlocks=outBlocks+1;
outOffset=(index2+1)*8+(outBlocks-1)*8;
MPI_File_write_at(outFileHandle,outOffset,&outIndex,1,MPI_LONG,&status);
outOffset=outOffset+4;
MPI_File_write_at(outFileHandle,outOffset,&sum,1,MPI_FLOAT,&status);
}
else
{
break;
}
}
///////////////////////////
if(j!=index1%(numprocs-1)) // not the last column: the last column stores one fewer offset entry
{
outOffset=8+(index2-index2%(numprocs-1))*8+(j-1)*8+4;
MPI_File_write_at(outFileHandle,outOffset,&blockCount,1,MPI_LONG,&status);
outOffset=outOffset+4;
move=outBlocks*8;
MPI_File_write_at(outFileHandle,outOffset,&move,1,MPI_LONG,&status);
blockCount=0;
}
else
{
outOffset=8+(index2-index2%(numprocs-1))*8+(j-1)*8+4;
MPI_File_write_at(outFileHandle,outOffset,&blockCount,1,MPI_LONG,&status);
}
}
}
//////////////////////////////////////////////////////////////////////////////
/////// worker process routine
int MatrixMultiply_PS(MPI_File m1FileHandle,MPI_File m2FileHandle)
{ // multiply this worker's assigned columns of matrix 2 against every row of matrix 1
MPI_Offset offset;
int i,j,k,kk,start;
int colBlocks,rowBlocks,length;
sendnum=0;
n=0;
//////////////////////////////////////////////////////////////////////
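/* Column distribution (as coded below): worker myid (1..numprocs-1) takes
   columns myid, myid+(numprocs-1), myid+2*(numprocs-1), ... of matrix 2,
   i.e. a round-robin split of the columns across the workers. */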
for(j=0;j<=index1/(numprocs-1)-1;j++)
{
//sum=0;
// read this column's header record: {data byte offset, non-zero count}
offset=(j*(numprocs-1)+myid)*8;
MPI_File_read_at(m2FileHandle,offset,bufPosition,8,MPI_BYTE,&status);
colBlocks=bufPosition[1];
content *bufColContent=new content[colBlocks]; // freed once this column is done
offset=(index1+1)*8+bufPosition[0];
length=bufPosition[1]*8;
MPI_File_read_at(m2FileHandle,offset,bufColContent,length,MPI_BYTE,&status);
// column data now buffered
////////////////////////////// each worker scans every row of matrix 1 once per column
for (i=0;i<=index2-1;i++)
{
sum=0;
offset=i*8+8;
MPI_File_read_at(m1FileHandle,offset,bufPosition,8,MPI_BYTE,&status);
rowBlocks=bufPosition[1];
content *bufRowContent=new content[rowBlocks]; // freed at the end of this row
offset=(index1+1)*8+bufPosition[0];
length=bufPosition[1]*8;
MPI_File_read_at(m1FileHandle,offset,bufRowContent,length,MPI_BYTE,&status);
// row data now buffered
// two-pointer intersection of the sorted row and column entries
for(k=0;k<=rowBlocks-1;k++)
{
if(k==0)
start=0; // reset the column cursor for each new row
for(kk=start;kk<=colBlocks-1;kk++)
{
if(bufRowContent[k].index==bufColContent[kk].index)
{
sum=sum+bufRowContent[k].data*bufColContent[kk].data;
start=kk;
break;
}
else if(bufRowContent[k].index<bufColContent[kk].index)
{
break; // column cursor has passed the row index: advance the row entry
}
else if(bufRowContent[k].index>bufColContent[colBlocks-1].index)
{
goto label; // row index beyond the last column entry: no more matches
}
}
}
label:
if(sum!=0)
{
// tag carries the 1-based row position of this non-zero in the output column
MPI_Send(&sum,1,MPI_FLOAT,0,sendnum+1,MPI_COMM_WORLD);
}
sendnum=sendnum+1;
delete [] bufRowContent;
}
////////////////////////////////
// zero value with tag 0: tells the master this column is complete
MPI_Send(&flag,1,MPI_FLOAT,0,0,MPI_COMM_WORLD);
sendnum=0;
delete [] bufColContent;
}
/////////////////////////////////////////////
// handle the leftover columns: the first index1 % (numprocs-1) workers take one extra column each
if(myid<=index1%(numprocs-1))
{
offset=(index1-index1%(numprocs-1)+1)*8+(myid-1)*8;
MPI_File_read_at(m2FileHandle,offset,bufPosition,8,MPI_BYTE,&status);
colBlocks=bufPosition[1];
content *bufColContent=new content[colBlocks]; // freed once this column is done
offset=(index1+1)*8+bufPosition[0];
length=bufPosition[1]*8;
MPI_File_read_at(m2FileHandle,offset,bufColContent,length,MPI_BYTE,&status);
// column data now buffered
for (i=0;i<=index2-1;i++)
{
sum=0;
offset=i*8+8;
MPI_File_read_at(m1FileHandle,offset,bufPosition,8,MPI_BYTE,&status);
rowBlocks=bufPosition[1];
content *bufRowContent=new content[rowBlocks]; // freed at the end of this row
offset=(index1+1)*8+bufPosition[0];
length=bufPosition[1]*8;
MPI_File_read_at(m1FileHandle,offset,bufRowContent,length,MPI_BYTE,&status);
// row data now buffered
// two-pointer intersection of the sorted row and column entries
for(k=0;k<=rowBlocks-1;k++)
{
if(k==0)
start=0;
for(kk=start;kk<=colBlocks-1;kk++)
{
if(bufRowContent[k].index==bufColContent[kk].index)
{
sum=sum+bufRowContent[k].data*bufColContent[kk].data;
start=kk;
break;
}
else if(bufRowContent[k].index<bufColContent[kk].index)
{
break;
}
else if(bufRowContent[k].index>bufColContent[colBlocks-1].index)
{
goto label1;
}
}
}
label1:
if(sum!=0)
{
MPI_Send(&sum,1,MPI_FLOAT,0,sendnum+1,MPI_COMM_WORLD);
// printf("send %f",sum);
}
sendnum=sendnum+1;
delete [] bufRowContent;
}
MPI_Send(&flag,1,MPI_FLOAT,0,0,MPI_COMM_WORLD); // end-of-column flag for the master
delete [] bufColContent;
}
return(0);
}
//////////////////////////////////////
int MatrixMultiply_P(char matrix1File[],char matrix2File[],char outputFile[])
{
if(numprocs<2)
{
printf("Too Few Processes, Abort!\n");
MPI_Abort(MPI_COMM_WORLD,99);
}
MPI_File_open(MPI_COMM_WORLD,matrix1File,MPI_MODE_RDONLY,MPI_INFO_NULL,&m1FileHandle);
MPI_File_open(MPI_COMM_WORLD,matrix2File,MPI_MODE_RDONLY,MPI_INFO_NULL,&m2FileHandle);
MPI_File_open(MPI_COMM_WORLD,outputFile,MPI_MODE_CREATE|MPI_MODE_WRONLY,MPI_INFO_NULL,&outFileHandle);
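// all ranks open the three files collectively; rank 0 writes the product, the others read the inputs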
if(myid==0)
{
//MatrixMultiply_PM(m1FileHandle,m2FileHandle,outFileHandle);
MatrixMultiply_PM(outFileHandle);
}
else
{
MatrixMultiply_PS(m1FileHandle,m2FileHandle);
}
MPI_File_close(&m1FileHandle);
MPI_File_close(&m2FileHandle);
MPI_File_close(&outFileHandle);
return 0;
}
///////////////////////////////////////
int main(int argc, char *argv[])
{
index1=44050; // matrix dimensions, hard-coded for this run
index2=44050;
flag=0;
MPI_Init(&argc, &argv);
// get the rank of the current process
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
// get the total number of participating processes
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
// get the name of the machine running this process
MPI_Get_processor_name(processor_name,&namelen);
// at least two processes are required (checked inside MatrixMultiply_P)
MatrixMultiply_P("010","011","out");
MPI_Finalize();
return 0;
}
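/* Hypothetical usage sketch (file and binary names here are assumptions,
   not part of the source):
     mpicxx matmul.cpp -o matmul   # a C++ MPI compiler is needed: the code uses new/delete
     mpirun -np 5 ./matmul         # rank 0 is the master; ranks 1..4 are workers
   The inputs "010" (matrix 1) and "011" (matrix 2) must already exist in the
   sparse layout described above; the product is written to "out". */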