⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 matmult.c

📁 并行矩阵乘法。用于MPI和OpenMP混合并行求解矩阵乘法问题。适用于分布共享存储cluster环境。无解压密码。
💻 C
字号:
#include <stdio.h>#include <stdlib.h>#include <unistd.h>#include <math.h>#define OPENMP ok#define USE_MPI ok#ifdef USE_MPI  #include <mpi.h>#endif /* USE_MPI */#ifdef _OPENMP  #include <omp.h>#endif /* _OPENMP */#define DIM 1024int main(int argc, char **argv){double start1, finish1, start2, finish2;int i, j, p, me, nprocs, num_threads,nrows,ncols;double test[10];#ifdef USE_MPIMPI_File fpa;MPI_File fpb;MPI_File fpc;MPI_File fpt;MPI_Status status;MPI_Request request;MPI_Datatype filetypea;MPI_Datatype filetypeb;MPI_Datatype filetypec;#elseFILE fpa;FILE fpb;FILE fpc;FILE fpt;#endif/*USE_MPI*/#ifdef _OPENMP  int np;#endif /* _OPENMP */#ifdef USE_MPI  int namelen;  char processor_name[MPI_MAX_PROCESSOR_NAME];#endif /* USE_MPI */#ifdef USE_MPI  MPI_Init(&argc, &argv);  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);  MPI_Comm_rank(MPI_COMM_WORLD, &me);  MPI_Get_processor_name(processor_name, &namelen);  start1 = MPI_Wtime();#else /* USE_MPI */  nprocs = 1;  me = 0;#endif /* USE_MPI */nrows = (int)ceil(log2(nprocs)/2);nrows = exp2(nrows);ncols = nprocs/nrows;printf("+++++++++++++++++++++++++++++++++++++++++++++\n");printf("%d rows, %d columns in total\n",nrows, ncols);printf("My ID is %d of %d\n",me,nprocs);printf("My name is %s\n",processor_name);int gsizes[2], distribs[2], dargs[2], psizesa[2], psizesb[2], psizesc[2];gsizes[0] = DIM;     gsizes[1] = DIM;    distribs[0] = MPI_DISTRIBUTE_BLOCK; distribs[1] = MPI_DISTRIBUTE_BLOCK;  dargs[0] = MPI_DISTRIBUTE_DFLT_DARG; dargs[1] = MPI_DISTRIBUTE_DFLT_DARG; psizesa[0] = nrows; psizesa[1] = 1; psizesb[0] = 1;psizesb[1] = ncols;psizesc[0] = nrows;psizesc[1] = ncols;int *odcol;odcol = (int *)malloc(nprocs*sizeof(int));for(i=0;i<nprocs;i++){  odcol[i] = (int)(i - ncols*(int)floor(i/ncols));  }int *odrow;odrow = (int *)malloc(nprocs*sizeof(int));for(i=0;i<nprocs;i++){  odrow[i] = (int)floor(i/ncols);}printf("I'm computing %dth row and %dth column\n",odrow[me],odcol[me]);MPI_Type_create_darray(nrows,odrow[me], 2, gsizes, distribs,          
      dargs, psizesa, MPI_ORDER_C, MPI_DOUBLE, &filetypea);MPI_Type_commit(&filetypea);MPI_Type_create_darray(ncols,odcol[me], 2, gsizes, distribs,                dargs, psizesb, MPI_ORDER_C, MPI_DOUBLE, &filetypeb); MPI_Type_commit(&filetypeb); MPI_Type_create_darray(nprocs, me, 2, gsizes, distribs, dargs,               psizesc, MPI_ORDER_C, MPI_DOUBLE, &filetypec);MPI_Type_commit(&filetypec);#ifdef _OPENMP  np = omp_get_num_procs();  omp_set_num_threads(np);  num_threads = omp_get_max_threads();#else /* _OPENMP */  num_threads = 1;#endif /* _OPENMP */#ifdef _OPENMP  printf(" using OpenMP with %d threads\n",     num_threads);#endif /* _OPENMP */#ifdef USE_MPIint sizeA = DIM * DIM / nrows;int sizeB = DIM * DIM / ncols;int sizeC = DIM * DIM /(nrows*ncols);double *myB = (double *)malloc(sizeB * sizeof(double));double *myA = (double *)malloc(sizeA * sizeof(double));double *myC = (double *)malloc(sizeC * sizeof(double));MPI_File_open(MPI_COMM_WORLD, "matrixA.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &fpa);MPI_File_set_view(fpa, 0 , MPI_DOUBLE, filetypea, "native", MPI_INFO_NULL);MPI_File_open(MPI_COMM_WORLD, "matrixB.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &fpb);MPI_File_set_view(fpb, 0, MPI_DOUBLE, filetypeb, "native", MPI_INFO_NULL);MPI_File_read_all(fpa, myA, sizeA , MPI_DOUBLE, &status);MPI_File_read_all(fpb, myB, sizeB, MPI_DOUBLE, &status);printf("Start of computation\n");start2 = MPI_Wtime();int l,u;#pragma omp parallel for private(j,l,u) for (i=0; i<DIM/nrows; i++)    for (j=0; j<DIM/ncols; j++)      { u=(int)(i*DIM/ncols)+j; myC[u]=0.0;        for (l=0; l<DIM; l++)          myC[u]=myC[u]+myA[i*DIM+l]*myB[(int)(l*DIM/ncols)+j];      }finish2 = MPI_Wtime();printf("My computation time is %f\n", finish2 - start2);MPI_File_open(MPI_COMM_WORLD, "mymC.dat",MPI_MODE_CREATE | MPI_MODE_WRONLY,MPI_INFO_NULL, &fpc);MPI_File_set_view(fpc, 0, MPI_DOUBLE, filetypec,"native",MPI_INFO_NULL);MPI_File_write_all(fpc, myC, sizeC, MPI_DOUBLE, 
&status);MPI_File_close(&fpa);MPI_File_close(&fpb);MPI_File_close(&fpc);#endif#ifdef USE_MPI  MPI_Barrier(MPI_COMM_WORLD);  finish1 = MPI_Wtime();  double time = finish1 - start1;  printf("My total time is %f\n",finish1 - start1);  printf("%d: Calling MPI_Finalize()\n", me);  MPI_Finalize();#endif /* USE_MPI */  exit(0);}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -