
📄 az_comm.c

📁 Parallel solver, powerful functionality
💻 C
📖 Page 1 of 5
      (void) fprintf(stderr, "%sERROR on node %d\nmd_wait failed, message "
                     "type = %d\n", yo, node, type);
      exit(-1);
    }
  }

} /* AZ_gdot_vec */

/******************************************************************************/
/******************************************************************************/
/******************************************************************************/

void AZ_print_sync_start(int proc, int do_print_line, int proc_config[])

/*******************************************************************************

  Routine to allow IO between print_sync_start and print_sync_end to be printed
  by each processor entirely before the next processor begins its IO.  The
  printing sequence is from proc = 0 to the last processor,
  number_of_procs = nprocs - 1.

  NOTE: THERE CAN BE NO COMMUNICATION BETWEEN THESE CALLS.

  Author:          John N. Shadid, SNL, 1421
  =======

  Return code:     void
  ============

  Parameter list:
  ===============

  proc:            Current processor number.

  do_print_line:   Boolean variable.  If true, a line of # is printed to
                   indicate the start of a print_sync I/O block.

*******************************************************************************/

{

  /* local variables */

  int flag = 1, from, st, type;
  MPI_AZRequest request;

  /**************************** execution begins ******************************/

  type = AZ_sys_msg_type;

  if (proc_config[AZ_node] != 0) {
    from = proc - 1;

    mdwrap_iread((void *) &flag, sizeof(int), &from, &type, &request);
    mdwrap_wait((void *) &flag, sizeof(int), &from, &type, &st, &request);
  }
  else {
    if (do_print_line) {
      (void) printf("\n");
      for (flag = 0; flag < 37; flag++) (void) printf("#");
      (void) printf(" PRINT_SYNC_START ");
      for (flag = 0; flag < 25; flag++) (void) printf("#");
      (void) printf("\n");
    }
  }

} /* AZ_print_sync_start */

/******************************************************************************/
/******************************************************************************/
/******************************************************************************/

void AZ_print_sync_end(int proc_config[], int do_print_line)

/*******************************************************************************

  Routine to allow IO between print_sync_start and print_sync_end to be printed
  by each processor entirely before the next processor begins its IO.  The
  printing sequence is from proc = 0 to the last processor,
  number_of_procs = nprocs - 1.

  NOTE: THERE CAN BE NO COMMUNICATION BETWEEN THESE CALLS.

  Author:          John N. Shadid, SNL, 1421
  =======

  Return code:     void
  ============

  Parameter list:
  ===============

  proc:            Current processor number.

  nprocs:          Number of processors in the current machine configuration.

  do_print_line:   Boolean variable.  If true, a line of # is printed to
                   indicate the start of a print_sync I/O block.

*******************************************************************************/

{

  /* local variables */

  int st, flag = 1, from, type, to, proc, nprocs;
  MPI_AZRequest request, request2;

  /**************************** execution begins ******************************/

  proc   = proc_config[AZ_node];
  nprocs = proc_config[AZ_N_procs];

  type            = AZ_sys_msg_type;
  AZ_sys_msg_type = (AZ_sys_msg_type+1-AZ_MSG_TYPE) % AZ_NUM_MSGS + AZ_MSG_TYPE;

  if (proc < nprocs - 1) to = proc + 1;
  else {
    to = 0;
    if (do_print_line) {
      (void) printf("\n");
      for (flag = 0; flag < 37; flag++) (void) printf("#");
      (void) printf(" PRINT_SYNC_END__ ");
      for (flag = 0; flag < 25; flag++) (void) printf("#");
      (void) printf("\n\n");
    }
  }

  mdwrap_iwrite((void *) &flag, sizeof(int), to, type, &st, &request);

  if (proc == 0) {
    from = nprocs - 1;
    mdwrap_iread((void *) &flag, sizeof(int), &from, &type, &request2);
    mdwrap_wait((void *) &flag, sizeof(int), &from, &type, &st, &request2);
  }

  /*
   * Do a final sync amongst all the processors, so that all of the other
   * processors must wait for Proc 0 to receive the final message from
   * Proc (Num_Proc-1).
   */

  AZ_sync(proc_config);

} /* AZ_print_sync_end */

/******************************************************************************/
/******************************************************************************/
/******************************************************************************/

void AZ_sync(int proc_config[])

/*******************************************************************************

  Author:          John N. Shadid, SNL, 1421
  =======

  Return code:     void
  ============

  Parameter list:
  ===============

  node:            Current processor number.
  nprocs:          Number of processors in the current machine configuration.

*******************************************************************************/

{

  /* local variables */

  int   type;                     /* type of next message */
  int   partner;                  /* processor I exchange with */
  int   mask;                     /* bit pattern identifying partner */
  int   hbit;                     /* largest nonzero bit in nprocs */
  int   nprocs_small;             /* largest power of 2 <= nprocs */
  int   cflag;                    /* dummy argument for compatibility */
  int   node, nprocs;

  char *yo = "sync: ";

  MPI_AZRequest request;          /* Message handle */

  /**************************** execution begins ******************************/

  node   = proc_config[AZ_node];
  nprocs = proc_config[AZ_N_procs];

  type            = AZ_sys_msg_type;
  AZ_sys_msg_type = (AZ_sys_msg_type+1-AZ_MSG_TYPE) % AZ_NUM_MSGS + AZ_MSG_TYPE;

  /* Find next lower power of 2. */

  for (hbit = 0; (nprocs >> hbit) != 1; hbit++);

  nprocs_small = 1 << hbit;

  if (nprocs_small*2 == nprocs) {
    nprocs_small *= 2;
    hbit++;
  }

  partner = node ^ nprocs_small;

  if (node+nprocs_small < nprocs) {

    /* post receives on the hypercube portion of the machine partition */

    if (mdwrap_iread((void *) NULL, 0, &partner, &type, &request)) {
      (void) fprintf(stderr, "%sERROR on node %d\nmd_iread failed, message "
                     "type = %d\n", yo, node, type);
      exit(-1);
    }
  }
  else if (node & nprocs_small) {

    /*
     * Send messages from the portion of the machine partition "above" the
     * largest hypercube to the hypercube portion.
     */

    if (mdwrap_write((void *) NULL, 0, partner, type, &cflag)) {
      (void) fprintf(stderr, "%sERROR on node %d\nmd_write failed, message "
                     "type = %d\n", yo, node, type);
      exit(-1);
    }
  }

  if (node+nprocs_small < nprocs) {

    /*
     * Wait to receive the messages.  These messages will return length 1
     * because MPI will not necessarily send a zero-length message.
     */

    (void) mdwrap_wait((void *) NULL, 0, &partner, &type, &cflag, &request);
  }

  /* Now do a binary exchange on nprocs_small nodes. */

  if (!(node & nprocs_small)) {
    for (mask = nprocs_small>>1; mask; mask >>= 1) {
      partner = node ^ mask;

      if (mdwrap_iread((void *) NULL, 0, &partner, &type, &request)) {
        (void) fprintf(stderr, "%sERROR on node %d\nmd_iread failed, message "
                       "type = %d\n", yo, node, type);
        exit(-1);
      }

      if (mdwrap_write((void *) NULL, 0, partner, type, &cflag)) {
        (void) fprintf(stderr, "%sERROR on node %d\nmd_write failed, message "
                       "type = %d\n", yo, node, type);
        exit(-1);
      }

      /*
       * Wait to receive the messages.  These messages will return length 1
       * because MPI will not necessarily send a zero-length message.
       */

      (void) mdwrap_wait((void *) NULL, 0, &partner, &type, &cflag, &request);
    }
  }

  /* Finally, send message from lower half to upper half. */

  partner = node ^ nprocs_small;

  if (node & nprocs_small) {
    if (mdwrap_iread((void *) NULL, 0, &partner, &type, &request)) {
      (void) fprintf(stderr, "%sERROR on node %d\nmd_iread failed, message "
                     "type = %d\n", yo, node, type);
      exit(-1);
    }
  }
  else if (node+nprocs_small < nprocs) {
    if (mdwrap_write((void *) NULL, 0, partner, type, &cflag)) {
      (void) fprintf(stderr, "%sERROR on node %d\nmd_write failed, message "
                     "type = %d\n", yo, node, type);
      exit(-1);
    }
  }

  /*
   * Wait to receive the messages.  These messages will return length 1
   * because MPI will not necessarily send a zero-length message.
   */

  if (node & nprocs_small) {
    (void) mdwrap_wait((void *) NULL, 0, &partner, &type, &cflag, &request);
  }

} /* AZ_sync */

/******************************************************************************/
/******************************************************************************/
/******************************************************************************/

void AZ_gsum_vec_int(int vals[], int vals2[], int length, int proc_config[])

/*******************************************************************************

  For each element in vals[], perform a global sum with the other processors.
  That is, on output vals[i] is equal to the sum of the input values in vals[i]
  on all the processors.

  Author:          Ray Tuminaro, SNL, 1422
  =======

  Return code:     void
  ============

  Parameter list:
  ===============

  vals:            On input, vals[i] on this processor is to be summed with
                   vals[i] on all the other processors.
                   On output, vals[i] is the sum of the input values in vals[i]
                   defined on all processors.

  vals2:           Work space of size 'length'.

  node:            Current processor number.

  nprocs:          Number of processors in the current machine configuration.

  length:          Number of values in 'vals' (i.e. number of global sums).

*******************************************************************************/

{

  /* local variables */

  int   type;             /* type of next message */
  int   partner;          /* processor I exchange with */
  int   mask;             /* bit pattern identifying partner */
  int   hbit;             /* largest nonzero bit in nprocs */
  int   nprocs_small;     /* largest power of 2 <= nprocs */
  int   cflag;            /* dummy argument for compatibility */
  int   k;
  int   node, nprocs;

  char *yo = "AZ_gsum_vec_int: ";

  MPI_AZRequest request;  /* Message handle */

  /*********************** first executable statement *****************/

  node   = proc_config[AZ_node];
  nprocs = proc_config[AZ_N_procs];

  type            = AZ_sys_msg_type;
  AZ_sys_msg_type = (AZ_sys_msg_type+1-AZ_MSG_TYPE) % AZ_NUM_MSGS + AZ_MSG_TYPE;

  /* Find next lower power of 2. */

  for (hbit = 0; (nprocs >> hbit) != 1; hbit++);

  nprocs_small = 1 << hbit;

  if (nprocs_small * 2 == nprocs) {
    nprocs_small *= 2;
    hbit++;
  }

  partner = node ^ nprocs_small;

  if (node+nprocs_small < nprocs) {

    /* post receives on the hypercube portion of the machine partition */

    if (mdwrap_iread((void *) vals2, length*sizeof(int), &partner, &type,
                     &request)) {
      (void) fprintf(stderr, "%sERROR on node %d\nmd_iread failed, message "
                     "type = %d\n", yo, node, type);
      exit(-1);
    }
  }
  else if (node & nprocs_small) {

    /*
     * Send messages from the portion of the machine partition "above" the
     * largest hypercube to the hypercube portion.
     */

    if (mdwrap_write((void *) vals, length*sizeof(int), partner, type,
                     &cflag)) {
      (void) fprintf(stderr, "%sERROR on node %d\nmd_write failed, message "
                     "type = %d\n", yo, node, type);
      exit(-1);
    }
  }

  if (node+nprocs_small < nprocs) {

    /* wait to receive the messages */

    if (mdwrap_wait((void *) vals2
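The listing breaks off mid-call above because this page is only the first of five pages of az_comm.c; AZ_gsum_vec_int continues on the next page. For orientation, here is a minimal, hedged usage sketch of the synchronized-printing and barrier routines shown on this page. The setup calls (az_aztec.h, AZ_PROC_SIZE, AZ_set_proc_config) do not appear on this page and are assumed from the usual Aztec public interface, so treat them as illustrative rather than as the library's documented API.

/*
 * Usage sketch (not part of az_comm.c): each rank prints its own line
 * without interleaving, then all ranks meet at a barrier.
 */
#include <stdio.h>
#include <mpi.h>
#include "az_aztec.h"                 /* assumed Aztec public header        */

int main(int argc, char *argv[])
{
  int proc_config[AZ_PROC_SIZE];      /* assumed Aztec processor-info array */

  MPI_Init(&argc, &argv);
  AZ_set_proc_config(proc_config, MPI_COMM_WORLD);  /* assumed setup call   */

  /* Everything between start and end is printed one processor at a time,
   * in order 0 .. nprocs-1.  Per the headers above, no communication may
   * occur between these two calls.                                         */
  AZ_print_sync_start(proc_config[AZ_node], 1, proc_config);
  (void) printf("node %d is alive\n", proc_config[AZ_node]);
  AZ_print_sync_end(proc_config, 1);

  AZ_sync(proc_config);               /* barrier over all processors        */

  MPI_Finalize();
  return 0;
}

The ordering guarantee comes entirely from the token passed from rank to rank by mdwrap_iwrite/mdwrap_iread in AZ_print_sync_start and AZ_print_sync_end, which is why extra communication inside the block would deadlock or reorder the output.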
