📄 main_mpi.c
  /* root tells all processes (itself as well) what size the xml_buffer is */
  MPI_Bcast (&xml_buffer_size, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
  fprintf (stderr, "[process %d] received xml_buffer_size=%d bytes\n",
           rank, xml_buffer_size);

  /* xml buffer alloc for slaves */
  if (rank != ROOT)
    {
      xml_buffer = (char *) malloc (xml_buffer_size * sizeof (char));
      if (xml_buffer == NULL)
        {
          MPI_Abort (MPI_COMM_WORLD, 1);
          exit (1);
        }
    }

  /* root sends all processes (also itself) the xml_buffer */
  MPI_Bcast (xml_buffer, xml_buffer_size, MPI_CHAR, ROOT, MPI_COMM_WORLD);
  fprintf (stderr, "[process %d] received xml_buffer\n", rank);

  /* Timing */
  fprintf (stdout, "*** [process %d] TIME: mesh(xml) send/receive operations= %f s.\n",
           rank, MPI_Wtime () - start_time);

  /* everyone parses the xml_buffer string */
  if (!(mesh = mesh_init_from_memory (xml_buffer, xml_buffer_size, meshfile)))
    {
      fprintf (stderr, "[process %d] could not alloc mesh (mesh_init_from_memory failed)\n", rank);
      MPI_Abort (MPI_COMM_WORLD, 1);
      exit (1);
    }

  /* everyone frees the xml_buffer since it has been parsed now */
  if (rank == ROOT)
    {
      unload_file_from_memory (xml_buffer, xml_buffer_size);

      /* Only ROOT has the raydata input file: how many rays to trace? */
      fprintf (stdout, "*** [root] counting how many rays to trace\n");
      size = get_number_of_lines (fdinput);
    }
  else
    {
      free (xml_buffer);
    }
  fflush (stdout);

  /*****************************/
  /* now, start the real stuff */
  /*****************************/
  comm_time = MPI_Wtime ();

  if (rank == ROOT)
    {
      /********/
      /* ROOT */
      /********/

      /* read a bunch of lines from the input file */
      raydata = get_raydata (fdinput, size, &nbread, &nberr);
      if (!raydata)
        {
          fprintf (stdout, "*** [root] raydata=NULL ... aborting !\n");
          MPI_Abort (MPI_COMM_WORLD, 1);
          exit (1);
        }
      fclose (fdinput);
      fprintf (stdout, "*** [root] read %d lines ***\n", nbread);

#ifndef USE_MPILB
      sizeperproc = nbread / nbprocs;

      /* if nbread is not a multiple of nbprocs, ROOT will treat the remainder */
      if (nbread % nbprocs)
        {
          ray_remainder = (struct raydata_t *) malloc ((nbread % nbprocs) * sizeof (struct raydata_t));
          if (ray_remainder == NULL)
            {
              MPI_Abort (MPI_COMM_WORLD, 1);
              exit (1);
            }
          memcpy (ray_remainder, raydata + sizeperproc * nbprocs,
                  (nbread % nbprocs) * sizeof (struct raydata_t));
        }
#else
      if (get_spp (nbprocs, nbread) == -1)
        {
          fprintf (stderr, "[root] Could not get spp. Exiting.\n");
          MPI_Abort (MPI_COMM_WORLD, 1);
          exit (1);
        }
      /* NOTE: there is no remaining data with USE_MPILB */
#endif
    }

#ifndef USE_MPILB
  /* broadcast to slaves the number of rays to wait for */
  MPI_Bcast (&sizeperproc, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
#else
  /* if not ROOT, allocate memory for tag_spp[_sum] */
  if (rank != ROOT && alloc_spp (nbprocs) == -1)
    {
      fprintf (stderr, "[%d] Could not alloc spp. Exiting.\n", rank);
      MPI_Abort (MPI_COMM_WORLD, 1);
      exit (1);
    }
  /* broadcast tag_spp[_sum] */
  MPI_Bcast (tag_spp, nbprocs, MPI_INT, ROOT, MPI_COMM_WORLD);
  MPI_Bcast (tag_spp_sum, nbprocs, MPI_INT, ROOT, MPI_COMM_WORLD);
  sizeperproc = tag_spp[rank];
#endif

  fprintf (stdout, "*** [process %d] received a chunk of %d rays\n", rank, sizeperproc);
  fflush (stdout);

  /* receive data */
  recv_data = send_or_recv_raydata (raydata, sizeperproc, ROOT);
  comm_time = MPI_Wtime () - comm_time;

  /* each process computes its share of work */
  ray_time_start = MPI_Wtime ();
  nb_ray_total = sizeperproc;

  /* offset is used to compute a unique rayid: a process starts numbering
   * at rank * s, where s = sizeperproc * nbprocs, assuming the number of
   * rays rejected > number of rays spawning multiple rays.
   */
  offset = rank * sizeperproc * nbprocs;
  bunch_of_ray (recv_data, sizeperproc, offset, &ray_config, &ray_filter,
                &nb_ray_computed, &nb_ray_rejected,
                filter_fd, sparse_fd, res_fd, event_fd);
  /* raydata is freed after bunch_of_ray */

#ifdef RAYTRACING_ONLY
  fprintf (stdout, "*** [process %d] ray-traced %d rays and rejected %d rays in %f s!\n",
           rank, nb_ray_computed, nb_ray_rejected, MPI_Wtime () - ray_time_start);
#endif

  /* NOTE: there is no remaining data with USE_MPILB */
#ifndef USE_MPILB
  /* ROOT treats the remainder of the rays: the remaining part of raydata
   * has been copied to ray_remainder before running bunch_of_ray, which
   * frees raydata */
  if (rank == ROOT && nbread % nbprocs)
    {
      fprintf (stdout, "*** [process %d] rays remaining=%d ****\n", rank, nbread % nbprocs);
      nb_ray_total += nbread % nbprocs;

      /* get last ids (beyond all others) */
      offset = rank * sizeperproc * (nbprocs + 1);
      bunch_of_ray (ray_remainder, nbread % nbprocs, offset, &ray_config, &ray_filter,
                    &nb_ray_computed, &nb_ray_rejected,
                    filter_fd, sparse_fd, res_fd, event_fd);
    }
#endif

  fprintf (stdout, "*** [process %d] %d/%d rays computed and %d/%d rays rejected (%.2f)\n",
           rank, nb_ray_computed, nb_ray_total, nb_ray_rejected, nb_ray_total,
           (float) nb_ray_rejected / (float) nb_ray_total * 100);

  ray_time_end = MPI_Wtime ();

#ifdef RAYTRACING_ONLY
  fprintf (stdout, "*** [process %d] raytracing TIME = %f s\n", rank, ray_time_end - start_time);
  fflush (stdout);
#else
  /* wait until all processes have completed their raytracing */
  MPI_Barrier (MPI_COMM_WORLD);

  /**********************************************/
  /* data exchange : the merge                  */
  /* send/receive cell info to/from other procs */
  /**********************************************/
  if (rank == ROOT)
    {
      fprintf (stdout, "Cell merge started\n");
    }
  merge_times = send_recv_cell_info (MPI_COMM_WORLD, cell_info, mesh, tmpdir);
#endif

  /*******************/
  /* score computing */
  /*******************/
  date_stamp = get_date_stamp ();
  fprintf (stdout, "*** [process %d] computing score (%s)\n", rank, date_stamp);
  free (date_stamp);

  score_time = MPI_Wtime ();
  compute_score (cell_info, mesh);
  score_time = MPI_Wtime () - score_time;

  /**************/
  /* show timer */
  /**************/
  fprintf (stdout,
           "*** [process %d] computing TIME (s) comm,ray,merge,score,total = %.2f,%.2f,%.2f,%.2f,%.2f (%d non empty cells)\n",
           rank, comm_time, ray_time_end - ray_time_start, merge_times[8], score_time,
           MPI_Wtime () - start_time, count_non_empty_cell_in_cellinfo (cell_info, mesh));
  fprintf (stdout,
           "*** [process %d] merge TIME (s) timers[0..9]= %.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f\n",
           rank, merge_times[0], merge_times[1], merge_times[2], merge_times[3], merge_times[4],
           merge_times[5], merge_times[6], merge_times[7], merge_times[8], merge_times[9]);
  free (merge_times);
  fflush (stdout);

  /****************/
  /* save results */
  /****************/
  fprintf (stdout, "*** [process %d] dumps info to file\n", rank);

  if (strchr (output_format, 'r'))
    {
      /* r2m */
      char *r2m_file;

      /* nbprocs is used but there are only nbprocs-1 sub-domains */
      /* rank-1 is the domain_id in [0,nbprocs-1[ */
      r2m_file = construct_filename (celldatafilename, "r2m", rank, 0);
      fprintf (stdout, "*** [process %d] writing cell data (r2m formatted) in %s\n",
               rank, r2m_file);
      mesh_add_data_filename (mesh, R2M, r2m_file);
      make_domain_info_file (r2m_file, cell_info, mesh, rank, nbprocs, rank);
      free (r2m_file);
    }
  else
    {
      /* sco */
      char *sco_file;

      sco_file = construct_filename (celldatafilename, "sco", rank, 0);
      fprintf (stdout, "*** [process %d] writing cell data (sco formatted) in %s\n",
               rank, sco_file);
      mesh_add_data_filename (mesh, SCO, sco_file);
      mesh_cellinfo_write_sco (sco_file, cell_info, mesh);
      free (sco_file);
    }

  /* close sparse, res and evt files */
  if (sparse_fd)
    fclose (sparse_fd);
  if (res_fd)
    fclose (res_fd);
  if (event_fd)
    fclose (event_fd);

  /* everyone closes its filtered ray file (if filtering was used) */
  if (filter_fd)
    fclose (filter_fd);

  /**********************************************************/
  /* save the xml enriched with SCO/R2M/SPARSE/RES sections */
  /**********************************************************/
  if (rank != ROOT)
    {
      char *xml_output;

      xml_output = (char *) malloc ((strlen (celldatafilename) + strlen (".xml") + 1 + MAX_LENGTH_RANK) * sizeof (char));
      assert (xml_output);
      sprintf (xml_output, "%s-%d.xml", celldatafilename, rank);
      mesh2xml (mesh, xml_output);
      free (xml_output);
    }

  /**********************/
  /* This is the end :) */
  /**********************/
  MPI_Barrier (MPI_COMM_WORLD);

  date_stamp = get_date_stamp ();
  fprintf (stdout, "*** [process %d] application ends normally (%s)\n", rank, date_stamp);
  free (date_stamp);

  free (celldatafilename);
  free_velocity_model (ray_config.velocity_model);

  MPI_Finalize ();

  return (0);
}