📄 system_io.c
 *
 * Note that the actual IO is handled through the Xdr class
 * (to be renamed later?) which provides a uniform interface to
 * both the XDR (eXternal Data Representation) interface and standard
 * ASCII output.  Thus this one section of code will read XDR or ASCII
 * files with no changes.
 */
  std::string comment;

  libmesh_assert (io.writing());

  std::vector<Number> io_buffer;
  io_buffer.reserve(this->solution->local_size());

  // build the ordered nodes and element maps.
  // when writing/reading parallel files we need to iterate
  // over our nodes/elements in order of increasing global id().
  // however, this is not guaranteed to be the ordering we obtain
  // by using the node_iterators/element_iterators directly.
  // so build a set, sorted by id(), that provides the ordering.
  // further, for memory economy build the set but then transfer
  // its contents to vectors, which will be sorted.
  std::vector<const DofObject*> ordered_nodes, ordered_elements;
  {
    std::set<const DofObject*, CompareDofObjectsByID>
      ordered_nodes_set (this->get_mesh().local_nodes_begin(),
                         this->get_mesh().local_nodes_end());

    ordered_nodes.insert(ordered_nodes.end(),
                         ordered_nodes_set.begin(),
                         ordered_nodes_set.end());
  }
  {
    std::set<const DofObject*, CompareDofObjectsByID>
      ordered_elements_set (this->get_mesh().local_elements_begin(),
                            this->get_mesh().local_elements_end());

    ordered_elements.insert(ordered_elements.end(),
                            ordered_elements_set.begin(),
                            ordered_elements_set.end());
  }

  const unsigned int sys_num = this->number();
  const unsigned int n_vars  = this->n_vars();

  // Loop over each variable and each node, and write out the value.
  for (unsigned int var=0; var<n_vars; var++)
    {
      // First write the node DOF values
      for (std::vector<const DofObject*>::const_iterator
             it = ordered_nodes.begin(); it != ordered_nodes.end(); ++it)
        for (unsigned int comp=0; comp<(*it)->n_comp(sys_num, var); comp++)
          {
            //std::cout << "(*it)->id()=" << (*it)->id() << std::endl;
            libmesh_assert ((*it)->dof_number(sys_num, var, comp) !=
                            DofObject::invalid_id);

            io_buffer.push_back((*this->solution)((*it)->dof_number(sys_num, var, comp)));
          }

      // Then write the element DOF values
      for (std::vector<const DofObject*>::const_iterator
             it = ordered_elements.begin(); it != ordered_elements.end(); ++it)
        for (unsigned int comp=0; comp<(*it)->n_comp(sys_num, var); comp++)
          {
            libmesh_assert ((*it)->dof_number(sys_num, var, comp) !=
                            DofObject::invalid_id);

            io_buffer.push_back((*this->solution)((*it)->dof_number(sys_num, var, comp)));
          }
    }

  // 9.)
  //
  // Actually write the reordered solution vector
  // for the ith system to disk

  // set up the comment
  {
    comment = "# System \"";
    comment += this->name();
    comment += "\" Solution Vector";
  }

  io.data (io_buffer, comment.c_str());

  // Only write additional vectors if wanted
  if (write_additional_data)
    {
      std::map<std::string, NumericVector<Number>* >::const_iterator
        pos = _vectors.begin();

      for (; pos != this->_vectors.end(); ++pos)
        {
          io_buffer.clear();
          io_buffer.reserve (pos->second->local_size());

          // Loop over each variable and each node, and write out the value.
          for (unsigned int var=0; var<n_vars; var++)
            {
              // First write the node DOF values
              for (std::vector<const DofObject*>::const_iterator
                     it = ordered_nodes.begin(); it != ordered_nodes.end(); ++it)
                for (unsigned int comp=0; comp<(*it)->n_comp(sys_num, var); comp++)
                  {
                    libmesh_assert ((*it)->dof_number(sys_num, var, comp) !=
                                    DofObject::invalid_id);

                    io_buffer.push_back((*pos->second)((*it)->dof_number(sys_num, var, comp)));
                  }

              // Then write the element DOF values
              for (std::vector<const DofObject*>::const_iterator
                     it = ordered_elements.begin(); it != ordered_elements.end(); ++it)
                for (unsigned int comp=0; comp<(*it)->n_comp(sys_num, var); comp++)
                  {
                    libmesh_assert ((*it)->dof_number(sys_num, var, comp) !=
                                    DofObject::invalid_id);

                    io_buffer.push_back((*pos->second)((*it)->dof_number(sys_num, var, comp)));
                  }
            }

          // 10.)
          //
          // Actually write the reordered additional vector
          // for this system to disk

          // set up the comment
          {
            comment = "# System \"";
            comment += this->name();
            comment += "\" Additional Vector \"";
            comment += pos->first;
            comment += "\"";
          }

          io.data (io_buffer, comment.c_str());
        }
    }
}



void System::write_serialized_data (Xdr& io,
                                    const bool write_additional_data) const
{
  /**
   * This method implements the output of the vectors
   * contained in this System object, embedded in the
   * output of an EquationSystems<T_sys>.
   *
   * 9.) The global solution vector, re-ordered to be node-major
   *     (More on this later.)
   *
   *     for each additional vector in the object
   *
   * 10.) The global additional vector, re-ordered to be
   *      node-major (More on this later.)
   */
  parallel_only();
  std::string comment;

  this->write_serialized_vector(io, *this->solution);

  // set up the comment
  if (libMesh::processor_id() == 0)
    {
      comment = "# System \"";
      comment += this->name();
      comment += "\" Solution Vector";

      io.comment (comment);
    }

  // Only write additional vectors if wanted
  if (write_additional_data)
    {
      std::map<std::string, NumericVector<Number>* >::const_iterator
        pos = _vectors.begin();

      for (; pos != this->_vectors.end(); ++pos)
        {
          this->write_serialized_vector(io, *pos->second);

          // set up the comment
          if (libMesh::processor_id() == 0)
            {
              comment = "# System \"";
              comment += this->name();
              comment += "\" Additional Vector \"";
              comment += pos->first;
              comment += "\"";

              io.comment (comment);
            }
        }
    }
}



template <typename iterator_type>
unsigned int System::write_serialized_blocked_dof_objects (const NumericVector<Number> &vec,
                                                           const unsigned int var,
                                                           const unsigned int n_objects,
                                                           const iterator_type begin,
                                                           const iterator_type end,
                                                           Xdr &io) const
{
  const unsigned int sys_num = this->number();

  unsigned int written_length=0;              // The number of values written.  This will be returned

  std::vector<unsigned int> xfer_ids;         // The global IDs and # of components for the local objects in the current block
  std::vector<Number>       xfer_vals;        // The raw values for the local objects in the current block

  std::vector<std::vector<unsigned int> >     // The global ID and # of components received from each processor
    recv_ids (libMesh::n_processors());       //  for the current block
  std::vector<std::vector<Number> >           // The raw values received from each processor
    recv_vals(libMesh::n_processors());       //  for the current block

  std::vector<std::vector<Number>::iterator>  // The next value on each processor for the current block
    val_iters;
  val_iters.reserve(libMesh::n_processors());

  std::vector<unsigned int> &idx_map     = xfer_ids;  // map to traverse entry-wise rather than processor-wise (renamed for notational convenience)
  std::vector<Number>       &output_vals = xfer_vals; // The output buffer for the current block (renamed for notational convenience)

  //---------------------------------
  // Collect the values for all objects
  unsigned int first_object=0, last_object=0;

  for (unsigned int blk=0; last_object<n_objects; blk++)
    {
      //std::cout << "Writing object block " << blk << " for var " << var << std::endl;

      // Each processor should build up its transfer buffers for its
      // local objects in [first_object,last_object).
      first_object = blk*io_blksize;
      last_object  = std::min((blk+1)*io_blksize, n_objects);

      // Clear the transfer buffers for this block.
      xfer_ids.clear();
      xfer_vals.clear();

      for (iterator_type it=begin; it!=end; ++it)
        if (((*it)->id() >= first_object) &&   // object in [first_object,last_object)
            ((*it)->id() <  last_object)  &&
            (*it)->n_comp(sys_num,var))        // var has a nonzero # of components on this object
          {
            xfer_ids.push_back((*it)->id());
            xfer_ids.push_back((*it)->n_comp(sys_num, var));

            for (unsigned int comp=0; comp<(*it)->n_comp(sys_num, var); comp++)
              {
                libmesh_assert ((*it)->dof_number(sys_num, var, comp) >= vec.first_local_index());
                libmesh_assert ((*it)->dof_number(sys_num, var, comp) <  vec.last_local_index());

                xfer_vals.push_back(vec((*it)->dof_number(sys_num, var, comp)));
              }
          }

      //-----------------------------------------
      // Send the transfer buffers to processor 0.

      // Get the size of the incoming buffers -- optionally
      // we could over-size the recv buffers based on
      // some maximum size to avoid these communications
      std::vector<unsigned int> ids_size, vals_size;

      const unsigned int my_ids_size  = xfer_ids.size();
      const unsigned int my_vals_size = xfer_vals.size();

      Parallel::gather (0, my_ids_size,  ids_size);
      Parallel::gather (0, my_vals_size, vals_size);

      // Note that we will actually send/receive to ourself if we are
      // processor 0, so let's use nonblocking receives.
      std::vector<Parallel::request>
        id_request_handles (libMesh::n_processors()),
        val_request_handles(libMesh::n_processors());

#ifdef HAVE_MPI
      const unsigned int id_tag=0, val_tag=1;

      // Post the receives -- do this on processor 0 only.
      if (libMesh::processor_id() == 0)
        for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
          {
            recv_ids[pid].resize(ids_size[pid]);
            recv_vals[pid].resize(vals_size[pid]);

            Parallel::irecv (pid, recv_ids[pid],  id_request_handles[pid],  id_tag);
            Parallel::irecv (pid, recv_vals[pid], val_request_handles[pid], val_tag);
          }

      // Send -- do this on all processors.
      Parallel::send(0, xfer_ids,  id_tag);
      Parallel::send(0, xfer_vals, val_tag);
#else
      // On one processor there's nothing to send
      recv_ids[0]  = xfer_ids;
      recv_vals[0] = xfer_vals;
#endif

      // -------------------------------------------------------
      // Receive the messages and write the output on processor 0.
      if (libMesh::processor_id() == 0)
        {
          // Wait for all the receives to complete.  We have no
          // need for the statuses since we already know the
          // buffer sizes.
          Parallel::wait (id_request_handles);
          Parallel::wait (val_request_handles);

          // Write the values in this block.
          unsigned int tot_id_size=0, tot_val_size=0;

          val_iters.clear();

          for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
            {
              tot_id_size  += recv_ids[pid].size();
              tot_val_size += recv_vals[pid].size();

              val_iters.push_back(recv_vals[pid].begin());
            }

          libmesh_assert (tot_id_size <= 2*std::min(io_blksize, n_objects));

          // Create a map to avoid searching.  This will allow us to
          // traverse the received values in [first_object,last_object) order.
          idx_map.resize(3*io_blksize);
          std::fill (idx_map.begin(), idx_map.end(), libMesh::invalid_uint);

          for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
            for (unsigned int idx=0; idx<recv_ids[pid].size(); idx+=2)
              {
                const unsigned int local_idx = recv_ids[pid][idx+0]-first_object;
                libmesh_assert (local_idx < std::min(io_blksize, n_objects));

                const unsigned int n_comp = recv_ids[pid][idx+1];

                idx_map[3*local_idx+0] = pid;
                idx_map[3*local_idx+1] = n_comp;
                idx_map[3*local_idx+2] = std::distance(recv_vals[pid].begin(), val_iters[pid]);

                val_iters[pid] += n_comp;
              }

          output_vals.clear();
          output_vals.reserve (tot_val_size);

          for (unsigned int idx=0; idx<idx_map.size(); idx+=3)
            if (idx_map[idx] != libMesh::invalid_uint) // this could happen when a local object
              {                                        // has no components for the current variable
                const unsigned int pid       = idx_map[idx+0];
                const unsigned int n_comp    = idx_map[idx+1];
                const unsigned int first_pos = idx_map[idx+2];

                for (unsigned int comp=0; comp<n_comp; comp++)
                  {
                    libmesh_assert (first_pos + comp < recv_vals[pid].size());
                    output_vals.push_back(recv_vals[pid][first_pos + comp]);
                  }
              }

          libmesh_assert (output_vals.size() == tot_val_size);

          // write the stream
          io.data_stream (output_vals.empty() ? NULL : &output_vals[0],
                          output_vals.size());

          written_length += output_vals.size();

        } // end processor 0 conditional block
    } // end object block loop

  return written_length;
}



void System::write_serialized_vector (Xdr& io,
                                      const NumericVector<Number>& vec) const
{
  parallel_only();

  // If this is not the same on all processors we're in trouble!
  libmesh_assert (Parallel::verify(io_blksize));

  libmesh_assert (io.writing());

  unsigned int vec_length = vec.size();
  if (libMesh::processor_id() == 0)
    io.data (vec_length, "# vector length");

  unsigned int written_length = 0;

  // Loop over each variable in the system, and then each node/element in the mesh.
  for (unsigned int var=0; var<this->n_vars(); var++)
    {
      //---------------------------------
      // Collect the values for all nodes
      written_length +=
        this->write_serialized_blocked_dof_objects (vec, var,
                                                    this->get_mesh().n_nodes(),
                                                    this->get_mesh().local_nodes_begin(),
                                                    this->get_mesh().local_nodes_end(),
                                                    io);

      //------------------------------------
      // Collect the values for all elements
      written_length +=
        this->write_serialized_blocked_dof_objects (vec, var,
                                                    this->get_mesh().n_elem(),
                                                    this->get_mesh().local_elements_begin(),
                                                    this->get_mesh().local_elements_end(),
                                                    io);
    } // end variable loop

  if (libMesh::processor_id() == 0)
    libmesh_assert(written_length == vec_length);
}
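For orientation, the following is a minimal driver sketch (not part of system_io.C) showing one way write_serialized_data() might be invoked directly; in practice it is normally reached through EquationSystems::write(). The helper name save_solution, the file name "solution.xdr", the system name "sys", and the exact header paths are assumptions, and the Xdr constructor/ENCODE mode follow the older libMesh conventions this file appears to use; details vary between libMesh versions.

// Illustrative sketch only -- names and headers are assumptions, see note above.
#include "libmesh.h"
#include "equation_systems.h"
#include "xdr_cxx.h"

void save_solution (EquationSystems &es)
{
  // Only processor 0 opens the file; the other ranks pass an empty name.
  // Every rank must still call write_serialized_data(), since it gathers
  // the distributed vectors to processor 0 block by block.
  Xdr io (libMesh::processor_id() == 0 ? "solution.xdr" : "", ENCODE);

  const System &sys = es.get_system ("sys");

  // Write the solution vector and (with "true") each additional vector.
  sys.write_serialized_data (io, true);
}

Because write_serialized_vector() asserts io.writing() and verifies io_blksize on every processor, the Xdr object must be constructed in a writing mode on all ranks even though only rank 0 actually owns the file.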