// spblink.cpp
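//
// SPBoundLink: communication of "shifted" data between linked spatial-region
// boundaries, either within the same process (SAME_PROCESS) or across
// processes over MPI (MPI_LINK).  The routines below pass shifted particles,
// shifted E/B/J field stripes, and shifted NGD (neutral gas density) data.
// This listing opens inside the MPI branch of an earlier field-wait routine,
// where the nine field components are unpacked from fieldsBufin.
//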
      // copy the fields back out of the receive buffer.
      for(int i=0; i<lenIn; i++) {
        /* lE[i]=fieldsBufin[i]; lB[i]=fieldsBufin[i+lenIn+2]; lJ[i]=fieldsBufin[i+2*lenIn+4]; */
        lEa[i] = fieldsBufin[i];
        lEb[i] = fieldsBufin[i + lenIn + 2];
        lEc[i] = fieldsBufin[i + 2*lenIn + 4];
        lBa[i] = fieldsBufin[i + 3*lenIn + 6];
        lBb[i] = fieldsBufin[i + 4*lenIn + 8];
        lBc[i] = fieldsBufin[i + 5*lenIn + 10];
        lJa[i] = fieldsBufin[i + 6*lenIn + 12];
        lJb[i] = fieldsBufin[i + 7*lenIn + 14];
        lJc[i] = fieldsBufin[i + 8*lenIn + 16];
      }
    }
    break;
#endif
  }
}

void SPBoundLink::sendShiftedParticles(vector<Particle*>& pl)
{
  // Make sure the local buffer is at least as large as the particle list
  if(shftPtclsSize < pl.size()){
    delete [] shftPtcls;
    shftPtcls = new ShiftedParticlePassData[pl.size()];
    shftPtclsSize = pl.size();
  }

  // Copy the particles into the list
  for(size_t i=0; i<pl.size(); i++){
    shftPtcls[i].speciesID = (Scalar) pl[i]->get_speciesID();
    shftPtcls[i].x = pl[i]->get_x();
    shftPtcls[i].u = pl[i]->get_u();
    shftPtcls[i].np2c = pl[i]->get_np2c();
    if ( pl[i]->isVariableWeight() ) shftPtcls[i].vary_np2c = 1.;
    else shftPtcls[i].vary_np2c = 0.;
  }
  numShftPtcls = pl.size();

  // Send the particles over
  switch(LinkType) {
  case SAME_PROCESS:
    cout << "sendShiftedParticles not implemented for same process.\n";
    assert(0);
    break;
#ifdef MPI_VERSION
  case MPI_LINK:
    {
#ifdef DEBUG
      // cout << "\nSBL:passParticles: sending " << n << " particles to " <<
      //   DestID << endl;
#endif
      MPI_Isend(shftPtcls,
                (sizeof(ShiftedParticlePassData)/sizeof(Scalar))*numShftPtcls,
                MPI_Scalar, DestID, SHIFT_PTCL_TAG + tag_modifier,
                XOOPIC_COMM, &shftPtclSendRequest);
      // cout << "MPI_RANK = " << MPI_RANK <<
      //   ": SPBoundLink::sendShiftedParticles: shftPtclSendRequest = " <<
      //   shftPtclSendRequest << endl;
    }
    break;
#endif
  }
}
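//
// Particle wire format: the sender above ships its ShiftedParticlePassData
// array as a flat block of MPI_Scalar values, using
//
//   count = (sizeof(ShiftedParticlePassData)/sizeof(Scalar)) * numShftPtcls
//
// and the receiver below recovers the particle count after MPI_Probe /
// MPI_Get_count by inverting the same ratio:
//
//   numPtclsIn = numScalarIn * sizeof(Scalar) / sizeof(ShiftedParticlePassData)
//
// This round-trips cleanly only if ShiftedParticlePassData consists of
// Scalar-sized fields with no padding (the copy loop above fills speciesID,
// x, u, np2c and the vary_np2c flag).  The struct definition itself is not
// shown in this file.
//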
// Wait for the particles from the boundary.
// Return a reference to the list with the received particles.
ParticleList& SPBoundLink::recvShiftedParticles(Species** speciesArray)
{
  // Empty out old particles
  // cout << "MPI_RANK = " << MPI_RANK <<
  //   ": deleting old particles in SPBoundLink::recvShiftedParticles." << endl;
  shftPtclList.deleteAll();
  // cout << "MPI_RANK = " << MPI_RANK <<
  //   ": old particles deleted in SPBoundLink::recvShiftedParticles." << endl;

  // Get new particles from the boundary
  switch(LinkType) {
  case SAME_PROCESS:
    cout << "recvShiftedParticles not implemented for same process.\n";
    assert(0);
    break;
#ifdef MPI_VERSION
  case MPI_LINK:
    {
      // Wait for our send to complete
      // cout << "MPI_RANK = " << MPI_RANK << ": waiting for receive " <<
      //   "to complete in SPBoundLink::recvShiftedParticles." << endl;
      // cout << "MPI_RANK = " << MPI_RANK << ": shftPtclSendRequest = " <<
      //   shftPtclSendRequest << endl;
      if(shftPtclSendRequest != MPI_REQUEST_NULL)
        MPI_Wait(&shftPtclSendRequest, &shftPtclSendStatus);

      // Count how many particles are incoming
      // cout << "MPI_RANK = " << MPI_RANK << ": probing " <<
      //   "in SPBoundLink::recvShiftedParticles." << endl;
      MPI_Probe(DestID, SHIFT_PTCL_TAG + tag_modifier, XOOPIC_COMM,
                &shftPtclRecvStatus);
      int numScalarIn;
      // cout << "MPI_RANK = " << MPI_RANK << ": getting count " <<
      //   "in SPBoundLink::recvShiftedParticles." << endl;
      MPI_Get_count(&shftPtclRecvStatus, MPI_Scalar, &numScalarIn);
      int numPtclsIn = numScalarIn * sizeof(Scalar) / sizeof(ShiftedParticlePassData);
      // cout << "MPI_RANK = " << MPI_RANK << ": receiving " << numPtclsIn <<
      //   " particles in SPBoundLink::recvShiftedParticles." << endl;

      // The local buffer must be at least as large as the number of incoming particles
      if(shftPtclsSize < numPtclsIn){
        delete [] shftPtcls;
        shftPtcls = new ShiftedParticlePassData[numPtclsIn];
        shftPtclsSize = numPtclsIn;
      }

      // Receive the incoming particles
      MPI_Recv(shftPtcls, numScalarIn, MPI_Scalar, DestID,
               SHIFT_PTCL_TAG + tag_modifier, XOOPIC_COMM, &shftPtclRecvStatus);

      // Add the particles to the list
      BOOL _vary_np2c;
      for(int i=0; i<numPtclsIn; i++) {
        if ( shftPtcls[i].vary_np2c > 0. ) _vary_np2c = TRUE;
        else _vary_np2c = FALSE;
        Particle* pp = new Particle(shftPtcls[i].x, shftPtcls[i].u,
                                    speciesArray[(int)(shftPtcls[i].speciesID + 0.1)],
                                    shftPtcls[i].np2c, _vary_np2c);
        shftPtclList.add(pp);
      }
    }
    break;
#endif // MPI_VERSION
  }
  return shftPtclList;
}

void SPBoundLink::sendShiftedFields(Vector3 *_lEsend, Vector3 *_lBsend, Vector3 *_lJsend)
{
  switch(LinkType) {
  case SAME_PROCESS:
    {
      // Copy the data to be sent over to the other boundary
      for(int i=0; i<lenIn; i++) {
        lEdest[i] = _lEsend[i];
        lBdest[i] = _lBsend[i];
        lJdest[i] = _lJsend[i];
        // I think the following are not needed
#ifdef MPI_VERSION
        // fieldsBufout[i] = _lEsend[i];
        // fieldsBufout[i+lenIn+2] = _lBsend[i];
        // fieldsBufout[i+2*lenIn+4] = _lJsend[i];
#endif // MPI_VERSION
      }
    }
    break;
#ifdef MPI_VERSION
  case MPI_LINK:
    {
      for(int i=0; i<lenIn; i++) {
        shftFldsBufout[i] = _lEsend[i];
        shftFldsBufout[i + lenIn + 2] = _lBsend[i];
        shftFldsBufout[i + 2*lenIn + 4] = _lJsend[i];
      }
      // send one message instead of 3
      MPI_Isend(shftFldsBufout, 9*lenIn + 20, MPI_Scalar, DestID,
                SHIFT_FLD_TAG + tag_modifier, XOOPIC_COMM, &shftFldSendRequest);
      shiftedFieldsSent = true;
    }
    break;
#endif
  }
}

void SPBoundLink::askShiftedFields()
{
  switch(LinkType) {
  case SAME_PROCESS:
    {
    }
    break; // nothing needs doing.
#ifdef MPI_VERSION
  case MPI_LINK:
    {
      MPI_Irecv(shftFldsBufin, 9*lenIn + 20, MPI_Scalar, DestID,
                SHIFT_FLD_TAG + tag_modifier, XOOPIC_COMM, &shftFldRecvRequest);
      extern int MPI_RANK;
      // cout << "Request to receive fields made by MPI_RANK = " <<
      //   MPI_RANK << endl;
    }
    break;
#endif
  }
}

void SPBoundLink::waitShiftedFields()
{
  switch(LinkType) {
  case SAME_PROCESS:
    {
      // nothing needs doing.
    }
    break;
#ifdef MPI_VERSION
  case MPI_LINK:
    {
      // make sure all the sends finish
      if(shiftedFieldsSent) {
        MPI_Wait(&shftFldSendRequest, &shftFldSendStatus);
        shiftedFieldsSent = false;
      }
      // make sure all the receives finish
      MPI_Wait(&shftFldRecvRequest, &shftFldRecvStatus);
      // copy the fields back out of the receive buffer.
      for(int i=0; i<lenIn; i++) {
        lE[i] = shftFldsBufin[i];
        lB[i] = shftFldsBufin[i + lenIn + 2];
        lJ[i] = shftFldsBufin[i + 2*lenIn + 4];
      }
      /*
      int kout = 5;
      cout << "In SPBoundLink::waitShiftedFields:\n";
      cout << "E1 =";
      for(int j1=0; j1<=lenIn; j1++) cerr << " " << shftFldsBufin[j1].e1();
      cout << endl;
      cout << "E2 =";
      for(int j1=0; j1<=lenIn; j1++) cerr << " " << shftFldsBufin[j1].e2();
      cout << endl;
      cout << "I1 =";
      for(int j1=0; j1<=lenIn; j1++) cout << " " << shftFldsBufin[j1+2*lenIn+4].e1();
      cout << endl;
      */
    }
    break;
#endif
  }
}
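//
// Both the shifted-field exchange above and the shifted-NGD exchange below
// follow the same three-step non-blocking pattern over an MPI link:
//
//   send*()  packs the data into one outgoing buffer and posts an MPI_Isend
//            (E, B and J are interleaved into shftFldsBufout at strides of
//            lenIn+2, so one message replaces three);
//   ask*()   posts the matching MPI_Irecv into the incoming buffer;
//   wait*()  completes any pending send, waits on the receive, and unpacks
//            the buffer into the local arrays (lE/lB/lJ or lNGD).
//
// For SAME_PROCESS links, sendShiftedFields copies directly into the
// destination arrays, sendShiftedNGDs throws (the case was never reachable),
// and the ask*/wait* steps have nothing to do.
//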
" << endl; std::string msg; ss >> msg; Oops oops(msg); throw oops; // exit() "SpatialRegionBoundary::sendNGDStripe: } break; #ifdef MPI_VERSION case MPI_LINK: { // // Check that the number of elements requested to send is not more // than the number of elements of the shftNGDsBufout // if ( numElements != numElemNGDsBuf ) { stringstream ss (stringstream::in | stringstream::out); ss <<"SPBoundLink::sendShiftedNGDs: Error: \n"<< "The number of elements = " << numElements << " requested is " << endl << "different from the number of elements numElemNGDsBuf = " << numElemNGDsBuf << " in the MPI send buffer." << endl; std::string msg; ss >> msg; Oops oops(msg); throw oops; // exit() "SpatialRegionBoundary::sendNGDStripe: } for(int i = 0; i < numElements; i++) { shftNGDsBufout[i] = NGDSendBuffer[i]; } MPI_Isend(shftNGDsBufout, numElemNGDsBuf, MPI_Scalar, DestID, SHIFT_NGD_TAG + tag_modifier, XOOPIC_COMM, &shftNGDSendRequest); shiftedNGDsSent = true; } break;#endif }}// // ask the other boundary to send the needed NGD data//void SPBoundLink::askShiftedNGDs() { switch(LinkType) { case SAME_PROCESS: { } break; // nothing needs doing. #ifdef MPI_VERSION case MPI_LINK: { MPI_Irecv(shftNGDsBufin, numElemNGDsBuf, MPI_Scalar, DestID, SHIFT_NGD_TAG + tag_modifier, XOOPIC_COMM, &shftNGDRecvRequest); extern int MPI_RANK; // cout << "Request to receive NGDs made by MPI_RANK = " << // MPI_RANK << endl; } break;#endif }}//// wait for the needed NGD data //void SPBoundLink::waitShiftedNGDs() { switch(LinkType) { case SAME_PROCESS: { // nothing needs doing. } break;#ifdef MPI_VERSION case MPI_LINK: { // make sure all the sends finish if( shiftedNGDsSent ) { MPI_Wait(&shftNGDSendRequest, &shftNGDSendStatus); shiftedNGDsSent = false; } // make sure all the receives finish MPI_Wait(&shftNGDRecvRequest, &shftNGDRecvStatus); // // copy the NGDs back out of the receive buffer. // for( int i = 0; i < numElemNGDsBuf; i++) { lNGD[i] = shftNGDsBufin[i]; } } break;#endif }}