📄 ch3u_handle_recv_req.c
                MPIU_Free(lock_queue_entry);

                /* Release lock and grant next lock if there is one. */
                mpi_errno = MPIDI_CH3I_Release_lock(win_ptr);
            }
            else {
                /* could not acquire lock. mark data recd as 1 */
                lock_queue_entry->pt_single_op->data_recd = 1;
            }

            /* mark data transfer as complete and decrement CC */
            MPIDI_CH3U_Request_complete(rreq);
            *complete = TRUE;
        }
        /* --BEGIN ERROR HANDLING-- */
        else {
            /* We shouldn't reach this code because the only other
               request types are sends */
            MPIU_Assert(MPIDI_Request_get_type(rreq) ==
                        MPIDI_REQUEST_TYPE_RECV);
            MPIDI_CH3U_Request_complete(rreq);
            *complete = TRUE;
        }
        /* --END ERROR HANDLING-- */
        MPIDI_FUNC_EXIT(MPID_STATE_CH3_CA_COMPLETE);
        break;
    }

    case MPIDI_CH3_CA_UNPACK_UEBUF_AND_COMPLETE:
    {
        int recv_pending;

        MPIDI_Request_recv_pending(rreq, &recv_pending);
        if (!recv_pending) {
            if (rreq->dev.recv_data_sz > 0) {
                MPIDI_CH3U_Request_unpack_uebuf(rreq);
                MPIU_Free(rreq->dev.tmpbuf);
            }
        }
        else {
            /* The receive has not been posted yet.  MPID_{Recv/Irecv}()
               is responsible for unpacking the buffer. */
        }

        /* mark data transfer as complete and decrement CC */
        MPIDI_CH3U_Request_complete(rreq);
        *complete = TRUE;
        break;
    }
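    /* [Editor's note, not in the original source] The "uebuf" above is
       the temporary buffer that held an unexpected eager message, while
       the "srbuf" in the next case is the segment-receive staging buffer
       used when incoming data cannot be scattered directly into the user
       buffer (for example, with a non-contiguous datatype).  Both paths
       end the same way: MPIDI_CH3U_Request_complete() decrements the
       request's completion counter (the "CC" in the comments) so the
       progress engine can observe that the transfer is done. */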
    case MPIDI_CH3_CA_UNPACK_SRBUF_AND_COMPLETE:
    {
        MPIDI_CH3U_Request_unpack_srbuf(rreq);

        if ((MPIDI_Request_get_type(rreq) == MPIDI_REQUEST_TYPE_PUT_RESP) ||
            (MPIDI_Request_get_type(rreq) == MPIDI_REQUEST_TYPE_ACCUM_RESP))
        {
            if (MPIDI_Request_get_type(rreq) == MPIDI_REQUEST_TYPE_ACCUM_RESP) {
                /* accumulate data from tmp_buf into user_buf */
                mpi_errno = do_accumulate_op(rreq);
                if (mpi_errno) {
                    MPIU_ERR_POP(mpi_errno);
                }
            }

            MPID_Win_get_ptr(rreq->dev.target_win_handle, win_ptr);

            /* if passive target RMA, increment counter */
            if (win_ptr->current_lock_type != MPID_LOCK_NONE)
                win_ptr->my_pt_rma_puts_accs++;

            if (rreq->dev.source_win_handle != MPI_WIN_NULL) {
                /* Last RMA operation from source.  If active target RMA,
                   decrement window counter.  If passive target RMA,
                   release lock on window and grant next lock in the lock
                   queue if there is any.  If it's a shared lock or a
                   lock-put-unlock type of optimization, we also need to
                   send an ack to the source. */
                if (win_ptr->current_lock_type == MPID_LOCK_NONE) {
                    /* FIXME: MT: this has to be done atomically */
                    win_ptr->my_counter -= 1;
                }
                else {
                    if ((win_ptr->current_lock_type == MPI_LOCK_SHARED) ||
                        (rreq->dev.single_op_opt == 1)) {
                        mpi_errno = MPIDI_CH3I_Send_pt_rma_done_pkt(vc,
                                        rreq->dev.source_win_handle);
                        if (mpi_errno) {
                            MPIU_ERR_POP(mpi_errno);
                        }
                    }
                    mpi_errno = MPIDI_CH3I_Release_lock(win_ptr);
                }
            }
        }

        /* mark data transfer as complete and decrement CC */
        MPIDI_CH3U_Request_complete(rreq);
        *complete = TRUE;
        break;
    }

    case MPIDI_CH3_CA_UNPACK_SRBUF_AND_RELOAD_IOV:
    {
        MPIDI_CH3U_Request_unpack_srbuf(rreq);
        mpi_errno = MPIDI_CH3U_Request_load_recv_iov(rreq);
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno != MPI_SUCCESS) {
            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL,
                            FCNAME, __LINE__, MPI_ERR_OTHER,
                            "**ch3|loadrecviov", "**ch3|loadrecviov %s",
                            "MPIDI_CH3_CA_UNPACK_SRBUF_AND_RELOAD_IOV");
            goto fn_fail;
        }
        /* --END ERROR HANDLING-- */
        *complete = FALSE;
        break;
    }

    case MPIDI_CH3_CA_RELOAD_IOV:
    {
        mpi_errno = MPIDI_CH3U_Request_load_recv_iov(rreq);
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno != MPI_SUCCESS) {
            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL,
                            FCNAME, __LINE__, MPI_ERR_OTHER,
                            "**ch3|loadrecviov", "**ch3|loadrecviov %s",
                            "MPIDI_CH3_CA_RELOAD_IOV");
            goto fn_fail;
        }
        /* --END ERROR HANDLING-- */
        *complete = FALSE;
        break;
    }

    /* --BEGIN ERROR HANDLING-- */
    default:
    {
        *complete = TRUE;
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL,
                        FCNAME, __LINE__, MPI_ERR_INTERN,
                        "**ch3|badca", "**ch3|badca %d", rreq->dev.ca);
        break;
    }
    /* --END ERROR HANDLING-- */
    }

 fn_exit:
    in_routine = FALSE;
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_HANDLE_RECV_REQ);
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}

#undef FUNCNAME
#define FUNCNAME create_derived_datatype
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
static int create_derived_datatype(MPID_Request *req, MPID_Datatype **dtp)
{
    MPIDI_RMA_dtype_info *dtype_info;
    void *dataloop;
    MPID_Datatype *new_dtp;
    int mpi_errno = MPI_SUCCESS;
    MPI_Aint ptrdiff;

    dtype_info = req->dev.dtype_info;
    dataloop = req->dev.dataloop;

    /* allocate new datatype object and handle */
    new_dtp = (MPID_Datatype *) MPIU_Handle_obj_alloc(&MPID_Datatype_mem);
    /* --BEGIN ERROR HANDLING-- */
    if (!new_dtp) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                        FCNAME, __LINE__, MPI_ERR_OTHER, "**nomem", 0);
        return mpi_errno;
    }
    /* --END ERROR HANDLING-- */

    *dtp = new_dtp;

    /* Note: handle is filled in by MPIU_Handle_obj_alloc() */
    MPIU_Object_set_ref(new_dtp, 1);
    new_dtp->is_permanent = 0;
    new_dtp->is_committed = 1;
    new_dtp->attributes = 0;
    new_dtp->cache_id = 0;
    new_dtp->name[0] = 0;
    new_dtp->is_contig = dtype_info->is_contig;
    new_dtp->n_contig_blocks = dtype_info->n_contig_blocks;
    new_dtp->size = dtype_info->size;
    new_dtp->extent = dtype_info->extent;
    new_dtp->dataloop_size = dtype_info->dataloop_size;
    new_dtp->dataloop_depth = dtype_info->dataloop_depth;
    new_dtp->eltype = dtype_info->eltype;
    /* set dataloop pointer */
    new_dtp->dataloop = req->dev.dataloop;
    new_dtp->ub = dtype_info->ub;
    new_dtp->lb = dtype_info->lb;
    new_dtp->true_ub = dtype_info->true_ub;
    new_dtp->true_lb = dtype_info->true_lb;
    new_dtp->has_sticky_ub = dtype_info->has_sticky_ub;
    new_dtp->has_sticky_lb = dtype_info->has_sticky_lb;

    /* update pointers in dataloop */
    ptrdiff = (MPI_Aint)((char *) (new_dtp->dataloop) -
                         (char *) (dtype_info->dataloop));
    MPID_Dataloop_update(new_dtp->dataloop, ptrdiff);

    new_dtp->contents = NULL;

    return mpi_errno;
}
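/* [Editor's note, not in the original source] create_derived_datatype()
   above rebuilds an MPI derived datatype on the target from the
   dtype_info/dataloop blobs that the origin shipped inside the RMA
   packet.  The subtle step is the pointer relocation: the dataloop was
   byte-copied to a new address, so the pointers stored inside it still
   refer to the origin-side copy.  The pair

       ptrdiff = (MPI_Aint)((char *) new_dtp->dataloop -
                            (char *) dtype_info->dataloop);
       MPID_Dataloop_update(new_dtp->dataloop, ptrdiff);

   re-bases every internal pointer against the new allocation.
   do_accumulate_op() below then uses the rebuilt datatype to apply the
   reduction operation to the target window. */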
#undef FUNCNAME
#define FUNCNAME do_accumulate_op
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
static int do_accumulate_op(MPID_Request *rreq)
{
    int mpi_errno = MPI_SUCCESS;
    MPI_Aint true_lb, true_extent;
    MPI_User_function *uop;

    if (rreq->dev.op == MPI_REPLACE) {
        /* simply copy the data */
        mpi_errno = MPIR_Localcopy(rreq->dev.user_buf, rreq->dev.user_count,
                                   rreq->dev.datatype,
                                   rreq->dev.real_user_buf,
                                   rreq->dev.user_count, rreq->dev.datatype);
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno) {
            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                            FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */
        goto fn_exit;
    }

    if (HANDLE_GET_KIND(rreq->dev.op) == HANDLE_KIND_BUILTIN) {
        /* get the function by indexing into the op table */
        uop = MPIR_Op_table[(rreq->dev.op)%16 - 1];
    }
    else {
        /* --BEGIN ERROR HANDLING-- */
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                        FCNAME, __LINE__, MPI_ERR_OP, "**opnotpredefined",
                        "**opnotpredefined %d", rreq->dev.op);
        return mpi_errno;
        /* --END ERROR HANDLING-- */
    }

    if (HANDLE_GET_KIND(rreq->dev.datatype) == HANDLE_KIND_BUILTIN) {
        (*uop)(rreq->dev.user_buf, rreq->dev.real_user_buf,
               &(rreq->dev.user_count), &(rreq->dev.datatype));
    }
    else {
        /* derived datatype */
        MPID_Segment *segp;
        DLOOP_VECTOR *dloop_vec;
        MPI_Aint first, last;
        int vec_len, i, type_size, count;
        MPI_Datatype type;
        MPID_Datatype *dtp;

        segp = MPID_Segment_alloc();
        /* --BEGIN ERROR HANDLING-- */
        if (!segp) {
            mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
                            MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
                            MPI_ERR_OTHER, "**nomem", 0);
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */
        MPID_Segment_init(NULL, rreq->dev.user_count, rreq->dev.datatype,
                          segp, 0);
        first = 0;
        last = SEGMENT_IGNORE_LAST;

        MPID_Datatype_get_ptr(rreq->dev.datatype, dtp);
        vec_len = dtp->n_contig_blocks * rreq->dev.user_count + 1;
        /* +1 needed because Rob says so */
        dloop_vec = (DLOOP_VECTOR *)
            MPIU_Malloc(vec_len * sizeof(DLOOP_VECTOR));
        /* --BEGIN ERROR HANDLING-- */
        if (!dloop_vec) {
            mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
                            MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
                            MPI_ERR_OTHER, "**nomem", 0);
            return mpi_errno;
        }
        /* --END ERROR HANDLING-- */

        MPID_Segment_pack_vector(segp, first, &last, dloop_vec, &vec_len);

        type = dtp->eltype;
        type_size = MPID_Datatype_get_basic_size(type);
        for (i = 0; i < vec_len; i++) {
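        /* [Editor's note] The listing is truncated here in the original
           page.  For orientation only: because MPID_Segment_init() was
           given a NULL base, each dloop_vec entry describes a contiguous
           block as a byte offset plus a length, and the loop applies the
           reduction function block by block.  A sketch of the likely
           body (treat the exact casts and field macros as assumptions,
           not verbatim source):

               count = dloop_vec[i].DLOOP_VECTOR_LEN / type_size;
               (*uop)((char *) rreq->dev.user_buf +
                          (MPI_Aint) dloop_vec[i].DLOOP_VECTOR_BUF,
                      (char *) rreq->dev.real_user_buf +
                          (MPI_Aint) dloop_vec[i].DLOOP_VECTOR_BUF,
                      &count, &type);

           followed by MPID_Segment_free(segp) and MPIU_Free(dloop_vec)
           once the loop finishes. */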