
ch3u_handle_recv_req.c

Project: fortran parallel computing package
Language: C
Page 1 of 3
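The listing below is the first of three pages of ch3u_handle_recv_req.c, a file from the CH3 device layer of MPICH (the copyright header points to Argonne National Laboratory). MPIDI_CH3U_Handle_recv_req dispatches a completed receive to the request's OnDataAvail handler; the remaining functions are those handlers, covering plain receives and the target-side processing of one-sided (RMA) put, accumulate, and get operations.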
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
 *  (C) 2001 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

#include "mpidimpl.h"
#include "mpidrma.h"

static int create_derived_datatype(MPID_Request * rreq, MPID_Datatype ** dtp);
static int do_accumulate_op(MPID_Request * rreq);
static int do_simple_accumulate(MPIDI_PT_single_op *single_op);
static int do_simple_get(MPID_Win *win_ptr, MPIDI_Win_lock_queue *lock_queue);

#undef FUNCNAME
#define FUNCNAME MPIDI_CH3U_Handle_recv_req
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPIDI_CH3U_Handle_recv_req(MPIDI_VC_t * vc, MPID_Request * rreq,
                               int * complete)
{
    static int in_routine = FALSE;
    int mpi_errno = MPI_SUCCESS;
    int (*reqFn)(MPIDI_VC_t *, MPID_Request *, int *);
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_HANDLE_RECV_REQ);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_HANDLE_RECV_REQ);

    MPIU_Assert(in_routine == FALSE);
    in_routine = TRUE;

    reqFn = rreq->dev.OnDataAvail;
    if (!reqFn) {
        MPIU_Assert(MPIDI_Request_get_type(rreq) == MPIDI_REQUEST_TYPE_RECV);
        MPIDI_CH3U_Request_complete(rreq);
        *complete = TRUE;
    }
    else {
        mpi_errno = reqFn( vc, rreq, complete );
    }

    in_routine = FALSE;
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_HANDLE_RECV_REQ);
    return mpi_errno;
}

/* ----------------------------------------------------------------------- */
/* Here are the functions that implement the actions that are taken when
 * data is available for a receive request (or other completion operations)
 * These include "receive" requests that are part of the RMA implementation.
 *
 * The convention for the names of routines that are called when data is
 * available is
 *    MPIDI_CH3_ReqHandler_<type>( MPIDI_VC_t *, MPID_Request *, int * )
 * as in
 *    MPIDI_CH3_ReqHandler_...
 *
 * ToDo:
 *    We need a way for each of these functions to describe what they are,
 *    so that given a pointer to one of these functions, we can retrieve
 *    a description of the routine.  We may want to use a static string
 *    and require the user to maintain thread-safety, at least while
 *    accessing the string.
 */
/* ----------------------------------------------------------------------- */
int MPIDI_CH3_ReqHandler_RecvComplete( MPIDI_VC_t *vc, MPID_Request *rreq,
                                       int *complete )
{
    /* mark data transfer as complete and decrement CC */
    MPIDI_CH3U_Request_complete(rreq);
    *complete = TRUE;
    return MPI_SUCCESS;
}

#undef FUNCNAME
#define FUNCNAME MPIDI_CH3_ReqHandler_PutAccumRespComplete
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPIDI_CH3_ReqHandler_PutAccumRespComplete( MPIDI_VC_t *vc,
                                               MPID_Request *rreq,
                                               int *complete )
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Win *win_ptr;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_REQHANDLER_PUTACCUMRESPCOMPLETE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_REQHANDLER_PUTACCUMRESPCOMPLETE);

    if (MPIDI_Request_get_type(rreq) == MPIDI_REQUEST_TYPE_ACCUM_RESP) {
        /* accumulate data from tmp_buf into user_buf */
        mpi_errno = do_accumulate_op(rreq);
        if (mpi_errno) {
            MPIU_ERR_POP(mpi_errno);
        }
    }

    MPID_Win_get_ptr(rreq->dev.target_win_handle, win_ptr);

    /* if passive target RMA, increment counter */
    if (win_ptr->current_lock_type != MPID_LOCK_NONE)
        win_ptr->my_pt_rma_puts_accs++;

    if (rreq->dev.source_win_handle != MPI_WIN_NULL) {
        /* Last RMA operation from source. If active
           target RMA, decrement window counter. If
           passive target RMA, release lock on window and
           grant next lock in the lock queue if there is
           any. If it's a shared lock or a lock-put-unlock
           type of optimization, we also need to send an
           ack to the source. */

        if (win_ptr->current_lock_type == MPID_LOCK_NONE) {
            /* FIXME: MT: this has to be done atomically */
            win_ptr->my_counter -= 1;
        }
        else {
            if ((win_ptr->current_lock_type == MPI_LOCK_SHARED) ||
                (rreq->dev.single_op_opt == 1)) {
                mpi_errno = MPIDI_CH3I_Send_pt_rma_done_pkt(vc,
                                    rreq->dev.source_win_handle);
                if (mpi_errno) {
                    MPIU_ERR_POP(mpi_errno);
                }
            }
            mpi_errno = MPIDI_CH3I_Release_lock(win_ptr);
        }
    }

    /* mark data transfer as complete and decrement CC */
    MPIDI_CH3U_Request_complete(rreq);
    *complete = TRUE;
 fn_fail:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_REQHANDLER_PUTACCUMRESPCOMPLETE);
    return MPI_SUCCESS;
}

#undef FUNCNAME
#define FUNCNAME MPIDI_CH3_ReqHandler_PutRespDerivedDTComplete
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPIDI_CH3_ReqHandler_PutRespDerivedDTComplete( MPIDI_VC_t *vc,
                                                   MPID_Request *rreq,
                                                   int *complete )
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Datatype *new_dtp;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_REQHANDLER_PUTRESPDERIVEDDTCOMPLETE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_REQHANDLER_PUTRESPDERIVEDDTCOMPLETE);

    /* create derived datatype */
    create_derived_datatype(rreq, &new_dtp);

    /* update request to get the data */
    MPIDI_Request_set_type(rreq, MPIDI_REQUEST_TYPE_PUT_RESP);
    rreq->dev.datatype = new_dtp->handle;
    rreq->dev.recv_data_sz = new_dtp->size * rreq->dev.user_count;

    rreq->dev.datatype_ptr = new_dtp;
    /* this will cause the datatype to be freed when the
       request is freed. free dtype_info here. */
    MPIU_Free(rreq->dev.dtype_info);

    rreq->dev.segment_ptr = MPID_Segment_alloc( );
    /* if (!rreq->dev.segment_ptr) { MPIU_ERR_POP(); } */
    MPID_Segment_init(rreq->dev.user_buf,
                      rreq->dev.user_count,
                      rreq->dev.datatype,
                      rreq->dev.segment_ptr, 0);
    rreq->dev.segment_first = 0;
    rreq->dev.segment_size = rreq->dev.recv_data_sz;

    mpi_errno = MPIDI_CH3U_Request_load_recv_iov(rreq);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,
                            "**ch3|loadrecviov");
    }
    if (!rreq->dev.OnDataAvail)
        rreq->dev.OnDataAvail = MPIDI_CH3_ReqHandler_PutAccumRespComplete;

    *complete = FALSE;
 fn_fail:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_REQHANDLER_PUTRESPDERIVEDDTCOMPLETE);
    return mpi_errno;
}

#undef FUNCNAME
#define FUNCNAME MPIDI_CH3_ReqHandler_AccumRespDerivedDTComplete
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPIDI_CH3_ReqHandler_AccumRespDerivedDTComplete( MPIDI_VC_t *vc,
                                                     MPID_Request *rreq,
                                                     int *complete )
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Datatype *new_dtp;
    MPI_Aint true_lb, true_extent, extent;
    void *tmp_buf;
    MPIU_THREADPRIV_DECL;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_REQHANDLER_ACCUMRESPDERIVEDDTCOMPLETE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_REQHANDLER_ACCUMRESPDERIVEDDTCOMPLETE);

    MPIU_THREADPRIV_GET;

    /* create derived datatype */
    create_derived_datatype(rreq, &new_dtp);

    /* update new request to get the data */
    MPIDI_Request_set_type(rreq, MPIDI_REQUEST_TYPE_ACCUM_RESP);

    /* first need to allocate tmp_buf to recv the data into */

    MPIR_Nest_incr();
    mpi_errno = NMPI_Type_get_true_extent(new_dtp->handle,
                                          &true_lb, &true_extent);
    MPIR_Nest_decr();
    if (mpi_errno) {
        MPIU_ERR_POP(mpi_errno);
    }

    MPID_Datatype_get_extent_macro(new_dtp->handle, extent);

    tmp_buf = MPIU_Malloc(rreq->dev.user_count *
                          (MPIR_MAX(extent,true_extent)));
    if (!tmp_buf) {
        MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem");
    }

    /* adjust for potential negative lower bound in datatype */
    tmp_buf = (void *)((char*)tmp_buf - true_lb);

    rreq->dev.user_buf = tmp_buf;
    rreq->dev.datatype = new_dtp->handle;
    rreq->dev.recv_data_sz = new_dtp->size * rreq->dev.user_count;
    rreq->dev.datatype_ptr = new_dtp;
    /* this will cause the datatype to be freed when the
       request is freed. free dtype_info here. */
    MPIU_Free(rreq->dev.dtype_info);

    rreq->dev.segment_ptr = MPID_Segment_alloc( );
    /* if (!rreq->dev.segment_ptr) { MPIU_ERR_POP(); } */
    MPID_Segment_init(rreq->dev.user_buf,
                      rreq->dev.user_count,
                      rreq->dev.datatype,
                      rreq->dev.segment_ptr, 0);
    rreq->dev.segment_first = 0;
    rreq->dev.segment_size = rreq->dev.recv_data_sz;

    mpi_errno = MPIDI_CH3U_Request_load_recv_iov(rreq);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,
                            "**ch3|loadrecviov");
    }
    if (!rreq->dev.OnDataAvail)
        rreq->dev.OnDataAvail = MPIDI_CH3_ReqHandler_PutAccumRespComplete;

    *complete = FALSE;
 fn_fail:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_REQHANDLER_ACCUMRESPDERIVEDDTCOMPLETE);
    return mpi_errno;
}

#undef FUNCNAME
#define FUNCNAME MPIDI_CH3_ReqHandler_GetRespDerivedDTComplete
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPIDI_CH3_ReqHandler_GetRespDerivedDTComplete( MPIDI_VC_t *vc,
                                                   MPID_Request *rreq,
                                                   int *complete )
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Datatype *new_dtp;
    MPIDI_CH3_Pkt_t upkt;
    MPIDI_CH3_Pkt_get_resp_t * get_resp_pkt = &upkt.get_resp;
    MPID_IOV iov[MPID_IOV_LIMIT];
    MPID_Request * sreq;
    int iov_n;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_REQHANDLER_GETRESPDERIVEDDTCOMPLETE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_REQHANDLER_GETRESPDERIVEDDTCOMPLETE);

    /* create derived datatype */
    create_derived_datatype(rreq, &new_dtp);
    MPIU_Free(rreq->dev.dtype_info);

    /* create request for sending data */
    sreq = MPID_Request_create();
    if (sreq == NULL) {
        MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem");
    }
    sreq->kind = MPID_REQUEST_SEND;
    MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_GET_RESP);
    sreq->dev.OnDataAvail = MPIDI_CH3_ReqHandler_GetSendRespComplete;
    sreq->dev.OnFinal     = MPIDI_CH3_ReqHandler_GetSendRespComplete;
    sreq->dev.user_buf = rreq->dev.user_buf;
    sreq->dev.user_count = rreq->dev.user_count;
    sreq->dev.datatype = new_dtp->handle;
    sreq->dev.datatype_ptr = new_dtp;
    sreq->dev.target_win_handle = rreq->dev.target_win_handle;
    sreq->dev.source_win_handle = rreq->dev.source_win_handle;

    MPIDI_Pkt_init(get_resp_pkt, MPIDI_CH3_PKT_GET_RESP);
    get_resp_pkt->request_handle = rreq->dev.request_handle;

    iov[0].MPID_IOV_BUF = (MPID_IOV_BUF_CAST) get_resp_pkt;
    iov[0].MPID_IOV_LEN = sizeof(*get_resp_pkt);

    sreq->dev.segment_ptr = MPID_Segment_alloc( );
    /* if (!sreq->dev.segment_ptr) { MPIU_ERR_POP(); } */
    MPID_Segment_init(sreq->dev.user_buf,
                      sreq->dev.user_count,
                      sreq->dev.datatype,
                      sreq->dev.segment_ptr, 0);
    sreq->dev.segment_first = 0;
    sreq->dev.segment_size = new_dtp->size * sreq->dev.user_count;

    iov_n = MPID_IOV_LIMIT - 1;
    /* Note that the OnFinal handler was set above */
    mpi_errno = MPIDI_CH3U_Request_load_send_iov(sreq, &iov[1], &iov_n);
    if (mpi_errno == MPI_SUCCESS)
    {
        iov_n += 1;

        mpi_errno = MPIU_CALL(MPIDI_CH3,iSendv(vc, sreq, iov, iov_n));
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno != MPI_SUCCESS)
        {
            MPIU_Object_set_ref(sreq, 0);
            MPIDI_CH3_Request_destroy(sreq);
            sreq = NULL;
            MPIU_ERR_SETFATALANDJUMP(mpi_errno,MPI_ERR_OTHER,"**ch3|rmamsg");
        }
        /* --END ERROR HANDLING-- */
    }

    /* mark receive data transfer as complete and decrement CC in receive
       request */
    MPIDI_CH3U_Request_complete(rreq);
    *complete = TRUE;

 fn_fail:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_REQHANDLER_GETRESPDERIVEDDTCOMPLETE);
    return mpi_errno;
}

#undef FUNCNAME
#define FUNCNAME MPIDI_CH3_ReqHandler_SinglePutAccumComplete
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPIDI_CH3_ReqHandler_SinglePutAccumComplete( MPIDI_VC_t *vc,
                                                 MPID_Request *rreq,
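The comment block in the middle of the listing fixes the signature for data-available handlers: MPIDI_CH3_ReqHandler_<type>( MPIDI_VC_t *, MPID_Request *, int * ). As an illustration only, a hypothetical handler following that convention would look like the sketch below; the name ExampleComplete is ours, not part of the source, the body mirrors MPIDI_CH3_ReqHandler_RecvComplete, and it compiles only inside the MPICH source tree.

#include "mpidimpl.h"

/* Hypothetical handler, for illustration: invoked from
 * MPIDI_CH3U_Handle_recv_req via rreq->dev.OnDataAvail once the data
 * for rreq has been received. */
int MPIDI_CH3_ReqHandler_ExampleComplete( MPIDI_VC_t *vc, MPID_Request *rreq,
                                          int *complete )
{
    /* type-specific processing of rreq->dev fields would go here */

    MPIDI_CH3U_Request_complete(rreq);  /* decrement the completion counter */
    *complete = TRUE;                   /* tell the caller this request is done */
    return MPI_SUCCESS;
}

A request opts in by setting rreq->dev.OnDataAvail to such a function; a NULL OnDataAvail is treated as an ordinary receive completion, as the dispatch at the top of the file shows.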
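MPIDI_CH3_ReqHandler_AccumRespDerivedDTComplete sizes its temporary buffer by the larger of the datatype's extent and true extent, then shifts the pointer down by true_lb so that a datatype with a negative lower bound still indexes into the allocation. A minimal sketch of the same pattern using the public MPI API (the listing uses the internal NMPI_Type_get_true_extent and MPID_Datatype_get_extent_macro; alloc_dtype_buf is our name):

#include <mpi.h>
#include <stdlib.h>

/* Allocate room for 'count' elements of 'dtype'.  Returns the adjusted
 * pointer to pass to communication calls; *base_out receives the raw
 * allocation, which is what the caller must eventually free(). */
void *alloc_dtype_buf(MPI_Datatype dtype, int count, void **base_out)
{
    MPI_Aint true_lb, true_extent, lb, extent;
    void *base;

    MPI_Type_get_true_extent(dtype, &true_lb, &true_extent);
    MPI_Type_get_extent(dtype, &lb, &extent);

    /* mirror the MPIR_MAX(extent, true_extent) sizing in the listing */
    base = malloc((size_t)count *
                  (size_t)(extent > true_extent ? extent : true_extent));
    if (base == NULL)
        return NULL;

    *base_out = base;
    /* adjust for a potential negative lower bound, as the listing does */
    return (char *)base - true_lb;
}

Unlike the listing, the sketch keeps the unadjusted base pointer: freeing the adjusted pointer would be undefined behavior, so presumably the RMA code elsewhere undoes the adjustment before freeing, but that code is not on this page.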
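Finally, for orientation: the PUT_RESP, ACCUM_RESP, and GET_RESP request types processed above arise on the target side of MPI one-sided operations. A minimal sketch of the corresponding user-level calls (standard MPI API; mapping each call to a handler is our reading of the request-type names, not something stated on this page):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, nprocs;
    int winbuf[3] = {0, 0, 7};    /* exposed on every process */
    int val = 42, got = 0;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    MPI_Win_create(winbuf, sizeof(winbuf), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    if (rank == 1 && nprocs > 1) {
        /* passive-target epoch on rank 0: exercises the lock logic in
           MPIDI_CH3_ReqHandler_PutAccumRespComplete */
        MPI_Win_lock(MPI_LOCK_SHARED, 0, 0, win);
        MPI_Put(&val, 1, MPI_INT, 0, 0, 1, MPI_INT, win);         /* PUT_RESP */
        MPI_Accumulate(&val, 1, MPI_INT, 0, 1, 1, MPI_INT,
                       MPI_SUM, win);                              /* ACCUM_RESP */
        MPI_Get(&got, 1, MPI_INT, 0, 2, 1, MPI_INT, win);          /* GET_RESP */
        MPI_Win_unlock(0, win);
        printf("got %d from rank 0\n", got);   /* prints 7 */
    }

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}

Derived origin or target datatypes on these calls appear to be what route the target through the *DerivedDTComplete handlers: the type description is shipped with the request and rebuilt there by create_derived_datatype before the data is unpacked.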
