
ch3u_handle_recv_req.c

From: fortran parallel computing package (language: C)
        for (i = 0; i < vec_len; i++) {
            count = (dloop_vec[i].DLOOP_VECTOR_LEN) / type_size;
            (*uop)((char *) rreq->dev.user_buf + MPIU_PtrToAint(dloop_vec[i].DLOOP_VECTOR_BUF),
                   (char *) rreq->dev.real_user_buf + MPIU_PtrToAint(dloop_vec[i].DLOOP_VECTOR_BUF),
                   &count, &type);
        }

        MPID_Segment_free(segp);
        MPIU_Free(dloop_vec);
    }

 fn_exit:
    /* free the temporary buffer */
    MPIR_Nest_incr();
    mpi_errno = NMPI_Type_get_true_extent(rreq->dev.datatype,
                                          &true_lb, &true_extent);
    MPIR_Nest_decr();
    /* --BEGIN ERROR HANDLING-- */
    if (mpi_errno)
    {
        mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                                         FCNAME, __LINE__, MPI_ERR_OTHER,
                                         "**fail", 0);
        MPIDI_FUNC_EXIT(MPID_STATE_DO_ACCUMULATE_OP);
        return mpi_errno;
    }
    /* --END ERROR HANDLING-- */

    MPIU_Free((char *) rreq->dev.user_buf + true_lb);

    MPIDI_FUNC_EXIT(MPID_STATE_DO_ACCUMULATE_OP);
    return mpi_errno;

 fn_fail:
    goto fn_exit;
}
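The MPIU_Free((char *) rreq->dev.user_buf + true_lb) call above undoes a pointer shift that was applied when the temporary accumulate buffer was allocated. A minimal sketch of that idiom, using the standard MPI_Type_get_true_extent call in place of MPICH's internal NMPI_ wrapper (alloc_shifted and free_shifted are illustrative names, not MPICH functions, and for simplicity they handle a single element):

#include <mpi.h>
#include <stdlib.h>

/* Allocate scratch space for one element of 'type' and return a pointer
   shifted by -true_lb, so that normal datatype addressing ("base +
   displacement") stays inside the allocation even when the type's true
   lower bound is nonzero. */
static void *alloc_shifted(MPI_Datatype type)
{
    MPI_Aint true_lb, true_extent;
    void *raw;

    MPI_Type_get_true_extent(type, &true_lb, &true_extent);
    raw = malloc(true_extent);              /* bytes the type really spans */
    return raw ? (char *) raw - true_lb : NULL;
}

/* Releasing the buffer must undo the shift first -- the same
   "(char *) user_buf + true_lb" expression used in fn_exit above. */
static void free_shifted(void *buf, MPI_Datatype type)
{
    MPI_Aint true_lb, true_extent;

    MPI_Type_get_true_extent(type, &true_lb, &true_extent);
    free((char *) buf + true_lb);
}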
static int entered_flag = 0;
static int entered_count = 0;

/* Release the current lock on the window and grant the next lock in the
   queue if any */

#undef FUNCNAME
#define FUNCNAME MPIDI_CH3I_Release_lock
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPIDI_CH3I_Release_lock(MPID_Win *win_ptr)
{
    MPIDI_Win_lock_queue *lock_queue, **lock_queue_ptr;
    int requested_lock, mpi_errno = MPI_SUCCESS, temp_entered_count;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_RELEASE_LOCK);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_RELEASE_LOCK);

    if (win_ptr->current_lock_type == MPI_LOCK_SHARED) {
        /* decr ref cnt */
        /* FIXME: MT: Must be done atomically */
        win_ptr->shared_lock_ref_cnt--;
    }

    /* If the shared lock ref count is 0 (which is also true if the lock is
       an exclusive lock), release the lock. */
    if (win_ptr->shared_lock_ref_cnt == 0) {

        /* This function needs to be reentrant even in the single-threaded
           case, because while going through the lock queue the do_simple_get
           called in the lock-get-unlock case may itself cause a request to
           complete, and this function may then be called again from the
           completion action in ch3u_handle_send_req.c. To handle this
           possibility we use an entered_flag: if the flag is not 0, we
           simply increment entered_count and return. The loop through the
           lock queue is repeated if entered_count has changed while we were
           in the loop. */
        if (entered_flag != 0) {
            entered_count++;
            goto fn_exit;
        }
        else {
            entered_flag = 1;
            temp_entered_count = entered_count;
        }

        do {
            if (temp_entered_count != entered_count) temp_entered_count++;

            /* FIXME: MT: The setting of the lock type must be done atomically */
            win_ptr->current_lock_type = MPID_LOCK_NONE;

            /* If there is a lock queue, try to satisfy as many lock requests
               as possible. If the first one is a shared lock, grant it and
               grant all other shared locks. If the first one is an exclusive
               lock, grant only that one. */

            /* FIXME: MT: All queue accesses need to be made atomic */
            lock_queue = (MPIDI_Win_lock_queue *) win_ptr->lock_queue;
            lock_queue_ptr = (MPIDI_Win_lock_queue **) &(win_ptr->lock_queue);
            while (lock_queue) {
                /* if it is not a lock-op-unlock type case, or if it is one
                   but all the data has been received, try to acquire the
                   lock */
                if ((lock_queue->pt_single_op == NULL) ||
                    (lock_queue->pt_single_op->data_recd == 1)) {

                    requested_lock = lock_queue->lock_type;
                    if (MPIDI_CH3I_Try_acquire_win_lock(win_ptr,
                                                        requested_lock) == 1) {

                        if (lock_queue->pt_single_op != NULL) {
                            /* single op. do it here */
                            MPIDI_PT_single_op *single_op;

                            single_op = lock_queue->pt_single_op;
                            if (single_op->type == MPIDI_RMA_PUT) {
                                mpi_errno = MPIR_Localcopy(single_op->data,
                                                           single_op->count,
                                                           single_op->datatype,
                                                           single_op->addr,
                                                           single_op->count,
                                                           single_op->datatype);
                            }
                            else if (single_op->type == MPIDI_RMA_ACCUMULATE) {
                                mpi_errno = do_simple_accumulate(single_op);
                            }
                            else if (single_op->type == MPIDI_RMA_GET) {
                                mpi_errno = do_simple_get(win_ptr, lock_queue);
                            }

                            if (mpi_errno != MPI_SUCCESS) goto fn_exit;

                            /* if put or accumulate, send rma done packet
                               and release the lock */
                            if (single_op->type != MPIDI_RMA_GET) {
                                /* increment counter */
                                win_ptr->my_pt_rma_puts_accs++;

                                mpi_errno = MPIDI_CH3I_Send_pt_rma_done_pkt(
                                    lock_queue->vc,
                                    lock_queue->source_win_handle);
                                if (mpi_errno != MPI_SUCCESS) goto fn_exit;

                                /* release the lock */
                                if (win_ptr->current_lock_type == MPI_LOCK_SHARED) {
                                    /* decr ref cnt */
                                    /* FIXME: MT: Must be done atomically */
                                    win_ptr->shared_lock_ref_cnt--;
                                }

                                /* If the shared lock ref count is 0 (which is
                                   also true if the lock is an exclusive
                                   lock), release the lock. */
                                if (win_ptr->shared_lock_ref_cnt == 0) {
                                    /* FIXME: MT: The setting of the lock type
                                       must be done atomically */
                                    win_ptr->current_lock_type = MPID_LOCK_NONE;
                                }

                                /* dequeue entry from lock queue */
                                MPIU_Free(single_op->data);
                                MPIU_Free(single_op);
                                *lock_queue_ptr = lock_queue->next;
                                MPIU_Free(lock_queue);
                                lock_queue = *lock_queue_ptr;
                            }
                            else {
                                /* It is a get. The operation is not complete;
                                   it will be completed in
                                   ch3u_handle_send_req.c. Free the single_op
                                   structure. If it is an exclusive lock,
                                   break; otherwise continue to the next
                                   operation. */
                                MPIU_Free(single_op);
                                *lock_queue_ptr = lock_queue->next;
                                MPIU_Free(lock_queue);
                                lock_queue = *lock_queue_ptr;

                                if (requested_lock == MPI_LOCK_EXCLUSIVE)
                                    break;
                            }
                        }
                        else {
                            /* send lock granted packet */
                            mpi_errno = MPIDI_CH3I_Send_lock_granted_pkt(
                                lock_queue->vc,
                                lock_queue->source_win_handle);

                            /* dequeue entry from lock queue */
                            *lock_queue_ptr = lock_queue->next;
                            MPIU_Free(lock_queue);
                            lock_queue = *lock_queue_ptr;

                            /* if the granted lock is exclusive,
                               no need to continue */
                            if (requested_lock == MPI_LOCK_EXCLUSIVE)
                                break;
                        }
                    }
                }
                else {
                    lock_queue_ptr = &(lock_queue->next);
                    lock_queue = lock_queue->next;
                }
            }
        } while (temp_entered_count != entered_count);

        entered_count = entered_flag = 0;
    }

 fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_RELEASE_LOCK);
    return mpi_errno;
}
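A detail worth noting in the loop above: the code carries both lock_queue (the current entry) and lock_queue_ptr (the address of the link that leads to it), which lets it dequeue an entry mid-traversal without a separate previous-node pointer, regardless of whether the link lives in win_ptr->lock_queue or in some entry's next field. The same pattern on a generic list, as a self-contained sketch (the node type and the granted predicate are ours, not from the MPICH source):

#include <stdlib.h>

struct node { int granted; struct node *next; };

/* Remove every node that satisfies the predicate in a single pass,
   mirroring the lock_queue / lock_queue_ptr idiom above. */
static void remove_granted(struct node **head)
{
    struct node **pp = head;      /* points at the link to the current node */
    struct node *cur = *head;

    while (cur) {
        if (cur->granted) {
            *pp = cur->next;      /* unlink: the link now skips this node */
            free(cur);
            cur = *pp;            /* continue with whatever follows */
        }
        else {
            pp = &cur->next;      /* keep the node; advance the link */
            cur = cur->next;
        }
    }
}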
#undef FUNCNAME
#define FUNCNAME MPIDI_CH3I_Send_pt_rma_done_pkt
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPIDI_CH3I_Send_pt_rma_done_pkt(MPIDI_VC_t *vc, MPI_Win source_win_handle)
{
    MPIDI_CH3_Pkt_t upkt;
    MPIDI_CH3_Pkt_pt_rma_done_t *pt_rma_done_pkt = &upkt.pt_rma_done;
    MPID_Request *req;
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_SEND_PT_RMA_DONE_PKT);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_SEND_PT_RMA_DONE_PKT);

    MPIDI_Pkt_init(pt_rma_done_pkt, MPIDI_CH3_PKT_PT_RMA_DONE);
    pt_rma_done_pkt->source_win_handle = source_win_handle;

    mpi_errno = MPIU_CALL(MPIDI_CH3, iStartMsg(vc, pt_rma_done_pkt,
                                               sizeof(*pt_rma_done_pkt), &req));
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP(mpi_errno, MPI_ERR_OTHER, "**ch3|rmamsg");
    }

    if (req != NULL)
    {
        MPID_Request_release(req);
    }

 fn_fail:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_SEND_PT_RMA_DONE_PKT);
    return mpi_errno;
}

#undef FUNCNAME
#define FUNCNAME do_simple_accumulate
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
static int do_simple_accumulate(MPIDI_PT_single_op *single_op)
{
    int mpi_errno = MPI_SUCCESS;
    MPI_User_function *uop;
    MPIDI_STATE_DECL(MPID_STATE_DO_SIMPLE_ACCUMULATE);

    MPIDI_FUNC_ENTER(MPID_STATE_DO_SIMPLE_ACCUMULATE);

    if (single_op->op == MPI_REPLACE)
    {
        /* simply copy the data */
        mpi_errno = MPIR_Localcopy(single_op->data, single_op->count,
                                   single_op->datatype, single_op->addr,
                                   single_op->count, single_op->datatype);
        if (mpi_errno) {
            MPIU_ERR_POP(mpi_errno);
        }
        goto fn_exit;
    }

    if (HANDLE_GET_KIND(single_op->op) == HANDLE_KIND_BUILTIN)
    {
        /* get the function by indexing into the op table */
        uop = MPIR_Op_table[(single_op->op) % 16 - 1];
    }
    else
    {
        /* --BEGIN ERROR HANDLING-- */
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                         FCNAME, __LINE__, MPI_ERR_OP,
                                         "**opnotpredefined",
                                         "**opnotpredefined %d", single_op->op);
        goto fn_fail;
        /* --END ERROR HANDLING-- */
    }

    /* only basic datatypes supported for this optimization */
    (*uop)(single_op->data, single_op->addr,
           &(single_op->count), &(single_op->datatype));

 fn_fail:
 fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_DO_SIMPLE_ACCUMULATE);
    return mpi_errno;
}
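For a built-in op, do_simple_accumulate above finishes by calling through an MPI_User_function pointer fetched from MPICH's internal op table. The calling convention is the one the MPI standard defines for operations registered with MPI_Op_create. A sketch of that convention (int_sum is our own illustration, not one of the library's built-in handlers):

#include <mpi.h>

/* Matches the MPI_User_function signature:
   void fn(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype) */
static void int_sum(void *invec, void *inoutvec, int *len,
                    MPI_Datatype *datatype)
{
    int i;
    int *in = (int *) invec;
    int *inout = (int *) inoutvec;

    (void) datatype;            /* a real handler would dispatch on *datatype */
    for (i = 0; i < *len; i++)
        inout[i] += in[i];      /* accumulate source into destination */
}

/* Invoked the same way (*uop)(data, addr, &count, &type) is above. */
static void apply_op(MPI_User_function *uop, void *src, void *dst, int count)
{
    MPI_Datatype type = MPI_INT;
    (*uop)(src, dst, &count, &type);
}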
#undef FUNCNAME
#define FUNCNAME do_simple_get
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
static int do_simple_get(MPID_Win *win_ptr, MPIDI_Win_lock_queue *lock_queue)
{
    MPIDI_CH3_Pkt_t upkt;
    MPIDI_CH3_Pkt_get_resp_t *get_resp_pkt = &upkt.get_resp;
    MPID_Request *req;
    MPID_IOV iov[MPID_IOV_LIMIT];
    int type_size, mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_DO_SIMPLE_GET);

    MPIDI_FUNC_ENTER(MPID_STATE_DO_SIMPLE_GET);

    req = MPID_Request_create();
    if (req == NULL) {
        MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**nomem");
    }
    req->dev.target_win_handle = win_ptr->handle;
    req->dev.source_win_handle = lock_queue->source_win_handle;
    req->dev.single_op_opt = 1;

    MPIDI_Request_set_type(req, MPIDI_REQUEST_TYPE_GET_RESP);
    req->kind = MPID_REQUEST_SEND;
    req->dev.OnDataAvail = MPIDI_CH3_ReqHandler_GetSendRespComplete;
    req->dev.OnFinal     = MPIDI_CH3_ReqHandler_GetSendRespComplete;

    MPIDI_Pkt_init(get_resp_pkt, MPIDI_CH3_PKT_GET_RESP);
    get_resp_pkt->request_handle = lock_queue->pt_single_op->request_handle;

    iov[0].MPID_IOV_BUF = (MPID_IOV_BUF_CAST) get_resp_pkt;
    iov[0].MPID_IOV_LEN = sizeof(*get_resp_pkt);

    iov[1].MPID_IOV_BUF = (MPID_IOV_BUF_CAST) lock_queue->pt_single_op->addr;
    MPID_Datatype_get_size_macro(lock_queue->pt_single_op->datatype, type_size);
    iov[1].MPID_IOV_LEN = lock_queue->pt_single_op->count * type_size;

    mpi_errno = MPIU_CALL(MPIDI_CH3, iSendv(lock_queue->vc, req, iov, 2));
    /* --BEGIN ERROR HANDLING-- */
    if (mpi_errno != MPI_SUCCESS)
    {
        MPIU_Object_set_ref(req, 0);
        MPIDI_CH3_Request_destroy(req);
        MPIU_ERR_SETFATALANDJUMP(mpi_errno, MPI_ERR_OTHER, "**ch3|rmamsg");
    }
    /* --END ERROR HANDLING-- */

 fn_fail:
    MPIDI_FUNC_EXIT(MPID_STATE_DO_SIMPLE_GET);
    return mpi_errno;
}
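do_simple_get replies with a two-element IOV: the response header in iov[0] and the window memory itself in iov[1], so the payload is gathered straight from the target buffer with no intermediate copy. The same shape expressed with POSIX writev, as a blocking socket-level analogue of the channel's iSendv (the header struct is invented for illustration; the real code completes asynchronously through the OnDataAvail handler):

#include <sys/uio.h>
#include <unistd.h>

struct get_resp_hdr { unsigned type; unsigned request_handle; };

/* Send header + payload in one gather operation, like iov[0]/iov[1] above. */
static ssize_t send_get_response(int fd, struct get_resp_hdr *hdr,
                                 void *data, size_t data_len)
{
    struct iovec iov[2];

    iov[0].iov_base = hdr;
    iov[0].iov_len  = sizeof(*hdr);
    iov[1].iov_base = data;          /* points straight into the window */
    iov[1].iov_len  = data_len;

    return writev(fd, iov, 2);       /* kernel gathers both pieces */
}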
