⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ch3_progress_sock.c

📁 fortran并行计算包
💻 C
📖 第 1 页 / 共 2 页
字号:
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
 *  (C) 2001 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

#include "ch3i_progress.h"

/* FIXME: This is nowhere set to true.  The name is non-conforming if it is
   not static */
/* Set while the channel is tearing down; used to suppress "connection
   closed" errors on reads that race with shutdown. */
static int shutting_down = FALSE;

#if 0
static int connection_post_send_pkt_and_pgid(MPIDI_CH3I_Connection_t * conn);
#endif

/* Forward declarations for the connection helpers defined below. */
static inline int connection_post_recv_pkt(MPIDI_CH3I_Connection_t * conn);
static inline int connection_post_send_pkt(MPIDI_CH3I_Connection_t * conn);
static inline int connection_post_sendq_req(MPIDI_CH3I_Connection_t * conn);
static int adjust_iov(MPID_IOV ** iovp, int * countp, MPIU_Size_t nb);

/* Packet-handler dispatch table, indexed by MPIDI_CH3_Pkt_t type; defined
   elsewhere in the device layer. */
extern MPIDI_CH3_PktHandler_Fcn *MPIDI_pktArray[MPIDI_CH3_PKT_END_CH3+1];

#undef FUNCNAME
#define FUNCNAME connection_post_sendq_req
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
/* Post an asynchronous writev for the request at the head of the VC's send
   queue, if any.  Records that request in conn->send_active.  Returns
   MPI_SUCCESS or an MPI error code from the sock layer. */
static inline int connection_post_sendq_req(MPIDI_CH3I_Connection_t * conn)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_CH3I_VC *vcch;
    MPIDI_STATE_DECL(MPID_STATE_CONNECTION_POST_SENDQ_REQ);

    MPIDI_FUNC_ENTER(MPID_STATE_CONNECTION_POST_SENDQ_REQ);

    /* post send of next request on the send queue */
    vcch = (MPIDI_CH3I_VC *)conn->vc->channel_private;
    conn->send_active = MPIDI_CH3I_SendQ_head(vcch); /* MT */
    if (conn->send_active != NULL)
    {
	/* The request's iov describes the data still to be sent; the sock
	   layer completes it asynchronously via the progress engine. */
	mpi_errno = MPIDU_Sock_post_writev(
	      conn->sock, conn->send_active->dev.iov, 
	      conn->send_active->dev.iov_count, NULL);
	if (mpi_errno != MPI_SUCCESS) {
	    MPIU_ERR_SET(mpi_errno,MPI_ERR_OTHER,"**fail");
	}
    }
    
    MPIDI_FUNC_EXIT(MPID_STATE_CONNECTION_POST_SENDQ_REQ);
    return mpi_errno;
}

#undef FUNCNAME
#define FUNCNAME connection_post_send_pkt
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
/* Post an asynchronous write of the connection's packet buffer (conn->pkt).
   Both the minimum and maximum transfer sizes are sizeof(conn->pkt), so the
   operation completes only when the whole packet has been written. */
static inline int connection_post_send_pkt(MPIDI_CH3I_Connection_t * conn)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_CONNECTION_POST_SEND_PKT);

    MPIDI_FUNC_ENTER(MPID_STATE_CONNECTION_POST_SEND_PKT);
    
    mpi_errno = MPIDU_Sock_post_write(conn->sock, 
				      &conn->pkt, 
				      sizeof(conn->pkt), sizeof(conn->pkt), 
				      NULL);
    if (mpi_errno) {
	MPIU_ERR_SET(mpi_errno,MPI_ERR_OTHER,"**fail");
    }

    MPIDI_FUNC_EXIT(MPID_STATE_CONNECTION_POST_SEND_PKT);
    return mpi_errno;
}

#undef FUNCNAME
#define FUNCNAME connection_post_recv_pkt
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
/* Post an asynchronous read of exactly one packet header into conn->pkt
   (min size == max size == sizeof(conn->pkt)).  The progress engine's
   MPIDU_SOCK_OP_READ handler dispatches the packet when the read finishes. */
static inline int connection_post_recv_pkt(MPIDI_CH3I_Connection_t * conn)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_CONNECTION_POST_RECV_PKT);

    MPIDI_FUNC_ENTER(MPID_STATE_CONNECTION_POST_RECV_PKT);

    mpi_errno = MPIDU_Sock_post_read(conn->sock, &conn->pkt, sizeof(conn->pkt),
				     sizeof(conn->pkt), NULL);
    if (mpi_errno) {
	MPIU_ERR_SET(mpi_errno,MPI_ERR_OTHER,"**fail");
    }

    MPIDI_FUNC_EXIT(MPID_STATE_CONNECTION_POST_RECV_PKT);
    return mpi_errno;
}

#undef FUNCNAME
#define FUNCNAME adjust_iov
#undef FCNAME
#define FCNAME MPIU_QUOTE(FUNCNAME)
/* Advance an iov array past nb bytes that have already been transferred.
   Fully-consumed entries are skipped; a partially-consumed entry has its
   buffer pointer and length adjusted in place.  *iovp and *countp are
   updated to describe only the remaining data.  Returns nonzero iff the
   entire iov has been consumed (*countp reaches 0). */
static int adjust_iov(MPID_IOV ** iovp, int * countp, MPIU_Size_t nb)
{
    MPID_IOV * const iov = *iovp;
    const int count = *countp;
    int offset = 0;

    while (offset < count)
    {
	if (iov[offset].MPID_IOV_LEN <= nb)
	{
	    /* This entry was transferred completely; move to the next. */
	    nb -= iov[offset].MPID_IOV_LEN;
	    offset++;
	}
	else
	{
	    /* Partial transfer: shift this entry's window forward by nb. */
	    iov[offset].MPID_IOV_BUF = (MPID_IOV_BUF_CAST)((char *) iov[offset].MPID_IOV_BUF + nb);
	    iov[offset].MPID_IOV_LEN -= nb;
	    break;
	}
    }

    *iovp += offset;
    *countp -= offset;

    return (*countp == 0);
}

#if 0
#undef FUNCNAME
#define FUNCNAME connection_post_send_pkt_and_pgid
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
/* (Dead code, compiled out.)  Post a two-entry writev sending conn->pkt
   followed by this process group's id string (including its NUL). */
static int connection_post_send_pkt_and_pgid(MPIDI_CH3I_Connection_t * conn)
{
    int mpi_errno;
    MPIDI_STATE_DECL(MPID_STATE_CONNECTION_POST_SEND_PKT_AND_PGID);

    MPIDI_FUNC_ENTER(MPID_STATE_CONNECTION_POST_SEND_PKT_AND_PGID);
    
    conn->iov[0].MPID_IOV_BUF = (MPID_IOV_BUF_CAST) &conn->pkt;
    conn->iov[0].MPID_IOV_LEN = (int) sizeof(conn->pkt);
    conn->iov[1].MPID_IOV_BUF = (MPID_IOV_BUF_CAST) MPIDI_Process.my_pg->id;
    conn->iov[1].MPID_IOV_LEN = (int) 
strlen(MPIDI_Process.my_pg->id) + 1;    mpi_errno = MPIDU_Sock_post_writev(conn->sock, conn->iov, 2, NULL);    if (mpi_errno != MPI_SUCCESS) {	MPIU_ERR_SET(mpi_errno,MPI_ERR_OTHER,"**fail");    }    MPIDI_FUNC_EXIT(MPID_STATE_CONNECTION_POST_SEND_PKT_AND_PGID);    return mpi_errno;}#endif#undef FUNCNAME#define FUNCNAME MPIDI_CH3I_Progress_handle_sock_event#undef FCNAME#define FCNAME MPIDI_QUOTE(FUNCNAME)int MPIDI_CH3I_Progress_handle_sock_event(MPIDU_Sock_event_t * event){    int complete;    int mpi_errno = MPI_SUCCESS;    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_PROGRESS_HANDLE_SOCK_EVENT);    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_PROGRESS_HANDLE_SOCK_EVENT);    switch (event->op_type)    {	case MPIDU_SOCK_OP_READ:	{	    MPIDI_CH3I_Connection_t * conn = (MPIDI_CH3I_Connection_t *) event->user_ptr;			    MPID_Request * rreq = conn->recv_active;	    /* --BEGIN ERROR HANDLING-- */	    if (event->error != MPI_SUCCESS)	    {		/* FIXME: the following should be handled by the close 		   protocol */		if (!shutting_down || MPIR_ERR_GET_CLASS(event->error) != MPIDU_SOCK_ERR_CONN_CLOSED)		{		    mpi_errno = event->error;		    MPIU_ERR_POP(mpi_errno);		}		    		break;	    }	    /* --END ERROR HANDLING-- */	    if (conn->state == CONN_STATE_CONNECTED)	    {		if (conn->recv_active == NULL)		{                    MPIDI_msg_sz_t buflen = sizeof (MPIDI_CH3_Pkt_t);		    MPIU_Assert(conn->pkt.type < MPIDI_CH3_PKT_END_CH3);					    mpi_errno = 			MPIDI_pktArray[conn->pkt.type]( conn->vc, &conn->pkt,							&buflen, &rreq );		    if (mpi_errno != MPI_SUCCESS) {			MPIU_ERR_POP(mpi_errno);		    }                    MPIU_Assert(buflen == sizeof (MPIDI_CH3_Pkt_t));		    if (rreq == NULL)		    {			if (conn->state != CONN_STATE_CLOSING)			{			    /* conn->recv_active = NULL;  -- already set to NULL */			    mpi_errno = connection_post_recv_pkt(conn);			    if (mpi_errno != MPI_SUCCESS) {				MPIU_ERR_POP(mpi_errno);			    }			}		    }		    else		    {			for(;;)			{			    MPID_IOV * iovp;			    
MPIU_Size_t nb;							    iovp = rreq->dev.iov;			    			    mpi_errno = MPIDU_Sock_readv(conn->sock, iovp, 						   rreq->dev.iov_count, &nb);			    /* --BEGIN ERROR HANDLING-- */			    if (mpi_errno != MPI_SUCCESS)			    {				mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER,								 "**ch3|sock|immedread", "ch3|sock|immedread %p %p %p",								 rreq, conn, conn->vc);				goto fn_fail;			    }			    /* --END ERROR HANDLING-- */			    MPIU_DBG_MSG_FMT(CH3_CHANNEL,VERBOSE,    (MPIU_DBG_FDEST,"immediate readv, vc=%p nb=" MPIDI_MSG_SZ_FMT ", rreq=0x%08x",     conn->vc, nb, rreq->handle));			    if (nb > 0 && adjust_iov(&iovp, &rreq->dev.iov_count, nb))			    {				int (*reqFn)(MPIDI_VC_t *, MPID_Request *, int *);				reqFn = rreq->dev.OnDataAvail;				if (!reqFn) {				    MPIDI_CH3U_Request_complete(rreq);				    complete = TRUE;				}				else {				    mpi_errno = reqFn( conn->vc, rreq, &complete );				    if (mpi_errno) MPIU_ERR_POP(mpi_errno);				}				if (complete)				{				    /* conn->recv_active = NULL; -- already set to NULL */				    mpi_errno = connection_post_recv_pkt(conn);				    if (mpi_errno != MPI_SUCCESS) {					MPIU_ERR_POP(mpi_errno);				    }				    break;				}			    }			    else			    {				MPIU_DBG_MSG_FMT(CH3_CHANNEL,VERBOSE,        (MPIU_DBG_FDEST,"posting readv, vc=%p, rreq=0x%08x", 	 conn->vc, rreq->handle));				conn->recv_active = rreq;				mpi_errno = MPIDU_Sock_post_readv(conn->sock, iovp, rreq->dev.iov_count, NULL);				/* --BEGIN ERROR HANDLING-- */				if (mpi_errno != MPI_SUCCESS)				{				    mpi_errno = MPIR_Err_create_code(					mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**ch3|sock|postread",					"ch3|sock|postread %p %p %p", rreq, conn, conn->vc);				    goto fn_fail;				}				/* --END ERROR HANDLING-- */				break;			    }			}		    }		}		else /* incoming data */		{		    int (*reqFn)(MPIDI_VC_t *, MPID_Request *, int *);		    reqFn = rreq->dev.OnDataAvail;		    if (!reqFn) {			
MPIDI_CH3U_Request_complete(rreq);			complete = TRUE;		    }		    else {			mpi_errno = reqFn( conn->vc, rreq, &complete );			if (mpi_errno) MPIU_ERR_POP(mpi_errno);		    }		    if (complete)		    {			conn->recv_active = NULL;			mpi_errno = connection_post_recv_pkt(conn);			if (mpi_errno != MPI_SUCCESS) {			    MPIU_ERR_POP(mpi_errno);			}		    }		    else /* more data to be read */		    {			for(;;)			{			    MPID_IOV * iovp;			    MPIU_Size_t nb;							    iovp = rreq->dev.iov;			    			    mpi_errno = MPIDU_Sock_readv(conn->sock, iovp, rreq->dev.iov_count, &nb);			    /* --BEGIN ERROR HANDLING-- */			    if (mpi_errno != MPI_SUCCESS)			    {				mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER,								 "**ch3|sock|immedread", "ch3|sock|immedread %p %p %p",								 rreq, conn, conn->vc);				goto fn_fail;			    }			    /* --END ERROR HANDLING-- */			    MPIU_DBG_MSG_FMT(CH3_CHANNEL,VERBOSE,        (MPIU_DBG_FDEST,"immediate readv, vc=%p nb=" MPIDI_MSG_SZ_FMT ", rreq=0x%08x",	 conn->vc, nb, rreq->handle));							    if (nb > 0 && adjust_iov(&iovp, &rreq->dev.iov_count, nb))			    {				int (*reqFn)(MPIDI_VC_t *, MPID_Request *, int *);				reqFn = rreq->dev.OnDataAvail;				if (!reqFn) {				    MPIDI_CH3U_Request_complete(rreq);				    complete = TRUE;				}				else {				    mpi_errno = reqFn( conn->vc, rreq, &complete );				    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -