
📄 iwch_qp.c

📁 Linux kernel source
💻 C
📖 Page 1 of 2
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case TPT_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case TPT_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case TPT_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case TPT_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case TPT_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case TPT_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case TPT_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case TPT_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case TPT_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case TPT_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case TPT_ERR_MSN:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case TPT_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

/*
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
	union t3_wr *wqe;
	struct terminate_message *term;
	struct sk_buff *skb;

	PDBG("%s %d\n", __FUNCTION__, __LINE__);
	skb = alloc_skb(40, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s cannot send TERMINATE!\n", __FUNCTION__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, 40);
	memset(wqe, 0, 40);
	wqe->send.rdmaop = T3_TERMINATE;

	/* immediate data length */
	wqe->send.plen = htonl(4);

	/* immediate data starts here. */
	term = (struct terminate_message *)wqe->send.sgl;
	build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
			 V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
	skb->priority = CPL_PRIORITY_DATA;
	return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}
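
A note on what build_term_codes produces: an iWARP TERMINATE message reports a failure as a (layer, error type, error code) triple. The driver packs layer and type into the single layer_etype byte by ORing a LAYER_ constant with an etype constant, and puts the specific code in ecode; iwch_post_terminate then carries those bytes inside the 4 bytes of immediate data of a T3_TERMINATE send work request. A minimal standalone sketch of that packing, with made-up bit positions and values (the real constants are defined in the cxgb3 headers):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout: layer in bits 7:4, error type in bits 3:0.
 * The real LAYER_ and etype constants live in the cxgb3 headers. */
#define EX_LAYER_RDMAP		(0x0 << 4)
#define EX_LAYER_DDP		(0x1 << 4)
#define EX_RDMAP_REMOTE_PROT	0x1
#define EX_DDP_TAGGED_ERR	0x2

int main(void)
{
	uint8_t layer_etype, ecode;

	/* Same shape as build_term_codes: OR layer and type together. */
	layer_etype = EX_LAYER_DDP | EX_DDP_TAGGED_ERR;
	ecode = 0x01;	/* hypothetical "base/bounds violation" code */

	printf("layer %d etype %d ecode 0x%02x\n",
	       layer_etype >> 4, layer_etype & 0xf, (unsigned)ecode);
	return 0;
}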
/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	struct iwch_cq *rchp, *schp;
	int count;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&rchp->cq);
	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
	cxio_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&schp->cq);
	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
	cxio_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}

static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	if (t3b_device(qhp->rhp))
		cxio_set_wq_in_error(&qhp->wq);
	else
		__flush_qp(qhp, flag);
}

/*
 * Return non zero if at least one RECV was pre-posted.
 */
static int rqes_posted(struct iwch_qp *qhp)
{
	return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV;
}

static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
				enum iwch_qp_attr_mask mask,
				struct iwch_qp_attributes *attrs)
{
	struct t3_rdma_init_attr init_attr;
	int ret;

	init_attr.tid = qhp->ep->hwtid;
	init_attr.qpid = qhp->wq.qpid;
	init_attr.pdid = qhp->attr.pd;
	init_attr.scqid = qhp->attr.scq;
	init_attr.rcqid = qhp->attr.rcq;
	init_attr.rq_addr = qhp->wq.rq_addr;
	init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
	init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
		qhp->attr.mpa_attr.recv_marker_enabled |
		(qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
		(qhp->attr.mpa_attr.crc_enabled << 2);

	/*
	 * XXX - The IWCM doesn't quite handle getting these
	 * attrs set before going into RTS.  For now, just turn
	 * them on always...
	 */
#if 0
	init_attr.qpcaps = qhp->attr.enableRdmaRead |
		(qhp->attr.enableRdmaWrite << 1) |
		(qhp->attr.enableBind << 2) |
		(qhp->attr.enable_stag0_fastreg << 3) |
		(qhp->attr.enable_stag0_fastreg << 4);
#else
	init_attr.qpcaps = 0x1f;
#endif
	init_attr.tcp_emss = qhp->ep->emss;
	init_attr.ord = qhp->attr.max_ord;
	init_attr.ird = qhp->attr.max_ird;
	init_attr.qp_dma_addr = qhp->wq.dma_addr;
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
	init_attr.irs = qhp->ep->rcv_seq;
	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
	     init_attr.rq_addr, init_attr.rq_size,
	     init_attr.flags, init_attr.qpcaps);
	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
	PDBG("%s ret %d\n", __FUNCTION__, ret);
	return ret;
}
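
The locking dance in __flush_qp above deserves a comment: the function is entered with the QP lock held, but the documented hierarchy is CQ lock first, then QP lock. So it takes a reference on the QP (keeping it alive), drops the QP lock, and reacquires the two locks in the correct order for each CQ; taking locks in one global order on every path is the standard way to avoid ABBA deadlocks. A rough userspace sketch of the same discipline with pthread mutexes (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for one CQ lock and the QP lock. */
static pthread_mutex_t cq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t qp_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors __flush_qp: entered with qp_lock held, but cq_lock must be
 * taken first, so drop qp_lock and reacquire in cq-then-qp order. */
static void flush_with_hierarchy(void)
{
	pthread_mutex_unlock(&qp_lock);

	pthread_mutex_lock(&cq_lock);	/* cq lock first... */
	pthread_mutex_lock(&qp_lock);	/* ...then qp lock */
	printf("flush runs under cq-then-qp ordering\n");
	pthread_mutex_unlock(&qp_lock);
	pthread_mutex_unlock(&cq_lock);

	pthread_mutex_lock(&qp_lock);	/* give the caller its lock back */
}

int main(void)
{
	pthread_mutex_lock(&qp_lock);	/* caller enters holding qp_lock */
	flush_with_hierarchy();
	pthread_mutex_unlock(&qp_lock);
	return 0;
}

The atomic_inc/atomic_dec_and_test pair around the unlocked window matters too: once the QP lock is dropped, only that extra reference keeps another thread from freeing the QP out from under the flush.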
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
				enum iwch_qp_attr_mask mask,
				struct iwch_qp_attributes *attrs,
				int internal)
{
	int ret = 0;
	struct iwch_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct iwch_ep *ep = NULL;

	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __FUNCTION__,
	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & IWCH_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord >
			    rhp->attr.max_rdma_read_qp_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & IWCH_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird >
			    rhp->attr.max_rdma_reads_per_qp) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case IWCH_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_RTS:
			if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = IWCH_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp, mask, attrs);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			flush_qp(qhp, &flag);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_RTS:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = IWCH_QP_STATE_CLOSING;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				ep = qhp->ep;
			}
			break;
		case IWCH_QP_STATE_TERMINATE:
			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
			if (t3b_device(qhp->rhp))
				cxio_set_wq_in_error(&qhp->wq);
			if (!internal)
				terminate = 1;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case IWCH_QP_STATE_IDLE:
			qhp->attr.state = IWCH_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case IWCH_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case IWCH_QP_STATE_ERROR:
		if (attrs->next_state != IWCH_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}

		if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
		    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = IWCH_QP_STATE_IDLE;
		memset(&qhp->attr, 0, sizeof(qhp->attr));
		break;
	case IWCH_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __FUNCTION__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __FUNCTION__, qhp->ep,
	     qhp->wq.qpid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = IWCH_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp, &flag);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		iwch_post_terminate(qhp, NULL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect)
		iwch_ep_disconnect(ep, abort, GFP_KERNEL);

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		put_ep(&ep->com);

	PDBG("%s exit state %d\n", __FUNCTION__, qhp->attr.state);
	return ret;
}
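
Taken together, the nested switches in iwch_modify_qp implement a small state machine: IDLE may move to RTS or ERROR; RTS to CLOSING, TERMINATE, or ERROR; CLOSING and TERMINATE advance only for internal callers; ERROR returns to IDLE once both work queues are empty. A table-driven sketch of just the transition check, with hypothetical enum names and the internal-only and queue-empty qualifiers left out:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical mirror of the IWCH_QP_STATE_* values. */
enum qp_state { ST_IDLE, ST_RTS, ST_CLOSING, ST_TERMINATE, ST_ERROR, ST_MAX };

/* allowed[cur][next]: transitions iwch_modify_qp accepts (CLOSING and
 * TERMINATE only transition for internal callers; ERROR->IDLE also
 * requires both work queues to be empty). */
static const bool allowed[ST_MAX][ST_MAX] = {
	[ST_IDLE]      = { [ST_RTS] = true,     [ST_ERROR] = true },
	[ST_RTS]       = { [ST_CLOSING] = true, [ST_TERMINATE] = true,
			   [ST_ERROR] = true },
	[ST_CLOSING]   = { [ST_IDLE] = true,    [ST_ERROR] = true },
	[ST_TERMINATE] = { [ST_ERROR] = true },
	[ST_ERROR]     = { [ST_IDLE] = true },
};

int main(void)
{
	printf("IDLE -> RTS allowed? %d\n", allowed[ST_IDLE][ST_RTS]);
	printf("IDLE -> CLOSING allowed? %d\n", allowed[ST_IDLE][ST_CLOSING]);
	return 0;
}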
static int quiesce_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_quiesce_tid(qhp->ep);
	qhp->flags |= QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}

static int resume_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_resume_tid(qhp->ep);
	qhp->flags &= ~QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}

int iwch_quiesce_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i = 0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
			quiesce_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
			quiesce_qp(qhp);
	}
	return 0;
}

int iwch_resume_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i = 0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
			resume_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
			resume_qp(qhp);
	}
	return 0;
}
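
One pattern in iwch_modify_qp above is worth a closing note: terminate, disconnect, and free are only recorded as flags while the QP spinlock is held, and iwch_post_terminate, iwch_ep_disconnect, and put_ep run after spin_unlock_irqrestore, since work like the GFP_KERNEL disconnect may sleep and sleeping is not allowed under a spinlock. A userspace sketch of that defer-until-unlock shape (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t qp_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for actions that must not run while the lock is held. */
static void send_terminate(void)   { printf("terminate sent\n"); }
static void start_disconnect(void) { printf("disconnect started\n"); }

static void modify(int want_terminate, int want_disconnect)
{
	int terminate = 0, disconnect = 0;

	pthread_mutex_lock(&qp_lock);
	/* The state transition happens here; instead of calling out
	 * while locked, just record what must happen afterwards. */
	terminate = want_terminate;
	disconnect = want_disconnect;
	pthread_mutex_unlock(&qp_lock);

	/* Deferred actions, issued only after the unlock. */
	if (terminate)
		send_terminate();
	if (disconnect)
		start_disconnect();
}

int main(void)
{
	modify(1, 0);
	modify(0, 1);
	return 0;
}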
