iwch_cm.c

Linux kernel source
C
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
		break;
	case DEAD:
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumer consumption.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}

static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __FUNCTION__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}

static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		break;
	default:
		BUG();
	}
	__state_set(&ep->com, CLOSING);
	spin_unlock_irqrestore(&ep->com.lock, flags);
	abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}

int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	return 0;
}
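/*
 * Accept an MPA connection request.  Validate the requested IRD/ORD
 * against the adapter limits, bind the QP to this endpoint, move the QP
 * to RTS, send the MPA reply, and then wait for the hardware to ack the
 * init WR before raising the ESTABLISHED event to the CM.
 */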
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD)
		return -ECONNRESET;

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		return -EINVAL;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);

	get_ep(&ep->com);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err;

	/* wait for wr_ack */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (err)
		goto err;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
err:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}
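/*
 * Active open.  Allocate an endpoint and an active TID, resolve a route
 * and an L2T entry for the peer, then send the connect request to the
 * RNIC.  The fail2..fail4 labels unwind exactly the resources acquired
 * before each failure point.
 */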
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	put_ep(&ep->com);
out:
	return err;
}

int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
fail1:
out:
	return err;
}

int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}

int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep,
	     states[ep->com.state], abrupt);

	if (ep->com.state == DEAD) {
		PDBG("%s already dead ep %p\n", __FUNCTION__, ep);
		goto out;
	}

	if (abrupt) {
		if (ep->com.state != ABORTING) {
			ep->com.state = ABORTING;
			close = 1;
		}
		goto out;
	}

	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		start_ep_timer(ep);
		ep->com.state = CLOSING;
		close = 1;
		break;
	case CLOSING:
		ep->com.state = MORIBUND;
		close = 1;
		break;
	case MORIBUND:
		break;
	default:
		BUG();
		break;
	}
out:
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
	}
	return ret;
}
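/*
 * Routing update hook: if this endpoint was using the old dst entry,
 * switch it over to the new dst/L2T pair and drop the old references.
 */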
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);

	return 0;
}

static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}

int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	/*
	 * All upcalls from the T3 Core go to sched() to
	 * schedule the processing on a work queue.
	 */
	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
	t3c_handlers[CPL_RX_DATA] = sched;
	t3c_handlers[CPL_TX_DMA_ACK] = sched;
	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
	t3c_handlers[CPL_ABORT_RPL] = sched;
	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
	t3c_handlers[CPL_PEER_CLOSE] = sched;
	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
	return 0;
}

void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}
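/*
 * NOTE: the worker that consumes rxq is not part of this excerpt.  The
 * sketch below is illustrative only: it mirrors what the consumer paired
 * with sched() has to do, assuming the rxq, workq, skb_work and
 * work_handlers declarations from earlier in the file.  The function name
 * and the opcode lookup (G_OPCODE() on skb->csum) are assumptions about
 * how the driver tags queued messages, not code from this file.
 */
static void skb_work_handler(struct work_struct *work)
{
	struct sk_buff *skb;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq)) != NULL) {
		/* Recover the ctx and tdev that sched() stashed in skb->cb. */
		ep = *((void **) skb->cb);
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));

		/* Dispatch to the real handler registered in iwch_cm_init(). */
		ret = work_handlers[G_OPCODE(ntohl(skb->csum))](tdev, skb, ep);

		/* Handlers return CPL_RET_BUF_DONE when the skb can be freed. */
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
}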
