
📄 iwch_cm.c

📁 Linux kernel source
💻 C
📖 Page 1 of 4
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int ep_timeout_secs = 10;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=10)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");
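/*
 * Illustrative sketch, not part of the driver source: the states[] table
 * above presumably mirrors enum iwch_ep_state from iwch_cm.h, so that
 * states[epc->state] yields a printable name for the endpoint state
 * machine. A minimal standalone analogue, with hypothetical enum values:
 */
#if 0	/* example only -- not compiled into the module */
enum demo_ep_state { DEMO_IDLE, DEMO_LISTEN, DEMO_CONNECTING, DEMO_DEAD };
static const char *demo_states[] = { "idle", "listen", "connecting", "dead" };

static const char *demo_state_name(enum demo_ep_state s)
{
	return demo_states[s];	/* demo_state_name(DEMO_LISTEN) == "listen" */
}
#endif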
(default=1)");static int markers_enabled = 0;module_param(markers_enabled, int, 0644);MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");static int crc_enabled = 1;module_param(crc_enabled, int, 0644);MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");static int rcv_win = 256 * 1024;module_param(rcv_win, int, 0644);MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256)");static int snd_win = 32 * 1024;module_param(snd_win, int, 0644);MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");static unsigned int nocong = 0;module_param(nocong, uint, 0644);MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");static unsigned int cong_flavor = 1;module_param(cong_flavor, uint, 0644);MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");static void process_work(struct work_struct *work);static struct workqueue_struct *workq;static DECLARE_WORK(skb_work, process_work);static struct sk_buff_head rxq;static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);static void ep_timeout(unsigned long arg);static void connect_reply_upcall(struct iwch_ep *ep, int status);static void start_ep_timer(struct iwch_ep *ep){	PDBG("%s ep %p\n", __FUNCTION__, ep);	if (timer_pending(&ep->timer)) {		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);		del_timer_sync(&ep->timer);	} else		get_ep(&ep->com);	ep->timer.expires = jiffies + ep_timeout_secs * HZ;	ep->timer.data = (unsigned long)ep;	ep->timer.function = ep_timeout;	add_timer(&ep->timer);}static void stop_ep_timer(struct iwch_ep *ep){	PDBG("%s ep %p\n", __FUNCTION__, ep);	del_timer_sync(&ep->timer);	put_ep(&ep->com);}static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb){	struct cpl_tid_release *req;	skb = get_skb(skb, sizeof *req, GFP_KERNEL);	if (!skb)		return;	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));	skb->priority = CPL_PRIORITY_SETUP;	cxgb3_ofld_send(tdev, skb);	return;}int iwch_quiesce_tid(struct iwch_ep *ep){	struct cpl_set_tcb_field *req;	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);	if (!skb)		return -ENOMEM;	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));	req->reply = 0;	req->cpu_idx = 0;	req->word = htons(W_TCB_RX_QUIESCE);	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);	skb->priority = CPL_PRIORITY_DATA;	cxgb3_ofld_send(ep->com.tdev, skb);	return 0;}int iwch_resume_tid(struct iwch_ep *ep){	struct cpl_set_tcb_field *req;	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);	if (!skb)		return -ENOMEM;	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));	req->reply = 0;	req->cpu_idx = 0;	req->word = htons(W_TCB_RX_QUIESCE);	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);	req->val = 0;	skb->priority = CPL_PRIORITY_DATA;	cxgb3_ofld_send(ep->com.tdev, skb);	return 0;}static void set_emss(struct iwch_ep *ep, u16 opt){	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);	ep->emss = 
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep_common *epc;
	epc = container_of(kref, struct iwch_ep_common, kref);
	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
	kfree(epc);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}
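/*
 * Illustrative sketch, not part of the driver source: alloc_ep() and
 * __free_ep() above follow the standard kref lifetime pattern.
 * kref_init() starts the count at 1; get_ep()/put_ep() (presumably
 * kref_get()/kref_put() wrappers defined in the iwch headers, which is
 * why start_ep_timer() takes a reference that stop_ep_timer() drops)
 * would look roughly like this:
 */
#if 0	/* example only -- not compiled into the module */
static inline void demo_get_ep(struct iwch_ep_common *epc)
{
	kref_get(&epc->kref);
}

static inline void demo_put_ep(struct iwch_ep_common *epc)
{
	kref_put(&epc->kref, __free_ep);	/* frees epc on the last put */
}
#endif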
/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST
 * variant and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
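/*
 * Illustrative sketch, not part of the driver source: compute_wscale(),
 * called from send_connect() above, comes from the cxgb3/iwch headers. A
 * plausible definition picks the smallest RFC 1323 window-scale shift
 * (capped at 14) whose scaled 64KB window covers the requested size; the
 * default rcv_win of 256 * 1024 bytes would give a shift of 3:
 */
#if 0	/* example only -- not compiled into the module */
static int demo_compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;	/* demo_compute_wscale(256 * 1024) == 3 */
}
#endif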
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
