/*
 * cxgb3_offload.c — excerpt from the Linux kernel source (C).
 * The original file is 1,284 lines; this chunk is page 1 of 3 and begins
 * mid-function (tail of cxgb3_free_atid) and ends mid-function (nb_callback).
 */
	union active_open_entry *p = atid2entry(t, atid);	void *ctx = p->t3c_tid.ctx;	spin_lock_bh(&t->atid_lock);	p->next = t->afree;	t->afree = p;	t->atids_in_use--;	spin_unlock_bh(&t->atid_lock);	return ctx;}EXPORT_SYMBOL(cxgb3_free_atid);/* * Free a server TID and return it to the free pool. */void cxgb3_free_stid(struct t3cdev *tdev, int stid){	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;	union listen_entry *p = stid2entry(t, stid);	spin_lock_bh(&t->stid_lock);	p->next = t->sfree;	t->sfree = p;	t->stids_in_use--;	spin_unlock_bh(&t->stid_lock);}EXPORT_SYMBOL(cxgb3_free_stid);void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,		      void *ctx, unsigned int tid){	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;	t->tid_tab[tid].client = client;	t->tid_tab[tid].ctx = ctx;	atomic_inc(&t->tids_in_use);}EXPORT_SYMBOL(cxgb3_insert_tid);/* * Populate a TID_RELEASE WR.  The skb must be already propely sized. */static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid){	struct cpl_tid_release *req;	skb->priority = CPL_PRIORITY_SETUP;	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));}static void t3_process_tid_release_list(struct work_struct *work){	struct t3c_data *td = container_of(work, struct t3c_data,					   tid_release_task);	struct sk_buff *skb;	struct t3cdev *tdev = td->dev;		spin_lock_bh(&td->tid_release_lock);	while (td->tid_release_list) {		struct t3c_tid_entry *p = td->tid_release_list;		td->tid_release_list = (struct t3c_tid_entry *)p->ctx;		spin_unlock_bh(&td->tid_release_lock);		skb = alloc_skb(sizeof(struct cpl_tid_release),				GFP_KERNEL | __GFP_NOFAIL);		mk_tid_release(skb, p - td->tid_maps.tid_tab);		cxgb3_ofld_send(tdev, skb);		p->ctx = NULL;		spin_lock_bh(&td->tid_release_lock);	}	spin_unlock_bh(&td->tid_release_lock);}/* use ctx as a next pointer in the tid release list */void 
cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid){	struct t3c_data *td = T3C_DATA(tdev);	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];	spin_lock_bh(&td->tid_release_lock);	p->ctx = (void *)td->tid_release_list;	p->client = NULL;	td->tid_release_list = p;	if (!p->ctx)		schedule_work(&td->tid_release_task);	spin_unlock_bh(&td->tid_release_lock);}EXPORT_SYMBOL(cxgb3_queue_tid_release);/* * Remove a tid from the TID table.  A client may defer processing its last * CPL message if it is locked at the time it arrives, and while the message * sits in the client's backlog the TID may be reused for another connection. * To handle this we atomically switch the TID association if it still points * to the original client context. */void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid){	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;	BUG_ON(tid >= t->ntids);	if (tdev->type == T3A)		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);	else {		struct sk_buff *skb;		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);		if (likely(skb)) {			mk_tid_release(skb, tid);			cxgb3_ofld_send(tdev, skb);			t->tid_tab[tid].ctx = NULL;		} else			cxgb3_queue_tid_release(tdev, tid);	}	atomic_dec(&t->tids_in_use);}EXPORT_SYMBOL(cxgb3_remove_tid);int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,		     void *ctx){	int atid = -1;	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;	spin_lock_bh(&t->atid_lock);	if (t->afree &&	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=	    t->ntids) {		union active_open_entry *p = t->afree;		atid = (p - t->atid_tab) + t->atid_base;		t->afree = p->next;		p->t3c_tid.ctx = ctx;		p->t3c_tid.client = client;		t->atids_in_use++;	}	spin_unlock_bh(&t->atid_lock);	return atid;}EXPORT_SYMBOL(cxgb3_alloc_atid);int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,		     void *ctx){	int stid = -1;	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;	spin_lock_bh(&t->stid_lock);	
if (t->sfree) {		union listen_entry *p = t->sfree;		stid = (p - t->stid_tab) + t->stid_base;		t->sfree = p->next;		p->t3c_tid.ctx = ctx;		p->t3c_tid.client = client;		t->stids_in_use++;	}	spin_unlock_bh(&t->stid_lock);	return stid;}EXPORT_SYMBOL(cxgb3_alloc_stid);/* Get the t3cdev associated with a net_device */struct t3cdev *dev2t3cdev(struct net_device *dev){	const struct port_info *pi = netdev_priv(dev);	return (struct t3cdev *)pi->adapter;}EXPORT_SYMBOL(dev2t3cdev);static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb){	struct cpl_smt_write_rpl *rpl = cplhdr(skb);	if (rpl->status != CPL_ERR_NONE)		printk(KERN_ERR		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",		       rpl->status, GET_TID(rpl));	return CPL_RET_BUF_DONE;}static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb){	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);	if (rpl->status != CPL_ERR_NONE)		printk(KERN_ERR		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",		       rpl->status, GET_TID(rpl));	return CPL_RET_BUF_DONE;}static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb){	struct cpl_act_open_rpl *rpl = cplhdr(skb);	unsigned int atid = G_TID(ntohl(rpl->atid));	struct t3c_tid_entry *t3c_tid;	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&	    t3c_tid->client->handlers &&	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,								    t3c_tid->								    ctx);	} else {		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",		       dev->name, CPL_ACT_OPEN_RPL);		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;	}}static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb){	union opcode_tid *p = cplhdr(skb);	unsigned int stid = G_TID(ntohl(p->opcode_tid));	struct t3c_tid_entry *t3c_tid;	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&	    
t3c_tid->client->handlers[p->opcode]) {		return t3c_tid->client->handlers[p->opcode] (dev, skb,							     t3c_tid->ctx);	} else {		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",		       dev->name, p->opcode);		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;	}}static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb){	union opcode_tid *p = cplhdr(skb);	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));	struct t3c_tid_entry *t3c_tid;	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&	    t3c_tid->client->handlers[p->opcode]) {		return t3c_tid->client->handlers[p->opcode]		    (dev, skb, t3c_tid->ctx);	} else {		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",		       dev->name, p->opcode);		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;	}}static int do_cr(struct t3cdev *dev, struct sk_buff *skb){	struct cpl_pass_accept_req *req = cplhdr(skb);	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;	struct t3c_tid_entry *t3c_tid;	unsigned int tid = GET_TID(req);	if (unlikely(tid >= t->ntids)) {		printk("%s: passive open TID %u too large\n",		       dev->name, tid);		t3_fatal_err(tdev2adap(dev));		return CPL_RET_BUF_DONE;	}	t3c_tid = lookup_stid(t, stid);	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]		    (dev, skb, t3c_tid->ctx);	} else {		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",		       dev->name, CPL_PASS_ACCEPT_REQ);		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;	}}/* * Returns an sk_buff for a reply CPL message of size len.  If the input * sk_buff has no other users it is trimmed and reused, otherwise a new buffer * is allocated.  The input skb must be of size at least len.  Note that this * operation does not destroy the original skb data even if it decides to reuse * the buffer. 
*/static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,					       gfp_t gfp){	if (likely(!skb_cloned(skb))) {		BUG_ON(skb->len < len);		__skb_trim(skb, len);		skb_get(skb);	} else {		skb = alloc_skb(len, gfp);		if (skb)			__skb_put(skb, len);	}	return skb;}static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb){	union opcode_tid *p = cplhdr(skb);	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));	struct t3c_tid_entry *t3c_tid;	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&	    t3c_tid->client->handlers[p->opcode]) {		return t3c_tid->client->handlers[p->opcode]		    (dev, skb, t3c_tid->ctx);	} else {		struct cpl_abort_req_rss *req = cplhdr(skb);		struct cpl_abort_rpl *rpl;		struct sk_buff *reply_skb;		unsigned int tid = GET_TID(req);		u8 cmd = req->status;		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)			goto out;		reply_skb = cxgb3_get_cpl_reply_skb(skb,						    sizeof(struct							   cpl_abort_rpl),						    GFP_ATOMIC);		if (!reply_skb) {			printk("do_abort_req_rss: couldn't get skb!\n");			goto out;		}		reply_skb->priority = CPL_PRIORITY_DATA;		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));		rpl = cplhdr(reply_skb);		rpl->wr.wr_hi =		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));		rpl->wr.wr_lo = htonl(V_WR_TID(tid));		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));		rpl->cmd = cmd;		cxgb3_ofld_send(dev, reply_skb);out:		return CPL_RET_BUF_DONE;	}}static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb){	struct cpl_act_establish *req = cplhdr(skb);	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;	struct t3c_tid_entry *t3c_tid;	unsigned int tid = GET_TID(req);	if (unlikely(tid >= t->ntids)) {		printk("%s: active establish TID %u too large\n",		       dev->name, tid);		t3_fatal_err(tdev2adap(dev));		return 
CPL_RET_BUF_DONE;	}	t3c_tid = lookup_atid(t, atid);	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]		    (dev, skb, t3c_tid->ctx);	} else {		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",		       dev->name, CPL_ACT_ESTABLISH);		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;	}}static int do_trace(struct t3cdev *dev, struct sk_buff *skb){	struct cpl_trace_pkt *p = cplhdr(skb);	skb->protocol = htons(0xffff);	skb->dev = dev->lldev;	skb_pull(skb, sizeof(*p));	skb_reset_mac_header(skb);	netif_receive_skb(skb);	return 0;}static int do_term(struct t3cdev *dev, struct sk_buff *skb){	unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;	unsigned int opcode = G_OPCODE(ntohl(skb->csum));	struct t3c_tid_entry *t3c_tid;	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&	    t3c_tid->client->handlers[opcode]) {		return t3c_tid->client->handlers[opcode] (dev, skb,							  t3c_tid->ctx);	} else {		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",		       dev->name, opcode);		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;	}}static int nb_callback(struct notifier_block *self, unsigned long event,		       void *ctx){	switch (event) {	case (NETEVENT_NEIGH_UPDATE):{		cxgb_neigh_update((struct neighbour *)ctx);		break;	}	case (NETEVENT_PMTU_UPDATE):		break;	case (NETEVENT_REDIRECT):{		struct netevent_redirect *nr = ctx;		cxgb_redirect(nr->old, nr->new);

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?