/*
 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
*/#include <linux/list.h>#include <net/neighbour.h>#include <linux/notifier.h>#include <asm/atomic.h>#include <linux/proc_fs.h>#include <linux/if_vlan.h>#include <net/netevent.h>#include <linux/highmem.h>#include <linux/vmalloc.h>#include "common.h"#include "regs.h"#include "cxgb3_ioctl.h"#include "cxgb3_ctl_defs.h"#include "cxgb3_defs.h"#include "l2t.h"#include "firmware_exports.h"#include "cxgb3_offload.h"static LIST_HEAD(client_list);static LIST_HEAD(ofld_dev_list);static DEFINE_MUTEX(cxgb3_db_lock);static DEFINE_RWLOCK(adapter_list_lock);static LIST_HEAD(adapter_list);static const unsigned int MAX_ATIDS = 64 * 1024;static const unsigned int ATID_BASE = 0x10000;static inline int offload_activated(struct t3cdev *tdev){	const struct adapter *adapter = tdev2adap(tdev);	return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));}/** *	cxgb3_register_client - register an offload client *	@client: the client * *	Add the client to the client list, *	and call backs the client for each activated offload device */void cxgb3_register_client(struct cxgb3_client *client){	struct t3cdev *tdev;	mutex_lock(&cxgb3_db_lock);	list_add_tail(&client->client_list, &client_list);	if (client->add) {		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {			if (offload_activated(tdev))				client->add(tdev);		}	}	mutex_unlock(&cxgb3_db_lock);}EXPORT_SYMBOL(cxgb3_register_client);/** *	cxgb3_unregister_client - unregister an offload client *	@client: the client * *	Remove the client to the client list, *	and call backs the client for each activated offload device. 
*/void cxgb3_unregister_client(struct cxgb3_client *client){	struct t3cdev *tdev;	mutex_lock(&cxgb3_db_lock);	list_del(&client->client_list);	if (client->remove) {		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {			if (offload_activated(tdev))				client->remove(tdev);		}	}	mutex_unlock(&cxgb3_db_lock);}EXPORT_SYMBOL(cxgb3_unregister_client);/** *	cxgb3_add_clients - activate registered clients for an offload device *	@tdev: the offload device * *	Call backs all registered clients once a offload device is activated */void cxgb3_add_clients(struct t3cdev *tdev){	struct cxgb3_client *client;	mutex_lock(&cxgb3_db_lock);	list_for_each_entry(client, &client_list, client_list) {		if (client->add)			client->add(tdev);	}	mutex_unlock(&cxgb3_db_lock);}/** *	cxgb3_remove_clients - deactivates registered clients *			       for an offload device *	@tdev: the offload device * *	Call backs all registered clients once a offload device is deactivated */void cxgb3_remove_clients(struct t3cdev *tdev){	struct cxgb3_client *client;	mutex_lock(&cxgb3_db_lock);	list_for_each_entry(client, &client_list, client_list) {		if (client->remove)			client->remove(tdev);	}	mutex_unlock(&cxgb3_db_lock);}static struct net_device *get_iff_from_mac(struct adapter *adapter,					   const unsigned char *mac,					   unsigned int vlan){	int i;	for_each_port(adapter, i) {		struct vlan_group *grp;		struct net_device *dev = adapter->port[i];		const struct port_info *p = netdev_priv(dev);		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {			if (vlan && vlan != VLAN_VID_MASK) {				grp = p->vlan_grp;				dev = NULL;				if (grp)					dev = vlan_group_get_device(grp, vlan);			} else				while (dev->master)					dev = dev->master;			return dev;		}	}	return NULL;}static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,			      void *data){	int ret = 0;	struct ulp_iscsi_info *uiip = data;	switch (req) {	case ULP_ISCSI_GET_PARAMS:		uiip->pdev = adapter->pdev;		uiip->llimit = t3_read_reg(adapter, 
A_ULPRX_ISCSI_LLIMIT);		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);		/*		 * On tx, the iscsi pdu has to be <= tx page size and has to		 * fit into the Tx PM FIFO.		 */		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,				     t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);		/* on rx, the iscsi pdu has to be < rx page size and the		   whole pdu + cpl headers has to fit into one sge buffer */		uiip->max_rxsz = min_t(unsigned int,				       adapter->params.tp.rx_pg_size,				       (adapter->sge.qs[0].fl[1].buf_size -					sizeof(struct cpl_rx_data) * 2 -					sizeof(struct cpl_rx_data_ddp)));		break;	case ULP_ISCSI_SET_PARAMS:		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);		break;	default:		ret = -EOPNOTSUPP;	}	return ret;}/* Response queue used for RDMA events. */#define ASYNC_NOTIF_RSPQ 0static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data){	int ret = 0;	switch (req) {	case RDMA_GET_PARAMS: {		struct rdma_info *rdma = data;		struct pci_dev *pdev = adapter->pdev;		rdma->udbell_physbase = pci_resource_start(pdev, 2);		rdma->udbell_len = pci_resource_len(pdev, 2);		rdma->tpt_base =			t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);		rdma->pbl_base =			t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;		rdma->pdev = pdev;		break;	}	case RDMA_CQ_OP:{		unsigned long flags;		struct rdma_cq_op *rdma = data;		/* may be called in any context */		spin_lock_irqsave(&adapter->sge.reg_lock, flags);		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,					rdma->credits);		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);		break;	}	case RDMA_GET_MEM:{		struct ch_mem_range *t = data;		struct mc7 *mem;	
	if ((t->addr & 7) || (t->len & 7))			return -EINVAL;		if (t->mem_id == MEM_CM)			mem = &adapter->cm;		else if (t->mem_id == MEM_PMRX)			mem = &adapter->pmrx;		else if (t->mem_id == MEM_PMTX)			mem = &adapter->pmtx;		else			return -EINVAL;		ret =			t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,					(u64 *) t->buf);		if (ret)			return ret;		break;	}	case RDMA_CQ_SETUP:{		struct rdma_cq_setup *rdma = data;		spin_lock_irq(&adapter->sge.reg_lock);		ret =			t3_sge_init_cqcntxt(adapter, rdma->id,					rdma->base_addr, rdma->size,					ASYNC_NOTIF_RSPQ,					rdma->ovfl_mode, rdma->credits,					rdma->credit_thres);		spin_unlock_irq(&adapter->sge.reg_lock);		break;	}	case RDMA_CQ_DISABLE:		spin_lock_irq(&adapter->sge.reg_lock);		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);		spin_unlock_irq(&adapter->sge.reg_lock);		break;	case RDMA_CTRL_QP_SETUP:{		struct rdma_ctrlqp_setup *rdma = data;		spin_lock_irq(&adapter->sge.reg_lock);		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,						SGE_CNTXT_RDMA,						ASYNC_NOTIF_RSPQ,						rdma->base_addr, rdma->size,						FW_RI_TID_START, 1, 0);		spin_unlock_irq(&adapter->sge.reg_lock);		break;	}	default:		ret = -EOPNOTSUPP;	}	return ret;}static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data){	struct adapter *adapter = tdev2adap(tdev);	struct tid_range *tid;	struct mtutab *mtup;	struct iff_mac *iffmacp;	struct ddp_params *ddpp;	struct adap_ports *ports;	struct ofld_page_info *rx_page_info;	struct tp_params *tp = &adapter->params.tp;	int i;	switch (req) {	case GET_MAX_OUTSTANDING_WR:		*(unsigned int *)data = FW_WR_NUM;		break;	case GET_WR_LEN:		*(unsigned int *)data = WR_FLITS;		break;	case GET_TX_MAX_CHUNK:		*(unsigned int *)data = 1 << 20;	/* 1MB */		break;	case GET_TID_RANGE:		tid = data;		tid->num = t3_mc5_size(&adapter->mc5) -		    adapter->params.mc5.nroutes -		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;		tid->base = 0;		break;	case GET_STID_RANGE:		tid = data;		tid->num 
= adapter->params.mc5.nservers;		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;		break;	case GET_L2T_CAPACITY:		*(unsigned int *)data = 2048;		break;	case GET_MTUS:		mtup = data;		mtup->size = NMTUS;		mtup->mtus = adapter->params.mtus;		break;	case GET_IFF_FROM_MAC:		iffmacp = data;		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,						iffmacp->vlan_tag &						VLAN_VID_MASK);		break;	case GET_DDP_PARAMS:		ddpp = data;		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);		break;	case GET_PORTS:		ports = data;		ports->nports = adapter->params.nports;		for_each_port(adapter, i)			ports->lldevs[i] = adapter->port[i];		break;	case ULP_ISCSI_GET_PARAMS:	case ULP_ISCSI_SET_PARAMS:		if (!offload_running(adapter))			return -EAGAIN;		return cxgb_ulp_iscsi_ctl(adapter, req, data);	case RDMA_GET_PARAMS:	case RDMA_CQ_OP:	case RDMA_CQ_SETUP:	case RDMA_CQ_DISABLE:	case RDMA_CTRL_QP_SETUP:	case RDMA_GET_MEM:		if (!offload_running(adapter))			return -EAGAIN;		return cxgb_rdma_ctl(adapter, req, data);	case GET_RX_PAGE_INFO:		rx_page_info = data;		rx_page_info->page_size = tp->rx_pg_size;		rx_page_info->num = tp->rx_num_pgs;		break;	default:		return -EOPNOTSUPP;	}	return 0;}/* * Dummy handler for Rx offload packets in case we get an offload packet before * proper processing is setup.  This complains and drops the packet as it isn't * normal to get offload packets at this stage. 
*/static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,				int n){	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",	       n, ntohl(*(__be32 *)skbs[0]->data));	while (n--)		dev_kfree_skb_any(skbs[n]);	return 0;}static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh){}void cxgb3_set_dummy_ops(struct t3cdev *dev){	dev->recv = rx_offload_blackhole;	dev->neigh_update = dummy_neigh_update;}/* * Free an active-open TID. */void *cxgb3_free_atid(struct t3cdev *tdev, int atid){	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
