tehuti.c

来自「linux 内核源代码」· C语言 代码 · 共 2,340 行 · 第 1/5 页

C
2,340
字号
	BDX_ASSERT(*pptr < db->start ||	/* pointer has to be */
		   *pptr >= db->end);	/* in range */

	++*pptr;
	if (unlikely(*pptr == db->end))
		*pptr = db->start;	/* wrap around past the last element */
}

/* bdx_tx_db_inc_rptr - increment read pointer
 * @db - tx data base
 *
 * Precondition: the db must not be empty (asserted below in debug builds).
 */
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
	BDX_ASSERT(db->rptr == db->wptr);	/* can't read from empty db */
	__bdx_tx_db_ptr_next(db, &db->rptr);
}

/* bdx_tx_db_inc_wptr - increment write pointer
 * @db - tx data base
 */
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
	__bdx_tx_db_ptr_next(db, &db->wptr);
	BDX_ASSERT(db->rptr == db->wptr);	/* we can not get empty db as
						   a result of write */
}

/* bdx_tx_db_init - creates and initializes tx db
 * @d       - tx data base
 * @sz_type - size of tx fifo (used as a power-of-two scale factor)
 * Returns 0 on success, error code otherwise
 */
static int bdx_tx_db_init(struct txdb *d, int sz_type)
{
	int memsz = FIFO_SIZE * (1 << (sz_type + 1));

	d->start = vmalloc(memsz);
	if (!d->start)
		return -ENOMEM;

	/*
	 * In order to differentiate between db is empty and db is full
	 * states at least one element should always be empty in order to
	 * avoid rptr == wptr which means db is empty
	 */
	d->size = memsz / sizeof(struct tx_map) - 1;
	d->end = d->start + d->size + 1;	/* just after last element */

	/* all dbs are created equally empty */
	d->rptr = d->start;
	d->wptr = d->start;

	return 0;
}

/* bdx_tx_db_close - closes tx db and frees all memory
 * @d - tx data base
 */
static void bdx_tx_db_close(struct txdb *d)
{
	BDX_ASSERT(d == NULL);

	if (d->start) {
		vfree(d->start);
		d->start = NULL;	/* guard against double free */
	}
}

/*************************************************************************
 *     Tx Engine                                                         *
 *************************************************************************/

/* sizes of tx desc (including padding if needed) as function
 * of skb's frag number
 */
static struct {
	u16 bytes;
	u16 qwords;		/* qword = 64 bit */
} txd_sizes[MAX_SKB_FRAGS + 1];

/* txdb_map_skb - creates and stores dma mappings for skb's
 data blocks
 * @priv - NIC private structure
 * @skb  - socket buffer to map
 * @txdd - tx descriptor whose PBL entries are filled in
 *
 * It makes dma mappings for skb's data blocks and writes them to PBL of
 * new tx descriptor. It also stores them in the tx db, so they could be
 * unmapped after data was sent. It is responsibility of a caller to make
 * sure that there is enough space in the tx db. Last element holds pointer
 * to skb itself and marked with zero length
 */
static inline void
bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
	       struct txd_desc *txdd)
{
	struct txdb *db = &priv->txdb;
	struct pbl *pbl = &txdd->pbl[0];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* map the linear part of the skb; record the mapping both in the
	 * db (for later unmap) and in the descriptor's first PBL entry */
	db->wptr->len = skb->len - skb->data_len;
	db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
					    db->wptr->len, PCI_DMA_TODEVICE);
	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
	pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
	pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
	DBG("=== pbl   len: 0x%x ================\n", pbl->len);
	DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
	DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
	bdx_tx_db_inc_wptr(db);

	/* map every paged fragment into its own PBL entry */
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];
		db->wptr->len = frag->size;
		db->wptr->addr.dma =
		    pci_map_page(priv->pdev, frag->page, frag->page_offset,
				 frag->size, PCI_DMA_TODEVICE);

		pbl++;
		pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
		pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
		pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
		bdx_tx_db_inc_wptr(db);
	}

	/* add skb clean up info.
	 * The negative length marks the db entry that carries the skb
	 * pointer (for freeing at cleanup) rather than a dma mapping. */
	db->wptr->len = -txd_sizes[nr_frags].bytes;
	db->wptr->addr.skb = skb;
	bdx_tx_db_inc_wptr(db);
}

/* init_txd_sizes - precalculate sizes of descriptors for skbs up to 16 frags
 * number of frags is used as index to fetch correct descriptors size,
 * instead of calculating it each time
 */
static void __init init_txd_sizes(void)
{
	int i, lwords;

	/* 7 - is number of lwords in txd with one phys buffer
	 * 3 - is number of lwords used for every additional phys buffer */
	for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
		lwords = 7 + (i * 3);
		if (lwords & 1)
			lwords++;	/* pad it with 1 lword */
		txd_sizes[i].qwords = lwords >> 1;
		txd_sizes[i].bytes = lwords << 2;
	}
}

/* bdx_tx_init - initialize all Tx related stuff.
 * Namely, TXD and TXF fifos, database etc
 * @priv - NIC private structure
 * Returns 0 on success, -ENOMEM otherwise.
 *
 * NOTE(review): if the second bdx_fifo_init or bdx_tx_db_init fails, the
 * fifos initialized earlier are not freed here — presumably the caller
 * performs the teardown (e.g. via bdx_tx_free); verify against callers.
 */
static int bdx_tx_init(struct bdx_priv *priv)
{
	if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
			  regTXD_CFG0_0,
			  regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
			  regTXF_CFG0_0,
			  regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
		goto err_mem;

	/* The TX db has to keep mappings for all packets sent (on TxD)
	 * and not yet reclaimed (on TxF) */
	if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
		goto err_mem;

	priv->tx_level = BDX_MAX_TX_LEVEL;
#ifdef BDX_DELAY_WPTR
	priv->tx_update_mark = priv->tx_level - 1024;
#endif
	return 0;

err_mem:
	ERR("tehuti: %s: Tx init failed\n", priv->ndev->name);
	return -ENOMEM;
}

/*
 * bdx_tx_space - calculates available space in TX fifo
 * @priv - NIC private structure
 * Returns available space in TX fifo in bytes
 */
static inline int bdx_tx_space(struct bdx_priv *priv)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int fsize;

	/* refresh the read pointer from hardware before computing space */
	f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
	fsize = f->m.rptr - f->m.wptr;
	if (fsize <= 0)
		fsize = f->m.memsz + fsize;	/* fifo has wrapped */
	return (fsize);
}

/* bdx_tx_transmit - send packet to NIC
 * @skb - packet to send
 * ndev - network device assigned to NIC
 * Return codes:
 * o
 NETDEV_TX_OK everything ok.
 * o NETDEV_TX_BUSY Cannot transmit packet, try later
 *   Usually a bug, means queue start/stop flow control is broken in
 *   the driver. Note: the driver must NOT put the skb in its DMA ring.
 * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
 */
static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct bdx_priv *priv = ndev->priv;
	struct txd_fifo *f = &priv->txd_fifo0;
	int txd_checksum = 7;	/* full checksum */
	int txd_lgsnd = 0;
	int txd_vlan_id = 0;
	int txd_vtag = 0;
	int txd_mss = 0;

	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct txd_desc *txdd;
	int len;
	unsigned long flags;

	ENTER;
	/* LLTX-style locking: try the private tx lock with irqs disabled;
	 * on contention report NETDEV_TX_LOCKED so the stack retries */
	local_irq_save(flags);
	if (!spin_trylock(&priv->tx_lock)) {
		local_irq_restore(flags);
		DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
		    BDX_DRV_NAME, ndev->name);
		return NETDEV_TX_LOCKED;
	}

	/* build tx descriptor */
	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
	txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
		txd_checksum = 0;	/* no hw checksum offload requested */

	if (skb_shinfo(skb)->gso_size) {
		/* large-send (TSO): pass the MSS to the hardware */
		txd_mss = skb_shinfo(skb)->gso_size;
		txd_lgsnd = 1;
		DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
		    txd_mss);
	}

	if (vlan_tx_tag_present(skb)) {
		/*Cut VLAN ID to 12 bits */
		txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
		txd_vtag = 1;
	}

	txdd->length = CPU_CHIP_SWAP16(skb->len);
	txdd->mss = CPU_CHIP_SWAP16(txd_mss);
	txdd->txd_val1 =
	    CPU_CHIP_SWAP32(TXD_W1_VAL
			    (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
			     txd_lgsnd, txd_vlan_id));
	DBG("=== TxD desc =====================\n");
	DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
	DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);

	bdx_tx_map_skb(priv, skb, txdd);

	/* increment TXD write pointer.
 In case of
	   fifo wrapping copy reminder of the descriptor
	   to the beginning */
	f->m.wptr += txd_sizes[nr_frags].bytes;
	len = f->m.wptr - f->m.memsz;
	if (unlikely(len >= 0)) {
		/* descriptor straddled the fifo end: wrap the write pointer
		 * and move the overhanging tail back to the fifo start */
		f->m.wptr = len;
		if (len > 0) {
			BDX_ASSERT(len > f->m.memsz);
			memcpy(f->m.va, f->m.va + f->m.memsz, len);
		}
	}
	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* finished with valid wptr */

	priv->tx_level -= txd_sizes[nr_frags].bytes;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	/* delayed WPTR update: only tell hw about new descriptors when the
	 * fifo is filling up or enough packets have been batched */
	if (priv->tx_level > priv->tx_update_mark) {
		/* Force memory writes to complete before letting h/w
		   know there are new descriptors to fetch.
		   (might be needed on platforms like IA64)
		   wmb(); */
		WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	} else {
		if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
			priv->tx_noupd = 0;
			WRITE_REG(priv, f->m.reg_WPTR,
				  f->m.wptr & TXF_WPTR_WR_PTR);
		}
	}
#else
	/* Force memory writes to complete before letting h/w
	   know there are new descriptors to fetch.
	   (might be needed on platforms like IA64)
	   wmb(); */
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
#endif
	ndev->trans_start = jiffies;

	priv->net_stats.tx_packets++;
	priv->net_stats.tx_bytes += skb->len;

	if (priv->tx_level < BDX_MIN_TX_LEVEL) {
		/* fifo nearly full - stop the queue until cleanup runs */
		DBG("%s: %s: TX Q STOP level %d\n",
		    BDX_DRV_NAME, ndev->name, priv->tx_level);
		netif_stop_queue(ndev);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}

/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
 * @priv - bdx adapter
 * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
 * that those packets were sent
 */
static void bdx_tx_cleanup(struct bdx_priv *priv)
{
	struct txf_fifo *f = &priv->txf_fifo0;
	struct txdb *db = &priv->txdb;
	int tx_level = 0;

	ENTER;
	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
	BDX_ASSERT(f->m.rptr >= f->m.memsz);	/* started with valid rptr */

	while (f->m.wptr != f->m.rptr) {
		f->m.rptr += BDX_TXF_DESC_SZ;
		f->m.rptr &= f->m.size_mask;

		/* unmap all the fragments */
		/* first has to come tx_maps containing dma */
		BDX_ASSERT(db->rptr->len == 0);
		do {
			BDX_ASSERT(db->rptr->addr.dma == 0);
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
			bdx_tx_db_inc_rptr(db);
		} while (db->rptr->len > 0);
		tx_level -= db->rptr->len;	/* '-' koz len is negative */

		/* now should come skb pointer - free it */
		dev_kfree_skb_irq(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}

	/* let h/w know which TXF descriptors were cleaned */
	BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	/* We reclaimed resources, so in case the Q is stopped by xmit
	 * callback, we resume the transmission and use tx_lock to
	 * synchronize with xmit. */
	spin_lock(&priv->tx_lock);
	priv->tx_level += tx_level;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	/* flush any write pointer update that xmit deferred */
	if (priv->tx_noupd) {
		priv->tx_noupd = 0;
		WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
			  priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
	}
#endif
	if (unlikely(netif_queue_stopped(priv->ndev)
		     && netif_carrier_ok(priv->ndev)
		     && (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
		DBG("%s: %s: TX Q WAKE level %d\n",
		    BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
		netif_wake_queue(priv->ndev);
	}
	spin_unlock(&priv->tx_lock);
}

/* bdx_tx_free_skbs - frees all skbs from TXD fifo.
 * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod
 */
static void bdx_tx_free_skbs(struct bdx_priv *priv)
{
	struct txdb *db = &priv->txdb;

	ENTER;
	/* drain the db: entries with len != 0 are dma mappings, the
	 * zero/negative-length entry carries the skb pointer to free */
	while (db->rptr != db->wptr) {
		if (likely(db->rptr->len))
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
		else
			dev_kfree_skb(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}
	RET();
}

/* bdx_tx_free - frees all Tx resources */
static void bdx_tx_free(struct bdx_priv *priv)
{
	ENTER;
	bdx_tx_free_skbs(priv);
	bdx_fifo_free(priv, &priv->txd_fifo0.m);
	bdx_fifo_free(priv, &priv->txf_fifo0.m);
	bdx_tx_db_close(&priv->txdb);
}

/* bdx_tx_push_desc - push descriptor to TxD fifo
 * @priv - NIC private structure
 * @data - desc's data
 * @size - desc's size
 *
 * Pushes desc to TxD fifo and overlaps it if needed.
 * NOTE: this func does not check for available space. this is responsibility
 *    of the caller. Neither does it check that data size is smaller than
 *    fifo size.
 */
static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int i = f->m.memsz - f->m.wptr;	/* bytes until the fifo end */

	if (size == 0)
		return;

	if (i > size) {
		memcpy(f->m.va + f->m.wptr, data, size);
		f->m.wptr += size;
	} else {
		/* split the copy around the fifo end (wrap) */
		memcpy(f->m.va + f->m.wptr, data, i);
		f->m.wptr = size - i;
		memcpy(f->m.va, data + i, f->m.wptr);
	}
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
}

/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
 * @priv - NIC private structure
 * @data - desc's data
 * @size - desc's size
 *
 * NOTE: this func does check for available space and, if necessary, waits for
 *   NIC to read existing data before writing new one.
 */
static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
{
	int timer = 0;
	ENTER;

	while (size > 0) {
		/* we subtract 8 because when fifo is full rptr == wptr
		   which also means that fifo is empty, we can understand
		   the difference, but could hw do the same ???
 :) */
		int avail = bdx_tx_space(priv) - 8;
		if (avail <= 0) {
			if (timer++ > 300) {	/* prevent endless loop */
				DBG("timeout while writing desc to TxD fifo\n");
				break;
			}
			udelay(50);	/* give hw a chance to clean fifo */
			continue;
		}
		/* push at most what fits right now, then loop for the rest */
		avail = MIN(avail, size);
		DBG("about to push  %d bytes starting %p size %d\n", avail,
		    data, size);
		bdx_tx_push_desc(priv, data, avail);
		size -= avail;
		data += avail;
	}
	RET();
}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?