pasemi_mac.c
/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <asm/irq.h>
#include <asm/firmware.h>

#include "pasemi_mac.h"

/* We have our own align, since ppc64 in general has it at 0 because
 * of design flaws in some of the server bridge chips. However, for
 * PWRficient doing the unaligned copies is more expensive than doing
 * unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN	2

/* TODO list
 *
 * - Multicast support
 * - Large MTU support
 * - SW LRO
 * - Multiqueue RX/TX
 */

/* Must be a power of two */
#define RX_RING_SIZE 4096
#define TX_RING_SIZE 4096

#define DEFAULT_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TX_RING(mac, num)	((mac)->tx->ring[(num) & (TX_RING_SIZE-1)])
#define TX_RING_INFO(mac, num)	((mac)->tx->ring_info[(num) & (TX_RING_SIZE-1)])
#define RX_RING(mac, num)	((mac)->rx->ring[(num) & (RX_RING_SIZE-1)])
#define RX_RING_INFO(mac, num)	((mac)->rx->ring_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(mac, num)	((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])

#define RING_USED(ring)		(((ring)->next_to_fill - (ring)->next_to_clean) \
				 & ((ring)->size - 1))
#define RING_AVAIL(ring)	((ring->size) - RING_USED(ring))

#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */

MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;	/* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");

static struct pasdma_status *dma_status;

static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	return 1;
#else
	return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}

static void write_iob_reg(struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	out_le32(mac->iob_regs+reg, val);
}

static unsigned int read_mac_reg(struct pasemi_mac *mac, unsigned int reg)
{
	return in_le32(mac->regs+reg);
}

static void write_mac_reg(struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	out_le32(mac->regs+reg, val);
}

static unsigned int read_dma_reg(struct pasemi_mac *mac, unsigned int reg)
{
	return in_le32(mac->dma_regs+reg);
}

static void write_dma_reg(struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	out_le32(mac->dma_regs+reg, val);
}

static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	int len;
	const u8 *maddr;
	u8 addr[6];

	if (!dn) {
		dev_dbg(&pdev->dev,
			"No device node for mac, not configuring\n");
		return -ENOENT;
	}

	maddr = of_get_property(dn, "local-mac-address", &len);

	if (maddr && len == 6) {
		memcpy(mac->mac_addr, maddr, 6);
		return 0;
	}

	/* Some old versions of firmware mistakenly use mac-address
	 * (and as a string) instead of a byte array in local-mac-address.
	 */
	if (maddr == NULL)
		maddr = of_get_property(dn, "mac-address", NULL);

	if (maddr == NULL) {
		dev_warn(&pdev->dev,
			 "no mac address in device tree, not configuring\n");
		return -ENOENT;
	}

	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
		dev_warn(&pdev->dev,
			 "can't parse mac address, not configuring\n");
		return -EINVAL;
	}

	memcpy(mac->mac_addr, addr, 6);

	return 0;
}

static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
				   struct sk_buff *skb,
				   dma_addr_t *dmas)
{
	int f;
	int nfrags = skb_shinfo(skb)->nr_frags;

	pci_unmap_single(mac->dma_pdev, dmas[0], skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	for (f = 0; f < nfrags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		pci_unmap_page(mac->dma_pdev, dmas[f+1], frag->size,
			       PCI_DMA_TODEVICE);
	}
	dev_kfree_skb_irq(skb);

	/* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
	 * aligned up to a power of 2
	 */
	return (nfrags + 3) & ~1;
}

static int pasemi_mac_setup_rx_resources(struct net_device *dev)
{
	struct pasemi_mac_rxring *ring;
	struct pasemi_mac *mac = netdev_priv(dev);
	int chan_id = mac->dma_rxch;
	unsigned int cfg;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out_ring;

	spin_lock_init(&ring->lock);

	ring->size = RX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  RX_RING_SIZE,
				  GFP_KERNEL);

	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev,
					RX_RING_SIZE * sizeof(u64),
					&ring->dma, GFP_KERNEL);

	if (!ring->ring)
		goto out_ring_desc;

	memset(ring->ring, 0, RX_RING_SIZE * sizeof(u64));

	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
					   RX_RING_SIZE * sizeof(u64),
					   &ring->buf_dma, GFP_KERNEL);
	if (!ring->buffers)
		goto out_buffers;

	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

	write_dma_reg(mac, PAS_DMA_RXCHAN_BASEL(chan_id),
		      PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));

	write_dma_reg(mac, PAS_DMA_RXCHAN_BASEU(chan_id),
		      PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
		      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

	if (translation_enabled())
		cfg |= PAS_DMA_RXCHAN_CFG_CTR;

	write_dma_reg(mac, PAS_DMA_RXCHAN_CFG(chan_id), cfg);

	write_dma_reg(mac, PAS_DMA_RXINT_BASEL(mac->dma_if),
		      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

	write_dma_reg(mac, PAS_DMA_RXINT_BASEU(mac->dma_if),
		      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
		      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXINT_CFG_DHL(3) | PAS_DMA_RXINT_CFG_L2 |
	      PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
	      PAS_DMA_RXINT_CFG_HEN;

	if (translation_enabled())
		cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

	write_dma_reg(mac, PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s rx", dev->name);
	mac->rx = ring;

	return 0;

out_buffers:
	dma_free_coherent(&mac->dma_pdev->dev,
			  RX_RING_SIZE * sizeof(u64),
			  mac->rx->ring, mac->rx->dma);
out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	kfree(ring);
out_ring:
	return -ENOMEM;
}
static int pasemi_mac_setup_tx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	u32 val;
	int chan_id = mac->dma_txch;
	struct pasemi_mac_txring *ring;
	unsigned int cfg;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out_ring;

	spin_lock_init(&ring->lock);

	ring->size = TX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  TX_RING_SIZE,
				  GFP_KERNEL);
	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev,
					TX_RING_SIZE * sizeof(u64),
					&ring->dma, GFP_KERNEL);
	if (!ring->ring)
		goto out_ring_desc;

	memset(ring->ring, 0, TX_RING_SIZE * sizeof(u64));

	write_dma_reg(mac, PAS_DMA_TXCHAN_BASEL(chan_id),
		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

	write_dma_reg(mac, PAS_DMA_TXCHAN_BASEU(chan_id), val);

	cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
	      PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
	      PAS_DMA_TXCHAN_CFG_UP |
	      PAS_DMA_TXCHAN_CFG_WT(2);

	if (translation_enabled())
		cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

	write_dma_reg(mac, PAS_DMA_TXCHAN_CFG(chan_id), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s tx", dev->name);
	mac->tx = ring;

	return 0;

out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	kfree(ring);
out_ring:
	return -ENOMEM;
}

static void pasemi_mac_free_tx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int i, j;
	struct pasemi_mac_buffer *info;
	dma_addr_t dmas[MAX_SKB_FRAGS+1];
	int freed;
	int start, limit;

	start = mac->tx->next_to_clean;
	limit = mac->tx->next_to_fill;

	/* Compensate for when fill has wrapped and clean has not */
	if (start > limit)
		limit += TX_RING_SIZE;

	for (i = start; i < limit; i += freed) {
		info = &TX_RING_INFO(mac, i+1);
		if (info->dma && info->skb) {
			for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++)
				dmas[j] = TX_RING_INFO(mac, i+1+j).dma;
			freed = pasemi_mac_unmap_tx_skb(mac, info->skb, dmas);
		} else
			freed = 2;
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		TX_RING(mac, i) = 0;

	dma_free_coherent(&mac->dma_pdev->dev,
			  TX_RING_SIZE * sizeof(u64),
			  mac->tx->ring, mac->tx->dma);

	kfree(mac->tx->ring_info);
	kfree(mac->tx);
	mac->tx = NULL;
}

static void pasemi_mac_free_rx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int i;
	struct pasemi_mac_buffer *info;

	for (i = 0; i < RX_RING_SIZE; i++) {
		info = &RX_RING_INFO(mac, i);
		if (info->skb && info->dma) {
			pci_unmap_single(mac->dma_pdev,
					 info->dma,
					 info->skb->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(info->skb);
		}
		info->dma = 0;
		info->skb = NULL;
	}

	for (i = 0; i < RX_RING_SIZE; i++)
		RX_RING(mac, i) = 0;

	dma_free_coherent(&mac->dma_pdev->dev,
			  RX_RING_SIZE * sizeof(u64),
			  mac->rx->ring, mac->rx->dma);

	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
			  mac->rx->buffers, mac->rx->buf_dma);

	kfree(mac->rx->ring_info);
	kfree(mac->rx);
	mac->rx = NULL;
}

static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int fill, count;

	if (limit <= 0)
		return;

	fill = mac->rx->next_to_fill;
	for (count = 0; count < limit; count++) {
		struct pasemi_mac_buffer *info = &RX_RING_INFO(mac, fill);
		u64 *buff = &RX_BUFF(mac, fill);
		struct sk_buff *skb;
		dma_addr_t dma;

		/* Entry in use? */
		WARN_ON(*buff);

		/* skb might still be in there for recycle on short receives */
		if (info->skb)
			skb = info->skb;
		else {
			skb = dev_alloc_skb(BUF_SIZE);
			skb_reserve(skb, LOCAL_SKB_ALIGN);
		}

		if (unlikely(!skb))
			break;

		dma = pci_map_single(mac->dma_pdev, skb->data,
				     BUF_SIZE - LOCAL_SKB_ALIGN,
				     PCI_DMA_FROMDEVICE);

		if (unlikely(dma_mapping_error(dma))) {
			dev_kfree_skb_irq(info->skb);
			break;
		}

		info->skb = skb;
		info->dma = dma;
		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
		fill++;
	}

	wmb();

	write_dma_reg(mac, PAS_DMA_RXINT_INCR(mac->dma_if), count);

	mac->rx->next_to_fill = (mac->rx->next_to_fill + count) &
				(RX_RING_SIZE - 1);
}

static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;
	/* Re-enable packet count interrupts: finally
	 * ack the packet count interrupt we got in rx_intr.
	 */

	pcnt = *mac->rx_status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

	write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
}

static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;

	/* Re-enable packet count interrupts */
	pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg);
}
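Note on the ring accounting: the RING_USED()/RING_AVAIL() macros above, and the wrap compensation in pasemi_mac_free_tx_resources(), all depend on the ring sizes being powers of two. The standalone sketch below (not part of the driver; struct ring, ring_used() and ring_avail() are illustrative names only) shows why masking the producer/consumer difference with size - 1 yields the correct occupancy even after the indices wrap.

/*
 * Illustrative userspace sketch of the power-of-two ring accounting.
 * Assumes only that size is a power of two and the ring is never
 * over-filled; mirrors the next_to_fill/next_to_clean bookkeeping above.
 */
#include <stdio.h>

struct ring {
	unsigned int next_to_fill;	/* producer index */
	unsigned int next_to_clean;	/* consumer index */
	unsigned int size;		/* must be a power of two */
};

/* Same arithmetic as RING_USED(): difference masked to the ring size */
static unsigned int ring_used(const struct ring *r)
{
	return (r->next_to_fill - r->next_to_clean) & (r->size - 1);
}

/* Same arithmetic as RING_AVAIL() */
static unsigned int ring_avail(const struct ring *r)
{
	return r->size - ring_used(r);
}

int main(void)
{
	struct ring r = { .next_to_fill = 0, .next_to_clean = 0, .size = 16 };

	r.next_to_fill += 10;	/* produce 10 descriptors */
	r.next_to_clean += 4;	/* consume 4 of them */
	printf("used=%u avail=%u\n", ring_used(&r), ring_avail(&r));	/* used=6 avail=10 */

	/* Indices may run past size; the mask keeps the difference correct. */
	r.next_to_fill += 14;	/* fill is now 24 */
	r.next_to_clean += 12;	/* clean is now 16 */
	printf("used=%u avail=%u\n", ring_used(&r), ring_avail(&r));	/* used=8 avail=8 */

	return 0;
}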