b44.c
From the Linux kernel source · C code · 2,335 lines · page 1/4
static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                dma_unmap_single(bp->sdev->dev,
                                 rp->mapping,
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}
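/* NEXT_TX() and TX_BUFFS_AVAIL() above come from b44.h. As a sketch of
 * how such power-of-two ring helpers are usually written (illustrative
 * only, not the verbatim b44.h definitions):
 *
 *      #define NEXT_TX(n)          (((n) + 1) & (B44_TX_RING_SIZE - 1))
 *      #define TX_BUFFS_AVAIL(bp)  (((bp)->tx_cons - (bp)->tx_prod - 1) & \
 *                                   (B44_TX_RING_SIZE - 1))
 *
 * One slot is always kept empty, so tx_prod == tx_cons can only mean
 * "ring empty", never "ring full".
 */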
/* Works like this. This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it. So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = dma_map_single(bp->sdev->dev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(mapping) ||
            mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
                        dma_unmap_single(bp->sdev->dev, mapping,
                                         RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ,
                                         GFP_ATOMIC | GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = dma_map_single(bp->sdev->dev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         DMA_FROM_DEVICE);
                if (dma_mapping_error(mapping) ||
                    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                dma_unmap_single(bp->sdev->dev, mapping,
                                                 RX_PKT_BUF_SZ,
                                                 DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        rh = (struct rx_header *) skb->data;
        skb_reserve(skb, RX_PKT_OFFSET);

        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        map->mapping = mapping;

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}
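/* The resulting RX buffer layout, as a sketch (RX_PKT_OFFSET is 30 in
 * this driver; the exact 28-byte-header-plus-2-byte-pad split is taken
 * from b44.h and should be treated as an assumption here):
 *
 *      mapping                            mapping + RX_PKT_OFFSET
 *      |                                  |
 *      v                                  v
 *      [ struct rx_header | 2-byte pad ]  [ packet data ... ]
 *
 * The descriptor address handed to the chip is mapping + RX_PKT_OFFSET
 * (plus bp->dma_offset), and the chip writes the rx_header 30 bytes
 * before that address, i.e. at 'mapping', which is exactly where
 * skb->data pointed before the skb_reserve() call above.
 */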
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        __le32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        dest_map->mapping = src_map->mapping;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
                                   RX_PKT_BUF_SZ,
                                   DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = rp->mapping;
                struct rx_header *rh;
                u16 len;

                dma_sync_single_for_cpu(bp->sdev->dev, map,
                                        RX_PKT_BUF_SZ,
                                        DMA_FROM_DEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;

                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        dma_unmap_single(bp->sdev->dev, map,
                                         skb_size, DMA_FROM_DEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + RX_PKT_OFFSET);
                        skb_pull(skb, RX_PKT_OFFSET);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
                                                         copy_skb->data, len);
                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

static int b44_poll(struct napi_struct *napi, int budget)
{
        struct b44 *bp = container_of(napi, struct b44, napi);
        struct net_device *netdev = bp->dev;
        int work_done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        work_done = 0;
        if (bp->istat & ISTAT_RX)
                work_done += b44_rx(bp, budget);

        if (bp->istat & ISTAT_ERRORS) {
                unsigned long flags;

                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                work_done = 0;
        }

        if (work_done < budget) {
                netif_rx_complete(netdev, napi);
                b44_enable_ints(bp);
        }

        return work_done;
}
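/* How b44_poll() above and b44_interrupt() below fit together in this
 * (pre-2.6.29 style) NAPI scheme, as a sketch:
 *
 *      b44_interrupt()                   b44_poll()
 *        istat &= imask;                   b44_tx() / b44_rx(budget)
 *        bp->istat = istat;                if (work_done < budget) {
 *        __b44_disable_ints();  ------>            netif_rx_complete();
 *        __netif_rx_schedule();                    b44_enable_ints();
 *                                          }
 *
 * Chip interrupts stay masked for as long as b44_poll() keeps finding
 * work, so sustained traffic is processed by polling rather than by
 * one interrupt per packet.
 */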
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by
         * hw/firmware, but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev, &bp->napi)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev, &bp->napi);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                struct sk_buff *bounce_skb;

                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        dma_unmap_single(bp->sdev->dev, mapping, len,
                                         DMA_TO_DEVICE);

                bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
                                         len, DMA_TO_DEVICE);
                if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                dma_unmap_single(bp->sdev->dev, mapping,
                                                 len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        bp->tx_buffers[entry].mapping = mapping;

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}
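/* On the B44_FLAG_RX_RING_HACK / B44_FLAG_TX_RING_HACK paths seen
 * above and below: when those flags are set, the descriptor rings were
 * not obtained from dma_alloc_coherent() but from ordinary kmalloc()
 * memory placed under a streaming DMA mapping (hence the kfree() +
 * dma_unmap_single() pairing in b44_free_consistent()). Streaming
 * mappings give the device no coherent view of the memory, so every
 * CPU-side descriptor update has to be pushed out explicitly, which is
 * what the b44_sync_dma_desc_for_device()/_for_cpu() calls do.
 */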
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
                                          bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
                                          bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}
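/* A closing note on the 1GB DMA work-around in b44_alloc_rx_skb() and
 * b44_start_xmit() above: DMA_30BIT_MASK is 0x3fffffff, i.e. this chip
 * can only reach bus addresses in the low 1GB. Both paths follow the
 * same pattern: map the buffer, check dma_mapping_error() and the
 * mask, and if the mapping is unusable, retry with a GFP_DMA
 * allocation (drawn from the low DMA zone, so it satisfies the mask)
 * used as a bounce buffer that the packet data is copied through.
 */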