yellowfin.c
From the Linux kernel source tree · C · 1,437 lines total · page 1 of 4
{ struct yellowfin_private *yp = netdev_priv(dev); int i; yp->tx_full = 0; yp->cur_rx = yp->cur_tx = 0; yp->dirty_tx = 0; yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); for (i = 0; i < RX_RING_SIZE; i++) { yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz); yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma + ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc)); } for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz); yp->rx_skbuff[i] = skb; if (skb == NULL) break; skb->dev = dev; /* Mark as being used by this device. */ skb_reserve(skb, 2); /* 16 byte align the IP header. */ yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); } yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);#define NO_TXSTATS#ifdef NO_TXSTATS /* In this mode the Tx ring needs only a single descriptor. */ for (i = 0; i < TX_RING_SIZE; i++) { yp->tx_skbuff[i] = NULL; yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma + ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc)); } /* Wrap ring */ yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);#else{ int j; /* Tx ring needs a pair of descriptors, the second for the status. */ for (i = 0; i < TX_RING_SIZE; i++) { j = 2*i; yp->tx_skbuff[i] = 0; /* Branch on Tx error. */ yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + (j+1)*sizeof(struct yellowfin_desc); j++; if (yp->flags & FullTxStatus) { yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status)); yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status); yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + i*sizeof(struct tx_status_words); } else { /* Symbios chips write only tx_errs word. 
*/ yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2); yp->tx_ring[j].request_cnt = 2; /* Om pade ummmmm... */ yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + i*sizeof(struct tx_status_words) + &(yp->tx_status[0].tx_errs) - &(yp->tx_status[0])); } yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc)); } /* Wrap ring */ yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);}#endif yp->tx_tail_desc = &yp->tx_status[0]; return;}static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev){ struct yellowfin_private *yp = netdev_priv(dev); unsigned entry; int len = skb->len; netif_stop_queue (dev); /* Note: Ordering is important here, set the field with the "ownership" bit last, and only then increment cur_tx. */ /* Calculate the next Tx descriptor entry. */ entry = yp->cur_tx % TX_RING_SIZE; if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */ int cacheline_end = ((unsigned long)skb->data + skb->len) % 32; /* Fix GX chipset errata. */ if (cacheline_end > 24 || cacheline_end == 0) { len = skb->len + 32 - cacheline_end + 1; if (skb_padto(skb, len)) { yp->tx_skbuff[entry] = NULL; netif_wake_queue(dev); return 0; } } } yp->tx_skbuff[entry] = skb;#ifdef NO_TXSTATS yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, skb->data, len, PCI_DMA_TODEVICE)); yp->tx_ring[entry].result_status = 0; if (entry >= TX_RING_SIZE-1) { /* New stop command. 
*/ yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd = cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len); } else { yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->tx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len); } yp->cur_tx++;#else yp->tx_ring[entry<<1].request_cnt = len; yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev, skb->data, len, PCI_DMA_TODEVICE)); /* The input_last (status-write) command is constant, but we must rewrite the subsequent 'stop' command. */ yp->cur_tx++; { unsigned next_entry = yp->cur_tx % TX_RING_SIZE; yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP); } /* Final step -- overwrite the old 'stop' command. */ yp->tx_ring[entry<<1].dbdma_cmd = cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE : CMD_TX_PKT | BRANCH_IFTRUE) | len);#endif /* Non-x86 Todo: explicitly flush cache lines here. */ /* Wake the potentially-idle transmit channel. */ iowrite32(0x10001000, yp->base + TxCtrl); if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) netif_start_queue (dev); /* Typical path */ else yp->tx_full = 1; dev->trans_start = jiffies; if (yellowfin_debug > 4) { printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n", dev->name, yp->cur_tx, entry); } return 0;}/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. 
*/static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance){ struct net_device *dev = dev_instance; struct yellowfin_private *yp; void __iomem *ioaddr; int boguscnt = max_interrupt_work; unsigned int handled = 0; yp = netdev_priv(dev); ioaddr = yp->base; spin_lock (&yp->lock); do { u16 intr_status = ioread16(ioaddr + IntrClear); if (yellowfin_debug > 4) printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n", dev->name, intr_status); if (intr_status == 0) break; handled = 1; if (intr_status & (IntrRxDone | IntrEarlyRx)) { yellowfin_rx(dev); iowrite32(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */ }#ifdef NO_TXSTATS for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) { int entry = yp->dirty_tx % TX_RING_SIZE; struct sk_buff *skb; if (yp->tx_ring[entry].result_status == 0) break; skb = yp->tx_skbuff[entry]; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; /* Free the original skb. */ pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(skb); yp->tx_skbuff[entry] = NULL; } if (yp->tx_full && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) { /* The ring is no longer full, clear tbusy. */ yp->tx_full = 0; netif_wake_queue(dev); }#else if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) { unsigned dirty_tx = yp->dirty_tx; for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0; dirty_tx++) { /* Todo: optimize this. */ int entry = dirty_tx % TX_RING_SIZE; u16 tx_errs = yp->tx_status[entry].tx_errs; struct sk_buff *skb;#ifndef final_version if (yellowfin_debug > 5) printk(KERN_DEBUG "%s: Tx queue %d check, Tx status " "%4.4x %4.4x %4.4x %4.4x.\n", dev->name, entry, yp->tx_status[entry].tx_cnt, yp->tx_status[entry].tx_errs, yp->tx_status[entry].total_tx_cnt, yp->tx_status[entry].paused);#endif if (tx_errs == 0) break; /* It still hasn't been Txed */ skb = yp->tx_skbuff[entry]; if (tx_errs & 0xF810) { /* There was an major error, log it. 
*/#ifndef final_version if (yellowfin_debug > 1) printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n", dev->name, tx_errs);#endif dev->stats.tx_errors++; if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++; if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++; if (tx_errs & 0x2000) dev->stats.tx_window_errors++; if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++; } else {#ifndef final_version if (yellowfin_debug > 4) printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n", dev->name, tx_errs);#endif dev->stats.tx_bytes += skb->len; dev->stats.collisions += tx_errs & 15; dev->stats.tx_packets++; } /* Free the original skb. */ pci_unmap_single(yp->pci_dev, yp->tx_ring[entry<<1].addr, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(skb); yp->tx_skbuff[entry] = 0; /* Mark status as empty. */ yp->tx_status[entry].tx_errs = 0; }#ifndef final_version if (yp->cur_tx - dirty_tx > TX_RING_SIZE) { printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n", dev->name, dirty_tx, yp->cur_tx, yp->tx_full); dirty_tx += TX_RING_SIZE; }#endif if (yp->tx_full && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) { /* The ring is no longer full, clear tbusy. */ yp->tx_full = 0; netif_wake_queue(dev); } yp->dirty_tx = dirty_tx; yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE]; }#endif /* Log errors and other uncommon events. */ if (intr_status & 0x2ee) /* Abnormal error summary. */ yellowfin_error(dev, intr_status); if (--boguscnt < 0) { printk(KERN_WARNING "%s: Too much work at interrupt, " "status=0x%4.4x.\n", dev->name, intr_status); break; } } while (1); if (yellowfin_debug > 3) printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", dev->name, ioread16(ioaddr + IntrStatus)); spin_unlock (&yp->lock); return IRQ_RETVAL(handled);}/* This routine is logically part of the interrupt handler, but separated for clarity and better register allocation. 
*/static int yellowfin_rx(struct net_device *dev){ struct yellowfin_private *yp = netdev_priv(dev); int entry = yp->cur_rx % RX_RING_SIZE; int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx; if (yellowfin_debug > 4) { printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n", entry, yp->rx_ring[entry].result_status); printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n", entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr, yp->rx_ring[entry].result_status); } /* If EOP is set on the next entry, it's a new packet. Send it up. */ while (1) { struct yellowfin_desc *desc = &yp->rx_ring[entry]; struct sk_buff *rx_skb = yp->rx_skbuff[entry]; s16 frame_status; u16 desc_status; int data_size; u8 *buf_addr; if(!desc->result_status) break; pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr), yp->rx_buf_sz, PCI_DMA_FROMDEVICE); desc_status = le32_to_cpu(desc->result_status) >> 16; buf_addr = rx_skb->data; data_size = (le32_to_cpu(desc->dbdma_cmd) - le32_to_cpu(desc->result_status)) & 0xffff; frame_status = le16_to_cpu(get_unaligned((__le16*)&(buf_addr[data_size - 2]))); if (yellowfin_debug > 4) printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n", frame_status); if (--boguscnt < 0) break; if ( ! (desc_status & RX_EOP)) { if (data_size != 0) printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers," " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size); dev->stats.rx_length_errors++; } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) { /* There was a error. */ if (yellowfin_debug > 3) printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n", frame_status);
Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Fullscreen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?