yellowfin.c
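/*
 * Packet Engines G-NIC "Yellowfin" Gigabit Ethernet driver: ring
 * initialization, transmit, interrupt, and receive paths.
 */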
/* Initialize the Rx and Tx rings. */
static void yellowfin_init_ring(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	int i;

	yp->tx_full = 0;
	yp->cur_rx = yp->cur_tx = 0;
	yp->dirty_tx = 0;

	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd =
			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
		yp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
			skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
	}
	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

#define NO_TXSTATS
#ifdef NO_TXSTATS
	/* In this mode the Tx ring needs only a single descriptor. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		yp->tx_skbuff[i] = 0;
		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
	}
	/* Wrap ring */
	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
{
	int j;

	/* Tx ring needs a pair of descriptors, the second for the status. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		j = 2*i;
		yp->tx_skbuff[i] = 0;
		/* Branch on Tx error. */
		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			(j+1)*sizeof(struct yellowfin_desc));
		j++;
		if (yp->flags & FullTxStatus) {
			yp->tx_ring[j].dbdma_cmd =
				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
				i*sizeof(struct tx_status_words));
		} else {
			/* Symbios chips write only tx_errs word. */
			yp->tx_ring[j].dbdma_cmd =
				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
			yp->tx_ring[j].request_cnt = 2;
			/* Om pade ummmmm... */
			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
				i*sizeof(struct tx_status_words) +
				&(yp->tx_status[0].tx_errs) - &(yp->tx_status[0]));
		}
		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
	}
	/* Wrap ring */
	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
}
#endif
	yp->tx_tail_desc = &yp->tx_status[0];
	return;
}

static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	unsigned entry;

	netif_stop_queue (dev);

	/* Note: Ordering is important here, set the field with the "ownership"
	   bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = yp->cur_tx % TX_RING_SIZE;

	yp->tx_skbuff[entry] = skb;

	if (gx_fix) {	/* Note: only works for paddable protocols e.g. IP. */
		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
		/* Fix GX chipset errata. */
		if (cacheline_end > 24 || cacheline_end == 0)
			skb->len += 32 - cacheline_end + 1;
	}
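	/*
	 * Descriptor chaining below: the slot after the packet being queued
	 * always holds a CMD_STOP so the DMA engine parks there; only after
	 * that new stop is in place is the previous stop overwritten with the
	 * real CMD_TX_PKT command, so the chip never chases an unfinished
	 * descriptor.
	 */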
#ifdef NO_TXSTATS
	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	yp->tx_ring[entry].result_status = 0;
	if (entry >= TX_RING_SIZE-1) {
		/* New stop command. */
		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | skb->len);
	} else {
		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[entry].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len);
	}
	yp->cur_tx++;
#else
	yp->tx_ring[entry<<1].request_cnt = skb->len;
	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
		skb->data, skb->len, PCI_DMA_TODEVICE));

	/* The input_last (status-write) command is constant, but we must
	   rewrite the subsequent 'stop' command. */
	yp->cur_tx++;
	{
		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	}
	/* Final step -- overwrite the old 'stop' command. */
	yp->tx_ring[entry<<1].dbdma_cmd =
		cpu_to_le32( ((entry % 6) == 0 ?
			CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
			CMD_TX_PKT | BRANCH_IFTRUE) | skb->len);
#endif

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	outl(0x10001000, dev->base_addr + TxCtrl);

	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_start_queue (dev);		/* Typical path */
	else
		yp->tx_full = 1;
	dev->trans_start = jiffies;

	if (yellowfin_debug > 4) {
		printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
			   dev->name, yp->cur_tx, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct yellowfin_private *yp;
	long ioaddr;
	int boguscnt = max_interrupt_work;

#ifndef final_version			/* Can never occur. */
	if (dev == NULL) {
		printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}
#endif

	ioaddr = dev->base_addr;
	yp = dev->priv;

	spin_lock (&yp->lock);

	do {
		u16 intr_status = inw(ioaddr + IntrClear);

		if (yellowfin_debug > 4)
			printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (intr_status == 0)
			break;

		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
			yellowfin_rx(dev);
			outl(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
		}

#ifdef NO_TXSTATS
		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
			int entry = yp->dirty_tx % TX_RING_SIZE;
			struct sk_buff *skb;

			if (yp->tx_ring[entry].result_status == 0)
				break;
			skb = yp->tx_skbuff[entry];
			yp->stats.tx_packets++;
			yp->stats.tx_bytes += skb->len;
			/* Free the original skb. */
			pci_unmap_single(yp->pci_dev,
				yp->tx_ring[entry].addr, skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			yp->tx_skbuff[entry] = 0;
		}
		if (yp->tx_full
			&& yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
			/* The ring is no longer full, clear tbusy. */
			yp->tx_full = 0;
			netif_wake_queue(dev);
		}
#else
		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
			unsigned dirty_tx = yp->dirty_tx;

			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				/* Todo: optimize this. */
				int entry = dirty_tx % TX_RING_SIZE;
				u16 tx_errs = yp->tx_status[entry].tx_errs;
				struct sk_buff *skb;

#ifndef final_version
				if (yellowfin_debug > 5)
					printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
						   "%4.4x %4.4x %4.4x %4.4x.\n",
						   dev->name, entry,
						   yp->tx_status[entry].tx_cnt,
						   yp->tx_status[entry].tx_errs,
						   yp->tx_status[entry].total_tx_cnt,
						   yp->tx_status[entry].paused);
#endif
				if (tx_errs == 0)
					break;			/* It still hasn't been Txed */
				skb = yp->tx_skbuff[entry];
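				/*
				 * Classify the completion: the bits in 0xF810
				 * flag serious failures (abort, carrier loss,
				 * late collision, FIFO underrun); on a clean
				 * transmit the low four bits give the
				 * collision count.
				 */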
				if (tx_errs & 0xF810) {
					/* There was a major error, log it. */
#ifndef final_version
					if (yellowfin_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
							   dev->name, tx_errs);
#endif
					yp->stats.tx_errors++;
					if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
					if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
					if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
					if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
					if (tx_errs & 0x1000) yp->stats.collisions16++;
#endif
				} else {
#ifndef final_version
					if (yellowfin_debug > 4)
						printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
							   dev->name, tx_errs);
#endif
#ifdef ETHER_STATS
					if (tx_errs & 0x0400) yp->stats.tx_deferred++;
#endif
					yp->stats.tx_bytes += skb->len;
					yp->stats.collisions += tx_errs & 15;
					yp->stats.tx_packets++;
				}
				/* Free the original skb. */
				pci_unmap_single(yp->pci_dev,
					yp->tx_ring[entry<<1].addr, skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);
				yp->tx_skbuff[entry] = 0;
				/* Mark status as empty. */
				yp->tx_status[entry].tx_errs = 0;
			}

#ifndef final_version
			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (yp->tx_full
				&& yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
				/* The ring is no longer full, clear tbusy. */
				yp->tx_full = 0;
				netif_wake_queue(dev);
			}

			yp->dirty_tx = dirty_tx;
			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
		}
#endif

		/* Log errors and other uncommon events. */
		if (intr_status & 0x2ee)	/* Abnormal error summary. */
			yellowfin_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n",
				   dev->name, intr_status);
			break;
		}
	} while (1);

	if (yellowfin_debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, inw(ioaddr + IntrStatus));

	spin_unlock (&yp->lock);
	return;
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int yellowfin_rx(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	int entry = yp->cur_rx % RX_RING_SIZE;
	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;

	if (yellowfin_debug > 4) {
		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
			   entry, yp->rx_ring[entry].result_status);
		printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
			   yp->rx_ring[entry].result_status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct yellowfin_desc *desc = &yp->rx_ring[entry];
		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
		s16 frame_status;
		u16 desc_status;
		int data_size;
		u8 *buf_addr;

		if(!desc->result_status)
			break;
		pci_dma_sync_single(yp->pci_dev, desc->addr,
			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		desc_status = le32_to_cpu(desc->result_status) >> 16;
		buf_addr = rx_skb->tail;
		data_size = (le32_to_cpu(desc->dbdma_cmd) -
			le32_to_cpu(desc->result_status)) & 0xffff;
		frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
		if (yellowfin_debug > 4)
			printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
				   frame_status);
		if (--boguscnt < 0)
			break;
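		/*
		 * The chip stores the frame status in the last two bytes of
		 * the received data, so it is read back out of the buffer
		 * itself.  Gigabit and non-gigabit (Symbios) parts report
		 * errors in different formats, hence the separate decode
		 * paths below.
		 */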
		if ( ! (desc_status & RX_EOP)) {
			printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
				   " status %4.4x!\n", dev->name, desc_status);
			yp->stats.rx_length_errors++;
		} else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
			/* There was an error. */
			if (yellowfin_debug > 3)
				printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
					   frame_status);
			yp->stats.rx_errors++;
			if (frame_status & 0x0060) yp->stats.rx_length_errors++;
			if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
			if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
			if (frame_status < 0) yp->stats.rx_dropped++;
		} else if ( !(yp->drv_flags & IsGigabit)  &&
				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
			u8 status1 = buf_addr[data_size-2];
			u8 status2 = buf_addr[data_size-1];
			yp->stats.rx_errors++;
			if (status1 & 0xC0) yp->stats.rx_length_errors++;
			if (status2 & 0x03) yp->stats.rx_frame_errors++;
			if (status2 & 0x04) yp->stats.rx_crc_errors++;
			if (status2 & 0x80) yp->stats.rx_dropped++;
#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
		} else if ((yp->flags & HasMACAddrBug)  &&
			memcmp(le32_to_cpu(yp->rx_ring_dma +
				entry*sizeof(struct yellowfin_desc)),
				dev->dev_addr, 6) != 0 &&
			memcmp(le32_to_cpu(yp->rx_ring_dma +
				entry*sizeof(struct yellowfin_desc)),
				"\377\377\377\377\377\377", 6) != 0) {
			if (bogus_rx++ == 0)