fealnx.c
From the "Linux kernel source code" · C code · 1,989 lines total · page 1 of 4
C
1,989 lines
if (data & SPD_DET_100) np->line_speed = 2; /* 100M */ else np->line_speed = 1; /* 10M */ if (data & DPLX_DET_FULL) np->duplexmode = 2; /* full duplex mode */ else np->duplexmode = 1; /* half duplex mode */ } else if (np->PHYType == AhdocPHY) { unsigned int data; data = mdio_read(dev, np->phys[0], DiagnosticReg); if (data & Speed_100) np->line_speed = 2; /* 100M */ else np->line_speed = 1; /* 10M */ if (data & DPLX_FULL) np->duplexmode = 2; /* full duplex mode */ else np->duplexmode = 1; /* half duplex mode */ }/* 89/6/13 add, (begin) */ else if (np->PHYType == MarvellPHY) { unsigned int data; data = mdio_read(dev, np->phys[0], SpecificReg); if (data & Full_Duplex) np->duplexmode = 2; /* full duplex mode */ else np->duplexmode = 1; /* half duplex mode */ data &= SpeedMask; if (data == Speed_1000M) np->line_speed = 3; /* 1000M */ else if (data == Speed_100M) np->line_speed = 2; /* 100M */ else np->line_speed = 1; /* 10M */ }/* 89/6/13 add, (end) *//* 89/7/27 add, (begin) */ else if (np->PHYType == Myson981) { unsigned int data; data = mdio_read(dev, np->phys[0], StatusRegister); if (data & SPEED100) np->line_speed = 2; else np->line_speed = 1; if (data & FULLMODE) np->duplexmode = 2; else np->duplexmode = 1; }/* 89/7/27 add, (end) *//* 89/12/29 add */ else if (np->PHYType == LevelOnePHY) { unsigned int data; data = mdio_read(dev, np->phys[0], SpecificReg); if (data & LXT1000_Full) np->duplexmode = 2; /* full duplex mode */ else np->duplexmode = 1; /* half duplex mode */ data &= SpeedMask; if (data == LXT1000_1000M) np->line_speed = 3; /* 1000M */ else if (data == LXT1000_100M) np->line_speed = 2; /* 100M */ else np->line_speed = 1; /* 10M */ } np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000); if (np->line_speed == 1) np->crvalue |= CR_W_PS10; else if (np->line_speed == 3) np->crvalue |= CR_W_PS1000; if (np->duplexmode == 2) np->crvalue |= CR_W_FD; }}/* Take lock before calling this */static void allocate_rx_buffers(struct net_device *dev){ struct 
netdev_private *np = netdev_priv(dev); /* allocate skb for rx buffers */ while (np->really_rx_count != RX_RING_SIZE) { struct sk_buff *skb; skb = dev_alloc_skb(np->rx_buf_sz); if (skb == NULL) break; /* Better luck next round. */ while (np->lack_rxbuf->skbuff) np->lack_rxbuf = np->lack_rxbuf->next_desc_logical; skb->dev = dev; /* Mark as being used by this device. */ np->lack_rxbuf->skbuff = skb; np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); np->lack_rxbuf->status = RXOWN; ++np->really_rx_count; }}static void netdev_timer(unsigned long data){ struct net_device *dev = (struct net_device *) data; struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->mem; int old_crvalue = np->crvalue; unsigned int old_linkok = np->linkok; unsigned long flags; if (debug) printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x " "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR), ioread32(ioaddr + TCRRCR)); spin_lock_irqsave(&np->lock, flags); if (np->flags == HAS_MII_XCVR) { getlinkstatus(dev); if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */ getlinktype(dev); if (np->crvalue != old_crvalue) { stop_nic_rxtx(ioaddr, np->crvalue); iowrite32(np->crvalue, ioaddr + TCRRCR); } } } allocate_rx_buffers(dev); spin_unlock_irqrestore(&np->lock, flags); np->timer.expires = RUN_AT(10 * HZ); add_timer(&np->timer);}/* Take lock before calling *//* Reset chip and disable rx, tx and interrupts */static void reset_and_disable_rxtx(struct net_device *dev){ struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->mem; int delay=51; /* Reset the chip's Tx and Rx processes. */ stop_nic_rxtx(ioaddr, 0); /* Disable interrupts by clearing the interrupt mask. */ iowrite32(0, ioaddr + IMR); /* Reset the chip to erase previous misconfiguration. */ iowrite32(0x00000001, ioaddr + BCR); /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw). 
We surely wait too long (address+data phase). Who cares? */ while (--delay) { ioread32(ioaddr + BCR); rmb(); }}/* Take lock before calling *//* Restore chip after reset */static void enable_rxtx(struct net_device *dev){ struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->mem; reset_rx_descriptors(dev); iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring), ioaddr + TXLBA); iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), ioaddr + RXLBA); iowrite32(np->bcrvalue, ioaddr + BCR); iowrite32(0, ioaddr + RXPDR); __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */ /* Clear and Enable interrupts by setting the interrupt mask. */ iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR); iowrite32(np->imrvalue, ioaddr + IMR); iowrite32(0, ioaddr + TXPDR);}static void reset_timer(unsigned long data){ struct net_device *dev = (struct net_device *) data; struct netdev_private *np = netdev_priv(dev); unsigned long flags; printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name); spin_lock_irqsave(&np->lock, flags); np->crvalue = np->crvalue_sv; np->imrvalue = np->imrvalue_sv; reset_and_disable_rxtx(dev); /* works for me without this: reset_tx_descriptors(dev); */ enable_rxtx(dev); netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? 
*/ np->reset_timer_armed = 0; spin_unlock_irqrestore(&np->lock, flags);}static void tx_timeout(struct net_device *dev){ struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->mem; unsigned long flags; int i; printk(KERN_WARNING "%s: Transmit timed out, status %8.8x," " resetting...\n", dev->name, ioread32(ioaddr + ISR)); { printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) printk(" %8.8x", (unsigned int) np->rx_ring[i].status); printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) printk(" %4.4x", np->tx_ring[i].status); printk("\n"); } spin_lock_irqsave(&np->lock, flags); reset_and_disable_rxtx(dev); reset_tx_descriptors(dev); enable_rxtx(dev); spin_unlock_irqrestore(&np->lock, flags); dev->trans_start = jiffies; np->stats.tx_errors++; netif_wake_queue(dev); /* or .._start_.. ?? */}/* Initialize the Rx and Tx rings, along with various 'dev' bits. */static void init_ring(struct net_device *dev){ struct netdev_private *np = netdev_priv(dev); int i; /* initialize rx variables */ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); np->cur_rx = &np->rx_ring[0]; np->lack_rxbuf = np->rx_ring; np->really_rx_count = 0; /* initial rx descriptors. */ for (i = 0; i < RX_RING_SIZE; i++) { np->rx_ring[i].status = 0; np->rx_ring[i].control = np->rx_buf_sz << RBSShift; np->rx_ring[i].next_desc = np->rx_ring_dma + (i + 1)*sizeof(struct fealnx_desc); np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1]; np->rx_ring[i].skbuff = NULL; } /* for the last rx descriptor */ np->rx_ring[i - 1].next_desc = np->rx_ring_dma; np->rx_ring[i - 1].next_desc_logical = np->rx_ring; /* allocate skb for rx buffers */ for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz); if (skb == NULL) { np->lack_rxbuf = &np->rx_ring[i]; break; } ++np->really_rx_count; np->rx_ring[i].skbuff = skb; skb->dev = dev; /* Mark as being used by this device. 
*/ np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); np->rx_ring[i].status = RXOWN; np->rx_ring[i].control |= RXIC; } /* initialize tx variables */ np->cur_tx = &np->tx_ring[0]; np->cur_tx_copy = &np->tx_ring[0]; np->really_tx_count = 0; np->free_tx_count = TX_RING_SIZE; for (i = 0; i < TX_RING_SIZE; i++) { np->tx_ring[i].status = 0; /* do we need np->tx_ring[i].control = XXX; ?? */ np->tx_ring[i].next_desc = np->tx_ring_dma + (i + 1)*sizeof(struct fealnx_desc); np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1]; np->tx_ring[i].skbuff = NULL; } /* for the last tx descriptor */ np->tx_ring[i - 1].next_desc = np->tx_ring_dma; np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];}static int start_tx(struct sk_buff *skb, struct net_device *dev){ struct netdev_private *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->lock, flags); np->cur_tx_copy->skbuff = skb;#define one_buffer#define BPT 1022#if defined(one_buffer) np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */// 89/12/29 add, if (np->pci_dev->device == 0x891) np->cur_tx_copy->control |= ETIControl | RetryTxLC; np->cur_tx_copy->status = TXOWN; np->cur_tx_copy = np->cur_tx_copy->next_desc_logical; --np->free_tx_count;#elif defined(two_buffer) if (skb->len > BPT) { struct fealnx_desc *next; /* for the first descriptor */ np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, BPT, PCI_DMA_TODEVICE); np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable; np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */ /* for the last descriptor */ next = np->cur_tx_copy->next_desc_logical; next->skbuff = skb; 
next->control = TXIC | TXLD | CRCEnable | PADEnable; next->control |= (skb->len << PKTSShift); /* pkt size */ next->control |= ((skb->len - BPT) << TBSShift); /* buf size */// 89/12/29 add, if (np->pci_dev->device == 0x891) np->cur_tx_copy->control |= ETIControl | RetryTxLC; next->buffer = pci_map_single(ep->pci_dev, skb->data + BPT, skb->len - BPT, PCI_DMA_TODEVICE); next->status = TXOWN; np->cur_tx_copy->status = TXOWN; np->cur_tx_copy = next->next_desc_logical; np->free_tx_count -= 2; } else { np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */// 89/12/29 add, if (np->pci_dev->device == 0x891) np->cur_tx_copy->control |= ETIControl | RetryTxLC; np->cur_tx_copy->status = TXOWN; np->cur_tx_copy = np->cur_tx_copy->next_desc_logical; --np->free_tx_count; }#endif if (np->free_tx_count < 2) netif_stop_queue(dev); ++np->really_tx_count; iowrite32(0, np->mem + TXPDR); dev->trans_start = jiffies; spin_unlock_irqrestore(&np->lock, flags); return 0;}/* Take lock before calling *//* Chip probably hosed tx ring. Clean up. */static void reset_tx_descriptors(struct net_device *dev){ struct netdev_private *np = netdev_priv(dev); struct fealnx_desc *cur; int i; /* initialize tx variables */ np->cur_tx = &np->tx_ring[0]; np->cur_tx_copy = &np->tx_ring[0]; np->really_tx_count = 0; np->free_tx_count = TX_RING_SIZE; for (i = 0; i < TX_RING_SIZE; i++) { cur = &np->tx_ring[i]; if (cur->skbuff) { pci_unmap_single(np->pci_dev, cur->buffer, cur->skbuff->len, PCI_DMA_TODEVICE); dev_kfree_skb_any(cur->skbuff); cur->skbuff = NULL; } cur->status = 0; cur->control = 0; /* needed? */ /* probably not needed. 
We do it for purely paranoid reasons */ cur->next_desc = np->tx_ring_dma + (i + 1)*sizeof(struct fealnx_desc); cur->next_desc_logical = &np->tx_ring[i + 1]; } /* for the last tx descriptor */ np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma; np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];}/* Take lock and stop rx before calling this */static void reset_rx_descriptors(struct net_device *dev){ struct netdev_private *np = netdev_priv(dev); struct fealnx_desc *cur = np->cur_rx; int i; allocate_rx_buffers(dev); for (i = 0; i < RX_RING_SIZE; i++) { if (cur->skbuff) cur->status = RXOWN; cur = cur->next_desc_logical; } iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), np->mem + RXLBA);}/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */static irqreturn_t intr_handler(int irq, void *dev_instance){ struct net_device *dev = (struct net_device *) dev_instance; struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->mem; long boguscnt = max_interrupt_work; unsigned int num_tx = 0; int handled = 0; spin_lock(&np->lock); iowrite32(0, ioaddr + IMR); do { u32 intr_status = ioread32(ioaddr + ISR); /* Acknowledge all of the current interrupt sources ASAP. */ iowrite32(intr_status, ioaddr + ISR); if (debug) printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name, intr_status); if (!(intr_status & np->imrvalue)) break; handled = 1;// 90/1/16 delete,//// if (intr_status & FBE)// { /* fatal error */// stop_nic_tx(ioaddr, 0);// stop_nic_rx(ioaddr, 0);// break;// }; if (intr_status & TUNF) iowrite32(0, ioaddr + TXPDR); if (intr_status & CNTOVF) { /* missed pkts */
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?