fealnx.c
/* input : dev... pointer to the adapter block. */
/* output : none. */
static void getlinkstatus(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	unsigned int i, DelayTime = 0x1000;

	np->linkok = 0;

	if (np->PHYType == MysonPHY) {
		for (i = 0; i < DelayTime; ++i) {
			if (readl(dev->base_addr + BMCRSR) & LinkIsUp2) {
				np->linkok = 1;
				return;
			}
			// delay
			m80x_delay(100);
		}
	} else {
		for (i = 0; i < DelayTime; ++i) {
			if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
				np->linkok = 1;
				return;
			}
			// delay
			m80x_delay(100);
		}
	}
}


static void getlinktype(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;

	if (np->PHYType == MysonPHY) {	/* 3-in-1 case */
		if (readl(dev->base_addr + TCRRCR) & FD)
			np->duplexmode = 2;	/* full duplex */
		else
			np->duplexmode = 1;	/* half duplex */
		if (readl(dev->base_addr + TCRRCR) & PS10)
			np->line_speed = 1;	/* 10M */
		else
			np->line_speed = 2;	/* 100M */
	} else {
		if (np->PHYType == SeeqPHY) {	/* this PHY is SEEQ 80225 */
			unsigned int data;

			data = mdio_read(dev, np->phys[0], MIIRegister18);
			if (data & SPD_DET_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_DET_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		} else if (np->PHYType == AhdocPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], DiagnosticReg);
			if (data & Speed_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		}
/* 89/6/13 add, (begin) */
		else if (np->PHYType == MarvellPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & Full_Duplex)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == Speed_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == Speed_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
/* 89/6/13 add, (end) */
/* 89/7/27 add, (begin) */
		else if (np->PHYType == Myson981) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], StatusRegister);
			if (data & SPEED100)
				np->line_speed = 2;
			else
				np->line_speed = 1;
			if (data & FULLMODE)
				np->duplexmode = 2;
			else
				np->duplexmode = 1;
		}
/* 89/7/27 add, (end) */
/* 89/12/29 add */
		else if (np->PHYType == LevelOnePHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & LXT1000_Full)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == LXT1000_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == LXT1000_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}

		// change crvalue
		// np->crvalue &= (~PS10) & (~FD);
		np->crvalue &= (~PS10) & (~FD) & (~PS1000);
		if (np->line_speed == 1)
			np->crvalue |= PS10;
		else if (np->line_speed == 3)
			np->crvalue |= PS1000;
		if (np->duplexmode == 2)
			np->crvalue |= FD;
	}
}


static void allocate_rx_buffers(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;

	/* allocate skb for rx buffers */
	while (np->really_rx_count != RX_RING_SIZE) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(np->rx_buf_sz);
		if (skb == NULL)
			break;	/* Better luck next round. */

		np->lack_rxbuf->skbuff = skb;
		skb->dev = dev;	/* Mark as being used by this device. */
		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->tail,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
		++np->really_rx_count;
	}
}


static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10 * HZ;
	int old_crvalue = np->crvalue;
	unsigned int old_linkok = np->linkok;

	if (debug)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
		       "config %8.8x.\n", dev->name, readl(ioaddr + ISR),
		       readl(ioaddr + TCRRCR));

	if (np->flags == HAS_MII_XCVR) {
		getlinkstatus(dev);
		if ((old_linkok == 0) && (np->linkok == 1)) {	/* we need to detect the media type again */
			getlinktype(dev);
			if (np->crvalue != old_crvalue) {
				stop_nic_tx(ioaddr, np->crvalue);
				stop_nic_rx(ioaddr, np->crvalue & (~0x40000));
				writel(np->crvalue, ioaddr + TCRRCR);
			}
		}
	}

	allocate_rx_buffers(dev);

	np->timer.expires = RUN_AT(next_tick);
	add_timer(&np->timer);
}


static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
	       " resetting...\n", dev->name, readl(ioaddr + ISR));

	{
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int) np->rx_ring[i].status);
		printk("\n" KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x", np->tx_ring[i].status);
		printk("\n");
	}

	dev->if_port = np->default_port;	/* Reinit. Gross */

	/* Reset the chip's Tx and Rx processes. */
	stop_nic_tx(ioaddr, 0);
	reset_rx_descriptors(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0x0000, ioaddr + IMR);

	/* Reset the chip to erase previous misconfiguration. */
	writel(0x00000001, ioaddr + BCR);

	/* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
	   We surely wait too long (address+data phase). Who cares? */
	for (i = 0; i < 50; i++) {
		readl(ioaddr + BCR);
		rmb();
	}

	writel((np->cur_tx - np->tx_ring) * sizeof(struct fealnx_desc) +
	       np->tx_ring_dma, ioaddr + TXLBA);
	writel((np->cur_rx - np->rx_ring) * sizeof(struct fealnx_desc) +
	       np->rx_ring_dma, ioaddr + RXLBA);

	writel(np->bcrvalue, ioaddr + BCR);

	writel(0, dev->base_addr + RXPDR);
	set_rx_mode(dev);
	/* Clear and Enable interrupts by setting the interrupt mask. */
	writel(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	writel(np->imrvalue, ioaddr + IMR);

	writel(0, dev->base_addr + TXPDR);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;

	return;
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int i;

	/* initialize rx variables */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->cur_rx = &np->rx_ring[0];
	np->lack_rxbuf = NULL;
	np->really_rx_count = 0;

	/* initial rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
		np->rx_ring[i].next_desc = np->rx_ring_dma +
			(i + 1) * sizeof(struct fealnx_desc);
		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
		np->rx_ring[i].skbuff = NULL;
	}

	/* for the last rx descriptor */
	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

	/* allocate skb for rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);

		if (skb == NULL) {
			np->lack_rxbuf = &np->rx_ring[i];
			break;
		}

		++np->really_rx_count;
		np->rx_ring[i].skbuff = skb;
		skb->dev = dev;	/* Mark as being used by this device. */
		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->tail,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->rx_ring[i].status = RXOWN;
		np->rx_ring[i].control |= RXIC;
	}

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].status = 0;
		np->tx_ring[i].next_desc = np->tx_ring_dma +
			(i + 1) * sizeof(struct fealnx_desc);
		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
		np->tx_ring[i].skbuff = NULL;
	}

	/* for the last tx descriptor */
	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];

	return;
}


static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = dev->priv;

	np->cur_tx_copy->skbuff = skb;

#define one_buffer
#define BPT 1022
#if defined(one_buffer)
	np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
		skb->len, PCI_DMA_TODEVICE);
	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
	if (np->pci_dev->device == 0x891)
		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
	np->cur_tx_copy->status = TXOWN;
	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
	--np->free_tx_count;
#elif defined(two_buffer)
	if (skb->len > BPT) {
		struct fealnx_desc *next;

		/* for the first descriptor */
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			BPT, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */

		/* for the last descriptor */
		next = (struct fealnx_desc *) np->cur_tx_copy->next_desc_logical;
		next->skbuff = skb;
		next->control = TXIC | TXLD | CRCEnable | PADEnable;
		next->control |= (skb->len << PKTSShift);	/* pkt size */
		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
			skb->len - BPT, PCI_DMA_TODEVICE);

		next->status = TXOWN;
		np->cur_tx_copy->status = TXOWN;

		np->cur_tx_copy = next->next_desc_logical;
		np->free_tx_count -= 2;
	} else {
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			skb->len, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		np->cur_tx_copy->status = TXOWN;
		np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
		--np->free_tx_count;
	}
#endif

	if (np->free_tx_count < 2)
		netif_stop_queue(dev);
	++np->really_tx_count;
	writel(0, dev->base_addr + TXPDR);
	dev->trans_start = jiffies;

	return 0;
}


void free_one_rx_descriptor(struct netdev_private *np)
{
	if (np->really_rx_count == RX_RING_SIZE)
		np->cur_rx->status = RXOWN;
	else {
		np->lack_rxbuf->skbuff = np->cur_rx->skbuff;
		np->lack_rxbuf->buffer = np->cur_rx->buffer;
		np->lack_rxbuf->status = RXOWN;
		++np->really_rx_count;
		np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
	}
	np->cur_rx = np->cur_rx->next_desc_logical;
}


void reset_rx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;

	stop_nic_rx(dev->base_addr, np->crvalue);

	while (!(np->cur_rx->status & RXOWN))
		free_one_rx_descriptor(np);

	allocate_rx_buffers(dev);

	/* RXLBA takes a bus address, so convert the descriptor index to a
	   byte offset before adding it to the ring's DMA base. */
	writel(np->rx_ring_dma + (np->cur_rx - np->rx_ring) *
		sizeof(struct fealnx_desc), dev->base_addr + RXLBA);
	writel(np->crvalue, dev->base_addr + TCRRCR);
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct netdev_private *np = dev->priv;
	long ioaddr, boguscnt = max_interrupt_work;
	unsigned int num_tx = 0;

	writel(0, dev->base_addr + IMR);

	ioaddr = dev->base_addr;
	np = (struct netdev_private *) dev->priv;

	do {
		u32 intr_status = readl(ioaddr + ISR);

		/* Acknowledge all of the current interrupt sources ASAP. */
		writel(intr_status, ioaddr + ISR);

		if (debug)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
			       intr_status);

		if (!(intr_status & np->imrvalue))
			break;

// 90/1/16 delete,
//
//		if (intr_status & FBE)
//		{	/* fatal error */
//			stop_nic_tx(ioaddr, 0);
//			stop_nic_rx(ioaddr, 0);
//			break;
//		};

		if (intr_status & TUNF)
			writel(0, ioaddr + TXPDR);

		if (intr_status & CNTOVF) {
			/* missed pkts */
			np->stats.rx_missed_errors += readl(ioaddr + TALLY) & 0x7fff;

			/* crc error */
			np->stats.rx_crc_errors +=
			    (readl(ioaddr + TALLY) & 0x7fff0000) >> 16;
		}

		if (intr_status & (RI | RBU)) {
			if (intr_status & RI)
				netdev_rx(dev);
			else
				reset_rx_descriptors(dev);
		}

		while (np->really_tx_count) {
			long tx_status = np->cur_tx->status;
			long tx_control = np->cur_tx->control;

			if (!(tx_control & TXLD)) {	/* this pkt is combined by two tx descriptors */
				struct fealnx_desc *next;

				next = np->cur_tx->next_desc_logical;
				tx_status = next->status;
				tx_control = next->control;
			}

			if (tx_status & TXOWN)
				break;

			if (!(np->crvalue & 0x02000000)) {