sis900.c
static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex)
{
        struct sis900_private *sis_priv = net_dev->priv;
        struct mii_phy *phy = sis_priv->mii;
        int phy_addr = sis_priv->cur_phy;
        u32 status;
        u16 autoadv, autorec;
        int i = 0;

        /* MII_STATUS latches link failures, so read it twice to get
           the current link state */
        while (i++ < 2)
                status = mdio_read(net_dev, phy_addr, MII_STATUS);

        if (!(status & MII_STAT_LINK))
                return;

        /* AutoNegotiate completed */
        autoadv = mdio_read(net_dev, phy_addr, MII_ANADV);
        autorec = mdio_read(net_dev, phy_addr, MII_ANLPAR);
        status = autoadv & autorec;

        *speed = HW_SPEED_10_MBPS;
        *duplex = FDX_CAPABLE_HALF_SELECTED;

        if (status & (MII_NWAY_TX | MII_NWAY_TX_FDX))
                *speed = HW_SPEED_100_MBPS;
        if (status & (MII_NWAY_TX_FDX | MII_NWAY_T_FDX))
                *duplex = FDX_CAPABLE_FULL_SELECTED;

        sis_priv->autong_complete = 1;

        /* Workaround for Realtek RTL8201 PHY issue */
        if ((phy->phy_id0 == 0x0000) && ((phy->phy_id1 & 0xFFF0) == 0x8200)) {
                if (mdio_read(net_dev, phy_addr, MII_CONTROL) & MII_CNTL_FDX)
                        *duplex = FDX_CAPABLE_FULL_SELECTED;
                if (mdio_read(net_dev, phy_addr, 0x0019) & 0x01)
                        *speed = HW_SPEED_100_MBPS;
        }

        printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
               net_dev->name,
               *speed == HW_SPEED_100_MBPS ? "100mbps" : "10mbps",
               *duplex == FDX_CAPABLE_FULL_SELECTED ? "full" : "half");
}

/**
 * sis900_tx_timeout: - sis900 transmit timeout routine
 * @net_dev: the net device to transmit on
 *
 * Print the transmit timeout status, disable interrupts,
 * discard pending packets, and restart the transmitter.
 */

static void sis900_tx_timeout(struct net_device *net_dev)
{
        struct sis900_private *sis_priv = net_dev->priv;
        long ioaddr = net_dev->base_addr;
        unsigned long flags;
        int i;

        printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
               net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));

        /* Disable interrupts by clearing the interrupt mask. */
        outl(0x0000, ioaddr + imr);

        /* use a spinlock to prevent the interrupt handler from
           accessing the buffer ring */
        spin_lock_irqsave(&sis_priv->lock, flags);

        /* discard unsent packets */
        sis_priv->dirty_tx = sis_priv->cur_tx = 0;
        for (i = 0; i < NUM_TX_DESC; i++) {
                struct sk_buff *skb = sis_priv->tx_skbuff[i];

                if (skb) {
                        pci_unmap_single(sis_priv->pci_dev,
                                sis_priv->tx_ring[i].bufptr, skb->len,
                                PCI_DMA_TODEVICE);
                        dev_kfree_skb(skb);
                        sis_priv->tx_skbuff[i] = NULL;
                        sis_priv->tx_ring[i].cmdsts = 0;
                        sis_priv->tx_ring[i].bufptr = 0;
                        sis_priv->stats.tx_dropped++;
                }
        }
        sis_priv->tx_full = 0;
        netif_wake_queue(net_dev);

        spin_unlock_irqrestore(&sis_priv->lock, flags);

        net_dev->trans_start = jiffies;

        /* FIXME: Should we restart the transmission thread here ?? */
        outl(TxENA | inl(ioaddr + cr), ioaddr + cr);

        /* Enable all known interrupts by setting the interrupt mask. */
        outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);

        return;
}
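/*
 * Illustrative sketch (not part of the original file): in drivers of this
 * 2.4-era vintage the timeout handler above is wired up during probe
 * roughly as follows, assuming a TX_TIMEOUT constant defined elsewhere
 * in this file:
 *
 *      net_dev->tx_timeout = &sis900_tx_timeout;
 *      net_dev->watchdog_timeo = TX_TIMEOUT;
 *
 * The networking core then calls sis900_tx_timeout() when a queued packet
 * has not been reclaimed within watchdog_timeo jiffies.
 */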
/**
 * sis900_start_xmit: - sis900 start transmit routine
 * @skb: socket buffer pointer to put the data being transmitted
 * @net_dev: the net device to transmit with
 *
 * Set the transmit buffer descriptor,
 * and write TxENA to enable the transmit state machine.
 * Tell the upper layer if the buffer is full.
 */

static int
sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
        struct sis900_private *sis_priv = net_dev->priv;
        long ioaddr = net_dev->base_addr;
        unsigned int entry;
        unsigned long flags;

        /* Don't transmit data before auto-negotiation has completed */
        if (!sis_priv->autong_complete) {
                netif_stop_queue(net_dev);
                return 1;
        }

        spin_lock_irqsave(&sis_priv->lock, flags);

        /* Calculate the next Tx descriptor entry. */
        entry = sis_priv->cur_tx % NUM_TX_DESC;
        sis_priv->tx_skbuff[entry] = skb;

        /* set the transmit buffer descriptor and enable the
           Transmit State Machine */
        sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
                skb->data, skb->len, PCI_DMA_TODEVICE);
        sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
        outl(TxENA | inl(ioaddr + cr), ioaddr + cr);

        if (++sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC) {
                /* Typical path, tell the upper layer that more
                   transmission is possible */
                netif_start_queue(net_dev);
        } else {
                /* buffer full, tell the upper layer no more transmission */
                sis_priv->tx_full = 1;
                netif_stop_queue(net_dev);
        }

        spin_unlock_irqrestore(&sis_priv->lock, flags);

        net_dev->trans_start = jiffies;

        if (sis900_debug > 3)
                printk(KERN_INFO "%s: Queued Tx packet at %p size %d "
                       "to slot %d.\n",
                       net_dev->name, skb->data, (int)skb->len, entry);

        return 0;
}
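/*
 * Illustrative sketch (not part of the original driver):
 * sis900_start_xmit() and sis900_finish_xmit() treat cur_tx and dirty_tx
 * as free-running unsigned counters and index the ring modulo NUM_TX_DESC.
 * A hypothetical helper makes the occupancy invariant explicit: the ring
 * is full exactly when NUM_TX_DESC descriptors are in flight.
 */
static inline unsigned int sis900_tx_in_flight(struct sis900_private *sis_priv)
{
        /* descriptors handed to the hardware but not yet reclaimed;
           unsigned arithmetic keeps this correct across wrap-around */
        return sis_priv->cur_tx - sis_priv->dirty_tx;
}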
/**
 * sis900_interrupt: - sis900 interrupt handler
 * @irq: the irq number
 * @dev_instance: the client data object
 * @regs: snapshot of processor context
 *
 * The interrupt handler does all of the Rx thread work
 * and cleans up after the Tx thread.
 */

static void sis900_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct net_device *net_dev = dev_instance;
        struct sis900_private *sis_priv = net_dev->priv;
        int boguscnt = max_interrupt_work;
        long ioaddr = net_dev->base_addr;
        u32 status;

        spin_lock(&sis_priv->lock);

        do {
                status = inl(ioaddr + isr);

                if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
                        /* nothing interesting happened */
                        break;

                /* why don't we break after the Tx/Rx cases ??
                   keyword: full-duplex */
                if (status & (RxORN | RxERR | RxOK))
                        /* Rx interrupt */
                        sis900_rx(net_dev);

                if (status & (TxURN | TxERR | TxIDLE))
                        /* Tx interrupt */
                        sis900_finish_xmit(net_dev);

                /* something strange happened !!! */
                if (status & HIBERR) {
                        printk(KERN_INFO "%s: Abnormal interrupt, "
                               "status %#8.8x.\n", net_dev->name, status);
                        break;
                }

                if (--boguscnt < 0) {
                        printk(KERN_INFO "%s: Too much work at interrupt, "
                               "interrupt status = %#8.8x.\n",
                               net_dev->name, status);
                        break;
                }
        } while (1);

        if (sis900_debug > 3)
                printk(KERN_INFO "%s: exiting interrupt, "
                       "interrupt status = %#8.8x.\n",
                       net_dev->name, inl(ioaddr + isr));

        spin_unlock(&sis_priv->lock);
        return;
}
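/*
 * Illustrative sketch (not part of the original file): a handler with this
 * signature is typically registered in the driver's open routine along
 * these lines (SA_SHIRQ because the PCI interrupt line may be shared):
 *
 *      if (request_irq(net_dev->irq, &sis900_interrupt, SA_SHIRQ,
 *                      net_dev->name, net_dev))
 *              return -EAGAIN;
 *
 * and released with free_irq(net_dev->irq, net_dev) in the close routine.
 */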
/**
 * sis900_rx: - sis900 receive routine
 * @net_dev: the net device which receives data
 *
 * Process receive interrupt events,
 * pass buffers to the higher layer, and refill the buffer pool.
 * Note: this function is called by the interrupt handler,
 * so don't do "too much" work here.
 */

static int sis900_rx(struct net_device *net_dev)
{
        struct sis900_private *sis_priv = net_dev->priv;
        long ioaddr = net_dev->base_addr;
        unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
        u32 rx_status = sis_priv->rx_ring[entry].cmdsts;

        if (sis900_debug > 3)
                printk(KERN_INFO "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
                       "status:0x%8.8x\n",
                       sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);

        while (rx_status & OWN) {
                unsigned int rx_size;

                rx_size = (rx_status & DSIZE) - CRC_SIZE;

                if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
                        /* corrupted packet received */
                        if (sis900_debug > 3)
                                printk(KERN_INFO "%s: Corrupted packet "
                                       "received, buffer status = 0x%8.8x.\n",
                                       net_dev->name, rx_status);
                        sis_priv->stats.rx_errors++;
                        if (rx_status & OVERRUN)
                                sis_priv->stats.rx_over_errors++;
                        if (rx_status & (TOOLONG|RUNT))
                                sis_priv->stats.rx_length_errors++;
                        if (rx_status & (RXISERR | FAERR))
                                sis_priv->stats.rx_frame_errors++;
                        if (rx_status & CRCERR)
                                sis_priv->stats.rx_crc_errors++;
                        /* reset buffer descriptor state */
                        sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
                } else {
                        struct sk_buff *skb;

                        /* This situation should never happen, but due to
                           some unknown bugs it is possible that we are
                           working on a NULL sk_buff :-( */
                        if (sis_priv->rx_skbuff[entry] == NULL) {
                                printk(KERN_INFO "%s: NULL pointer "
                                       "encountered in Rx ring, skipping\n",
                                       net_dev->name);
                                break;
                        }

                        pci_dma_sync_single(sis_priv->pci_dev,
                                sis_priv->rx_ring[entry].bufptr,
                                RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
                        pci_unmap_single(sis_priv->pci_dev,
                                sis_priv->rx_ring[entry].bufptr,
                                RX_BUF_SIZE, PCI_DMA_FROMDEVICE);

                        /* give the socket buffer to the upper layers */
                        skb = sis_priv->rx_skbuff[entry];
                        skb_put(skb, rx_size);
                        skb->protocol = eth_type_trans(skb, net_dev);
                        netif_rx(skb);

                        /* some network statistics */
                        if ((rx_status & BCAST) == MCAST)
                                sis_priv->stats.multicast++;
                        net_dev->last_rx = jiffies;
                        sis_priv->stats.rx_bytes += rx_size;
                        sis_priv->stats.rx_packets++;

                        /* refill the Rx buffer; what if there is not enough
                           memory for a new socket buffer ?? */
                        if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
                                /* not enough memory for an skbuff; this
                                   makes a "hole" in the buffer ring, and it
                                   is not clear how the hardware will react
                                   to this kind of degenerated buffer */
                                printk(KERN_INFO "%s: Memory squeeze, "
                                       "deferring packet.\n", net_dev->name);
                                sis_priv->rx_skbuff[entry] = NULL;
                                /* reset buffer descriptor state */
                                sis_priv->rx_ring[entry].cmdsts = 0;
                                sis_priv->rx_ring[entry].bufptr = 0;
                                sis_priv->stats.rx_dropped++;
                                break;
                        }
                        skb->dev = net_dev;
                        sis_priv->rx_skbuff[entry] = skb;
                        sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
                        sis_priv->rx_ring[entry].bufptr =
                                pci_map_single(sis_priv->pci_dev, skb->tail,
                                        RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
                        sis_priv->dirty_rx++;
                }
                sis_priv->cur_rx++;
                entry = sis_priv->cur_rx % NUM_RX_DESC;
                rx_status = sis_priv->rx_ring[entry].cmdsts;
        } /* while */

        /* refill the Rx buffers; what if the rate of refilling is slower
           than consuming ?? */
        for (; sis_priv->cur_rx - sis_priv->dirty_rx > 0; sis_priv->dirty_rx++) {
                struct sk_buff *skb;

                entry = sis_priv->dirty_rx % NUM_RX_DESC;

                if (sis_priv->rx_skbuff[entry] == NULL) {
                        if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
                                /* not enough memory for an skbuff; this
                                   makes a "hole" in the buffer ring, and it
                                   is not clear how the hardware will react
                                   to this kind of degenerated buffer */
                                printk(KERN_INFO "%s: Memory squeeze, "
                                       "deferring packet.\n", net_dev->name);
                                sis_priv->stats.rx_dropped++;
                                break;
                        }
                        skb->dev = net_dev;
                        sis_priv->rx_skbuff[entry] = skb;
                        sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
                        sis_priv->rx_ring[entry].bufptr =
                                pci_map_single(sis_priv->pci_dev, skb->tail,
                                        RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
                }
        }

        /* re-enable the potentially idle receive state machine */
        outl(RxENA | inl(ioaddr + cr), ioaddr + cr);

        return 0;
}
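/*
 * Illustrative sketch (not part of the original file): a worked example of
 * the cmdsts decoding done in sis900_rx(), using a hypothetical status
 * value and assuming the usual sis900.h masks (OWN = 0x80000000,
 * DSIZE = 0x00000fff, CRC_SIZE = 4):
 *
 *      rx_status = 0x80000040;
 *      rx_status & OWN;                 // nonzero: descriptor returned by the NIC
 *      rx_status & DSIZE;               // 0x40 = 64 bytes on the wire
 *      (rx_status & DSIZE) - CRC_SIZE;  // 60 bytes handed to the stack
 *
 * No error bits are set in this value, so the frame would take the
 * netif_rx() path above.
 */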
/**
 * sis900_finish_xmit: - finish up transmission of packets
 * @net_dev: the net device that was transmitted on
 *
 * Check for error conditions, free the socket buffers, etc.,
 * and schedule more transmission as needed.
 * Note: this function is called by the interrupt handler,
 * so don't do "too much" work here.
 */

static void sis900_finish_xmit(struct net_device *net_dev)
{
        struct sis900_private *sis_priv = net_dev->priv;

        for (; sis_priv->dirty_tx < sis_priv->cur_tx; sis_priv->dirty_tx++) {
                struct sk_buff *skb;
                unsigned int entry;
                u32 tx_status;

                entry = sis_priv->dirty_tx % NUM_TX_DESC;
                tx_status = sis_priv->tx_ring[entry].cmdsts;

                if (tx_status & OWN) {
                        /* The packet is not transmitted yet (owned by
                           hardware)! Note: the interrupt is generated only
                           when the Tx Machine is idle, so this is an almost
                           impossible case */
                        break;
                }

                if (tx_status & (ABORT | UNDERRUN | OWCOLL)) {
                        /* packet unsuccessfully transmitted */
                        if (sis900_debug > 3)
                                printk(KERN_INFO "%s: Transmit "
                                       "error, Tx status %8.8x.\n",
                                       net_dev->name, tx_status);
                        sis_priv->stats.tx_errors++;
                        if (tx_status & UNDERRUN)
                                sis_priv->stats.tx_fifo_errors++;
                        if (tx_status & ABORT)
                                sis_priv->stats.tx_aborted_errors++;
                        if (tx_status & NOCARRIER)
                                sis_priv->stats.tx_carrier_errors++;
                        if (tx_status & OWCOLL)
                                sis_priv->stats.tx_window_errors++;
                } else {
                        /* packet successfully transmitted */
                        sis_priv->stats.collisions += (tx_status & COLCNT) >> 16;
                        sis_priv->stats.tx_bytes += tx_status & DSIZE;
                        sis_priv->stats.tx_packets++;
                }

                /* Free the original skb. */
                skb = sis_priv->tx_skbuff[entry];
                pci_unmap_single(sis_priv->pci_dev,
                        sis_priv->tx_ring[entry].bufptr, skb->len,
                        PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(skb);
                sis_priv->tx_skbuff[entry] = NULL;
                sis_priv->tx_ring[entry].bufptr = 0;
                sis_priv->tx_ring[entry].cmdsts = 0;
        }

        if (sis_priv->tx_full && netif_queue_stopped(net_dev) &&
            sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) {
                /* The ring is no longer full, clear tx_full and schedule
                   more transmission by netif_wake_queue(net_dev) */
                sis_priv->tx_full = 0;
                netif_wake_queue(net_dev);
        }
}

/**
 * sis900_close: - close the sis900 device
 * @net_dev: the net device to be closed
 *
 * Disable interrupts, stop the Tx and Rx state machines,
 * and free the Tx and Rx socket buffers.
 */

static int
sis900_close(struct net_device *net_dev)
{
        long ioaddr = net_dev->base_addr;