/*
 * gt96100eth.c — GT-96100 ethernet driver (this chunk: open-tail, close,
 * tx, rx, interrupt, timeout, rx-mode, stats).
 * NOTE(review): the original file header was garbled by web extraction.
 */
printk(KERN_ERR "%s: error in gt96100_init\n", dev->name); free_irq(dev->irq, dev); MOD_DEC_USE_COUNT; return retval; } netif_start_queue(dev); if (gt96100_debug > 2) printk("%s: gt96100_open: Initialization done.\n", dev->name); return 0;}static int gt96100_close(struct net_device *dev){ struct gt96100_private *gp = (struct gt96100_private *) dev->priv; int i; if (gt96100_debug > 2) printk("%s: gt96100_close: dev=%p\n", dev->name, dev); // stop the device if (netif_device_present(dev)) { netif_stop_queue(dev); hard_stop(dev); } // free the Rx DMA buffers for (i = 0; i < RX_RING_SIZE; i++) { if (gp->rx_buff[i]) { dmafree(PKT_BUF_SZ, gp->rx_buff[i]); gp->rx_buff[i] = NULL; } } free_irq(dev->irq, dev); MOD_DEC_USE_COUNT; return 0;}static int gt96100_tx(struct sk_buff *skb, struct net_device *dev){ struct gt96100_private *gp = (struct gt96100_private *) dev->priv; unsigned long flags; int nextIn; if (gt96100_debug > 2) printk("%s: gt96100_tx: skb->len=%d, skb->data=%p\n", dev->name, skb->len, skb->data); spin_lock_irqsave(&gp->lock, flags); if (gp->tx_count >= TX_RING_SIZE) { printk(KERN_WARNING "%s: Tx Ring full, refusing to send buffer.\n", dev->name); gp->stats.tx_dropped++; spin_unlock_irqrestore(&gp->lock, flags); return 1; } // Prepare the Descriptor at tx_next_in nextIn = gp->tx_next_in; if (dma32_to_cpu(gp->tx_ring[nextIn].cmdstat) & txOwn) { printk(KERN_ERR "%s: gt96100_tx: TxOwn bit wrong!!\n", dev->name); } gp->tx_skbuff[nextIn] = skb; gp->tx_ring[nextIn].byte_cnt = cpu_to_dma32(skb->len << tdByteCntBit); gp->tx_ring[nextIn].buff_ptr = cpu_to_dma32(virt_to_phys(skb->data)); // Give ownership to device, set first and last desc, enable interrupt // Setting of ownership bit must be *last*! 
gp->tx_ring[nextIn].cmdstat = cpu_to_dma32((u32) (txOwn | txEI | txFirst | txLast)); // increment tx_next_in with wrap gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE; // If count is zero, DMA should be stopped, so restart if (gp->tx_count == 0) { if (GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS) & psrTxLow) printk(KERN_WARNING "%s: Tx count zero but Tx queue running!\n", dev->name); GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD | sdcmrTXDL); } // increment count and stop queue if full if (++gp->tx_count == TX_RING_SIZE) netif_stop_queue(dev); dev->trans_start = jiffies; spin_unlock_irqrestore(&gp->lock, flags); return 0;}static int gt96100_rx(struct net_device *dev, u32 status){ struct gt96100_private *gp = (struct gt96100_private *) dev->priv; struct sk_buff *skb; int pkt_len, nextOut; gt96100_rd_t *rd; u32 cmdstat; if (gt96100_debug > 2) printk("%s: gt96100_rx: dev=%p, status = %x\n", dev->name, dev, status); // Continue until we reach the current descriptor pointer for (nextOut = gp->rx_next_out; nextOut != (GT96100ETH_READ(gp, GT96100_ETH_CURR_RX_DESC_PTR0) - gp->rx_ring_dma) / sizeof(gt96100_rd_t); nextOut = (nextOut + 1) % RX_RING_SIZE) { rd = &gp->rx_ring[nextOut]; cmdstat = dma32_to_cpu(rd->cmdstat); if (cmdstat & (u32) rxOwn) { cmdstat &= ~((u32) rxOwn); rd->cmdstat = cpu_to_dma32(cmdstat); printk(KERN_ERR "%s: gt96100_rx: ownership bit wrong!\n", dev->name); } // must be first and last (ie only) buffer of packet if (!(cmdstat & (u32) rxFirst) || !(cmdstat & (u32) rxLast)) { printk(KERN_ERR "%s: gt96100_rx: desc not first and last!\n", dev->name); continue; } // drop this received pkt if there were any errors if ((cmdstat & (u32) rxErrorSummary) || (status & icrRxErrorQ0)) { // update the detailed rx error counters that are not covered // by the MIB counters. if (cmdstat & (u32) rxOverrun) gp->stats.rx_fifo_errors++; continue; } pkt_len = dma32_to_cpu(rd->buff_cnt_sz) & rdByteCntMask; /* Create new skb. 
*/ skb = dev_alloc_skb(pkt_len + 2); if (skb == NULL) { printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); gp->stats.rx_dropped++; continue; } skb->dev = dev; skb_reserve(skb, 2); /* 16 byte IP header align */ skb_put(skb, pkt_len); /* Make room */ eth_copy_and_sum(skb, gp->rx_buff[nextOut], pkt_len, 0); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); /* pass the packet to upper layers */ // now we can release ownership of this desc back to device cmdstat |= (u32) rxOwn; rd->cmdstat = cpu_to_dma32(cmdstat); dev->last_rx = jiffies; } gp->rx_next_out = nextOut; return 0;}static void gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs){ struct net_device *dev = (struct net_device *) dev_id; struct gt96100_private *gp = (struct gt96100_private *) dev->priv; u32 status; if (dev == NULL) { printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name); return; } status = GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE); // ACK interrupts#if 0 GT96100ETH_CLRBIT(gp, GT96100_ETH_INT_CAUSE, icrEtherIntSum | icrRxBufferQ1 | icrRxBufferQ2 | icrRxBufferQ3 | icrRxBufferQ0 | icrTxBufferHigh | icrTxEndHigh | icrTxBufferLow | icrTxEndLow | icrTxErrorHigh | icrTxErrorLow | icrTxUdr);#else GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, 0);#endif if ((status & icrEtherIntSum) == 0) { // not our interrupt //printk("%s: isr: no ints? 
icr=%x,cp0_cause=%x\n", // dev->name, status, read_32bit_cp0_register(CP0_CAUSE)); return; } if (gt96100_debug > 3) printk("%s: isr: entry, icr=%x\n", dev->name, status); if (status & (icrRxBufferQ1 | icrRxBufferQ2 | icrRxBufferQ3)) { printk(KERN_ERR "%s: isr: Rx intr in unused queues!?\n", dev->name); } if (status & icrRxBufferQ0) { gt96100_rx(dev, status); } if (status & (icrTxBufferHigh | icrTxEndHigh)) { printk(KERN_ERR "%s: isr: Tx intr in unused queue!?\n", dev->name); } if (status & icrMIIPhySTC) { u32 psr = GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS); printk("%s: port status:\n", dev->name); printk ("%s: %s MBit/s, %s-duplex, flow-control %s, link is %s,\n", dev->name, psr & psrSpeed ? "100" : "10", psr & psrDuplex ? "full" : "half", psr & psrFctl ? "disabled" : "enabled", psr & psrLink ? "up" : "down"); printk ("%s: TxLowQ is %s, TxHighQ is %s, Transmitter is %s\n", dev->name, psr & psrTxLow ? "running" : "stopped", psr & psrTxHigh ? "running" : "stopped", psr & psrTxInProg ? "on" : "off"); gp->last_psr = psr; } if (status & (icrTxBufferLow | icrTxEndLow)) { int nextOut; gt96100_td_t *td; u32 cmdstat; // Continue until we reach the current descriptor pointer for (nextOut = gp->tx_next_out; nextOut != (GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0) - gp->tx_ring_dma) / sizeof(gt96100_td_t); nextOut = (nextOut + 1) % TX_RING_SIZE) { td = &gp->tx_ring[nextOut]; cmdstat = dma32_to_cpu(td->cmdstat); if (gt96100_debug > 2) printk("%s: isr: Tx desc cmdstat=%x\n", dev->name, cmdstat); if (cmdstat & (u32) txOwn) { cmdstat &= ~((u32) txOwn); td->cmdstat = cpu_to_dma32(cmdstat); printk(KERN_ERR "%s: isr: Tx ownership bit wrong!\n", dev->name); } // increment Tx error stats if (cmdstat & (u32) txErrorSummary) { if (gt96100_debug > 2) printk ("%s: gt96100_interrupt: Tx error, cmdstat = %x\n", dev->name, cmdstat); gp->stats.tx_errors++; if (cmdstat & (u32) txReTxLimit) gp->stats.collisions++; if (cmdstat & (u32) txUnderrun) gp->stats.tx_fifo_errors++; if (cmdstat & 
(u32) txLateCollision) gp->stats.tx_window_errors++; } // Wake the queue if the ring was full if (gp->tx_count == TX_RING_SIZE) netif_wake_queue(dev); // decrement tx ring buffer count if (gp->tx_count) gp->tx_count--; // free the skb if (gp->tx_skbuff[nextOut]) { if (gt96100_debug > 2) printk ("%s: isr: good Tx, skb=%p\n", dev->name, gp->tx_skbuff[nextOut]); dev_kfree_skb_irq(gp->tx_skbuff[nextOut]); gp->tx_skbuff[nextOut] = NULL; } else { printk(KERN_ERR "%s: isr: no skb!\n", dev->name); } } if (gp->tx_count == 0 && nextOut != gp->tx_next_in) { // FIX! this should probably be a panic printk(KERN_ERR "%s: isr: warning! Tx queue inconsistent\n", dev->name); } gp->tx_next_out = nextOut; if ((status & icrTxEndLow) && gp->tx_count != 0) { // we must restart the DMA if (gt96100_debug > 2) printk("%s: isr: Restarting Tx DMA\n", dev->name); GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD | sdcmrTXDL); } } // Now check TX errors (RX errors were handled in gt96100_rx) if (status & icrTxErrorHigh) { printk(KERN_ERR "%s: isr: Tx resource error in unused queue!?\n", dev->name); } if (status & icrTxErrorLow) { printk(KERN_ERR "%s: isr: Tx resource error\n", dev->name); } if (status & icrTxUdr) { printk(KERN_ERR "%s: isr: Tx underrun error\n", dev->name); } if (gt96100_debug > 3) printk("%s: isr: exit, icr=%x\n", dev->name, GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));}/* * The Tx ring has been full longer than the watchdog timeout * value, meaning that the interrupt routine has not been freeing * up space in the Tx ring buffer. */static void gt96100_tx_timeout(struct net_device *dev){// struct gt96100_private *gp = (struct gt96100_private *)dev->priv; printk(KERN_ERR "%s: gt96100_tx_timeout: dev=%p\n", dev->name, dev); // FIX! 
do something, like reset the device}static void gt96100_set_rx_mode(struct net_device *dev){ struct gt96100_private *gp = (struct gt96100_private *) dev->priv; unsigned long flags; struct dev_mc_list *mcptr; if (gt96100_debug > 2) printk("%s: gt96100_set_rx_mode: dev=%p, flags=%x\n", dev->name, dev, dev->flags); // stop the Receiver DMA abort(dev, sdcmrAR); spin_lock_irqsave(&gp->lock, flags); if (dev->flags & IFF_PROMISC) GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, pcrEN | pcrHS | pcrPM); memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE); // clear hash table // Add our ethernet address gt96100_add_hash_entry(dev, dev->dev_addr); if (dev->mc_count) { for (mcptr = dev->mc_list; mcptr; mcptr = mcptr->next) { gt96100_add_hash_entry(dev, mcptr->dmi_addr); } } // restart Rx DMA GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD); spin_unlock_irqrestore(&gp->lock, flags);}static struct net_device_stats *gt96100_get_stats(struct net_device *dev){ struct gt96100_private *gp = (struct gt96100_private *) dev->priv; unsigned long flags; if (gt96100_debug > 2) printk("%s: gt96100_get_stats: dev=%p\n", dev->name, dev); if (netif_device_present(dev)) { spin_lock_irqsave(&gp->lock, flags); update_stats(gp); spin_unlock_irqrestore(&gp->lock, flags); } return &gp->stats;}module_init(gt96100_probe);MODULE_LICENSE("GPL");