winbond-840.c
		np->full_duplex = duplex;
		if (np->msg_level & NETIF_MSG_LINK)
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		np->csr6 &= ~0x200;
		np->csr6 |= duplex ? 0x200 : 0;
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;
	int old_csr6 = np->csr6;
	u32 intr_status = readl(ioaddr + IntrStatus);

	if (np->msg_level & NETIF_MSG_TIMER)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
			   "config %8.8x.\n", dev->name, intr_status,
			   (int)readl(ioaddr + NetworkConfig));
	/* Check for blocked interrupts. */
	if (np->polling) {
		if (intr_status & 0x1ffff) {
			intr_handler(dev->irq, dev, 0);
			next_tick = 1;
			np->polling = 1;
		} else if (++np->polling > 10*HZ)
			np->polling = 0;
		else
			next_tick = 2;
	} else if (intr_status & 0x1ffff) {
		np->polling = 1;
	}
	if (netif_queue_paused(dev) &&
		np->cur_tx - np->dirty_tx > 1 &&
		(jiffies - dev->trans_start) > TX_TIMEOUT) {
		tx_timeout(dev);
	}
	check_duplex(dev);
	if (np->csr6 != old_csr6) {
		writel(np->csr6 & ~0x0002, ioaddr + NetworkConfig);
		writel(np->csr6 | 0x2002, ioaddr + NetworkConfig);
	}
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
#ifndef __alpha__
	if (np->msg_level & NETIF_MSG_TX_ERR) {
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < np->rx_ring_size; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < np->tx_ring_size; i++)
			printk(" %8.8x", np->tx_ring[i].status);
		printk("\n");
	}
#endif
	/* Perhaps we should reinitialize the hardware here.
	   Just trigger a Tx demand for now. */
	writel(0, ioaddr + TxStartDemand);
	dev->if_port = 0;
	/* Stop and restart the chip's Tx processes. */
	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	np->tx_full = 0;
	np->cur_tx = np->dirty_tx = 0;
	np->tx_q_bytes = np->tx_unq_bytes = 0;
	np->cur_rx = np->dirty_rx = 0;
	np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ : dev->mtu + 14);
	np->rx_head_desc = &np->rx_ring[0];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < np->rx_ring_size; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
		np->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;
	np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < np->rx_ring_size; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;			/* Mark as being used by this device. */
		np->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
		np->rx_ring[i].status = DescOwn | DescIntr;
	}
	np->dirty_rx = (unsigned int)(i - np->rx_ring_size);

	for (i = 0; i < np->tx_ring_size; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].status = 0;
	}
	return;
}
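/* A note on the counter idiom used throughout this driver: cur_rx/cur_tx
   and dirty_rx/dirty_tx increase monotonically and are reduced modulo the
   ring size only when indexing, so cur - dirty is the number of
   descriptors in flight.  Setting dirty_rx to i - rx_ring_size in
   init_ring() above lets the refill logic see exactly how many buffers
   the allocation loop failed to provide.  The sketch below is an
   illustration only (not compiled, names local to the example): */
#if 0
static unsigned int ring_in_flight(unsigned int cur, unsigned int dirty)
{
	/* Correct even after the unsigned counters wrap, provided fewer
	   than UINT_MAX entries are ever outstanding at once. */
	return cur - dirty;
}
#endif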
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	unsigned entry;

	/* Block a timer-based transmit from overlapping. */
	if (netif_pause_tx_queue(dev) != 0) {
		/* This watchdog code is redundant with the media monitor timer. */
		if (jiffies - dev->trans_start > TX_TIMEOUT)
			tx_timeout(dev);
		return 1;
	}

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % np->tx_ring_size;

	np->tx_skbuff[entry] = skb;
	np->tx_ring[entry].buffer1 = virt_to_bus(skb->data);

#define one_buffer
#define BPT 1022
#if defined(one_buffer)
	np->tx_ring[entry].length = DescWholePkt | skb->len;
	if (entry >= np->tx_ring_size-1)		/* Wrap ring */
		np->tx_ring[entry].length |= DescIntr | DescEndRing;
	np->tx_ring[entry].status = DescOwn;
	np->cur_tx++;
#elif defined(two_buffer)
	if (skb->len > BPT) {
		unsigned int entry1 = ++np->cur_tx % np->tx_ring_size;
		np->tx_ring[entry].length = DescStartPkt | BPT;
		np->tx_ring[entry1].length = DescEndPkt | (skb->len - BPT);
		np->tx_ring[entry1].buffer1 = virt_to_bus(skb->data + BPT);
		np->tx_ring[entry1].status = DescOwn;
		np->tx_ring[entry].status = DescOwn;
		if (entry >= np->tx_ring_size-1)
			np->tx_ring[entry].length |= DescIntr | DescEndRing;
		else if (entry1 >= np->tx_ring_size-1)
			np->tx_ring[entry1].length |= DescIntr | DescEndRing;
		np->cur_tx++;
	} else {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
		if (entry >= np->tx_ring_size-1)	/* Wrap ring */
			np->tx_ring[entry].length |= DescIntr | DescEndRing;
		np->tx_ring[entry].status = DescOwn;
		np->cur_tx++;
	}
#elif defined(split_buffer)
	{
		/* Work around the Tx-FIFO-full bug by splitting our transmit packet
		   into two pieces, the first which may be loaded without overflowing
		   the FIFO, and the second which contains the remainder of the
		   packet.  When we get a Tx-done interrupt that frees enough room in
		   the FIFO we mark the remainder of the packet as loadable.
		   This has the problem that the Tx descriptors are written both here
		   and in the interrupt handler. */

		int buf1size = TX_FIFO_SIZE - (np->tx_q_bytes - np->tx_unq_bytes);
		int buf2size = skb->len - buf1size;

		if (buf2size <= 0) {			/* We fit into one descriptor. */
			np->tx_ring[entry].length = DescWholePkt | skb->len;
		} else {				/* We must use two descriptors. */
			unsigned int entry2;
			np->tx_ring[entry].length = DescIntr | DescStartPkt | buf1size;
			if (entry >= np->tx_ring_size-1) {	/* Wrap ring */
				np->tx_ring[entry].length |= DescEndRing;
				entry2 = 0;
			} else
				entry2 = entry + 1;
			np->cur_tx++;
			np->tx_ring[entry2].buffer1 = virt_to_bus(skb->data + buf1size);
			np->tx_ring[entry2].length = DescEndPkt | buf2size;
			if (entry2 >= np->tx_ring_size-1)	/* Wrap ring */
				np->tx_ring[entry2].length |= DescEndRing;
		}
		np->tx_ring[entry].status = DescOwn;
		np->cur_tx++;
	}
#endif
	np->tx_q_bytes += skb->len;
	writel(0, dev->base_addr + TxStartDemand);

	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN) {
		np->tx_full = 1;
		netif_stop_tx_queue(dev);
	} else if ((np->drv_flags & HasBrokenTx)
			   && np->tx_q_bytes - np->tx_unq_bytes > TX_BUG_FIFO_LIMIT) {
		np->tx_full = 1;
		netif_stop_tx_queue(dev);
	} else
		netif_unpause_tx_queue(dev);		/* Typical path */

	dev->trans_start = jiffies;

	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}
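/* The "ownership last" rule noted in start_tx() above is the hand-off
   protocol for every descriptor: the buffer pointer and length must be in
   memory before the status word gives the descriptor to the chip, or the
   NIC may DMA a half-written descriptor.  A minimal sketch of the
   sequence follows; the wmb() barrier is shown only for illustration and
   is not part of this driver, which relies on the write-ordering
   assumptions of its era.  Not compiled: */
#if 0
	desc->buffer1 = virt_to_bus(skb->data);		/* 1. payload address */
	desc->length = DescWholePkt | skb->len;		/* 2. control bits */
	wmb();						/* 3. commit the writes */
	desc->status = DescOwn;				/* 4. hand to the NIC */
#endif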
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int work_limit = np->max_interrupt_work;

	do {
		u32 intr_status = readl(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		writel(intr_status & 0x0001ffff, ioaddr + IntrStatus);

		if (np->msg_level & NETIF_MSG_INTR)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0
			|| intr_status == 0xffffffff)
			break;

		if (intr_status & (IntrRxDone | RxNoBuf))
			netdev_rx(dev);

		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % np->tx_ring_size;
			int tx_status = np->tx_ring[entry].status;

			if (tx_status < 0)
				break;
			if (np->msg_level & NETIF_MSG_TX_DONE)
				printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
					   dev->name, tx_status);
			if (tx_status & 0x8000) {	/* There was an error, log it. */
				if (np->msg_level & NETIF_MSG_TX_ERR)
					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						   dev->name, tx_status);
				np->stats.tx_errors++;
				if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
				if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
				if (tx_status & 0x0200) np->stats.tx_window_errors++;
				if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
				if ((tx_status & 0x0080) && np->full_duplex == 0)
					np->stats.tx_heartbeat_errors++;
#ifdef ETHER_STATS
				if (tx_status & 0x0100) np->stats.collisions16++;
#endif
			} else {
#ifdef ETHER_STATS
				if (tx_status & 0x0001) np->stats.tx_deferred++;
#endif
#if LINUX_VERSION_CODE > 0x20127
				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
#endif
				np->stats.collisions += (tx_status >> 3) & 15;
				np->stats.tx_packets++;
			}
			/* Free the original skb. */
			np->tx_unq_bytes += np->tx_skbuff[entry]->len;
			dev_free_skb_irq(np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = 0;
		}
		if (np->tx_full &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4 &&
			np->tx_q_bytes - np->tx_unq_bytes < TX_BUG_FIFO_LIMIT) {
			/* The ring is no longer full, allow new TX entries. */
			np->tx_full = 0;
			netif_resume_tx_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr
						   | TimerInt | IntrTxStopped))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n", dev->name, intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
			writel(10, ioaddr + GPTimer);
			break;
		}
	} while (1);

	if (np->msg_level & NETIF_MSG_INTR)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus));
	return;
}
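/* Both the Tx-reap loop above and netdev_rx() below read the descriptor
   status into a signed 32-bit variable and test "status < 0".  DescOwn is
   bit 31, so a negative value means the chip still owns the descriptor
   and the scan must stop.  Sketch of the test (illustration only, not
   compiled): */
#if 0
	s32 status = desc->status;
	if (status < 0)		/* DescOwn (bit 31) still set by the chip */
		break;
#endif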
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int entry = np->cur_rx % np->rx_ring_size;
	int work_limit = np->dirty_rx + np->rx_ring_size - np->cur_rx;

	if (np->msg_level & NETIF_MSG_RX_STATUS) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (np->msg_level & NETIF_MSG_RX_STATUS)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
						   "multiple buffers, entry %#x status %4.4x!\n",
						   dev->name, np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (np->msg_level & NETIF_MSG_RX_ERR)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				np->stats.rx_errors++;		/* end of a packet. */
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

			if (np->msg_level & NETIF_MSG_RX_STATUS)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   " status %x.\n", pkt_len, status);
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < np->rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* Call copy + cksum if available. */