eepro100.c
                sp->rx_ringp[entry] = NULL;
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
            sp->stats.rx_packets++;
            sp->stats.rx_bytes += pkt_len;      /* Count received bytes */
        }
        entry = (++sp->cur_rx) % RX_RING_SIZE;
    }

    /* Refill the Rx ring buffers. */
    for (; sp->dirty_rx < sp->cur_rx; sp->dirty_rx++) {
        struct RxFD *rxf;
        entry = sp->dirty_rx % RX_RING_SIZE;
        if (sp->rx_skbuff[entry] == NULL) {
            struct sk_buff *skb;
            /* Get a fresh skbuff to replace the consumed one. */
            skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
            sp->rx_skbuff[entry] = skb;
            if (skb == NULL) {
                sp->rx_ringp[entry] = NULL;
                break;                  /* Better luck next time! */
            }
            rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
            skb->dev = dev;
            skb_reserve(skb, sizeof(struct RxFD));
            rxf->rx_buf_addr = virt_to_bus(skb->tail);
        } else {
            rxf = sp->rx_ringp[entry];
        }
        rxf->status = 0xC0000001;       /* '1' for driver use only. */
        rxf->link = 0;                  /* None yet. */
        rxf->count = 0;
        rxf->size = PKT_BUF_SZ;
        sp->last_rxf->link = virt_to_bus(rxf);
        sp->last_rxf->status &= ~0xC0000000;
        sp->last_rxf = rxf;
    }

    sp->last_rx_time = jiffies;
    return 0;
}

static int
speedo_close(struct device *dev)
{
    long ioaddr = dev->base_addr;
    struct speedo_private *sp = (struct speedo_private *)dev->priv;
    int i;

    dev->start = 0;
    dev->tbusy = 1;

    if (speedo_debug > 1)
        printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
               dev->name, inw(ioaddr + SCBStatus));

    /* Shut off the media monitoring timer. */
    del_timer(&sp->timer);

    /* Disable interrupts, and stop the chip's Rx process. */
    outw(INT_MASK, ioaddr + SCBCmd);
    outw(INT_MASK | RX_ABORT, ioaddr + SCBCmd);

    free_irq(dev->irq, dev);

    /* Free all the skbuffs in the Rx and Tx queues. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = sp->rx_skbuff[i];
        sp->rx_skbuff[i] = 0;           /* Clear the Rx descriptors. */
        if (skb)
            dev_free_skb(skb);
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        struct sk_buff *skb = sp->tx_skbuff[i];
        sp->tx_skbuff[i] = 0;           /* Clear the Tx descriptors. */
        if (skb)
            dev_free_skb(skb);
    }
    if (sp->mc_setup_frm) {
        kfree(sp->mc_setup_frm);
        sp->mc_setup_frm_len = 0;
    }

    /* Print a few items for debugging. */
    if (speedo_debug > 3) {
        int phy_num = sp->phy[0] & 0x1f;
        printk(KERN_DEBUG "%s:Printing Rx ring (next to receive into %d).\n",
               dev->name, sp->cur_rx);
        for (i = 0; i < RX_RING_SIZE; i++)
            printk(KERN_DEBUG "  Rx ring entry %d  %8.8x.\n",
                   i, (int)sp->rx_ringp[i]->status);
        for (i = 0; i < 5; i++)
            printk(KERN_DEBUG "  PHY index %d register %d is %4.4x.\n",
                   phy_num, i, mdio_read(ioaddr, phy_num, i));
        for (i = 21; i < 26; i++)
            printk(KERN_DEBUG "  PHY index %d register %d is %4.4x.\n",
                   phy_num, i, mdio_read(ioaddr, phy_num, i));
    }

    MOD_DEC_USE_COUNT;
    return 0;
}
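/* Editorial aside, not part of the original driver: the Rx refill loop above
   relies on two free-running counters, cur_rx (next descriptor the chip will
   complete) and dirty_rx (oldest consumed descriptor still awaiting a fresh
   buffer).  Both only ever increase; the ring slot is recovered with
   '% RX_RING_SIZE', so 'cur_rx - dirty_rx' is always the number of consumed
   but not yet refilled entries.  A minimal stand-alone sketch of that
   bookkeeping, using hypothetical names and excluded from the build: */
#if 0
#define DEMO_RING_SIZE 32               /* stand-in for RX_RING_SIZE */

static unsigned int demo_cur_rx, demo_dirty_rx;

static void demo_consume_one(void)
{
    unsigned int entry = demo_cur_rx % DEMO_RING_SIZE;
    /* ...hand the filled buffer at ring[entry] up the stack... */
    (void)entry;
    demo_cur_rx++;                      /* advance without wrapping the index */
}

static void demo_refill(void)
{
    /* Mirrors the driver's refill loop: visit every consumed slot once. */
    for (; demo_dirty_rx < demo_cur_rx; demo_dirty_rx++) {
        unsigned int entry = demo_dirty_rx % DEMO_RING_SIZE;
        /* ...allocate a buffer and re-arm the descriptor at ring[entry]... */
        (void)entry;
    }
}
#endif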
/* The Speedo-3 has an especially awkward and unusable method of getting
   statistics out of the chip.  It takes an unpredictable length of time
   for the dump-stats command to complete.  To avoid a busy-wait loop we
   update the stats with the previous dump results, and then trigger a
   new dump.

   These problems are mitigated by the current /proc implementation, which
   calls this routine first to judge the output length, and then to emit
   the output.

   Oh, and incoming frames are dropped while executing dump-stats!
*/
static struct enet_statistics *
speedo_get_stats(struct device *dev)
{
    struct speedo_private *sp = (struct speedo_private *)dev->priv;
    long ioaddr = dev->base_addr;

    if (sp->lstats.done_marker == 0xA007) {     /* Previous dump finished */
        sp->stats.tx_aborted_errors += sp->lstats.tx_coll16_errs;
        sp->stats.tx_window_errors += sp->lstats.tx_late_colls;
        sp->stats.tx_fifo_errors += sp->lstats.tx_underruns;
        sp->stats.tx_fifo_errors += sp->lstats.tx_lost_carrier;
        /*sp->stats.tx_deferred += sp->lstats.tx_deferred;*/
        sp->stats.collisions += sp->lstats.tx_total_colls;
        sp->stats.rx_crc_errors += sp->lstats.rx_crc_errs;
        sp->stats.rx_frame_errors += sp->lstats.rx_align_errs;
        sp->stats.rx_over_errors += sp->lstats.rx_resource_errs;
        sp->stats.rx_fifo_errors += sp->lstats.rx_overrun_errs;
        sp->stats.rx_length_errors += sp->lstats.rx_runt_errs;
        sp->lstats.done_marker = 0x0000;
        if (dev->start) {
            wait_for_cmd_done(ioaddr + SCBCmd);
            outw(CU_DUMPSTATS, ioaddr + SCBCmd);
        }
    }
    return &sp->stats;
}

static int speedo_ioctl(struct device *dev, struct ifreq *rq, int cmd)
{
    struct speedo_private *sp = (struct speedo_private *)dev->priv;
    long ioaddr = dev->base_addr;
    u16 *data = (u16 *)&rq->ifr_data;
    int phy = sp->phy[0] & 0x1f;

    switch (cmd) {
    case SIOCDEVPRIVATE:        /* Get the address of the PHY in use. */
        data[0] = phy;
        /* Fall through: also return the contents of the requested register. */
    case SIOCDEVPRIVATE+1:      /* Read the specified MII register. */
        data[3] = mdio_read(ioaddr, data[0], data[1]);
        return 0;
    case SIOCDEVPRIVATE+2:      /* Write the specified MII register */
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        mdio_write(ioaddr, data[0], data[1], data[2]);
        return 0;
    default:
        return -EOPNOTSUPP;
    }
}
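/* Editorial aside, not part of the original driver: the private ioctl above
   packs four u16 words directly into the ifr_data slot of struct ifreq:
   data[0] = PHY address, data[1] = MII register number, data[2] = value to
   write, data[3] = value read back.  A hypothetical user-space caller that
   reads an MII register under those assumptions could look like the sketch
   below; it is user-space code, so it is excluded from the kernel build. */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

static int demo_read_mii(const char *ifname, int reg)
{
    struct ifreq ifr;
    unsigned short *data = (unsigned short *)&ifr.ifr_data;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return -1;
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
    data[1] = reg;                      /* MII register number to read */
    /* SIOCDEVPRIVATE: the driver fills data[0] with the PHY address and
       data[3] with the register contents (see speedo_ioctl above). */
    if (ioctl(fd, SIOCDEVPRIVATE, &ifr) < 0) {
        close(fd);
        return -1;
    }
    close(fd);
    return data[3];
}
#endif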
/* Set or clear the multicast filter for this adaptor.
   This is very ugly with Intel chips -- we usually have to execute an
   entire configuration command, plus process a multicast command.
   This is complicated.  We must put a large configuration command and
   an arbitrarily-sized multicast command in the transmit list.
   To minimize the disruption -- the previous command might have already
   loaded the link -- we convert the current command block, normally a Tx
   command, into a no-op and link it to the new command.
*/
static void
set_rx_mode(struct device *dev)
{
    struct speedo_private *sp = (struct speedo_private *)dev->priv;
    long ioaddr = dev->base_addr;
    struct descriptor *last_cmd;
    char new_rx_mode;
    unsigned long flags;
    int entry, i;

    if (dev->flags & IFF_PROMISC) {             /* Set promiscuous. */
        new_rx_mode = 3;
    } else if ((dev->flags & IFF_ALLMULTI) ||
               dev->mc_count > multicast_filter_limit) {
        new_rx_mode = 1;
    } else
        new_rx_mode = 0;

    if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
        /* The Tx ring is full -- don't add anything!  Presumably the new mode
           is in config_cmd_data and will be added anyway. */
        sp->rx_mode = -1;
        return;
    }

    if (new_rx_mode != sp->rx_mode) {
        u8 *config_cmd_data;

        spin_lock_irqsave(&sp->lock, flags);
        entry = sp->cur_tx++ % TX_RING_SIZE;
        last_cmd = sp->last_cmd;
        sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

        sp->tx_skbuff[entry] = 0;               /* Redundant. */
        sp->tx_ring[entry].status = (CmdSuspend | CmdConfigure) << 16;
        sp->tx_ring[entry].link =
            virt_to_bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
        config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
        /* Construct a full CmdConfig frame. */
        memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
        config_cmd_data[1] = (txfifo << 4) | rxfifo;
        config_cmd_data[4] = rxdmacount;
        config_cmd_data[5] = txdmacount + 0x80;
        config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
        config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
        config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
        if (sp->phy[0] & 0x8000) {              /* Use the AUI port instead. */
            config_cmd_data[15] |= 0x80;
            config_cmd_data[8] = 0;
        }
        /* Trigger the command unit resume. */
        last_cmd->command &= ~CmdSuspend;
        wait_for_cmd_done(ioaddr + SCBCmd);
        outw(CU_RESUME, ioaddr + SCBCmd);
        spin_unlock_irqrestore(&sp->lock, flags);
    }

    if (new_rx_mode == 0 && dev->mc_count < 4) {
        /* The simple case of 0-3 multicast list entries occurs often, and
           fits within one tx_ring[] entry. */
        struct dev_mc_list *mclist;
        u16 *setup_params, *eaddrs;

        spin_lock_irqsave(&sp->lock, flags);
        entry = sp->cur_tx++ % TX_RING_SIZE;
        last_cmd = sp->last_cmd;
        sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

        sp->tx_skbuff[entry] = 0;
        sp->tx_ring[entry].status = (CmdSuspend | CmdMulticastList) << 16;
        sp->tx_ring[entry].link =
            virt_to_bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
        sp->tx_ring[entry].tx_desc_addr = 0;    /* Really MC list count. */
        setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
        *setup_params++ = dev->mc_count*6;
        /* Fill in the multicast addresses. */
        for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
             i++, mclist = mclist->next) {
            eaddrs = (u16 *)mclist->dmi_addr;
            *setup_params++ = *eaddrs++;
            *setup_params++ = *eaddrs++;
            *setup_params++ = *eaddrs++;
        }

        last_cmd->command &= ~CmdSuspend;
        /* Immediately trigger the command unit resume. */
        wait_for_cmd_done(ioaddr + SCBCmd);
        outw(CU_RESUME, ioaddr + SCBCmd);
        spin_unlock_irqrestore(&sp->lock, flags);
    } else if (new_rx_mode == 0) {
        struct dev_mc_list *mclist;
        u16 *setup_params, *eaddrs;
        struct descriptor *mc_setup_frm = sp->mc_setup_frm;
        int i;

        if (sp->mc_setup_frm_len < 10 + dev->mc_count*6
            || sp->mc_setup_frm == NULL) {
            /* Allocate a full setup frame, 10bytes + <max addrs>. */
            if (sp->mc_setup_frm)
                kfree(sp->mc_setup_frm);
            sp->mc_setup_busy = 0;
            sp->mc_setup_frm_len = 10 + multicast_filter_limit*6;
            sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC);
            if (sp->mc_setup_frm == NULL) {
                printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
                       dev->name);
                sp->rx_mode = -1;               /* We failed, try again. */
                return;
            }
        }
        /* If we are busy, someone might be quickly adding to the MC list.
           Try again later when the list changes stop. */
        if (sp->mc_setup_busy) {
            sp->rx_mode = -1;
            return;
        }
        mc_setup_frm = sp->mc_setup_frm;
        /* Fill the setup frame. */
        if (speedo_debug > 1)
            printk(KERN_DEBUG "%s: Constructing a setup frame at %p, "
                   "%d bytes.\n",
                   dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len);
        mc_setup_frm->status = 0;
        mc_setup_frm->command = CmdSuspend | CmdIntr | CmdMulticastList;
        /* Link set below. */
        setup_params = (u16 *)&mc_setup_frm->params;
        *setup_params++ = dev->mc_count*6;
        /* Fill in the multicast addresses. */
        for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
             i++, mclist = mclist->next) {
            eaddrs = (u16 *)mclist->dmi_addr;
            *setup_params++ = *eaddrs++;
            *setup_params++ = *eaddrs++;
            *setup_params++ = *eaddrs++;
        }

        /* Disable interrupts while playing with the Tx Cmd list. */
        spin_lock_irqsave(&sp->lock, flags);
        entry = sp->cur_tx++ % TX_RING_SIZE;
        last_cmd = sp->last_cmd;
        sp->last_cmd = mc_setup_frm;
        sp->mc_setup_busy++;

        /* Change the command to a NoOp, pointing to the CmdMulti command. */
        sp->tx_skbuff[entry] = 0;
        sp->tx_ring[entry].status = CmdNOp << 16;
        sp->tx_ring[entry].link = virt_to_bus(mc_setup_frm);

        /* Set the link in the setup frame. */
        mc_setup_frm->link =
            virt_to_bus(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));

        last_cmd->command &= ~CmdSuspend;
        /* Immediately trigger the command unit resume. */
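        /* Editorial note, not in the original source: this is the splice
           described in the comment above set_rx_mode.  The previously queued
           descriptor carried the Suspend bit, so the chip's command unit halts
           after executing it; by linking the setup frame behind it, clearing
           CmdSuspend on that older descriptor, and issuing CU_RESUME below,
           the command unit flows straight into the new command instead of
           being restarted from scratch. */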
        wait_for_cmd_done(ioaddr + SCBCmd);
        outw(CU_RESUME, ioaddr + SCBCmd);
        spin_unlock_irqrestore(&sp->lock, flags);
        if (speedo_debug > 5)
            printk(" CmdMCSetup frame length %d in entry %d.\n",
                   dev->mc_count, entry);
    }

    sp->rx_mode = new_rx_mode;
}

#ifdef MODULE

int
init_module(void)
{
    int cards_found;

    if (debug >= 0)
        speedo_debug = debug;
    if (speedo_debug)
        printk(KERN_INFO "%s", version);

    root_speedo_dev = NULL;
    cards_found = eepro100_init(NULL);
    return cards_found ? 0 : -ENODEV;
}

void
cleanup_module(void)
{
    struct device *next_dev;

    /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
    while (root_speedo_dev) {
        next_dev = ((struct speedo_private *)root_speedo_dev->priv)->next_module;
        unregister_netdev(root_speedo_dev);
        release_region(root_speedo_dev->base_addr, SPEEDO3_TOTAL_SIZE);
        kfree(root_speedo_dev);
        root_speedo_dev = next_dev;
    }
}

#else   /* not MODULE */

int eepro100_probe(struct device *dev)
{
    int cards_found = 0;

    cards_found = eepro100_init(dev);

    if (speedo_debug > 0 && cards_found)
        printk(version);

    return cards_found ? 0 : -ENODEV;
}
#endif  /* MODULE */

/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */
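/* Editorial note, not part of the original file: when the driver is built as
   a module with the compile-command above, the 'debug' parameter seen in
   init_module() seeds speedo_debug, so a hypothetical load line would be
   something like 'insmod eepro100.o debug=2'. */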