⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 eepro100_cb.c

📁 pcmcia source code
💻 C
📖 第 1 页 / 共 5 页
字号:
				printk(KERN_ERR "%s: Rx consistency error -- the skbuff "
					   "addresses do not match in speedo_rx: %p vs. %p "
					   "/ %p.\n", dev->name,
					   bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
					   skb->head, temp);
#endif
				sp->rx_ringp[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			sp->stats.rx_packets++;
#if LINUX_VERSION_CODE > 0x20127
			sp->stats.rx_bytes += pkt_len;
#endif
		}
		entry = (++sp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring: for every descriptor consumed above
	   ([dirty_rx, cur_rx)), re-arm the RxFD, allocating a fresh skbuff
	   where the old one was handed up to the stack. */
	for (; sp->cur_rx - sp->dirty_rx > 0; sp->dirty_rx++) {
		struct RxFD *rxf;
		entry = sp->dirty_rx % RX_RING_SIZE;
		if (sp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			/* Get a fresh skbuff to replace the consumed one.
			   Extra headroom holds the RxFD in front of the packet data. */
			skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
			sp->rx_skbuff[entry] = skb;
			if (skb == NULL) {
				/* Allocation failed: leave a hole in the ring and retry
				   on a later pass.  Better luck next time!  */
				sp->rx_ringp[entry] = NULL;
				break;
			}
			rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
			skb->dev = dev;
			skb_reserve(skb, sizeof(struct RxFD));
			rxf->rx_buf_addr = virt_to_le32desc(skb->tail);
		} else {
			rxf = sp->rx_ringp[entry];
		}
		rxf->status = cpu_to_le32(0xC0000001); 	/* '1' for driver use only;
			   top two bits presumably EL/Suspend -- confirm against the
			   i8255x manual (cleared on the old tail just below). */
		rxf->link = 0;			/* None yet. */
		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
		/* Append to the hardware list, then un-suspend the previous tail
		   so the receive unit can advance into the new descriptor. */
		sp->last_rxf->link = virt_to_le32desc(rxf);
		sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
		sp->last_rxf = rxf;
	}

	sp->last_rx_time = jiffies;
	return 0;
}

/* Close the interface: stop the queue and media timer, reset the chip,
   release the IRQ, free every queued skbuff and the multicast setup
   frame, then drop the card to a low-power ACPI state. */
static int
speedo_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int i;

	netif_stop_queue(dev);
	netif_mark_down(dev);
	if (speedo_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
			   dev->name, inw(ioaddr + SCBStatus));

	/* Shut off the media monitoring timer. */
	del_timer(&sp->timer);

	/* Shutting down the chip nicely fails to disable flow control. So..
	   do a partial reset through the port register instead. */
	outl(PortPartialReset, ioaddr + SCBPort);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->rx_skbuff[i];
		sp->rx_skbuff[i] = 0;
		/* Clear the Rx descriptors. */
		if (skb) {
#if LINUX_VERSION_CODE < 0x20100
			skb->free = 1;
#endif
			dev_free_skb(skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->tx_skbuff[i];
		sp->tx_skbuff[i] = 0;
		/* Clear the Tx descriptors. */
		if (skb)
			dev_free_skb(skb);
	}
	if (sp->mc_setup_frm) {
		kfree(sp->mc_setup_frm);
		sp->mc_setup_frm_len = 0;
	}

	/* Print a few items for debugging. */
	if (speedo_debug > 3)
		speedo_show_state(dev);

	/* Alt: acpi_set_pwr_state(pdev, sp->acpi_pwr); */
	acpi_set_pwr_state(sp->pci_dev, ACPI_D2);
	MOD_DEC_USE_COUNT;
	return 0;
}

/* The Speedo-3 has an especially awkward and unusable method of getting
   statistics out of the chip.  It takes an unpredictable length of time
   for the dump-stats command to complete.  To avoid a busy-wait loop we
   update the stats with the previous dump results, and then trigger a
   new dump.

   These problems are mitigated by the current /proc implementation, which
   calls this routine first to judge the output length, and then to emit the
   output.

   Oh, and incoming frames are dropped while executing dump-stats!
   */
static struct net_device_stats *speedo_get_stats(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;

	/* Update only if the previous dump finished: the chip writes the
	   0xA007 completion marker into lstats when the dump completes. */
	if (sp->lstats.done_marker == le32_to_cpu(0xA007)) {
		sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats.tx_coll16_errs);
		sp->stats.tx_window_errors += le32_to_cpu(sp->lstats.tx_late_colls);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_underruns);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_lost_carrier);
		/*sp->stats.tx_deferred += le32_to_cpu(sp->lstats.tx_deferred);*/
		sp->stats.collisions += le32_to_cpu(sp->lstats.tx_total_colls);
		sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats.rx_crc_errs);
		sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats.rx_align_errs);
		sp->stats.rx_over_errors += le32_to_cpu(sp->lstats.rx_resource_errs);
		sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats.rx_overrun_errs);
		sp->stats.rx_length_errors += le32_to_cpu(sp->lstats.rx_runt_errs);
		/* Clear the marker and kick off the next asynchronous dump. */
		sp->lstats.done_marker = 0x0000;
		if (netif_running(dev)) {
			wait_for_cmd_done(ioaddr + SCBCmd);
			outb(CUDumpStats, ioaddr + SCBCmd);
		}
	}
	return &sp->stats;
}

/* Private ioctls: read the PHY address and read/write MII registers.
   The card is raised to ACPI D0 around each MII access and restored
   afterwards. */
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	u16 *data = (u16 *)&rq->ifr_data;
	int phy = sp->phy[0] & 0x1f;
	int saved_acpi;

    switch(cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = phy;
		/* Fall through: return the PHY address and perform the read
		   in a single call -- presumably the intended convention of
		   this private-ioctl interface. */
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		saved_acpi = acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
		data[3] = mdio_read(ioaddr, data[0], data[1]);
		acpi_set_pwr_state(sp->pci_dev, saved_acpi);
		return 0;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		saved_acpi = acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
		mdio_write(ioaddr, data[0], data[1], data[2]);
		acpi_set_pwr_state(sp->pci_dev, saved_acpi);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Set or clear the multicast filter for this adaptor.
This is very ugly with Intel chips -- we usually have to execute an
   entire configuration command, plus process a multicast command.
   This is complicated.  We must put a large configuration command and
   an arbitrarily-sized multicast command in the transmit list.
   To minimize the disruption -- the previous command might have already
   loaded the link -- we convert the current command block, normally a Tx
   command, into a no-op and link it to the new command.
*/
static void set_rx_mode(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	struct descriptor *last_cmd;
	char new_rx_mode;
	unsigned long flags;
	int entry, i;

	/* Mode encoding: 3 = promiscuous, 1 = receive-all-multicast,
	   0 = normal filtering.  The two low bits also feed config bytes
	   15 and 21 below. */
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		new_rx_mode = 3;
	} else if ((dev->flags & IFF_ALLMULTI)  ||
			   dev->mc_count > multicast_filter_limit) {
		new_rx_mode = 1;
	} else
		new_rx_mode = 0;

	if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
	  /* The Tx ring is full -- don't add anything!  Presumably the new mode
		 is in config_cmd_data and will be added anyway, otherwise we wait
		 for a timer tick or the mode to change again. */
		sp->rx_mode = -1;
		return;
	}

	if (new_rx_mode != sp->rx_mode) {
		u8 *config_cmd_data;

		/* Queue a CmdConfigure frame in the Tx ring under the lock. */
		spin_lock_irqsave(&sp->lock, flags);
		entry = sp->cur_tx % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

		sp->tx_skbuff[entry] = 0;			/* Redundant. */
		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
		sp->cur_tx++;
		sp->tx_ring[entry].link =
			virt_to_le32desc(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
		/* We may nominally release the lock here. */
		config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
		/* Construct a full CmdConfig frame.  Byte meanings are per the
		   i82558 configure-command layout -- confirm offsets against the
		   i8255x developer manual. */
		memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
		config_cmd_data[1] = (txfifo << 4) | rxfifo;
		config_cmd_data[4] = rxdmacount;
		config_cmd_data[5] = txdmacount + 0x80;
		config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
		config_cmd_data[19] = sp->flow_ctrl ? 0xBD : 0x80;
		config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
		config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
		if (sp->phy[0] & 0x8000) {			/* Use the AUI port instead. */
			config_cmd_data[15] |= 0x80;
			config_cmd_data[8] = 0;
		}
		/* Trigger the command unit resume: un-suspend the previous tail
		   command, then issue CUResume. */
		wait_for_cmd_done(ioaddr + SCBCmd);
		clear_suspend(last_cmd);
		outb(CUResume, ioaddr + SCBCmd);
		spin_unlock_irqrestore(&sp->lock, flags);
		sp->last_cmd_time = jiffies;
	}

	if (new_rx_mode == 0  &&  dev->mc_count < 4) {
		/* The simple case of 0-3 multicast list entries occurs often, and
		   fits within one tx_ring[] entry. */
		struct dev_mc_list *mclist;
		u16 *setup_params, *eaddrs;

		spin_lock_irqsave(&sp->lock, flags);
		entry = sp->cur_tx % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

		sp->tx_skbuff[entry] = 0;
		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
		sp->cur_tx++;
		sp->tx_ring[entry].link =
			virt_to_le32desc(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
		/* We may nominally release the lock here. */
		sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
		setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
		*setup_params++ = cpu_to_le16(dev->mc_count*6);
		/* Fill in the multicast addresses (6 bytes = three u16s each). */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			 i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
		}

		wait_for_cmd_done(ioaddr + SCBCmd);
		clear_suspend(last_cmd);
		/* Immediately trigger the command unit resume. */
		outb(CUResume, ioaddr + SCBCmd);
		spin_unlock_irqrestore(&sp->lock, flags);
		sp->last_cmd_time = jiffies;
	} else if (new_rx_mode == 0) {
		/* Large multicast list: build an out-of-ring setup frame and
		   splice it into the command list via a NoOp. */
		struct dev_mc_list *mclist;
		u16 *setup_params, *eaddrs;
		struct descriptor *mc_setup_frm = sp->mc_setup_frm;
		int i;

		if (sp->mc_setup_frm_len < 10 + dev->mc_count*6
			|| sp->mc_setup_frm == NULL) {
			/* Allocate a full setup frame, 10bytes + <max addrs>. */
			if (sp->mc_setup_frm)
				kfree(sp->mc_setup_frm);
			sp->mc_setup_busy = 0;
			sp->mc_setup_frm_len = 10 + multicast_filter_limit*6;
			sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC);
			if (sp->mc_setup_frm == NULL) {
				printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
					   dev->name);
				sp->rx_mode = -1; /* We failed, try again. */
				return;
			}
		}
		/* If we are busy, someone might be quickly adding to the MC list.
		   Try again later when the list updates stop. */
		if (sp->mc_setup_busy) {
			sp->rx_mode = -1;
			return;
		}
		mc_setup_frm = sp->mc_setup_frm;
		/* Fill the setup frame. */
		if (speedo_debug > 1)
			printk(KERN_DEBUG "%s: Constructing a setup frame at %p, "
				   "%d bytes.\n",
				   dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len);
		mc_setup_frm->cmd_status =
			cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
		/* Link set below. */
		setup_params = (u16 *)&mc_setup_frm->params;
		*setup_params++ = cpu_to_le16(dev->mc_count*6);
		/* Fill in the multicast addresses. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			 i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
		}

		/* Disable interrupts while playing with the Tx Cmd list. */
		spin_lock_irqsave(&sp->lock, flags);
		entry = sp->cur_tx % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = mc_setup_frm;
		sp->mc_setup_busy++;

		/* Change the command to a NoOp, pointing to the CmdMulti command. */
		sp->tx_skbuff[entry] = 0;
		sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
		sp->cur_tx++;
		sp->tx_ring[entry].link = virt_to_le32desc(mc_setup_frm);
		/* We may nominally release the lock here. */

		/* Set the link in the setup frame back into the Tx ring. */
		mc_setup_frm->link =
			virt_to_le32desc(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));

		wait_for_cmd_done(ioaddr + SCBCmd);
		clear_suspend(last_cmd);
		/* Immediately trigger the command unit resume. */
		outb(CUResume, ioaddr + SCBCmd);
		spin_unlock_irqrestore(&sp->lock, flags);
		sp->last_cmd_time = jiffies;
		if (speedo_debug > 5)
			printk(" CmdMCSetup frame length %d in entry %d.\n",
				   dev->mc_count, entry);
	}

	sp->rx_mode = new_rx_mode;
}

#ifdef CARDBUS

#include <pcmcia/driver_ops.h>

/* CardBus attach entry point.
   NOTE(review): this function is cut off at the end of this chunk
   (page 1 of 5); the body continues in the next page. */
static dev_node_t *eepro100_attach(dev_locator_t *loc)
{
    struct pci_dev *pdev;
	u16 dev_id;
	u16 vendor_id;
	u32 io;
	u8 bus, devfn, irq;
    int cards_found;

	if (loc->bus != LOC_PCI) return NULL;
	b

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -