
📄 3c575_cb.c

📁 pcmcia source code
💻 C
📖 Page 1 of 5
			outw(SetTxReclaim | 0xff, ioaddr + EL3_CMD);
			outl(virt_to_bus(&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]),
				 ioaddr + DownListPtr);
			vp->restart_tx = 0;
			queued_packet++;
		}
	}
	vp->cur_tx++;
	if (vp->cur_tx - vp->dirty_tx >= TX_QUEUE_LEN)
		vp->tx_full = 1;
	else {					/* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
		netif_start_queue(dev);
	}
	spin_unlock_irqrestore(&vp->lock, flags);
	dev->trans_start = jiffies;
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr;
	int latency, status;
	int work_done = max_interrupt_work;

	ioaddr = dev->base_addr;
	latency = inb(ioaddr + Timer);
	status = inw(ioaddr + EL3_STATUS);
	if (status & IntReq) {
		status |= vp->deferred_irqs;
		vp->deferred_irqs = 0;
	}
	if (status == 0xffff)
		goto handler_exit;

	if (vortex_debug > 4)
		printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, latency);
	spin_lock(&vp->lock);

	do {
		if (vortex_debug > 5)
			printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
				   dev->name, status);
		if (status & RxComplete)
			vortex_rx(dev);
		if (status & UpComplete) {
			outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
			boomerang_rx(dev);
		}

		if (status & TxAvailable) {
			if (vortex_debug > 5)
				printk(KERN_DEBUG "	TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			netif_wake_queue(dev);
		}

		if (status & DownComplete) {
			unsigned int dirty_tx = vp->dirty_tx;

			outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
			while (vp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
				if (inl(ioaddr + DownListPtr) ==
					virt_to_bus(&vp->tx_ring[entry]))
					break;			/* It still hasn't been processed. */
				if (vp->tx_skbuff[entry]) {
					dev_kfree_skb_irq(vp->tx_skbuff[entry]);
					vp->tx_skbuff[entry] = 0;
				}
				/* vp->stats.tx_packets++;  Counted below. */
				dirty_tx++;
			}
			vp->dirty_tx = dirty_tx;
			if (vp->tx_full && (vp->cur_tx - dirty_tx < TX_QUEUE_LEN)) {
				vp->tx_full = 0;
				netif_wake_queue(dev);
			}
		}

		if (status & DMADone) {
			if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
				outw(0x1000, ioaddr + Wn7_MasterStatus);	/* Ack the event. */
				dev_kfree_skb_irq(vp->tx_skb);	/* Release the tx buffer. */
				if (inw(ioaddr + TxFree) > 1536) {
					netif_wake_queue(dev);
				} else	/* Interrupt when FIFO has room for max-sized packet. */
					outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
			if (status == 0xffff)
				break;
			vortex_error(dev, status);
		}

		if (--work_done < 0) {
			printk(KERN_DEBUG "%s: Too much work in interrupt, status "
				   "%4.4x.\n", dev->name, status);
			/* Disable all pending interrupts. */
			do {
				vp->deferred_irqs |= status;
				outw(SetStatusEnb | (~vp->deferred_irqs & vp->status_enable),
					 ioaddr + EL3_CMD);
				outw(AckIntr | (vp->deferred_irqs & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			del_timer(&vp->timer);
			vp->timer.expires = jiffies + 1;
			add_timer(&vp->timer);
			break;
		}
		/* Acknowledge the IRQ. */
		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
		if (vp->cb_fn_base)		/* The PCMCIA people are idiots.  */
			writel(0x8000, vp->cb_fn_base + 4);
	} while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));

	spin_unlock(&vp->lock);

	if (vortex_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
			   dev->name, status);
handler_exit:
	return;
}
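/* A note on the handler above: when more than max_interrupt_work events
   arrive in one pass, the remaining interrupt sources are masked via
   SetStatusEnb, remembered in vp->deferred_irqs, and vp->timer is re-armed
   for the next jiffy; the timer routine is then responsible for re-enabling
   and replaying the deferred sources.  This bounds the time spent in
   interrupt context under heavy load. */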
static int vortex_rx(struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;
	short rx_status;

	if (vortex_debug > 5)
		printk(KERN_DEBUG "   In vortex_rx(), status %4.4x, rx_status %4.4x.\n",
			   inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
	while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
		if (rx_status & 0x4000) {	/* Error, update stats. */
			unsigned char rx_error = inb(ioaddr + RxErrors);
			if (vortex_debug > 2)
				printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
			vp->stats.rx_errors++;
			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K! */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len + 5);
			if (vortex_debug > 4)
				printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);
			if (skb != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				if (vp->bus_master &&
					! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
					outl(virt_to_bus(skb_put(skb, pkt_len)),
						 ioaddr + Wn7_MasterAddr);
					outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					outw(StartDMAUp, ioaddr + EL3_CMD);
					while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
						;
				} else {
					insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
						 (pkt_len + 3) >> 2);
				}
				outw(RxDiscard, ioaddr + EL3_CMD);	/* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
				vp->stats.rx_packets++;
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
						break;
				continue;
			} else if (vortex_debug)
				printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
					   "size %d.\n", dev->name, pkt_len);
		}
		vp->stats.rx_dropped++;
		issue_and_wait(dev, RxDiscard);
	}

	return 0;
}
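/* A note on the two receive paths: vortex_rx() above serves the older
   FIFO-based (Vortex-generation) mode, draining each frame with insl() or,
   when vp->bus_master is set, a single-shot Wn7 DMA transfer.
   boomerang_rx() below serves the full bus-master mode, where the NIC
   fills a descriptor ring on its own and the driver only harvests the
   completed entries. */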
static int
boomerang_rx(struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	int entry = vp->cur_rx % RX_RING_SIZE;
	long ioaddr = dev->base_addr;
	int rx_status;
	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;

	if (vortex_debug > 5)
		printk(KERN_DEBUG "  In boomerang_rx(), status %4.4x, rx_status "
			   "%4.4x.\n",
			   inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete) {
		if (--rx_work_limit < 0)
			break;
		if (rx_status & RxDError) {	/* Error, update stats. */
			unsigned char rx_error = rx_status >> 16;
			if (vortex_debug > 2)
				printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
			vp->stats.rx_errors++;
			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K! */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			if (vortex_debug > 4)
				printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				memcpy(skb_put(skb, pkt_len),
					   le32desc_to_virt(vp->rx_ring[entry].addr), pkt_len);
				rx_copy++;
			} else {
				void *temp;
				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = NULL;
				temp = skb_put(skb, pkt_len);
				/* Remove this checking code for final release. */
				if (le32desc_to_virt(vp->rx_ring[entry].addr) != temp)
					printk(KERN_ERR "%s: Warning -- the skbuff addresses do not match"
						   " in boomerang_rx: %p vs. %p.\n", dev->name,
						   le32desc_to_virt(vp->rx_ring[entry].addr),
						   temp);
				rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			if (use_hw_csums) {
				int csum_bits = rx_status & 0xee000000;
				if (csum_bits &&
					(csum_bits == (IPChksumValid | TCPChksumValid) ||
					 csum_bits == (IPChksumValid | UDPChksumValid))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					rx_csumhits++;
				}
			}
			netif_rx(skb);
			dev->last_rx = jiffies;
			vp->stats.rx_packets++;
		}
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	/* Refill the Rx ring buffers. */
	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
		struct sk_buff *skb;
		entry = vp->dirty_rx % RX_RING_SIZE;
		if (vp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;			/* Bad news!  */
			skb->dev = dev;			/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			vp->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
			vp->rx_skbuff[entry] = skb;
		}
		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
		outw(UpUnstall, ioaddr + EL3_CMD);
	}
	return 0;
}
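/* A note on the refill loop above: clearing each descriptor's status word
   hands the slot back to the NIC, and UpUnstall restarts the upload engine
   in case it stalled on an empty ring; if dev_alloc_skb() fails, the slot
   stays empty and is retried on a later pass.  vortex_down() below is the
   hardware quiesce path used by vortex_close(): statistics first, then
   receiver and transmitter, then interrupts and the DMA list pointers. */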
static void
vortex_down(struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;

	del_timer(&vp->timer);

	/* Turn off statistics ASAP.  We update vp->stats below. */
	outw(StatsDisable, ioaddr + EL3_CMD);

	/* Disable the receiver and transmitter. */
	outw(RxDisable, ioaddr + EL3_CMD);
	outw(TxDisable, ioaddr + EL3_CMD);

	if (dev->if_port == XCVR_10base2)
		/* Turn off thinnet power.  Green! */
		outw(StopCoax, ioaddr + EL3_CMD);

	outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);

	update_stats(ioaddr, dev);
	if (vp->full_bus_master_rx)
		outl(0, ioaddr + UpListPtr);
	if (vp->full_bus_master_tx)
		outl(0, ioaddr + DownListPtr);

	if (vp->capabilities & CapPwrMgmt)
		acpi_set_WOL(dev);
}

static int
vortex_close(struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	if (netif_device_present(dev)) {
		netif_stop_queue(dev);
		netif_mark_down(dev);
		vortex_down(dev);
	}

	if (vortex_debug > 1) {
		printk(KERN_DEBUG "%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
			   dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
		printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
			   " tx_queued %d Rx pre-checksummed %d.\n",
			   dev->name, rx_nocopy, rx_copy, queued_packet, rx_csumhits);
	}

	free_irq(dev->irq, dev);

	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
		for (i = 0; i < RX_RING_SIZE; i++)
			if (vp->rx_skbuff[i]) {
#if LINUX_VERSION_CODE < 0x20100
				vp->rx_skbuff[i]->free = 1;
#endif
				dev_free_skb(vp->rx_skbuff[i]);
				vp->rx_skbuff[i] = 0;
			}
	}
	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
		for (i = 0; i < TX_RING_SIZE; i++)
			if (vp->tx_skbuff[i]) {
				dev_free_skb(vp->tx_skbuff[i]);
				vp->tx_skbuff[i] = 0;
			}
	}

	MOD_DEC_USE_COUNT;
	vp->open = 0;
	return 0;
}

static struct net_device_stats *vortex_get_stats(struct net_device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	unsigned long flags;

	if (netif_device_present(dev)) {
		spin_lock_irqsave(&vp->lock, flags);
		update_stats(dev->base_addr, dev);
		spin_unlock_irqrestore(&vp->lock, flags);
	}
	return &vp->stats;
}

/*  Update statistics.	Unlike with the EL3 we need not worry
