⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 olympic.c

📁 内核linux2.4.20,可跟rtlinux3.2打补丁 组成实时linux系统,编译内核
💻 C
📖 第 1 页 / 共 5 页
字号:
	/* NOTE(review): this excerpt begins inside olympic_rx() — the function
	   signature (presumably `static void olympic_rx(struct net_device *dev)`)
	   is above the visible chunk.  Drains completed receive descriptors from
	   the adapter's RX status ring and passes frames up the stack. */
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 *olympic_mmio=olympic_priv->olympic_mmio;
	struct olympic_rx_status *rx_status;
	struct olympic_rx_desc *rx_desc ; 
	int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
	struct sk_buff *skb, *skb2;
	int i;

	/* Peek at the next status slot; a non-zero status_buffercnt means the
	   adapter has completed at least one frame. */
	rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;  
	while (rx_status->status_buffercnt) {
                u32 l_status_buffercnt;
		olympic_priv->rx_status_last_received++ ;
		olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
		printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
		/* Low 16 bits: total frame length; high 16 bits of the other word:
		   length of the final fragment. */
		length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
		buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff; 
		i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */ 
		frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16; 
#if OLYMPIC_DEBUG 
		printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
#endif
                l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
		if(l_status_buffercnt & 0xC0000000) {
			/* Frame completed; 0x3B000000 covers the per-frame error bits. */
			if (l_status_buffercnt & 0x3B000000) {
				if (olympic_priv->olympic_message_level) {
					if (l_status_buffercnt & (1<<29))  /* Rx Frame Truncated */
						printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
					if (l_status_buffercnt & (1<<28)) /* Rx receive overrun */
						printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
					if (l_status_buffercnt & (1<<27)) /* No receive buffers */
						printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
					if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
						printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
					if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
						printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
				} 
				/* Skip the i buffers the bad frame occupied. */
				olympic_priv->rx_ring_last_received += i ; 
				olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; 
				olympic_priv->olympic_stats.rx_errors++;	 
			} else {			
				if (buffer_cnt == 1) {
					skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ; 
				} else {
					skb = dev_alloc_skb(length) ; 
				}
				if (skb == NULL) {
					printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
					olympic_priv->olympic_stats.rx_dropped++ ; 
					/* Update counters even though we don't transfer the frame */
					olympic_priv->rx_ring_last_received += i ; 
					olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;  
				} else  {
					skb->dev = dev ; 
					/* Optimise based upon number of buffers used.
					   If only one buffer is used we can simply swap the buffers around.
					   If more than one then we must use the new buffer and copy the information
					   first. Ideally all frames would be in a single buffer, this can be tuned by
					   altering the buffer size. If the length of the packet is less than
					   1500 bytes we're going to copy it over anyway to stop packets getting
					   dropped from sockets with buffers smaller than our pkt_buf_sz. */
					if (buffer_cnt==1) {
						olympic_priv->rx_ring_last_received++ ; 
						olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
						rx_ring_last_received = olympic_priv->rx_ring_last_received ;
						if (length > 1500) { 
							/* Swap: hand the full ring skb upstream and park the
							   freshly allocated skb in the ring in its place. */
							skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ; 
							/* unmap buffer */
							pci_unmap_single(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), 
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 
							/* length-4: presumably drops the 4-byte trailing
							   frame checksum — TODO confirm against adapter docs. */
							skb_put(skb2,length-4);
							skb2->protocol = tr_type_trans(skb2,dev);
							olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer = 
								cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, 
								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
							olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length = 
								cpu_to_le32(olympic_priv->pkt_buf_sz); 
							olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ; 
							netif_rx(skb2) ; 
						} else { 
							/* Small frame: copy out of the (still mapped) ring
							   buffer into the new skb; ring buffer stays in place. */
							pci_dma_sync_single(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 
							memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ; 
							skb->protocol = tr_type_trans(skb,dev) ; 
							netif_rx(skb) ; 
						} 
					} else {
						do { /* Walk the buffers */ 
							olympic_priv->rx_ring_last_received++ ; 
							olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
							rx_ring_last_received = olympic_priv->rx_ring_last_received ; 
							pci_dma_sync_single(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 
							rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
							/* Last fragment uses frag_len; the rest are full buffers. */
							cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length)); 
							memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
						} while (--i) ; 
						skb_trim(skb,skb->len-4) ; 
						skb->protocol = tr_type_trans(skb,dev);
						netif_rx(skb) ; 
					} 
					dev->last_rx = jiffies ; 
					olympic_priv->olympic_stats.rx_packets++ ; 
					olympic_priv->olympic_stats.rx_bytes += length ; 
				} /* if skb == null */
			} /* If status & 0x3b */
		} else { /* if buffercnt & 0xC */
			olympic_priv->rx_ring_last_received += i ; 
			olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ; 
		} 
		/* Clear the status slot for reuse and advance to the next one. */
		rx_status->fragmentcnt_framelen = 0 ; 
		rx_status->status_buffercnt = 0 ; 
		rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
		/* Return buffer_cnt buffers to the adapter; bit 15 is toggled on
		   each enqueue — NOTE(review): presumably a ring-ownership toggle
		   required by the hardware, confirm against the Pit/Pit-Phy spec. */
		writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) |  buffer_cnt , olympic_mmio+RXENQ); 
	} /* while */
}

/*
 * Interrupt handler.  Reads SISR, dispatches each pending condition
 * (SRB/ARB/TRB replies, TX completion, RX status, adapter check), then
 * re-enables the master interrupt via SISR_MASK_SUM.
 */
static void olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs) 
{
	struct net_device *dev= (struct net_device *)dev_id;
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 *olympic_mmio=olympic_priv->olympic_mmio;
	u32 sisr;
	u8 *adapter_check_area ; 

	/* 
	 *  Read sisr but don't reset it yet. 
	 *  The indication bit may have been set but the interrupt latch
	 *  bit may not be set, so we'd lose the interrupt later. 
	 */ 
	sisr=readl(olympic_mmio+SISR) ; 
	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */ 
		return ;
	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */ 

	spin_lock(&olympic_priv->olympic_lock);

	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |  
			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF)) {  

		if(sisr & SISR_SRB_REPLY) {
			/* srb_queued==1: a caller is sleeping on srb_wait;
			   srb_queued==2: completion handled by the bottom half. */
			if(olympic_priv->srb_queued==1) {
				wake_up_interruptible(&olympic_priv->srb_wait);
			} else if (olympic_priv->srb_queued==2) { 
				olympic_srb_bh(dev) ; 
			}
			olympic_priv->srb_queued=0;
		} /* SISR_SRB_REPLY */

		/* We shouldn't ever miss the Tx interrupt, but you never know; hence the loop to ensure
		   we get all tx completions. */
		if (sisr & SISR_TX1_EOF) {
			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) { 
				olympic_priv->tx_ring_last_status++;
				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
				olympic_priv->free_tx_ring_entries++;
				olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
				olympic_priv->olympic_stats.tx_packets++ ; 
				pci_unmap_single(olympic_priv->pdev, 
					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer), 
					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
				/* 0xdeadbeef marks a descriptor slot with no mapped buffer
				   (same sentinel is checked in the rx-ring teardown paths). */
				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=0xdeadbeef;
				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
			}
			netif_wake_queue(dev);
		} /* SISR_TX1_EOF */

		if (sisr & SISR_RX_STATUS) {
			olympic_rx(dev);
		} /* SISR_RX_STATUS */

		if (sisr & SISR_ADAPTER_CHECK) {
			int i ; 
			netif_stop_queue(dev);
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
			adapter_check_area = (u8 *)(olympic_mmio+LAPWWO) ; 
			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ; 
			/* The adapter is effectively dead, clean up and exit */
			for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
				dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
				if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
					pci_unmap_single(olympic_priv->pdev, 
						le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
						olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
				}
				olympic_priv->rx_status_last_received++;
				olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
			}
			/* unmap rings */
			pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr, 
				sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
			pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
				sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
			pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr, 
				sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
			pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr, 
				sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
			free_irq(dev->irq, dev) ;
			/* Prevent olympic_close from being called on a dead adapter. */
			dev->stop = NULL ;  
			spin_unlock(&olympic_priv->olympic_lock) ; 
			return ; 
		} /* SISR_ADAPTER_CHECK */

		if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */  
			if (olympic_priv->asb_queued) {
				olympic_asb_bh(dev) ; 
			}
		} /* SISR_ASB_FREE */

		if (sisr & SISR_ARB_CMD) {
			olympic_arb_cmd(dev) ; 
		} /* SISR_ARB_CMD */

		if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (olympic_priv->trb_queued) {
				wake_up_interruptible(&olympic_priv->trb_wait);
			}
			olympic_priv->trb_queued = 0 ; 
		} /* SISR_TRB_REPLY */

		if (sisr & SISR_RX_NOBUF) {
			/* According to the documentation, we don't have to do anything, but trapping it keeps it out of
			   /var/log/messages.  */
		} /* SISR_RX_NOBUF */
	} else { 
		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
	} /* One of the interrupts we want */

	/* Re-enable the master interrupt. */
	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);	
	spin_unlock(&olympic_priv->olympic_lock) ; 
}	

/*
 * hard_start_xmit entry point.  Maps the skb for DMA, places it in the
 * next free tx descriptor and kicks the adapter via TXENQ_1.
 * Returns 0 on success, 1 (requeue) when the tx ring is full.
 */
static int olympic_xmit(struct sk_buff *skb, struct net_device *dev) 
{
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 *olympic_mmio=olympic_priv->olympic_mmio;
	unsigned long flags ; 

	spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
	netif_stop_queue(dev);

	if(olympic_priv->free_tx_ring_entries) {
		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer = 
			cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
		/* Bit 31 presumably flags the descriptor as hardware-owned /
		   ready to transmit — TODO confirm against the adapter spec. */
		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
		olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
		olympic_priv->free_tx_ring_entries--;
        	olympic_priv->tx_ring_free++;
        	olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
		/* Same bit-15 toggle idiom as the RXENQ write in olympic_rx. */
		writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
		netif_wake_queue(dev);
		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
		return 0;
	} else {
		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
		return 1;
	} 
}	

/*
 * dev->stop entry point.  Issues SRB_CLOSE_ADAPTER to the adapter, waits
 * (up to 60s) for the SRB reply, then frees the rx skbs and unmaps the
 * DMA rings.
 * NOTE(review): this excerpt is truncated — the function continues past
 * the end of the visible chunk (tx ring unmap and return are not shown).
 */
static int olympic_close(struct net_device *dev) 
{
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
 	u8 *olympic_mmio=olympic_priv->olympic_mmio,*srb;
	unsigned long t,flags;
	int i;

	netif_stop_queue(dev);

	/* Point the adapter's shared-RAM window at the SRB and build the
	   close-adapter request in it. */
	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

    	writeb(SRB_CLOSE_ADAPTER,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);

	save_flags(flags);
	cli();	
	olympic_priv->srb_queued=1;

	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

	/* Wait for the SRB reply interrupt; bail on signal or 60s timeout. */
	t = jiffies ; 
	while(olympic_priv->srb_queued) {
	        interruptible_sleep_on_timeout(&olympic_priv->srb_wait, jiffies+60*HZ);
        	if(signal_pending(current))	{            
			printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
            		printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
            		olympic_priv->srb_queued=0;
            		break;
        	}
		if ((jiffies-t) > 60*HZ) { 
			printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ; 
			olympic_priv->srb_queued=0;
			break ; 
		} 
    	}
	restore_flags(flags) ; 

	olympic_priv->rx_status_last_received++;
	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;

	/* Free every rx skb and unmap any slot still holding a DMA mapping
	   (0xdeadbeef marks an unmapped slot). */
	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
		dev_kfree_skb(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
			pci_unmap_single(olympic_priv->pdev, 
				le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
				olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
		}
		olympic_priv->rx_status_last_received++;
		olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
	}
	/* unmap rings */
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr, 
		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr, 
		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -