
📄 ni65.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
	ni65_init_lance(p,dev->dev_addr,0x00,0x00);

	/*
	 * ni65_set_lance_mem() sets L_ADDRREG to CSR0
	 * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
	 */

	if(inw(PORT+L_DATAREG) & CSR0_IDON)	{
		ni65_set_performance(p);
		/* init OK: start lance , enable interrupts */
		writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
		return 1; /* ->OK */
	}
	printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
	flags=claim_dma_lock();
	disable_dma(dev->dma);
	release_dma_lock(flags);
	return 0; /* ->Error */
}

/*
 * interrupt handler
 */
static irqreturn_t ni65_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
	int csr0 = 0;
	struct net_device *dev = dev_id;
	struct priv *p;
	int bcnt = 32;

	p = (struct priv *) dev->priv;

	spin_lock(&p->ring_lock);

	while(--bcnt) {
		csr0 = inw(PORT+L_DATAREG);

#if 0
		writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
#else
		writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
#endif

		if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
			break;

		if(csr0 & CSR0_RINT) /* RECV-int? */
			ni65_recv_intr(dev,csr0);
		if(csr0 & CSR0_TINT) /* XMIT-int? */
			ni65_xmit_intr(dev,csr0);

		if(csr0 & CSR0_ERR)
		{
			struct priv *p = (struct priv *) dev->priv;
			if(debuglevel > 1)
				printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
			if(csr0 & CSR0_BABL)
				p->stats.tx_errors++;
			if(csr0 & CSR0_MISS) {
				int i;
				for(i=0;i<RMDNUM;i++)
					printk("%02x ",p->rmdhead[i].u.s.status);
				printk("\n");
				p->stats.rx_errors++;
			}
			if(csr0 & CSR0_MERR) {
				if(debuglevel > 1)
					printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
				ni65_stop_start(dev,p);
			}
		}
	}

#ifdef RCV_PARANOIA_CHECK
{
 int j;
 for(j=0;j<RMDNUM;j++)
 {
	struct priv *p = (struct priv *) dev->priv;
	int i,k,num1,num2;
	for(i=RMDNUM-1;i>0;i--) {
		 num2 = (p->rmdnum + i) & (RMDNUM-1);
		 if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
				break;
	}

	if(i) {
		for(k=0;k<RMDNUM;k++) {
			num1 = (p->rmdnum + k) & (RMDNUM-1);
			if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
				break;
		}
		if(!k)
			break;

		if(debuglevel > 0)
		{
			char buf[256],*buf1;
			int k;
			buf1 = buf;
			for(k=0;k<RMDNUM;k++) {
				sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
				buf1 += 3;
			}
			*buf1 = 0;
			printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
		}

		p->rmdnum = num1;
		ni65_recv_intr(dev,csr0);
		if((p->rmdhead[num2].u.s.status & RCV_OWN))
			break;	/* ok, we are 'in sync' again */
	}
	else
		break;
 }
}
#endif

	if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
		printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
		ni65_stop_start(dev,p);
	}
	else
		writedatareg(CSR0_INEA);

	spin_unlock(&p->ring_lock);
	return IRQ_HANDLED;
}

/*
 * We have received an Xmit-Interrupt ..
 * send a new packet if necessary
 */
static void ni65_xmit_intr(struct net_device *dev,int csr0)
{
	struct priv *p = (struct priv *) dev->priv;

	while(p->xmit_queued)
	{
		struct tmd *tmdp = p->tmdhead + p->tmdlast;
		int tmdstat = tmdp->u.s.status;

		if(tmdstat & XMIT_OWN)
			break;

		if(tmdstat & XMIT_ERR)
		{
#if 0
			if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
				printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
#endif
			/* checking some errors */
			if(tmdp->status2 & XMIT_RTRY)
				p->stats.tx_aborted_errors++;
			if(tmdp->status2 & XMIT_LCAR)
				p->stats.tx_carrier_errors++;
			if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
				/* this stops the xmitter */
				p->stats.tx_fifo_errors++;
				if(debuglevel > 0)
					printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
				if(p->features & INIT_RING_BEFORE_START) {
					tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;	/* test: resend this frame */
					ni65_stop_start(dev,p);
					break;	/* no more Xmit processing .. */
				}
				else
					ni65_stop_start(dev,p);
			}
			if(debuglevel > 2)
				printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
			if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
				p->stats.tx_errors++;
			tmdp->status2 = 0;
		}
		else {
			p->stats.tx_bytes -= (short)(tmdp->blen);
			p->stats.tx_packets++;
		}

#ifdef XMT_VIA_SKB
		if(p->tmd_skb[p->tmdlast]) {
			dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
			p->tmd_skb[p->tmdlast] = NULL;
		}
#endif

		p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
		if(p->tmdlast == p->tmdnum)
			p->xmit_queued = 0;
	}
	netif_wake_queue(dev);
}

/*
 * We have received a packet
 */
static void ni65_recv_intr(struct net_device *dev,int csr0)
{
	struct rmd *rmdp;
	int rmdstat,len;
	int cnt=0;
	struct priv *p = (struct priv *) dev->priv;

	rmdp = p->rmdhead + p->rmdnum;
	while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
	{
		cnt++;
		if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
		{
			if(!(rmdstat & RCV_ERR)) {
				if(rmdstat & RCV_START)
				{
					p->stats.rx_length_errors++;
					printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
				}
			}
			else {
				if(debuglevel > 2)
					printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
							dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
				if(rmdstat & RCV_FRAM)
					p->stats.rx_frame_errors++;
				if(rmdstat & RCV_OFLO)
					p->stats.rx_over_errors++;
				if(rmdstat & RCV_CRC)
					p->stats.rx_crc_errors++;
				if(rmdstat & RCV_BUF_ERR)
					p->stats.rx_fifo_errors++;
			}
			if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
				p->stats.rx_errors++;
		}
		else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
		{
#ifdef RCV_VIA_SKB
			struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
			if (skb)
				skb_reserve(skb,16);
#else
			struct sk_buff *skb = dev_alloc_skb(len+2);
#endif
			if(skb)
			{
				skb_reserve(skb,2);
				skb->dev = dev;
#ifdef RCV_VIA_SKB
				if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
					skb_put(skb,len);
					eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
				}
				else {
					struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
					skb_put(skb,R_BUF_SIZE);
					p->recv_skb[p->rmdnum] = skb;
					rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
					skb = skb1;
					skb_trim(skb,len);
				}
#else
				skb_put(skb,len);
				eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
#endif
				p->stats.rx_packets++;
				p->stats.rx_bytes += len;
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
			}
			else
			{
				printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
				p->stats.rx_dropped++;
			}
		}
		else {
			printk(KERN_INFO "%s: received runt packet\n",dev->name);
			p->stats.rx_errors++;
		}
		rmdp->blen = -(R_BUF_SIZE-8);
		rmdp->mlen = 0;
		rmdp->u.s.status = RCV_OWN; /* change owner */
		p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
		rmdp = p->rmdhead + p->rmdnum;
	}
}

/*
 * kick xmitter ..
 */

static void ni65_timeout(struct net_device *dev)
{
	int i;
	struct priv *p = (struct priv *) dev->priv;

	printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
	for(i=0;i<TMDNUM;i++)
		printk("%02x ",p->tmdhead[i].u.s.status);
	printk("\n");
	ni65_lance_reinit(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

/*
 *	Send a packet
 */

static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct priv *p = (struct priv *) dev->priv;

	netif_stop_queue(dev);

	if (test_and_set_bit(0, (void*)&p->lock)) {
		printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
		return 1;
	}

	{
		short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		struct tmd *tmdp;
		unsigned long flags;

#ifdef XMT_VIA_SKB
		if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
#endif

			memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data,
					(skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len);
			if (len > skb->len)
				memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
			dev_kfree_skb (skb);

			spin_lock_irqsave(&p->ring_lock, flags);
			tmdp = p->tmdhead + p->tmdnum;
			tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
			p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);

#ifdef XMT_VIA_SKB
		}
		else {
			spin_lock_irqsave(&p->ring_lock, flags);

			tmdp = p->tmdhead + p->tmdnum;
			tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
			p->tmd_skb[p->tmdnum] = skb;
		}
#endif
		tmdp->blen = -len;

		tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
		writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */

		p->xmit_queued = 1;
		p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);

		if(p->tmdnum != p->tmdlast)
			netif_wake_queue(dev);

		p->lock = 0;
		dev->trans_start = jiffies;

		spin_unlock_irqrestore(&p->ring_lock, flags);
	}

	return 0;
}

static struct net_device_stats *ni65_get_stats(struct net_device *dev)
{
#if 0
	int i;
	struct priv *p = (struct priv *) dev->priv;
	for(i=0;i<RMDNUM;i++)
	{
		struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
		printk("%02x ",rmdp->u.s.status);
	}
	printk("\n");
#endif

	return &((struct priv *) dev->priv)->stats;
}

static void set_multicast_list(struct net_device *dev)
{
	if(!ni65_lance_reinit(dev))
		printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
	netif_wake_queue(dev);
}

#ifdef MODULE
static struct net_device *dev_ni65;

MODULE_PARM(irq, "i");
MODULE_PARM(io, "i");
MODULE_PARM(dma, "i");
MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");

int init_module(void)
{
	dev_ni65 = ni65_probe(-1);
	return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
}

void cleanup_module(void)
{
	unregister_netdev(dev_ni65);
	cleanup_card(dev_ni65);
	free_netdev(dev_ni65);
}
#endif /* MODULE */

MODULE_LICENSE("GPL");

/*
 * END of ni65.c
 */
