/* sunqe.c
 *
 * Excerpt of a network driver, captured from a code-sharing page titled
 * "net driver for a PowerPC MPC8241 Linux system" (C, 1,244 lines total;
 * this is page 1 of 3).  The code itself is the Sun QEC/QE ("sunqe")
 * QuadEthernet SBUS driver from the Linux 2.2 era.
 */
/* NOTE(review): page break in the capture -- this is the TAIL of
 * qe_is_bolixed(), whose beginning is on the previous page.  The visible
 * part finishes decoding fatal QE status bits (here: receive DMA sbus
 * error ack), bumps the error counters, and if any fatal condition was
 * seen re-initializes the channel via qe_init(qep, 1).  The return value
 * tells the interrupt handler whether the channel was reset (non-zero),
 * in which case normal rx/tx servicing is skipped for this interrupt.
 */
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}
	if(qe_status & CREG_STAT_RXSERR) {
		printk("%s: Receive DMA sbus error ack.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}
	if(mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE transmit complete interrupt service routine.
 *
 * Walks the tx ring from tx_old toward tx_new, reclaiming every
 * descriptor the chip has released (TXD_OWN clear): the queued skb is
 * freed and the byte/packet counters are updated.  Stops at the first
 * descriptor still owned by the hardware.
 */
static inline void qe_tx(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	struct qe_txd *this;
	int elem = qep->tx_old;

	while(elem != qep->tx_new) {
		struct sk_buff *skb;

		this = &txbase[elem];
		if(this->tx_flags & TXD_OWN)
			break;	/* hardware still owns it; done for now */
		skb = qep->tx_skbs[elem];
		qep->tx_skbs[elem] = NULL;
		qep->net_stats.tx_bytes+=skb->len;
		dev_kfree_skb(skb);
		qep->net_stats.tx_packets++;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

/* sun4c variant of the tx-complete service routine.  On sun4c the packet
 * data was copied into fixed DVMA buffers at transmit time (see
 * sun4c_qe_start_xmit below) and the skb freed immediately, so there is
 * nothing to free here -- just count completions and advance tx_old.
 */
static inline void sun4c_qe_tx(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	struct qe_txd *this;
	int elem = qep->tx_old;

	while(elem != qep->tx_new) {
		this = &txbase[elem];
		if(this->tx_flags & TXD_OWN)
			break;
		qep->net_stats.tx_packets++;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 *
 * Large frames (> RX_COPY_THRESHOLD) are handed to the stack in the ring
 * skb itself and the ring slot is refilled with a freshly allocated
 * buffer; small frames are copied into a new skb and the original ring
 * buffer is reused.  Any failure to get a replacement buffer drops the
 * frame and recycles the slot (drop_it path).
 */
static inline void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	int elem = qep->rx_new, drops = 0;

	this = &rxbase[elem];
	while(!(this->rx_flags & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned int flags = this->rx_flags;
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if(len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;

	drop_it:
			/* Return it to the QE: re-arm this same descriptor
			 * with its existing buffer and give it back to the
			 * hardware (set RXD_OWN).
			 */
			qep->net_stats.rx_dropped++;
			this->rx_addr = sbus_dvma_addr(qep->rx_skbs[elem]->data);
			this->rx_flags =
				(RXD_OWN | (RX_BUF_ALLOC_SIZE & RXD_LENGTH));
			goto next;
		}
		skb = qep->rx_skbs[elem];
#ifdef NEED_DMA_SYNCHRONIZATION
#ifdef __sparc_v9__
		if ((unsigned long) (skb->data + skb->len) >= MAX_DMA_ADDRESS) {
			printk("sunqe: Bogus DMA buffer address "
			       "[%016lx]\n", ((unsigned long) skb->data));
			panic("DMA address too large, tell DaveM");
		}
#endif
		mmu_sync_dma(sbus_dvma_addr(skb->data),
			     skb->len, qep->qe_sbusdev->my_bus);
#endif
		if(len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can. */
			new_skb = qe_alloc_skb(RX_BUF_ALLOC_SIZE, (GFP_DMA|GFP_ATOMIC));
			if(!new_skb) {
				drops++;
				goto drop_it;
			}
			qep->rx_skbs[elem] = new_skb;
			new_skb->dev = qep->dev;
			skb_put(new_skb, ETH_FRAME_LEN);
			/* 34-byte reserve matches the (RX_BUF_ALLOC_SIZE - 34)
			 * descriptor length below; presumably DMA alignment /
			 * header headroom for this chip -- TODO confirm.
			 */
			skb_reserve(new_skb, 34);
			rxbase[elem].rx_addr = sbus_dvma_addr(new_skb->data);
			rxbase[elem].rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if(!copy_skb) {
				drops++;
				goto drop_it;
			}
			copy_skb->dev = qep->dev;
			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0);

			/* Reuse original ring buffer. */
			rxbase[elem].rx_addr = sbus_dvma_addr(skb->data);
			rxbase[elem].rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			skb = copy_skb;
		}

		/* No checksums are done by this card ;-( */
		skb->protocol = eth_type_trans(skb, qep->dev);
		netif_rx(skb);
		qep->net_stats.rx_packets++;

	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if(drops)
		printk("%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}

/* sun4c variant of the receive service routine.  Frames arrive in fixed
 * DVMA buffers (qep->sun4c_buffers); every good frame is copied into a
 * freshly allocated skb, and the buffer is then re-queued on the
 * descriptor SUN4C_RX_RING_SIZE slots ahead (end_rxd), keeping a fixed
 * window of buffers posted to the hardware.
 */
static inline void sun4c_qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->sun4c_buffers;
	__u32 qbufs_dvma = qep->s4c_buf_dvma;
	int elem = qep->rx_new, drops = 0;

	this = &rxbase[elem];
	while(!(this->rx_flags & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			qbufs->rx_buf[elem & (SUN4C_RX_RING_SIZE - 1)];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (SUN4C_RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem+SUN4C_RX_RING_SIZE)&(RX_RING_SIZE-1)];
		unsigned int flags = this->rx_flags;
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if(len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;
			qep->net_stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if(skb == 0) {
				drops++;
				qep->net_stats.rx_dropped++;
			} else {
				skb->dev = qep->dev;
				skb_reserve(skb, 2);	/* align IP header */
				skb_put(skb, len);
				eth_copy_and_sum(skb, (unsigned char *)this_qbuf,
						 len, 0);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				qep->net_stats.rx_packets++;
			}
		}
		/* Re-post this buffer on the descriptor one ring-window
		 * ahead, handing ownership back to the chip.
		 */
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | (SUN4C_RX_BUFF_SIZE & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if(drops)
		printk("%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 *
 * The latched QEC status holds one 4-bit field per channel (up to 4
 * channels); for each signaling channel, errors are handled first (a
 * channel reset skips normal servicing via the `next' label), then rx
 * and tx completions, then the 2.2-style transmit queue is restarted
 * (clear dev->tbusy, mark_bh(NET_BH)) if descriptors freed up.
 */
static void qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	unsigned int qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = qecp->gregs->stat;
	while(channel < 4) {
		if(qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			struct device *dev = qep->dev;
			unsigned int qe_status;

			dev->interrupt = 1;
			qe_status = qep->qcregs->stat;
			if(qe_status & CREG_STAT_ERRORS)
				if(qe_is_bolixed(qep, qe_status))
					goto next;
			if(qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if(qe_status & CREG_STAT_TXIRQ)
				qe_tx(qep);
			if(dev->tbusy && (TX_BUFFS_AVAIL(qep) >= 0)) {
				dev->tbusy = 0;
				mark_bh(NET_BH);
			}
	next:
			dev->interrupt = 0;
		}
		qec_status >>= 4;	/* next channel's nibble */
		channel++;
	}
}

/* sun4c flavor of the QEC master interrupt handler: identical structure
 * to qec_interrupt() above but dispatches to the sun4c rx/tx service
 * routines and the sun4c buffer-availability test.
 */
static void sun4c_qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	unsigned int qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = qecp->gregs->stat;
	while(channel < 4) {
		if(qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			struct device *dev = qep->dev;
			unsigned int qe_status;

			dev->interrupt = 1;
			qe_status = qep->qcregs->stat;
			if(qe_status & CREG_STAT_ERRORS)
				if(qe_is_bolixed(qep, qe_status))
					goto next;
			if(qe_status & CREG_STAT_RXIRQ)
				sun4c_qe_rx(qep);
			if(qe_status & CREG_STAT_TXIRQ)
				sun4c_qe_tx(qep);
			if(dev->tbusy && (SUN4C_TX_BUFFS_AVAIL(qep) >= 0)) {
				dev->tbusy = 0;
				mark_bh(NET_BH);
			}
	next:
			dev->interrupt = 0;
		}
		qec_status >>= 4;
		channel++;
	}
}

/* Bring the interface up: (re-)initialize the channel; on success take a
 * module reference.  Returns qe_init()'s result (0 on success).
 */
static int qe_open(struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int res;

	res = qe_init(qep, 0);
	if(!res) {
		MOD_INC_USE_COUNT;
	}
	return res;
}

/* Bring the interface down: stop the channel, release ring buffers, and
 * drop the module reference.
 */
static int qe_close(struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qe_stop(qep);
	qe_clean_rings(qep);
	MOD_DEC_USE_COUNT;
	return 0;
}

/* Get a packet queued to go onto the wire.
 *
 * 2.2-era hard_start_xmit convention: return 1 to tell the stack to
 * requeue the skb (transmitter busy / no descriptors), 0 on success.
 * The descriptor is first marked TXD_UPDATE, then fully filled in, and
 * only then handed to the chip with TXD_OWN ("Avoid a race" below);
 * finally the transmitter is kicked via CREG_CTRL_TWAKEUP.
 */
static int qe_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int len, entry;

	if(dev->tbusy)
		return 1;

	if(test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
		printk("%s: Transmitter access conflict.\n", dev->name);
		return 1;
	}

	if(!TX_BUFFS_AVAIL(qep))
		return 1;

#ifdef NEED_DMA_SYNCHRONIZATION
#ifdef __sparc_v9__
	/* Buffers above MAX_DMA_ADDRESS can't be DMA'd; bounce into a
	 * GFP_DMA copy.
	 */
	if ((unsigned long) (skb->data + skb->len) >= MAX_DMA_ADDRESS) {
		struct sk_buff *new_skb = skb_copy(skb, GFP_DMA | GFP_ATOMIC);
		if(!new_skb)
			return 1;
		dev_kfree_skb(skb);
		skb = new_skb;
	}
#endif
	mmu_sync_dma(sbus_dvma_addr(skb->data),
		     skb->len, qep->qe_sbusdev->my_bus);
#endif
	len = skb->len;
	entry = qep->tx_new;

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	qep->tx_skbs[entry] = skb;	/* freed later by qe_tx() */

	qep->qe_block->qe_txd[entry].tx_addr = sbus_dvma_addr(skb->data);
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	qep->qcregs->ctrl = CREG_CTRL_TWAKEUP;

	if(TX_BUFFS_AVAIL(qep))
		dev->tbusy = 0;

	return 0;
}

/* sun4c flavor of the transmit routine: the packet is copied into a
 * fixed DVMA tx buffer and the skb freed immediately (so sun4c_qe_tx()
 * has nothing to free at completion time).  Same return convention as
 * qe_start_xmit().
 */
static int sun4c_qe_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct sunqe_buffers *qbufs = qep->sun4c_buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->s4c_buf_dvma;
	unsigned char *txbuf;
	int len, entry;

	if(dev->tbusy)
		return 1;

	if(test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
		printk("%s: Transmitter access conflict.\n", dev->name);
		return 1;
	}

	if(!SUN4C_TX_BUFFS_AVAIL(qep))
		return 1;

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (SUN4C_TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (SUN4C_TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	memcpy(txbuf, skb->data, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	qep->qcregs->ctrl = CREG_CTRL_TWAKEUP;

	qep->net_stats.tx_bytes+=skb->len;

	dev_kfree_skb(skb);

	if(SUN4C_TX_BUFFS_AVAIL(qep))
		dev->tbusy = 0;

	return 0;
}

/* Return the per-device statistics block.
 * NOTE(review): page break in the capture -- the closing of this
 * function (and the rest of the file) is on the next page.
 */
static struct net_device_stats *qe_get_stats(struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	return &qep->net_stats

/* (End of page 1 of 3 of the captured source; the remainder of sunqe.c,
 * including the close of qe_get_stats(), continues on the next page.)
 */