/*
 * tsi108_eth.c — excerpt from the Linux kernel source tree
 * (Tsi108/Tsi109 on-chip Ethernet driver).
 * Part 1 of 4 of a 1,704-line file; the file head (license header,
 * includes, and the start of tsi108_stat_carry()) is not shown here.
 */
	/* Tail of tsi108_stat_carry() (its head is in the previous part):
	 * fold any pending carry bits for the remaining RX/TX hardware
	 * counters into their software accumulators, then drop misclock.
	 */
	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
			      TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);

	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
			      TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);

	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
			      TSI108_STAT_RXDROP_CARRY,
			      &data->stats.rx_missed_errors);

	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
			      TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);

	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
			      TSI108_STAT_TXPKTS_CARRY,
			      &data->stats.tx_packets);

	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
			      TSI108_STAT_TXEXDEF_CARRY,
			      &data->stats.tx_aborted_errors);

	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
			      TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);

	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
			      TSI108_STAT_TXTCOL_CARRY,
			      &data->stats.collisions);

	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
			      TSI108_STAT_TXPAUSEDROP_CARRY,
			      &data->tx_pause_drop);

	spin_unlock_irq(&data->misclock);
}

/* Read a stat counter atomically with respect to carries.
 * data->misclock must be held.
 *
 * @reg:         hardware stat register offset; offsets below 0xb0 have their
 *               carry flag in CARRY1, the rest in CARRY2
 * @carry_bit:   bit in the carry register that signals this counter wrapped
 * @carry_shift: value added to *upper per wrap (the counter's modulus)
 * @upper:       software high-order accumulator for this counter
 *
 * Returns the full counter value (hardware low bits OR'ed with *upper).
 */
static inline unsigned long
tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
		 int carry_shift, unsigned long *upper)
{
	int carryreg;
	unsigned long val;

	if (reg < 0xb0)
		carryreg = TSI108_STAT_CARRY1;
	else
		carryreg = TSI108_STAT_CARRY2;

      again:
	val = TSI_READ(reg) | *upper;

	/* Check to see if it overflowed, but the interrupt hasn't
	 * been serviced yet.  If so, handle the carry here, and
	 * try again.
	 */

	if (unlikely(TSI_READ(carryreg) & carry_bit)) {
		*upper += carry_shift;
		TSI_WRITE(carryreg, carry_bit);
		goto again;
	}

	return val;
}

/* get_stats entry point: snapshot every hardware counter (carry-safe, via
 * tsi108_read_stat()) into data->tmpstats under misclock and return it.
 * Derived fields (collisions, rx_errors, tx_errors) are summed here.
 */
static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
{
	unsigned long excol;

	struct tsi108_prv_data *data = netdev_priv(dev);
	spin_lock_irq(&data->misclock);

	data->tmpstats.rx_packets =
	    tsi108_read_stat(data, TSI108_STAT_RXPKTS,
			     TSI108_STAT_CARRY1_RXPKTS,
			     TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);

	data->tmpstats.tx_packets =
	    tsi108_read_stat(data, TSI108_STAT_TXPKTS,
			     TSI108_STAT_CARRY2_TXPKTS,
			     TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);

	data->tmpstats.rx_bytes =
	    tsi108_read_stat(data, TSI108_STAT_RXBYTES,
			     TSI108_STAT_CARRY1_RXBYTES,
			     TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);

	data->tmpstats.tx_bytes =
	    tsi108_read_stat(data, TSI108_STAT_TXBYTES,
			     TSI108_STAT_CARRY2_TXBYTES,
			     TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);

	data->tmpstats.multicast =
	    tsi108_read_stat(data, TSI108_STAT_RXMCAST,
			     TSI108_STAT_CARRY1_RXMCAST,
			     TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);

	/* Excessive-collision aborts count both as collisions and as
	 * tx_aborted_errors below, so read the counter once and reuse it.
	 */
	excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
				 TSI108_STAT_CARRY2_TXEXCOL,
				 TSI108_STAT_TXEXCOL_CARRY,
				 &data->tx_coll_abort);

	data->tmpstats.collisions =
	    tsi108_read_stat(data, TSI108_STAT_TXTCOL,
			     TSI108_STAT_CARRY2_TXTCOL,
			     TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);

	data->tmpstats.collisions += excol;

	data->tmpstats.rx_length_errors =
	    tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
			     TSI108_STAT_CARRY1_RXLENGTH,
			     TSI108_STAT_RXLENGTH_CARRY,
			     &data->stats.rx_length_errors);

	data->tmpstats.rx_length_errors +=
	    tsi108_read_stat(data, TSI108_STAT_RXRUNT,
			     TSI108_STAT_CARRY1_RXRUNT,
			     TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);

	data->tmpstats.rx_length_errors +=
	    tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
			     TSI108_STAT_CARRY1_RXJUMBO,
			     TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);

	data->tmpstats.rx_frame_errors =
	    tsi108_read_stat(data, TSI108_STAT_RXALIGN,
			     TSI108_STAT_CARRY1_RXALIGN,
			     TSI108_STAT_RXALIGN_CARRY,
			     &data->stats.rx_frame_errors);

	data->tmpstats.rx_frame_errors +=
	    tsi108_read_stat(data, TSI108_STAT_RXFCS,
			     TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
			     &data->rx_fcs);

	data->tmpstats.rx_frame_errors +=
	    tsi108_read_stat(data, TSI108_STAT_RXFRAG,
			     TSI108_STAT_CARRY1_RXFRAG,
			     TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);

	data->tmpstats.rx_missed_errors =
	    tsi108_read_stat(data, TSI108_STAT_RXDROP,
			     TSI108_STAT_CARRY1_RXDROP,
			     TSI108_STAT_RXDROP_CARRY,
			     &data->stats.rx_missed_errors);

	/* These three are maintained by software. */
	/* NOTE(review): only two assignments follow this comment in the
	 * visible source — the third software-maintained counter may be
	 * updated elsewhere in the file; confirm against the full source.
	 */
	data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
	data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;

	data->tmpstats.tx_aborted_errors =
	    tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
			     TSI108_STAT_CARRY2_TXEXDEF,
			     TSI108_STAT_TXEXDEF_CARRY,
			     &data->stats.tx_aborted_errors);

	data->tmpstats.tx_aborted_errors +=
	    tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
			     TSI108_STAT_CARRY2_TXPAUSE,
			     TSI108_STAT_TXPAUSEDROP_CARRY,
			     &data->tx_pause_drop);

	data->tmpstats.tx_aborted_errors += excol;

	data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;

	data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
	    data->tmpstats.rx_crc_errors +
	    data->tmpstats.rx_frame_errors +
	    data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;

	spin_unlock_irq(&data->misclock);
	return &data->tmpstats;
}

/* Re-arm RX queue 0: revalidate the descriptor-queue pointer and set the
 * GO bit so the DMA engine resumes fetching RX descriptors.
 */
static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
{
	TSI_WRITE(TSI108_EC_RXQ_PTRHIGH,
			     TSI108_EC_RXQ_PTRHIGH_VALID);

	TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
			     | TSI108_EC_RXCTRL_QUEUE0);
}

/* Re-arm TX queue 0: revalidate the descriptor-queue pointer and set GO
 * (with idle-interrupt enabled) so the engine resumes transmitting.
 */
static void tsi108_restart_tx(struct tsi108_prv_data * data)
{
	TSI_WRITE(TSI108_EC_TXQ_PTRHIGH,
			     TSI108_EC_TXQ_PTRHIGH_VALID);

	TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
			     TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
}

/* txlock must be held by caller, with IRQs disabled, and
 * with permission to re-enable them when the lock is dropped.
 *
 * Reclaim TX descriptors the hardware has finished with (OWN bit clear),
 * freeing each skb once its last (EOF) fragment descriptor completes, and
 * wake the queue if anything was released while the link is up.
 */
static void tsi108_complete_tx(struct net_device *dev)
{
	struct tsi108_prv_data *data = netdev_priv(dev);
	int tx;
	struct sk_buff *skb;
	int release = 0;

	/* Loop while the ring has in-flight descriptors: either it is
	 * completely full (txfree == 0, head == tail) or head != tail.
	 */
	while (!data->txfree || data->txhead != data->txtail) {
		tx = data->txtail;

		if (data->txring[tx].misc & TSI108_TX_OWN)
			break;

		skb = data->txskbs[tx];

		if (!(data->txring[tx].misc & TSI108_TX_OK))
			printk("%s: bad tx packet, misc %x\n",
			       dev->name, data->txring[tx].misc);

		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
		data->txfree++;

		/* One skb may span several descriptors; free it only on the
		 * descriptor carrying the end-of-frame flag.
		 */
		if (data->txring[tx].misc & TSI108_TX_EOF) {
			dev_kfree_skb_any(skb);
			release++;
		}
	}

	if (release) {
		if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
			netif_wake_queue(dev);
	}
}

/* Transmit entry point (hard_start_xmit style): map the skb's linear part
 * and page fragments into one TX descriptor each, hand them to the
 * hardware, then reclaim completed descriptors and kick the engine if it
 * has gone idle.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY with the queue
 * stopped when the link is down or the ring is full.
 */
static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
{
	struct tsi108_prv_data *data = netdev_priv(dev);
	int frags = skb_shinfo(skb)->nr_frags + 1;
	int i;

	if (!data->phy_ok && net_ratelimit())
		printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);

	if (!data->link_up) {
		printk(KERN_ERR "%s: Transmit while link is down!\n",
		       dev->name);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* Require room for a worst-case packet (MAX_SKB_FRAGS + 1
	 * descriptors) before accepting anything.
	 */
	if (data->txfree < MAX_SKB_FRAGS + 1) {
		netif_stop_queue(dev);

		if (net_ratelimit())
			printk(KERN_ERR "%s: Transmit with full tx ring!\n",
			       dev->name);
		return NETDEV_TX_BUSY;
	}

	/* After this packet there may not be worst-case room left: stop the
	 * queue now; tsi108_complete_tx() will wake it as space frees up.
	 */
	if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
		netif_stop_queue(dev);
	}

	spin_lock_irq(&data->txlock);

	for (i = 0; i < frags; i++) {
		int misc = 0;
		int tx = data->txhead;

		/* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
		 * the interrupt bit.  TX descriptor-complete interrupts are
		 * enabled when the queue fills up, and masked when there is
		 * still free space.  This way, when saturating the outbound
		 * link, the tx interrupts are kept to a reasonable level.
		 * When the queue is not full, reclamation of skbs still occurs
		 * as new packets are transmitted, or on a queue-empty
		 * interrupt.
		 */

		if ((tx % TSI108_TX_INT_FREQ == 0) &&
		    ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
			misc = TSI108_TX_INT;

		data->txskbs[tx] = skb;

		if (i == 0) {
			/* Descriptor 0 covers the linear (non-paged) part. */
			data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
					skb->len - skb->data_len, DMA_TO_DEVICE);
			data->txring[tx].len = skb->len - skb->data_len;
			misc |= TSI108_TX_SOF;
		} else {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

			data->txring[tx].buf0 =
			    dma_map_page(NULL, frag->page, frag->page_offset,
					    frag->size, DMA_TO_DEVICE);
			data->txring[tx].len = frag->size;
		}

		if (i == frags - 1)
			misc |= TSI108_TX_EOF;

		/* Debug dump of the whole frame; the inner 'i' deliberately
		 * shadows the fragment-loop index.
		 */
		if (netif_msg_pktdata(data)) {
			int i;
			printk("%s: Tx Frame contents (%d)\n", dev->name,
			       skb->len);
			for (i = 0; i < skb->len; i++)
				printk(" %2.2x", skb->data[i]);
			printk(".\n");
		}

		/* Setting OWN last hands the descriptor to the hardware. */
		data->txring[tx].misc = misc | TSI108_TX_OWN;

		data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
		data->txfree--;
	}

	tsi108_complete_tx(dev);

	/* This must be done after the check for completed tx descriptors,
	 * so that the tail pointer is correct.
	 */

	if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
		tsi108_restart_tx(data);

	spin_unlock_irq(&data->txlock);

	return NETDEV_TX_OK;
}

/* Deliver up to 'budget' completed RX descriptors to the network stack,
 * counting CRC/overrun errors (under misclock) for bad frames.  Returns
 * the number of descriptors consumed.  NAPI context.
 */
static int tsi108_complete_rx(struct net_device *dev, int budget)
{
	struct tsi108_prv_data *data = netdev_priv(dev);
	int done = 0;

	while (data->rxfree && done != budget) {
		int rx = data->rxtail;
		struct sk_buff *skb;

		if (data->rxring[rx].misc & TSI108_RX_OWN)
			break;

		skb = data->rxskbs[rx];
		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
		data->rxfree--;
		done++;

		if (data->rxring[rx].misc & TSI108_RX_BAD) {
			spin_lock_irq(&data->misclock);

			if (data->rxring[rx].misc & TSI108_RX_CRC)
				data->stats.rx_crc_errors++;
			if (data->rxring[rx].misc & TSI108_RX_OVER)
				data->stats.rx_fifo_errors++;

			spin_unlock_irq(&data->misclock);

			dev_kfree_skb_any(skb);
			continue;
		}

		if (netif_msg_pktdata(data)) {
			int i;
			printk("%s: Rx Frame contents (%d)\n",
			       dev->name, data->rxring[rx].len);
			for (i = 0; i < data->rxring[rx].len; i++)
				printk(" %2.2x", skb->data[i]);
			printk(".\n");
		}

		skb_put(skb, data->rxring[rx].len);
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
		dev->last_rx = jiffies;
	}

	return done;
}

/* Allocate fresh skbs for up to 'budget' empty RX ring slots, DMA-map them
 * and hand the descriptors to the hardware, restarting the RX engine if it
 * stopped.  Returns the number of slots refilled (0 on allocation failure).
 */
static int tsi108_refill_rx(struct net_device *dev, int budget)
{
	struct tsi108_prv_data *data = netdev_priv(dev);
	int done = 0;

	while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
		int rx = data->rxhead;
		struct sk_buff *skb;

		data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
		if (!skb)
			break;

		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */

		/* assumes TSI108_RX_SKB_SIZE fits in the allocation above —
		 * TODO confirm against the header (not shown).
		 */
		data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
							TSI108_RX_SKB_SIZE,
							DMA_FROM_DEVICE);

		/* Sometimes the hardware sets blen to zero after packet
		 * reception, even though the manual says that it's only ever
		 * modified by the driver.
		 */

		data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
		data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;

		data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
		data->rxfree++;
		done++;
	}

	if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
			   TSI108_EC_RXSTAT_QUEUE0))
		tsi108_restart_rx(data, dev);

	return done;
}

/* NAPI poll callback (beginning only — this definition is truncated at the
 * end of this part of the file and continues in the next part).
 */
static int tsi108_poll(struct napi_struct *napi, int budget)
{
	struct tsi108_prv_data *data = container_of(napi, struct tsi108_prv_data, napi);
	struct net_device *dev = data->dev;
	u32 estat = TSI_READ(TSI108_EC_RXESTAT);
	u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
	int num_received = 0, num_filled = 0;

	/* Only the RX-related interrupt causes are handled by poll. */
	intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
	    TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
	/* (Source excerpt truncated here — the remainder of tsi108_poll()
	 * and the rest of the file are in the next part.)
	 */