/*
 * mace.c — PowerMac MACE (AMD 79C940) Ethernet driver, excerpt
 * (page 1 of 2 of the original Linux kernel source file).
 */
/*
 * Transmit path, multicast filter programming, interrupt service,
 * transmit-timeout recovery and module registration glue for the
 * PowerMac MACE Ethernet driver.
 *
 * NOTE(review): the extract arrived with all line structure collapsed;
 * code tokens below are unchanged from the original — only layout and
 * comments were added.
 */

/*
 * (Re)arm the transmit watchdog timer for this device.
 * Every caller in this file invokes it with mp->lock held.
 */
static inline void mace_set_timeout(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;

    if (mp->timeout_active)
        del_timer(&mp->tx_timeout);
    mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    mp->tx_timeout.function = mace_tx_timeout;
    mp->tx_timeout.data = (unsigned long) dev;
    add_timer(&mp->tx_timeout);
    mp->timeout_active = 1;
}

/*
 * hard_start_xmit entry point: place @skb on the transmit DBDMA ring
 * and kick the DMA channel if it may run.
 * Returns 1 when the ring is full (queue stopped, caller must retry),
 * 0 when the packet has been accepted.
 */
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp, *np;
    unsigned long flags;
    int fill, next, len;

    /* see if there's a free slot in the tx ring */
    spin_lock_irqsave(&mp->lock, flags);
    fill = mp->tx_fill;
    next = fill + 1;
    if (next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty) {
        netif_stop_queue(dev);
        mp->tx_fullup = 1;
        spin_unlock_irqrestore(&mp->lock, flags);
        return 1;               /* can't take it at the moment */
    }
    spin_unlock_irqrestore(&mp->lock, flags);

    /* partially fill in the dma command block */
    len = skb->len;
    if (len > ETH_FRAME_LEN) {
        /* oversize frames are truncated rather than rejected */
        printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
        len = ETH_FRAME_LEN;
    }
    mp->tx_bufs[fill] = skb;
    cp = mp->tx_cmds + NCMDS_TX * fill;
    st_le16(&cp->req_count, len);
    st_le32(&cp->phy_addr, virt_to_bus(skb->data));

    /* the slot after ours is marked STOP so DMA halts there */
    np = mp->tx_cmds + NCMDS_TX * next;
    out_le16(&np->command, DBDMA_STOP);

    /* poke the tx dma channel */
    spin_lock_irqsave(&mp->lock, flags);
    mp->tx_fill = next;
    /*
     * Only start this descriptor now if we are not in the middle of
     * the bad-runt workaround and the chip's in-flight frame limit
     * has not been reached; otherwise mace_interrupt() starts it later.
     */
    if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        ++mp->tx_active;
        mace_set_timeout(dev);
    }
    if (++next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty)
        netif_stop_queue(dev);
    spin_unlock_irqrestore(&mp->lock, flags);

    return 0;
}

/*
 * Program the MACE receive filter from dev->flags and the multicast
 * list: promiscuous mode sets PROM in maccc; otherwise a 64-bit
 * logical-address hash filter is computed (all-ones for IFF_ALLMULTI)
 * and written through the iac/ladrf registers.
 */
static void mace_set_multicast(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    int i, j;
    u32 crc;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    mp->maccc &= ~PROM;
    if (dev->flags & IFF_PROMISC) {
        mp->maccc |= PROM;
    } else {
        unsigned char multicast_filter[8];
        struct dev_mc_list *dmi = dev->mc_list;

        if (dev->flags & IFF_ALLMULTI) {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0xff;
        } else {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0;
            for (i = 0; i < dev->mc_count; i++) {
                crc = ether_crc_le(6, dmi->dmi_addr);
                j = crc >> 26;  /* bit number in multicast_filter */
                multicast_filter[j >> 3] |= 1 << (j & 7);
                dmi = dmi->next;
            }
        }
#if 0
        printk("Multicast filter :");
        for (i = 0; i < 8; i++)
            printk("%02x ", multicast_filter[i]);
        printk("\n");
#endif
        /*
         * BROKEN_ADDRCHG_REV chips must not use the ADDRCHG handshake;
         * on other revisions wait for ADDRCHG to clear before loading.
         */
        if (mp->chipid == BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, LOGADDR);
        else {
            out_8(&mb->iac, ADDRCHG | LOGADDR);
            while ((in_8(&mb->iac) & ADDRCHG) != 0)
                ;
        }
        /* ladrf auto-increments internally across the 8 writes —
         * NOTE(review): per chip behavior, not visible here; confirm
         * against the MACE datasheet. */
        for (i = 0; i < 8; ++i)
            out_8(&mb->ladrf, multicast_filter[i]);
        if (mp->chipid != BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, 0);
    }
    /* reset maccc */
    out_8(&mb->maccc, mp->maccc);
    spin_unlock_irqrestore(&mp->lock, flags);
}

/*
 * Fold the miscellaneous-error bits of the interrupt register into the
 * device statistics.  The mpc/rntpc on-chip counters are cleared by
 * reading; the MPCO/RNTPCO bits signal that a counter overflowed (+256).
 * Babble/jabber reports are rate-limited to 4 messages each.
 */
static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
    volatile struct mace __iomem *mb = mp->mace;
    static int mace_babbles, mace_jabbers;

    if (intr & MPCO)
        dev->stats.rx_missed_errors += 256;
    dev->stats.rx_missed_errors += in_8(&mb->mpc);   /* reading clears it */
    if (intr & RNTPCO)
        dev->stats.rx_length_errors += 256;
    dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */
    if (intr & CERR)
        ++dev->stats.tx_heartbeat_errors;
    if (intr & BABBLE)
        if (mace_babbles++ < 4)
            printk(KERN_DEBUG "mace: babbling transmitter\n");
    if (intr & JABBER)
        if (mace_jabbers++ < 4)
            printk(KERN_DEBUG "mace: jabbering transceiver\n");
}

/*
 * Main MACE interrupt handler.  Reaps completed transmit frames
 * (one per loop iteration while the chip reports valid transmit
 * status, XMTSV), applies the two-byte "bad runt" FIFO workaround,
 * updates statistics, frees transmitted skbs, and restarts the
 * transmit DMA channel on any descriptors still queued.
 */
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int intr, fs, i, stat, x;
    int xcount, dstat;
    unsigned long flags;
    /* static int mace_last_fs, mace_last_xcount; */

    spin_lock_irqsave(&mp->lock, flags);
    intr = in_8(&mb->ir);               /* read interrupt register */
    in_8(&mb->xmtrc);                   /* get retries */
    mace_handle_misc_intrs(mp, intr, dev);

    i = mp->tx_empty;
    while (in_8(&mb->pr) & XMTSV) {
        del_timer(&mp->tx_timeout);
        mp->timeout_active = 0;
        /*
         * Clear any interrupt indication associated with this status
         * word.  This appears to unlatch any error indication from
         * the DMA controller.
         */
        intr = in_8(&mb->ir);
        if (intr != 0)
            mace_handle_misc_intrs(mp, intr, dev);
        if (mp->tx_bad_runt) {
            /* status for the deliberately-sent runt: read, discard,
             * and re-enable auto padding */
            fs = in_8(&mb->xmtfs);
            mp->tx_bad_runt = 0;
            out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            continue;
        }
        dstat = ld_le32(&td->status);
        /* stop DMA controller */
        out_le32(&td->control, RUN << 16);
        /*
         * xcount is the number of complete frames which have been
         * written to the fifo but for which status has not been read.
         */
        xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
        if (xcount == 0 || (dstat & DEAD)) {
            /*
             * If a packet was aborted before the DMA controller has
             * finished transferring it, it seems that there are 2 bytes
             * which are stuck in some buffer somewhere.  These will get
             * transmitted as soon as we read the frame status (which
             * reenables the transmit data transfer request).  Turning
             * off the DMA controller and/or resetting the MACE doesn't
             * help.  So we disable auto-padding and FCS transmission
             * so the two bytes will only be a runt packet which should
             * be ignored by other stations.
             */
            out_8(&mb->xmtfc, DXMTFCS);
        }
        fs = in_8(&mb->xmtfs);
        if ((fs & XMTSV) == 0) {
            printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            mace_reset(dev);
                /*
                 * XXX mace likes to hang the machine after a xmtfs error.
                 * This is hard to reproduce, reseting *may* help
                 */
        }
        cp = mp->tx_cmds + NCMDS_TX * i;
        stat = ld_le16(&cp->xfer_status);
        if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
            /*
             * Check whether there were in fact 2 bytes written to
             * the transmit FIFO.
             */
            udelay(1);
            x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
            if (x != 0) {
                /* there were two bytes with an end-of-packet indication */
                mp->tx_bad_runt = 1;
                mace_set_timeout(dev);
            } else {
                /*
                 * Either there weren't the two bytes buffered up, or they
                 * didn't have an end-of-packet indication.
                 * We flush the transmit FIFO just in case (by setting the
                 * XMTFWU bit with the transmitter disabled).
                 */
                out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
                out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
                udelay(1);
                out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
                out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            }
        }
        /* dma should have finished */
        if (i == mp->tx_fill) {
            printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            continue;
        }
        /* Update stats */
        if (fs & (UFLO|LCOL|LCAR|RTRY)) {
            ++dev->stats.tx_errors;
            if (fs & LCAR)
                ++dev->stats.tx_carrier_errors;
            if (fs & (UFLO|LCOL|RTRY))
                ++dev->stats.tx_aborted_errors;
        } else {
            dev->stats.tx_bytes += mp->tx_bufs[i]->len;
            ++dev->stats.tx_packets;
        }
        dev_kfree_skb_irq(mp->tx_bufs[i]);
        --mp->tx_active;
        if (++i >= N_TX_RING)
            i = 0;
#if 0
        mace_last_fs = fs;
        mace_last_xcount = xcount;
#endif
    }

    /* we reaped at least one frame, so the queue can accept more */
    if (i != mp->tx_empty) {
        mp->tx_fullup = 0;
        netif_wake_queue(dev);
    }
    mp->tx_empty = i;
    /* first descriptor not yet handed to the chip */
    i += mp->tx_active;
    if (i >= N_TX_RING)
        i -= N_TX_RING;
    if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
        do {
            /* set up the next one */
            cp = mp->tx_cmds + NCMDS_TX * i;
            out_le16(&cp->xfer_status, 0);
            out_le16(&cp->command, OUTPUT_LAST);
            ++mp->tx_active;
            if (++i >= N_TX_RING)
                i = 0;
        } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        mace_set_timeout(dev);
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}

/*
 * Transmit watchdog callback (armed by mace_set_timeout): the chip or
 * DMA engine appears wedged, so reset both, restart receive DMA where
 * it left off, drop the stuck frame, and resume transmission from the
 * next ring entry.
 */
static void mace_tx_timeout(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&mp->lock, flags);
    mp->timeout_active = 0;
    /* nothing in flight — the interrupt handler beat us to it */
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
        goto out;

    /* update various counters */
    mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);

    cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

    /* turn off both tx and rx and reset the chip */
    out_8(&mb->maccc, 0);
    printk(KERN_ERR "mace: transmit timeout - resetting\n");
    dbdma_reset(td);
    mace_reset(dev);

    /* restart rx dma */
    cp = bus_to_virt(ld_le32(&rd->cmdptr));
    dbdma_reset(rd);
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* fix up the transmit side */
    i = mp->tx_empty;
    mp->tx_active = 0;
    ++dev->stats.tx_errors;
    if (mp->tx_bad_runt) {
        mp->tx_bad_runt = 0;
    } else if (i != mp->tx_fill) {
        /* discard the frame that timed out */
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
        mp->tx_empty = i;
    }
    mp->tx_fullup = 0;
    netif_wake_queue(dev);
    if (i != mp->tx_fill) {
        /* restart transmit DMA at the next queued descriptor */
        cp = mp->tx_cmds + NCMDS_TX * i;
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->cmdptr, virt_to_bus(cp));
        out_le32(&td->control, (RUN << 16) | RUN);
        ++mp->tx_active;
        mace_set_timeout(dev);
    }

    /* turn it back on */
    out_8(&mb->imr, RCVINT);
    out_8(&mb->maccc, mp->maccc);

out:
    spin_unlock_irqrestore(&mp->lock, flags);
}

/*
 * Transmit DMA interrupt: intentionally a no-op — transmit completion
 * is handled entirely from mace_interrupt() above.
 */
static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

/*
 * Receive DMA interrupt: walk the rx ring from rx_empty, deliver each
 * completed frame to the network stack (or count the error), then
 * refill the ring with fresh buffers and restart the DMA channel.
 */
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp, *np;
    int i, nb, stat, next;
    struct sk_buff *skb;
    unsigned frame_status;
    static int mace_lost_status;
    unsigned char *data;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    for (i = mp->rx_empty; i != mp->rx_fill; ) {
        cp = mp->rx_cmds + i;
        stat = ld_le16(&cp->xfer_status);
        if ((stat & ACTIVE) == 0) {
            /*
             * This slot's status hasn't been written yet; if the NEXT
             * slot has completed, the controller skipped a status word
             * and we carry on, otherwise stop here.
             */
            next = i + 1;
            if (next >= N_RX_RING)
                next = 0;
            np = mp->rx_cmds + next;
            if (next != mp->rx_fill
                && (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
                printk(KERN_DEBUG "mace: lost a status word\n");
                ++mace_lost_status;
            } else
                break;
        }
        /* bytes actually transferred into this buffer */
        nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
        out_le16(&cp->command, DBDMA_STOP);
        /* got a packet, have a look at it */
        skb = mp->rx_bufs[i];
        if (skb == 0) {
            /* buffer allocation failed earlier; data went to dummy_buf */
            ++dev->stats.rx_dropped;
        } else if (nb > 8) {
            data = skb->data;
            /* receive status trailer appended after the frame data —
             * NOTE(review): byte positions nb-4/nb-3 per MACE receive
             * frame format; confirm against the chip datasheet */
            frame_status = (data[nb-3] << 8) + data[nb-4];
            if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
                ++dev->stats.rx_errors;
                if (frame_status & RS_OFLO)
                    ++dev->stats.rx_over_errors;
                if (frame_status & RS_FRAMERR)
                    ++dev->stats.rx_frame_errors;
                if (frame_status & RS_FCSERR)
                    ++dev->stats.rx_crc_errors;
            } else {
                /* Mace feature AUTO_STRIP_RCV is on by default, dropping the
                 * FCS on frames with 802.3 headers. This means that Ethernet
                 * frames have 8 extra octets at the end, while 802.3 frames
                 * have only 4. We need to correctly account for this. */
                if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
                    nb -= 4;
                else    /* Ethernet header; mace includes FCS */
                    nb -= 8;
                skb_put(skb, nb);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_bytes += skb->len;
                netif_rx(skb);
                dev->last_rx = jiffies;
                mp->rx_bufs[i] = NULL;
                ++dev->stats.rx_packets;
            }
        } else {
            /* too short to even hold the status trailer */
            ++dev->stats.rx_errors;
            ++dev->stats.rx_length_errors;
        }
        /* advance to next */
        if (++i >= N_RX_RING)
            i = 0;
    }
    mp->rx_empty = i;

    /* refill the ring: allocate buffers and re-arm descriptors */
    i = mp->rx_fill;
    for (;;) {
        next = i + 1;
        if (next >= N_RX_RING)
            next = 0;
        if (next == mp->rx_empty)
            break;
        cp = mp->rx_cmds + i;
        skb = mp->rx_bufs[i];
        if (skb == 0) {
            skb = dev_alloc_skb(RX_BUFLEN + 2);
            if (skb != 0) {
                /* align the IP header on a 16-byte boundary */
                skb_reserve(skb, 2);
                mp->rx_bufs[i] = skb;
            }
        }
        st_le16(&cp->req_count, RX_BUFLEN);
        /* no buffer? receive into the shared scratch area and drop */
        data = skb? skb->data: dummy_buf;
        st_le32(&cp->phy_addr, virt_to_bus(data));
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
        if ((ld_le32(&rd->status) & ACTIVE) != 0) {
            out_le32(&rd->control, (PAUSE << 16) | PAUSE);
            while ((in_le32(&rd->status) & ACTIVE) != 0)
                ;
        }
#endif
        i = next;
    }
    if (i != mp->rx_fill) {
        out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
        mp->rx_fill = i;
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}

/* Open Firmware match table: bind to device-tree nodes named "mace". */
static struct of_device_id mace_match[] =
{
        {
        .name           = "mace",
        },
        {},
};
MODULE_DEVICE_TABLE (of, mace_match);

/* macio bus driver glue (probe/remove are defined elsewhere in this file). */
static struct macio_driver mace_driver =
{
        .name           = "mace",
        .match_table    = mace_match,
        .probe          = mace_probe,
        .remove         = mace_remove,
};

/* Module init: register the macio driver. */
static int __init mace_init(void)
{
        return macio_register_driver(&mace_driver);
}

/* Module exit: unregister and release the shared rx scratch buffer. */
static void __exit mace_cleanup(void)
{
        macio_unregister_driver(&mace_driver);
        kfree(dummy_buf);
        dummy_buf = NULL;
}

MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");

module_init(mace_init);
module_exit(mace_cleanup);

/* End of extract (page 1 of 2). */