⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 de4x5.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 5 页
字号:
    */
    status = de4x5_init(dev);
    spin_lock_init(&lp->lock);
    lp->state = OPEN;
    de4x5_dbg_open(dev);

    /*
    ** First try a plain shared IRQ; if the line is busy, retry with a fast
    ** (SA_INTERRUPT) shared handler.  On total failure, unwind the buffer
    ** setup done above and report -EAGAIN to the caller.
    */
    if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
		                                     lp->adapter_name, dev)) {
	printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
	if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ,
			                             lp->adapter_name, dev)) {
	    printk("\n              Cannot get IRQ- reconfigure your hardware.\n");
	    disable_ast(dev);
	    de4x5_free_rx_buffs(dev);
	    de4x5_free_tx_buffs(dev);
	    yawn(dev, SLEEP);       /* NOTE(review): appears to power the chip
				    ** down - confirm against yawn() */
	    lp->state = CLOSED;
	    return -EAGAIN;
	} else {
	    printk("\n              Succeeded, but you should reconfigure your hardware to avoid this.\n");
	    printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
	}
    }

    lp->interrupt = UNMASK_INTERRUPTS;
    dev->trans_start = jiffies;

    START_DE4X5;

    de4x5_setup_intr(dev);

    /* Optional CSR dump to aid open-time debugging */
    if (de4x5_debug & DEBUG_OPEN) {
	printk("\tsts:  0x%08x\n", inl(DE4X5_STS));
	printk("\tbmr:  0x%08x\n", inl(DE4X5_BMR));
	printk("\timr:  0x%08x\n", inl(DE4X5_IMR));
	printk("\tomr:  0x%08x\n", inl(DE4X5_OMR));
	printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
	printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
	printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
	printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
    }

    return status;
}

/*
** Initialize the DE4X5 operating conditions. NB: a chip problem with the
** DC21140 requires using perfect filtering mode for that chip. Since I can't
** see why I'd want > 14 multicast addresses, I have changed all chips to use
** the perfect filtering mode. Keep the DMA burst length at 8: there seems
** to be data corruption problems if it is larger (UDP errors seen from a
** ttcp source).
*/
static int
de4x5_init(struct net_device *dev)
{
    /* Lock out other processes whilst setting up the hardware */
    netif_stop_queue(dev);

    de4x5_sw_reset(dev);

    /* Autoconfigure the connected port */
    autoconf_media(dev);

    return 0;                       /* Unconditional success */
}

/*
** Reprogram the bus mode, rebuild the RX/TX descriptor rings, then load a
** perfect-filtering setup frame and poll (up to ~500ms, interrupts are off)
** for the adapter to accept it.  Returns 0 on success, -EIO on timeout.
*/
static int
de4x5_sw_reset(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr; /* implicitly used by the DE4X5_* macros */
    int i, j, status = 0;
    s32 bmr, omr;

    /* Select the MII or SRL port now and RESET the MAC */
    if (!lp->useSROM) {
	if (lp->phy[lp->active].id != 0) {
	    lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
	} else {
	    lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
	}
	de4x5_switch_mac_port(dev);
    }

    /*
    ** Set the programmable burst length to 8 longwords for all the DC21140
    ** Fasternet chips and 4 longwords for all others: DMA errors result
    ** without these values. Cache align 16 long.
    */
    bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
    bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
    outl(bmr, DE4X5_BMR);

    omr = inl(DE4X5_OMR) & ~OMR_PR;             /* Turn off promiscuous mode */
    if (lp->chipset == DC21140) {
	omr |= (OMR_SDP | OMR_SB);
    }
    lp->setup_f = PERFECT;

    /* Tell the chip where the RX and TX descriptor rings live */
    outl(lp->dma_rings, DE4X5_RRBA);
    outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
	 DE4X5_TRBA);

    lp->rx_new = lp->rx_old = 0;
    lp->tx_new = lp->tx_old = 0;

    /* Hand every RX descriptor to the chip; reclaim every TX descriptor */
    for (i = 0; i < lp->rxRingSize; i++) {
	lp->rx_ring[i].status = cpu_to_le32(R_OWN);
    }

    for (i = 0; i < lp->txRingSize; i++) {
	lp->tx_ring[i].status = cpu_to_le32(0);
    }

    barrier();     /* compiler barrier: keep the ring writes above from being
		   ** reordered past the setup-frame load below */

    /* Build the setup frame depending on filtering mode */
    SetMulticastFilter(dev);

    /* (struct sk_buff *)1 is a sentinel "no real skb" marker; callers test
    ** lp->tx_skb[] against > 1 to detect genuine buffers */
    load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
    outl(omr|OMR_ST, DE4X5_OMR);

    /* Poll for setup frame completion (adapter interrupts are disabled now) */
    for (j=0, i=0;(i<500) && (j==0);i++) {       /* Upto 500ms delay */
	mdelay(1);
	if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
    }
    outl(omr, DE4X5_OMR);                        /* Stop everything! */

    if (j == 0) {
	printk("%s: Setup frame timed out, status %08x\n", dev->name,
	       inl(DE4X5_STS));
	status = -EIO;
    }

    /* NOTE(review): "(++lp->tx_new) % size" both pre-increments and then
    ** re-assigns lp->tx_new in one expression (unsequenced modification);
    ** kept byte-identical here */
    lp->tx_new = (++lp->tx_new) % lp->txRingSize;
    lp->tx_old = lp->tx_new;

    return status;
}

/*
** Writes a socket buffer address to the next available transmit descriptor.
*/
static int
de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr; /* implicitly used by the DE4X5_* macros */
    int status = 0;
    u_long flags = 0;

    netif_stop_queue(dev);
    if (lp->tx_enable == NO) {                   /* Cannot send for now */
	return -1;
    }

    /*
    ** Clean out the TX ring asynchronously to interrupts - sometimes the
    ** interrupts are lost by delayed descriptor status updates relative to
    ** the irq assertion, especially with a busy PCI bus.
    */
    spin_lock_irqsave(&lp->lock, flags);
    de4x5_tx(dev);
    spin_unlock_irqrestore(&lp->lock, flags);

    /* Test if cache is already locked - requeue skb if so */
    if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
	return -1;

    /* Transmit descriptor ring full or stale skb */
    if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
	if (lp->interrupt) {
	    de4x5_putb_cache(dev, skb);          /* Requeue the buffer */
	} else {
	    de4x5_put_cache(dev, skb);
	}
	if (de4x5_debug & DEBUG_TX) {
	    printk("%s: transmit busy, lost media or stale skb found:\n  STS:%08x\n  tbusy:%d\n  IMR:%08x\n  OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
	}
    } else if (skb->len > 0) {
	/* If we already have stuff queued locally, use that first */
	if (lp->cache.skb && !lp->interrupt) {
	    de4x5_put_cache(dev, skb);
	    skb = de4x5_get_cache(dev);
	}

	/* Drain packets into the ring while free descriptors remain
	** (tx_skb[] <= 1 means that slot holds no real buffer) */
	while (skb && !netif_queue_stopped(dev) &&
	       (u_long) lp->tx_skb[lp->tx_new] <= 1) {
	    spin_lock_irqsave(&lp->lock, flags);
	    netif_stop_queue(dev);
	    load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
	    lp->stats.tx_bytes += skb->len;
	    outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */

	    lp->tx_new = (++lp->tx_new) % lp->txRingSize;
	    dev->trans_start = jiffies;

	    if (TX_BUFFS_AVAIL) {
		netif_start_queue(dev);         /* Another pkt may be queued */
	    }
	    skb = de4x5_get_cache(dev);
	    spin_unlock_irqrestore(&lp->lock, flags);
	}
	if (skb) de4x5_putb_cache(dev, skb);    /* Ring filled up - requeue */
    }

    lp->cache.lock = 0;

    return status;
}

/*
** The DE4X5 interrupt handler.
**
** I/O Read/Writes through intermediate PCI bridges are never 'posted',
** so that the asserted interrupt always has some real data to work with -
** if these I/O accesses are ever changed to memory accesses, ensure the
** STS write is read immediately to complete the transaction if the adapter
** is not on bus 0. Lost interrupts can still occur when the PCI bus load
** is high and descriptor status bits cannot be set before the associated
** interrupt is asserted and this routine entered.
*/
static irqreturn_t
de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_id;
    struct de4x5_private *lp;
    s32 imr, omr, sts, limit;
    u_long iobase;
    unsigned int handled = 0;

    if (dev == NULL) {
	printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
	return IRQ_NONE;
    }
    lp = netdev_priv(dev);
    spin_lock(&lp->lock);
    iobase = dev->base_addr;

    DISABLE_IRQs;                        /* Ensure non re-entrancy */

    if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
	printk("%s: Re-entering the interrupt handler.\n", dev->name);

    synchronize_irq(dev->irq);

    /* Service at most 8 rounds of pending events per invocation */
    for (limit=0; limit<8; limit++) {
	sts = inl(DE4X5_STS);            /* Read IRQ status */
	outl(sts, DE4X5_STS);            /* Reset the board interrupts */

	if (!(sts & lp->irq_mask)) break;/* All done */
	handled = 1;

	if (sts & (STS_RI | STS_RU))     /* Rx interrupt (packet[s] arrived) */
	  de4x5_rx(dev);

	if (sts & (STS_TI | STS_TU))     /* Tx interrupt (packet sent) */
	  de4x5_tx(dev);

	if (sts & STS_LNF) {             /* TP Link has failed */
	    lp->irq_mask &= ~IMR_LFM;
	}

	if (sts & STS_UNF) {             /* Transmit underrun */
	    de4x5_txur(dev);
	}

	if (sts & STS_SE) {              /* Bus Error */
	    STOP_DE4X5;
	    printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
		   dev->name, sts);
	    spin_unlock(&lp->lock);
	    return IRQ_HANDLED;
	}
    }

    /* Load the TX ring with any locally stored packets */
    if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
	while (lp->cache.skb && !netif_queue_stopped(dev) && lp->tx_enable) {
	    de4x5_queue_pkt(de4x5_get_cache(dev), dev);
	}
	lp->cache.lock = 0;
    }

    lp->interrupt = UNMASK_INTERRUPTS;
    ENABLE_IRQs;
    spin_unlock(&lp->lock);

    return IRQ_RETVAL(handled);
}

/*
** Receive service routine: walk the RX ring from rx_new while the chip has
** handed descriptors back to the host (OWN bit clear => status >= 0) and
** account errors/frames.  (Definition continues beyond this excerpt.)
*/
static int
de4x5_rx(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr; /* implicitly used by the DE4X5_* macros */
    int entry;
    s32 status;

    for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
	                                                    entry=lp->rx_new) {
	status = (s32)le32_to_cpu(lp->rx_ring[entry].status);

	/* Pending overflow: let the overflow handler resynchronize */
	if (lp->rx_ovf) {
	    if (inl(DE4X5_MFC) & MFC_FOCM) {
		de4x5_rx_ovfc(dev);
		break;
	    }
	}

	if (status & RD_FS) {                 /* Remember the start of frame */
	    lp->rx_old = entry;
	}

	if (status & RD_LS) {                 /* Valid frame status */
	    if (lp->tx_enable) lp->linkOK++;
	    if (status & RD_ES) {	      /* There was an error. */
		lp->stats.rx_errors++;        /* Update the error stats. */
		if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
		if (status & RD_CE)           lp->stats.rx_crc_errors++;
		if (status & RD_OF)           lp->stats.rx_fifo_errors++;
		if (status & RD_TL)           lp->stats.rx_length_errors++;
		if (status & RD_RF)           lp->pktStats.rx_runt_frames++;
		if (status & RD_CS)           lp->pktStats.rx_collision+

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -