
📄 gianfar.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 4
	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

	/* Assign the TBI an address which won't conflict with the PHYs */
	gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}


/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {

		if (priv->tx_skbuff[i]) {
			dma_unmap_single(NULL, txbdp->bufPtr,
					txbdp->length,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if (priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(NULL, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr = 0;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	int err = 0;
	u32 rctrl = 0;
	u32 attrs = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(NULL,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
					dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);

	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (NULL == priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
					dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (NULL == priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
					dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb = NULL;

		rxbdp->status = 0;

		skb = gfar_new_skb(dev, rxbdp);

		priv->rx_skbuff[i] = skb;

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptTransmit);

			err = -1;
			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
						dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);

	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);

tx_irq_fail:
	free_irq(priv->interruptError, dev);

err_irq_fail:
rx_skb_fail:
	free_skb_resources(priv);

tx_skb_fail:
	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
#ifdef CONFIG_GFAR_NAPI
	struct gfar_private *priv = netdev_priv(dev);
#endif
	int err;

#ifdef CONFIG_GFAR_NAPI
	napi_enable(&priv->napi);
#endif

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
#ifdef CONFIG_GFAR_NAPI
		napi_disable(&priv->napi);
#endif
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
#ifdef CONFIG_GFAR_NAPI
		napi_disable(&priv->napi);
#endif
		return err;
	}

	netif_start_queue(dev);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
