/*
 * ipg.c — excerpt from the Linux kernel source tree (IC Plus IP1000
 * gigabit ethernet driver).  C source, 2,341 lines total; this is
 * page 1 of 5 as captured from a web code viewer, which collapsed the
 * original line formatting.
 */
{	struct ipg_nic_private *sp = netdev_priv(dev);	void __iomem *ioaddr = sp->ioaddr;	unsigned int txflowcontrol;	unsigned int rxflowcontrol;	unsigned int fullduplex;	unsigned int gig;	u32 mac_ctrl_val;	u32 asicctrl;	u8 phyctrl;	IPG_DEBUG_MSG("_config_autoneg\n");	asicctrl = ipg_r32(ASIC_CTRL);	phyctrl = ipg_r8(PHY_CTRL);	mac_ctrl_val = ipg_r32(MAC_CTRL);	/* Set flags for use in resolving auto-negotation, assuming	 * non-1000Mbps, half duplex, no flow control.	 */	fullduplex = 0;	txflowcontrol = 0;	rxflowcontrol = 0;	gig = 0;	/* To accomodate a problem in 10Mbps operation,	 * set a global flag if PHY running in 10Mbps mode.	 */	sp->tenmbpsmode = 0;	printk(KERN_INFO "%s: Link speed = ", dev->name);	/* Determine actual speed of operation. */	switch (phyctrl & IPG_PC_LINK_SPEED) {	case IPG_PC_LINK_SPEED_10MBPS:		printk("10Mbps.\n");		printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n",		       dev->name);		sp->tenmbpsmode = 1;		break;	case IPG_PC_LINK_SPEED_100MBPS:		printk("100Mbps.\n");		break;	case IPG_PC_LINK_SPEED_1000MBPS:		printk("1000Mbps.\n");		gig = 1;		break;	default:		printk("undefined!\n");		return 0;	}	if (phyctrl & IPG_PC_DUPLEX_STATUS) {		fullduplex = 1;		txflowcontrol = 1;		rxflowcontrol = 1;	}	/* Configure full duplex, and flow control. */	if (fullduplex == 1) {		/* Configure IPG for full duplex operation. */		printk(KERN_INFO "%s: setting full duplex, ", dev->name);		mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;		if (txflowcontrol == 1) {			printk("TX flow control");			mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;		} else {			printk("no TX flow control");			mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;		}		if (rxflowcontrol == 1) {			printk(", RX flow control.");			mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;		} else {			printk(", no RX flow control.");			mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;		}		printk("\n");	} else {		/* Configure IPG for half duplex operation. 
*/	        printk(KERN_INFO "%s: setting half duplex, "		       "no TX flow control, no RX flow control.\n", dev->name);		mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD &			~IPG_MC_TX_FLOW_CONTROL_ENABLE &			~IPG_MC_RX_FLOW_CONTROL_ENABLE;	}	ipg_w32(mac_ctrl_val, MAC_CTRL);	return 0;}/* Determine and configure multicast operation and set * receive mode for IPG. */static void ipg_nic_set_multicast_list(struct net_device *dev){	void __iomem *ioaddr = ipg_ioaddr(dev);	struct dev_mc_list *mc_list_ptr;	unsigned int hashindex;	u32 hashtable[2];	u8 receivemode;	IPG_DEBUG_MSG("_nic_set_multicast_list\n");	receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;	if (dev->flags & IFF_PROMISC) {		/* NIC to be configured in promiscuous mode. */		receivemode = IPG_RM_RECEIVEALLFRAMES;	} else if ((dev->flags & IFF_ALLMULTI) ||		   (dev->flags & IFF_MULTICAST &		    (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {		/* NIC to be configured to receive all multicast		 * frames. */		receivemode |= IPG_RM_RECEIVEMULTICAST;	} else if (dev->flags & IFF_MULTICAST & (dev->mc_count > 0)) {		/* NIC to be configured to receive selected		 * multicast addresses. */		receivemode |= IPG_RM_RECEIVEMULTICASTHASH;	}	/* Calculate the bits to set for the 64 bit, IPG HASHTABLE.	 * The IPG applies a cyclic-redundancy-check (the same CRC	 * used to calculate the frame data FCS) to the destination	 * address all incoming multicast frames whose destination	 * address has the multicast bit set. The least significant	 * 6 bits of the CRC result are used as an addressing index	 * into the hash table. If the value of the bit addressed by	 * this index is a 1, the frame is passed to the host system.	 */	/* Clear hashtable. */	hashtable[0] = 0x00000000;	hashtable[1] = 0x00000000;	/* Cycle through all multicast addresses to filter. */	for (mc_list_ptr = dev->mc_list;	     mc_list_ptr != NULL; mc_list_ptr = mc_list_ptr->next) {		/* Calculate CRC result for each multicast address. 
*/		hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,				     ETH_ALEN);		/* Use only the least significant 6 bits. */		hashindex = hashindex & 0x3F;		/* Within "hashtable", set bit number "hashindex"		 * to a logic 1.		 */		set_bit(hashindex, (void *)hashtable);	}	/* Write the value of the hashtable, to the 4, 16 bit	 * HASHTABLE IPG registers.	 */	ipg_w32(hashtable[0], HASHTABLE_0);	ipg_w32(hashtable[1], HASHTABLE_1);	ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);	IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));}static int ipg_io_config(struct net_device *dev){	void __iomem *ioaddr = ipg_ioaddr(dev);	u32 origmacctrl;	u32 restoremacctrl;	IPG_DEBUG_MSG("_io_config\n");	origmacctrl = ipg_r32(MAC_CTRL);	restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;	/* Based on compilation option, determine if FCS is to be	 * stripped on receive frames by IPG.	 */	if (!IPG_STRIP_FCS_ON_RX)		restoremacctrl |= IPG_MC_RCV_FCS;	/* Determine if transmitter and/or receiver are	 * enabled so we may restore MACCTRL correctly.	 */	if (origmacctrl & IPG_MC_TX_ENABLED)		restoremacctrl |= IPG_MC_TX_ENABLE;	if (origmacctrl & IPG_MC_RX_ENABLED)		restoremacctrl |= IPG_MC_RX_ENABLE;	/* Transmitter and receiver must be disabled before setting	 * IFSSelect.	 */	ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &		IPG_MC_RSVD_MASK, MAC_CTRL);	/* Now that transmitter and receiver are disabled, write	 * to IFSSelect.	 */	ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);	/* Set RECEIVEMODE register. 
*/	ipg_nic_set_multicast_list(dev);	ipg_w16(IPG_MAX_RXFRAME_SIZE, MAX_FRAME_SIZE);	ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE,   RX_DMA_POLL_PERIOD);	ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);	ipg_w8(IPG_RXDMABURSTTHRESH_VALUE,  RX_DMA_BURST_THRESH);	ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE,   TX_DMA_POLL_PERIOD);	ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);	ipg_w8(IPG_TXDMABURSTTHRESH_VALUE,  TX_DMA_BURST_THRESH);	ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |		 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |		 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |		 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);	ipg_w16(IPG_FLOWONTHRESH_VALUE,  FLOW_ON_THRESH);	ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);	/* IPG multi-frag frame bug workaround.	 * Per silicon revision B3 eratta.	 */	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);	/* IPG TX poll now bug workaround.	 * Per silicon revision B3 eratta.	 */	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);	/* IPG RX poll now bug workaround.	 * Per silicon revision B3 eratta.	 */	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);	/* Now restore MACCTRL to original setting. */	ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);	/* Disable unused RMON statistics. */	ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);	/* Disable unused MIB statistics. */	ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |		IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |		IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |		IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |		IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |		IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);	return 0;}/* * Create a receive buffer within system memory and update * NIC private structure appropriately. 
 */
/* Allocate and DMA-map one receive skb for RFD list slot 'entry'.
 * Returns 0 on success, -ENOMEM if skb allocation fails (the slot's
 * RxBuff pointer is then cleared).
 */
static int ipg_get_rxbuff(struct net_device *dev, int entry)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + entry;
	struct sk_buff *skb;
	u64 rxfragsize;

	IPG_DEBUG_MSG("_get_rxbuff\n");

	skb = netdev_alloc_skb(dev, IPG_RXSUPPORT_SIZE + NET_IP_ALIGN);
	if (!skb) {
		sp->RxBuff[entry] = NULL;
		return -ENOMEM;
	}

	/* Adjust the data start location within the buffer to
	 * align IP address field to a 16 byte boundary.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	/* Associate the receive buffer with the IPG NIC. */
	skb->dev = dev;

	/* Save the address of the sk_buff structure. */
	sp->RxBuff[entry] = skb;

	/* DMA-map the buffer and store the bus address in the descriptor. */
	rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		sp->rx_buf_sz, PCI_DMA_FROMDEVICE));

	/* Set the RFD fragment length (bits 48+ of frag_info). */
	rxfragsize = IPG_RXFRAG_SIZE;
	rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);

	return 0;
}

/* (Re)initialize the receive frame descriptor (RFD) ring: unmap and free
 * any existing buffers, allocate fresh ones, link the descriptors into a
 * circular list and program its base address into the NIC.
 * Returns 0 on success, -ENOMEM only if not even the first RFD could be
 * populated.
 */
static int init_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_rfdlist\n");

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		struct ipg_rx *rxfd = sp->rxd + i;

		/* Release any buffer left over from a previous run. */
		if (sp->RxBuff[i]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			IPG_DEV_KFREE_SKB(sp->RxBuff[i]);
			sp->RxBuff[i] = NULL;
		}

		/* Clear out the RFS field. */
		rxfd->rfs = 0x0000000000000000;

		if (ipg_get_rxbuff(dev, i) < 0) {
			/*
			 * A receive buffer was not ready, break the
			 * RFD list here.
			 */
			IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n");

			/* Just in case we cannot allocate a single RFD.
			 * Should not occur.
			 */
			if (i == 0) {
				printk(KERN_ERR "%s: No memory available"
					" for RFD list.\n", dev->name);
				return -ENOMEM;
			}
		}

		/* Chain this descriptor to the next one (bus address). */
		rxfd->next_desc = cpu_to_le64(sp->rxd_map +
			sizeof(struct ipg_rx)*(i + 1));
	}
	/* Close the ring: last descriptor points back to the head. */
	sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);

	sp->rx_current = 0;
	sp->rx_dirty = 0;

	/* Write the location of the RFDList to the IPG. */
	ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
	ipg_w32(0x00000000, RFD_LIST_PTR_1);

	return 0;
}

/* (Re)initialize the transmit frame descriptor (TFD) ring: mark every
 * descriptor done, free any pending skbs, link the descriptors into a
 * circular list and program its base address into the NIC.
 */
static void init_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_tfdlist\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		struct ipg_tx *txfd = sp->txd + i;

		/* Mark the descriptor as completed / available. */
		txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

		if (sp->TxBuff[i]) {
			IPG_DEV_KFREE_SKB(sp->TxBuff[i]);
			sp->TxBuff[i] = NULL;
		}

		txfd->next_desc = cpu_to_le64(sp->txd_map +
			sizeof(struct ipg_tx)*(i + 1));
	}
	/* Close the ring: last descriptor points back to the head. */
	sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);

	sp->tx_current = 0;
	sp->tx_dirty = 0;

	/* Write the location of the TFDList to the IPG. */
	IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n",
		       (u32) sp->txd_map);
	ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
	ipg_w32(0x00000000, TFD_LIST_PTR_1);

	sp->ResetCurrentTFD = 1;
}

/*
 * Free all transmit buffers which have already been transferred
 * via DMA to the IPG.
 */
static void ipg_nic_txfree(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int released, pending, dirty;

	IPG_DEBUG_MSG("_nic_txfree\n");

	/* tx_current/tx_dirty are free-running counters; 'pending' is the
	 * number of TFDs handed to hardware but not yet reclaimed.
	 */
	pending = sp->tx_current - sp->tx_dirty;
	dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;

	for (released = 0; released < pending; released++) {
		struct sk_buff *skb = sp->TxBuff[dirty];
		struct ipg_tx *txfd = sp->txd + dirty;

		IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc);

		/* Look at each TFD's TFC field beginning
		 * at the last freed TFD up to the current TFD.
		 * If the TFDDone bit is set, free the associated
		 * buffer.
		 */
		if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
			break;

		/* Free the transmit buffer. */
		if (skb) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				skb->len, PCI_DMA_TODEVICE);

			IPG_DEV_KFREE_SKB(skb);

			sp->TxBuff[dirty] = NULL;
		}
		dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
	}

	sp->tx_dirty += released;

	/* Restart the queue once at least one TFD slot is free again. */
	if (netif_queue_stopped(dev) &&
	    (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
		netif_wake_queue(dev);
	}
}

/* Transmit-timeout handler: reset the TX path, reconfigure the NIC,
 * rebuild the TFD list under the device lock, then re-enable transmit.
 */
static void ipg_tx_timeout(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
		  IPG_AC_FIFO);

	spin_lock_irq(&sp->lock);

	/* Re-configure after DMA reset. */
	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Error during re-configuration.\n",
		       dev->name);
	}

	init_tfdlist(dev);

	spin_unlock_irq(&sp->lock);

	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
		MAC_CTRL);
}

/*
 * For TxComplete interrupts, free all transmit
 * buffers which have already been transferred via DMA
 * to the IPG.
 */
static void ipg_nic_txcleanup(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_txcleanup\n");
	/* NOTE(review): this function is truncated here — the remainder of
	 * its body is on the next page of the extracted source (page 2/5).
	 */

/*
 * (Code-viewer UI residue removed: keyboard-shortcut help — copy Ctrl+C,
 * search Ctrl+F, fullscreen F11, font size Ctrl+= / Ctrl+-, help "?".)
 */