📄 skge.c

📁 Source code of the linux-2.4.29 operating system
💻 C
📖 Page 1 of 5
/*****************************************************************************
 *
 *	XmitFrame - fill one socket buffer into the transmit ring
 *
 * Description:
 *	This function puts a message into the transmit descriptor ring
 *	if there are descriptors left.
 *	Linux skb's consist of only one continuous buffer.
 *	The first step locks the ring. It is held locked
 *	the whole time to avoid problems with SWITCH_../PORT_RESET.
 *	Then the descriptor is allocated.
 *	The second part is linking the buffer to the descriptor.
 *	At the very last, the Control field of the descriptor
 *	is made valid for the BMU and a start TX command is given
 *	if necessary.
 *
 * Returns:
 *	> 0 - on success: the number of bytes in the message
 *	= 0 - on resource shortage: this frame sent or dropped, now
 *		the ring is full ( -> set tbusy)
 *	< 0 - on failure: other problems ( -> return failure to upper layers)
 */
static int XmitFrame(
SK_AC		*pAC,		/* pointer to adapter context           */
TX_PORT		*pTxPort,	/* pointer to struct of port to send to */
struct sk_buff	*pMessage)	/* pointer to send-message              */
{
	TXD		*pTxd;		/* the txd to fill */
	TXD		*pOldTxd;
	unsigned long	 Flags;
	SK_U64		 PhysAddr;
	int		 Protocol;
	int		 IpHeaderLength;
	int		 BytesSend = pMessage->len;

	SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("X"));

	spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
#ifndef USE_TX_COMPLETE
	FreeTxDescriptors(pAC, pTxPort);
#endif
	if (pTxPort->TxdRingFree == 0) {
		/*
		** Not enough free descriptors in the ring at the moment.
		** Maybe freeing some old ones helps?
		*/
		FreeTxDescriptors(pAC, pTxPort);
		if (pTxPort->TxdRingFree == 0) {
			spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
			SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex);
			SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
				SK_DBGCAT_DRV_TX_PROGRESS,
				("XmitFrame failed\n"));
			/*
			** The desired message can not be sent.
			** Because tbusy seems to be set, the message
			** should not be freed here. It will be used
			** by the scheduler of the ethernet handler.
			*/
			return (-1);
		}
	}

	/*
	** If the passed socket buffer is smaller than the minimum Ethernet
	** frame size of 60 bytes, copy everything into a new buffer and fill
	** all bytes between the original packet end and the new packet end
	** of 60 with 0x00. This is to resolve faulty padding by the HW with
	** 0xaa bytes.
	*/
	if (BytesSend < C_LEN_ETHERNET_MINSIZE) {
		if ((pMessage = skb_padto(pMessage, C_LEN_ETHERNET_MINSIZE)) == NULL) {
			return 0;
		}
		pMessage->len = C_LEN_ETHERNET_MINSIZE;
	}

	/*
	** Advance the head counter behind the descriptor needed for this
	** frame, so that the needed descriptor is reserved from now on. The
	** next action will be to add the passed buffer to the TX descriptor.
	*/
	pTxd = pTxPort->pTxdRingHead;
	pTxPort->pTxdRingHead = pTxd->pNextTxd;
	pTxPort->TxdRingFree--;

#ifdef SK_DUMP_TX
	DumpMsg(pMessage, "XmitFrame");
#endif

	/*
	** First step is to map the data to be sent via the adapter onto
	** the DMA memory. Kernel 2.2 uses virt_to_bus(), but kernels 2.4
	** and 2.6 need to use pci_map_page() for that mapping.
	*/
	PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
					virt_to_page(pMessage->data),
					((unsigned long) pMessage->data & ~PAGE_MASK),
					pMessage->len,
					PCI_DMA_TODEVICE);
	pTxd->VDataLow  = (SK_U32) (PhysAddr & 0xffffffff);
	pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
	pTxd->pMBuf     = pMessage;

	if (pMessage->ip_summed == CHECKSUM_HW) {
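		/*
		** Apparently the same workaround as described in XmitFrameSG()
		** below: the TCP checksum opcode is used for UDP frames on
		** Yukon revision 0, because the UDP checksum opcode does not
		** work on that hardware revision yet.
		*/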
		Protocol = ((SK_U8)pMessage->data[C_OFFSET_IPPROTO] & 0xff);
		if ((Protocol == C_PROTO_ID_UDP) &&
			(pAC->GIni.GIChipRev == 0) &&
			(pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
			pTxd->TBControl = BMU_TCP_CHECK;
		} else {
			pTxd->TBControl = BMU_UDP_CHECK;
		}

		IpHeaderLength  = (SK_U8)pMessage->data[C_OFFSET_IPHEADER];
		IpHeaderLength  = (IpHeaderLength & 0xf) * 4;
		pTxd->TcpSumOfs = 0; /* PH-Checksum already calculated */
		pTxd->TcpSumSt  = C_LEN_ETHERMAC_HEADER + IpHeaderLength +
							(Protocol == C_PROTO_ID_UDP ?
							C_OFFSET_UDPHEADER_UDPCS :
							C_OFFSET_TCPHEADER_TCPCS);
		pTxd->TcpSumWr  = C_LEN_ETHERMAC_HEADER + IpHeaderLength;

		pTxd->TBControl |= BMU_OWN | BMU_STF |
				   BMU_SW  | BMU_EOF |
#ifdef USE_TX_COMPLETE
				   BMU_IRQ_EOF |
#endif
				   pMessage->len;
	} else {
		pTxd->TBControl = BMU_OWN | BMU_STF | BMU_CHECK |
				  BMU_SW  | BMU_EOF |
#ifdef USE_TX_COMPLETE
				  BMU_IRQ_EOF |
#endif
				  pMessage->len;
	}

	/*
	** If the previous descriptor is already done, give the TX start cmd.
	*/
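	/*
	** (If the previous descriptor is still owned by the BMU, the BMU is
	** presumably still running and will advance to this descriptor on
	** its own; only when it has already finished does the queue need a
	** fresh CSR_START kick.)
	*/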
	pOldTxd = xchg(&pTxPort->pTxdRingPrev, pTxd);
	if ((pOldTxd->TBControl & BMU_OWN) == 0) {
		SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START);
	}

	/*
	** After releasing the lock, the skb may immediately be freed.
	*/
	spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
	if (pTxPort->TxdRingFree != 0) {
		return (BytesSend);
	} else {
		return (0);
	}

} /* XmitFrame */

/*****************************************************************************
 *
 *	XmitFrameSG - fill one socket buffer into the transmit ring
 *                (use SG and TCP/UDP hardware checksumming)
 *
 * Description:
 *	This function puts a message into the transmit descriptor ring
 *	if there are descriptors left.
 *
 * Returns:
 *	> 0 - on success: the number of bytes in the message
 *	= 0 - on resource shortage: this frame sent or dropped, now
 *		the ring is full ( -> set tbusy)
 *	< 0 - on failure: other problems ( -> return failure to upper layers)
 */
static int XmitFrameSG(
SK_AC		*pAC,		/* pointer to adapter context           */
TX_PORT		*pTxPort,	/* pointer to struct of port to send to */
struct sk_buff	*pMessage)	/* pointer to send-message              */
{
	TXD		*pTxd;
	TXD		*pTxdFst;
	TXD		*pTxdLst;
	int		 CurrFrag;
	int		 BytesSend;
	int		 IpHeaderLength;
	int		 Protocol;
	skb_frag_t	*sk_frag;
	SK_U64		 PhysAddr;
	unsigned long	 Flags;

	spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
#ifndef USE_TX_COMPLETE
	FreeTxDescriptors(pAC, pTxPort);
#endif
	if ((skb_shinfo(pMessage)->nr_frags + 1) > pTxPort->TxdRingFree) {
		FreeTxDescriptors(pAC, pTxPort);
		if ((skb_shinfo(pMessage)->nr_frags + 1) > pTxPort->TxdRingFree) {
			spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
			SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex);
			SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
				SK_DBGCAT_DRV_TX_PROGRESS,
				("XmitFrameSG failed - Ring full\n"));
				/* this message can not be sent now */
			return(-1);
		}
	}

	pTxd      = pTxPort->pTxdRingHead;
	pTxdFst   = pTxd;
	pTxdLst   = pTxd;
	BytesSend = 0;
	Protocol  = 0;

	/*
	** Map the first fragment (header) into the DMA space.
	*/
	PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
			virt_to_page(pMessage->data),
			((unsigned long) pMessage->data & ~PAGE_MASK),
			skb_headlen(pMessage),
			PCI_DMA_TODEVICE);

	pTxd->VDataLow  = (SK_U32) (PhysAddr & 0xffffffff);
	pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);

	/*
	** Does the HW need to evaluate the checksum for TCP or UDP packets?
	*/
	if (pMessage->ip_summed == CHECKSUM_HW) {
		pTxd->TBControl = BMU_STF | BMU_STFWD | skb_headlen(pMessage);
		/*
		** We have to use the opcode for TCP here, because the
		** opcode for UDP is not working in the hardware yet
		** (revision 2.0).
		*/
		Protocol = ((SK_U8)pMessage->data[C_OFFSET_IPPROTO] & 0xff);
		if ((Protocol == C_PROTO_ID_UDP) &&
			(pAC->GIni.GIChipRev == 0) &&
			(pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
			pTxd->TBControl |= BMU_TCP_CHECK;
		} else {
			pTxd->TBControl |= BMU_UDP_CHECK;
		}

		IpHeaderLength  = ((SK_U8)pMessage->data[C_OFFSET_IPHEADER] & 0xf)*4;
		pTxd->TcpSumOfs = 0; /* PH-Checksum already calculated */
		pTxd->TcpSumSt  = C_LEN_ETHERMAC_HEADER + IpHeaderLength +
						(Protocol == C_PROTO_ID_UDP ?
						C_OFFSET_UDPHEADER_UDPCS :
						C_OFFSET_TCPHEADER_TCPCS);
		pTxd->TcpSumWr  = C_LEN_ETHERMAC_HEADER + IpHeaderLength;

	} else {
		pTxd->TBControl = BMU_CHECK | BMU_SW | BMU_STF |
					skb_headlen(pMessage);
	}

	pTxd = pTxd->pNextTxd;
	pTxPort->TxdRingFree--;
	BytesSend += skb_headlen(pMessage);

	/*
	** Browse over all SG fragments and map each of them into the DMA space.
	*/
	for (CurrFrag = 0; CurrFrag < skb_shinfo(pMessage)->nr_frags; CurrFrag++) {
		sk_frag = &skb_shinfo(pMessage)->frags[CurrFrag];

		/*
		** We already have the proper value in the entry.
		*/
		PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
						 sk_frag->page,
						 sk_frag->page_offset,
						 sk_frag->size,
						 PCI_DMA_TODEVICE);

		pTxd->VDataLow  = (SK_U32) (PhysAddr & 0xffffffff);
		pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
		pTxd->pMBuf     = pMessage;

		/*
		** Does the HW need to evaluate the checksum for TCP or UDP packets?
		*/
		if (pMessage->ip_summed == CHECKSUM_HW) {
			pTxd->TBControl = BMU_OWN | BMU_SW | BMU_STFWD;
			/*
			** We have to use the opcode for TCP here, because the
			** opcode for UDP is not working in the hardware yet
			** (revision 2.0).
			*/
			if ((Protocol == C_PROTO_ID_UDP) &&
				(pAC->GIni.GIChipRev == 0) &&
				(pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
				pTxd->TBControl |= BMU_TCP_CHECK;
			} else {
				pTxd->TBControl |= BMU_UDP_CHECK;
			}
		} else {
			pTxd->TBControl = BMU_CHECK | BMU_SW | BMU_OWN;
		}

		/*
		** Do we have the last fragment?
		*/
		if ((CurrFrag + 1) == skb_shinfo(pMessage)->nr_frags) {
#ifdef USE_TX_COMPLETE
			pTxd->TBControl |= BMU_EOF | BMU_IRQ_EOF | sk_frag->size;
#else
			pTxd->TBControl |= BMU_EOF | sk_frag->size;
#endif
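			/*
			** Only now, with every fragment descriptor set up, is
			** the first descriptor handed over to the BMU (BMU_OWN),
			** so the hardware never sees a partially built chain.
			*/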
			pTxdFst->TBControl |= BMU_OWN | BMU_SW;
		} else {
			pTxd->TBControl |= sk_frag->size;
		}
		pTxdLst = pTxd;
		pTxd    = pTxd->pNextTxd;
		pTxPort->TxdRingFree--;
		BytesSend += sk_frag->size;
	}

	/*
	** If the previous descriptor is already done, give the TX start cmd.
	*/
	if ((pTxPort->pTxdRingPrev->TBControl & BMU_OWN) == 0) {
		SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START);
	}

	pTxPort->pTxdRingPrev = pTxdLst;
	pTxPort->pTxdRingHead = pTxd;

	spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);

	if (pTxPort->TxdRingFree > 0) {
		return (BytesSend);
	} else {
		return (0);
	}
}

/*****************************************************************************
 *
 *	FreeTxDescriptors - release descriptors from the descriptor ring
 *
 * Description:
 *	This function releases descriptors from a transmit ring if they
 *	have been sent by the BMU.
 *	If a descriptor has been sent, it can be freed and the message can
 *	be freed, too.
 *	The SOFTWARE controllable bit is used to prevent running around a
 *	completely free ring forever. If this bit is not set in the
 *	frame (by XmitFrame), this frame has never been sent or is
 *	already freed.
 *	The Tx descriptor ring lock must be held while calling this function!
 *
 * Returns:
 *	none
 */
static void FreeTxDescriptors(
SK_AC	*pAC,		/* pointer to the adapter context */
TX_PORT	*pTxPort)	/* pointer to destination port structure */
{
TXD	*pTxd;		/* pointer to the checked descriptor */
TXD	*pNewTail;	/* pointer to 'end' of the ring */
SK_U32	Control;	/* TBControl field of descriptor */
SK_U64	PhysAddr;	/* address of DMA mapping */

	pNewTail = pTxPort->pTxdRingTail;
	pTxd     = pNewTail;
	/*
	** Loop forever; exits if the BMU_SW bit is not set in the start frame
	** or the BMU_OWN bit is set in any frame.
	*/
	while (1) {
		Control = pTxd->TBControl;
		if ((Control & BMU_SW) == 0) {
			/*
			** The software controllable bit is set in the first
			** fragment when given to the BMU. Not set means that
			** this fragment was never sent or is already
			** freed ( -> ring completely free now).
			*/
			pTxPort->pTxdRingTail = pTxd;
			netif_wake_queue(pAC->dev[pTxPort->PortIndex]);
			return;
		}
		if (Control & BMU_OWN) {
			pTxPort->pTxdRingTail = pTxd;
			if (pTxPort->TxdRingFree > 0) {
				netif_wake_queue(pAC->dev[pTxPort->PortIndex]);
			}
			return;
		}

		/*
		** Release the DMA mapping, because until it is unmapped this
		** buffer is considered to be under control of the adapter card!
		*/
		PhysAddr = ((SK_U64) pTxd->VDataHigh) << (SK_U64) 32;
		PhysAddr |= (SK_U64) pTxd->VDataLow;
		pci_unmap_page(pAC->PciDev, PhysAddr,
				 pTxd->pMBuf->len,
				 PCI_DMA_TODEVICE);

		if (Control & BMU_EOF)
			DEV_KFREE_SKB_ANY(pTxd->pMBuf);	/* free message */

		pTxPort->TxdRingFree++;
		pTxd->TBControl &= ~BMU_SW;
		pTxd = pTxd->pNextTxd; /* point behind fragment with EOF */
	} /* while (forever) */
} /* FreeTxDescriptors */

/*****************************************************************************
 *
 *	FillRxRing - fill the receive ring with valid descriptors
 *
 * Description:
 *	This function fills the receive ring descriptors with data
 *	segments and makes them valid for the BMU.
 *	The active ring is filled completely, if possible.
 *	The non-active ring is filled only partially to save memory.
 *
 * Description of rx ring structure:
 *	head - points to the descriptor which will be used next by the BMU
 *	tail - points to the next descriptor to give to the BMU
 *
 * Returns:	N/A
 */
static void FillRxRing(
SK_AC		*pAC,		/* pointer to the adapter context */
RX_PORT		*pRxPort)	/* ptr to port struct for which the ring
				   should be filled */
{
unsigned long	Flags;

	spin_lock_irqsave(&pRxPort->RxDesRingLock, Flags);
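	/*
	** RxFillLimit is the number of descriptors that are deliberately
	** left unfilled; presumably this is how the partial fill of the
	** non-active ring described above is implemented.
	*/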
	while (pRxPort->RxdRingFree > pRxPort->RxFillLimit) {
		if (!FillRxDescriptor(pAC, pRxPort))
			break;
	}
	spin_unlock_irqrestore(&pRxPort->RxDesRingLock, Flags);
} /* FillRxRing */

/*****************************************************************************
 *
 *	FillRxDescriptor - fill one buffer into the receive ring
 *
 * Description:
 *	The function allocates a new receive buffer and
 *	puts it into the next descriptor.
 *
 * Returns:
 *	SK_TRUE - a buffer was added to the ring
 *	SK_FALSE - a buffer could not be added
 */
static SK_BOOL FillRxDescriptor(
SK_AC		*pAC,		/* pointer to the
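
A side note on the arithmetic both transmit paths share: the IP header length is taken from the low nibble of the version/IHL byte and multiplied by four, since the IHL field counts 32-bit words. Below is a minimal standalone sketch of just that step; it is ordinary user-space C, not driver code, and the function name is made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Extract the IP header length in bytes the same way XmitFrame() and
 * XmitFrameSG() do: the low nibble of the version/IHL byte is the header
 * length in 32-bit words. */
static int ip_header_length(uint8_t version_ihl)
{
	return (version_ihl & 0xf) * 4;
}

int main(void)
{
	/* 0x45 = IPv4, IHL of 5 words -> 20-byte header (no IP options) */
	printf("%d\n", ip_header_length(0x45));
	return 0;
}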
