e100_main.c

This file is part of the rt_linux package.
Language: C
Page 1 of 5
 *
 * This routine frees resources of TX skbs.
 */
static void inline
e100_tx_skb_free(struct e100_private *bdp, tcb_t *tcb)
{
        if (tcb->tcb_skb) {
                int i;
                tbd_t *tbd_arr = tcb->tbd_ptr;
                int frags = skb_shinfo(tcb->tcb_skb)->nr_frags;

                for (i = 0; i <= frags; i++, tbd_arr++) {
                        pci_unmap_single(bdp->pdev,
                                         le32_to_cpu(tbd_arr->tbd_buf_addr),
                                         le16_to_cpu(tbd_arr->tbd_buf_cnt),
                                         PCI_DMA_TODEVICE);
                }
                dev_kfree_skb_irq(tcb->tcb_skb);
                tcb->tcb_skb = NULL;
        }
}

/**
 * e100_tx_srv - service TX queues
 * @bdp: adapter's private data struct
 *
 * This routine services the TX queues. It reclaims the TCB's & TBD's & other
 * resources used during the transmit of this buffer. It is called from the ISR.
 * We don't need a tx_lock since we always access buffers which were already
 * prepared.
 */
void
e100_tx_srv(struct e100_private *bdp)
{
        tcb_t *tcb;
        int i;

        /* go over at most TxDescriptors buffers */
        for (i = 0; i < bdp->params.TxDescriptors; i++) {
                tcb = bdp->tcb_pool.data;
                tcb += bdp->tcb_pool.head;

                rmb();

                /* if the buffer at 'head' is not complete, break */
                if (!(tcb->tcb_hdr.cb_status &
                      __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
                        break;

                /* service next buffer, clear the out of resource condition */
                e100_tx_skb_free(bdp, tcb);

                if (netif_running(bdp->device))
                        netif_wake_queue(bdp->device);

                /* if we've caught up with 'tail', break */
                if (NEXT_TCB_TOUSE(bdp->tcb_pool.head) == bdp->tcb_pool.tail) {
                        break;
                }

                bdp->tcb_pool.head = NEXT_TCB_TOUSE(bdp->tcb_pool.head);
        }
}
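/*
 * NEXT_TCB_TOUSE() above (and TCB_TO_USE()/PREV_TCB_USED() further down) are
 * macros defined in the driver's headers, which are not part of this page.
 * They advance an index around the circular TCB pool; a minimal sketch of
 * that kind of ring advance, assuming a hypothetical descriptor count, is:
 *
 *      static inline unsigned int next_ring_index(unsigned int idx,
 *                                                 unsigned int count)
 *      {
 *              return (idx + 1 < count) ? idx + 1 : 0;   // wrap at end of pool
 *      }
 *
 * The driver's own macros may differ in detail. e100_tx_srv() walks the pool
 * from 'head' toward 'tail' reclaiming completed buffers, while
 * e100_prepare_xmit_buff() below advances 'tail' as new frames are queued.
 */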
/**
 * e100_rx_srv - service RX queue
 * @bdp: adapter's private data struct
 * @max_number_of_rfds: max number of RFDs to process
 * @rx_congestion: flag pointer, to inform the calling function of congestion.
 *
 * This routine processes the RX interrupt & services the RX queues.
 * For each successful RFD, it allocates a new msg block, links that
 * into the RFD list, and sends the old msg upstream.
 * The new RFD is then put at the end of the free list of RFD's.
 * It returns the number of serviced RFDs.
 */
u32
e100_rx_srv(struct e100_private *bdp)
{
        rfd_t *rfd;             /* new rfd, received rfd */
        int i;
        u16 rfd_status;
        struct sk_buff *skb;
        struct net_device *dev;
        unsigned int data_sz;
        struct rx_list_elem *rx_struct;
        u32 rfd_cnt = 0;

        dev = bdp->device;

        /* current design of rx is as follows:
         * 1. socket buffer (skb) used to pass network packet to upper layer
         * 2. all HW host memory structures (like RFDs, RBDs and data buffers)
         *    are placed in a skb's data room
         * 3. when rx process is complete, we change skb internal pointers to
         *    exclude from the data area all unrelated things (RFD, RBD) and to
         *    leave just the rx'ed packet itself
         * 4. for each skb passed to upper layer, a new one is allocated instead
         * 5. if no skbs are left, another attempt to allocate skbs will be made
         *    in 2 sec (the watchdog triggers a SWI intr and the isr should
         *    allocate new skbs)
         */
        for (i = 0; i < bdp->params.RxDescriptors; i++) {
                if (list_empty(&(bdp->active_rx_list))) {
                        break;
                }

                rx_struct = list_entry(bdp->active_rx_list.next,
                                       struct rx_list_elem, list_elem);
                skb = rx_struct->skb;

                rfd = RFD_POINTER(skb, bdp);    /* locate RFD within skb */

                // sync only the RFD header
                pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
                                    bdp->rfd_size, PCI_DMA_FROMDEVICE);
                rfd_status = le16_to_cpu(rfd->rfd_header.cb_status);   /* get RFD's status */
                if (!(rfd_status & RFD_STATUS_COMPLETE))        /* does not contain data yet - exit */
                        break;

                /* to allow manipulation of the current skb we need to unlink it */
                list_del(&(rx_struct->list_elem));

                /* do not free & unmap a badly received packet.
                 * move it to the end of the skb list for reuse */
                if (!(rfd_status & RFD_STATUS_OK)) {
                        e100_add_skb_to_end(bdp, rx_struct);
                        continue;
                }

                data_sz = min_t(u16, (le16_to_cpu(rfd->rfd_act_cnt) & 0x3fff),
                                (sizeof (rfd_t) - bdp->rfd_size));

                /* now sync all the data */
                pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
                                    (data_sz + bdp->rfd_size),
                                    PCI_DMA_FROMDEVICE);

                pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
                                 sizeof (rfd_t), PCI_DMA_FROMDEVICE);

                list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
                /* end of dma access to rfd */

                bdp->skb_req++;         /* incr number of requested skbs */
                e100_alloc_skbs(bdp);   /* and get them */

                /* set packet size, excluding checksum (last 2 bytes) if it is present */
                if ((bdp->flags & DF_CSUM_OFFLOAD)
                    && (bdp->rev_id < D102_REV_ID))
                        skb_put(skb, (int) data_sz - 2);
                else
                        skb_put(skb, (int) data_sz);

                /* set the protocol */
                skb->protocol = eth_type_trans(skb, dev);

                /* set the checksum info */
                if (bdp->flags & DF_CSUM_OFFLOAD) {
                        if (bdp->rev_id >= D102_REV_ID) {
                                skb->ip_summed = e100_D102_check_checksum(rfd);
                        } else {
                                skb->ip_summed = e100_D101M_checksum(bdp, skb);
                        }
                } else {
                        skb->ip_summed = CHECKSUM_NONE;
                }

                switch (netif_rx(skb)) {
                case NET_RX_BAD:
                case NET_RX_DROP:
                case NET_RX_CN_MOD:
                case NET_RX_CN_HIGH:
                        break;
                default:
                        bdp->drv_stats.net_stats.rx_bytes += skb->len;
                        break;
                }

                rfd_cnt++;
        }                       /* end of rfd loop */

        /* restart the RU if it has stopped */
        if ((readw(&bdp->scb->scb_status) & SCB_RUS_MASK) != SCB_RUS_READY) {
                e100_start_ru(bdp);
        }

        return rfd_cnt;
}
void
e100_refresh_txthld(struct e100_private *bdp)
{
        basic_cntr_t *pstat = &(bdp->stats_counters->basic_stats);

        /* as long as tx_per_underrun is not 0, we can go about dynamically
         * adjusting the xmit threshold. we stop doing that & resort to defaults
         * once the adjustments become meaningless. the value is adjusted by
         * dumping the error counters & checking the # of xmit underrun errors
         * we've had. */
        if (bdp->tx_per_underrun) {
                /* we go by the last values dumped by the dump statistics
                 * command */
                if (le32_to_cpu(pstat->xmt_gd_frames)) {
                        if (le32_to_cpu(pstat->xmt_uruns)) {
                                /*
                                 * if we have had more than one underrun per "DEFAULT #
                                 * OF XMITS ALLOWED PER UNDERRUN" good xmits, raise the
                                 * THRESHOLD.
                                 */
                                if ((le32_to_cpu(pstat->xmt_gd_frames) /
                                     le32_to_cpu(pstat->xmt_uruns)) <
                                    bdp->tx_per_underrun) {
                                        bdp->tx_thld += 3;
                                }
                        }

                        /*
                         * if we've had less than one underrun per the DEFAULT number
                         * of good xmits allowed, lower the THOLD, but not below the
                         * minimum
                         */
                        if (le32_to_cpu(pstat->xmt_gd_frames) >
                            bdp->tx_per_underrun) {
                                bdp->tx_thld--;

                                if (bdp->tx_thld < 6)
                                        bdp->tx_thld = 6;
                        }
                }               /* end good xmits */

                /*
                 * if our adjustments are becoming unreasonable, stop adjusting &
                 * resort to defaults & pray. A THOLD value > 190 means that the
                 * adapter will wait for 190*8=1520 bytes in the TX FIFO before it
                 * starts to xmit. Since the MTU is 1514, it doesn't make any sense
                 * to increase it further.
                 */
                if (bdp->tx_thld >= 190) {
                        bdp->tx_per_underrun = 0;
                        bdp->tx_thld = 189;
                }
        }                       /* end underrun check */
}

/**
 * e100_pseudo_hdr_csum - compute IP pseudo-header checksum
 * @ip: points to the header of the IP packet
 *
 * Return the 16 bit checksum of the IP pseudo-header, which is computed
 * over the fields: IP src, IP dst, next protocol, payload length.
 * The checksum value is returned in network byte order.
 */
static inline u16
e100_pseudo_hdr_csum(const struct iphdr *ip)
{
        u32 pseudo = 0;
        u32 payload_len = 0;

        payload_len = ntohs(ip->tot_len) - (ip->ihl * 4);
        pseudo += htons(payload_len);
        pseudo += (ip->protocol << 8);
        pseudo += ip->saddr & 0x0000ffff;
        pseudo += (ip->saddr & 0xffff0000) >> 16;
        pseudo += ip->daddr & 0x0000ffff;
        pseudo += (ip->daddr & 0xffff0000) >> 16;

        return FOLD_CSUM(pseudo);
}
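/*
 * FOLD_CSUM() is defined in the driver's headers, which are not shown on this
 * page. The conventional way to fold a 32-bit partial sum into a 16-bit
 * one's-complement checksum is to add the carries back in; a minimal sketch
 * (illustration only, the driver's macro may differ) is:
 *
 *      static inline u16 fold_csum_sketch(u32 sum)
 *      {
 *              sum = (sum & 0xffff) + (sum >> 16);   // fold high half into low half
 *              sum = (sum & 0xffff) + (sum >> 16);   // pick up any final carry
 *              return (u16) sum;
 *      }
 *
 * The folded pseudo-header sum is written into the TCP/UDP checksum field in
 * e100_prepare_xmit_buff() below, so the controller only has to finish the
 * checksum over the transport header and payload in hardware.
 */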
/**
 * e100_prepare_xmit_buff - prepare a buffer for transmission
 * @bdp: adapter's private data struct
 * @skb: skb to send
 *
 * This routine prepares a buffer for transmission. It checks
 * the message length for the appropriate size. It picks up a
 * free tcb from the TCB pool and sets up the corresponding
 * TBD's. If the number of fragments is more than the number
 * of TBD/TCB it copies all the fragments into a coalesce buffer.
 * It returns a pointer to the prepared TCB.
 */
static inline tcb_t *
e100_prepare_xmit_buff(struct e100_private *bdp, struct sk_buff *skb)
{
        tcb_t *tcb, *prev_tcb;

        tcb = bdp->tcb_pool.data;
        tcb += TCB_TO_USE(bdp->tcb_pool);

        if (bdp->flags & USE_IPCB) {
                tcb->tcbu.ipcb.ip_activation_high = IPCB_IP_ACTIVATION_DEFAULT;
                tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCP_PACKET;
                tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCPUDP_CHECKSUM_ENABLE;
        }

        tcb->tcb_hdr.cb_status = 0;
        tcb->tcb_thrshld = bdp->tx_thld;
        tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_S_BIT);

        /* set the I bit on the modulo tcbs, so we will get an interrupt to
         * clean things up */
        if (!(++bdp->tx_count % TX_FRAME_CNT)) {
                tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_I_BIT);
        }

        tcb->tcb_skb = skb;

        if (skb->ip_summed == CHECKSUM_HW) {
                const struct iphdr *ip = skb->nh.iph;

                if ((ip->protocol == IPPROTO_TCP) ||
                    (ip->protocol == IPPROTO_UDP)) {
                        u16 *chksum;

                        tcb->tcbu.ipcb.ip_activation_high =
                                IPCB_HARDWAREPARSING_ENABLE;
                        tcb->tcbu.ipcb.ip_schedule |=
                                IPCB_TCPUDP_CHECKSUM_ENABLE;

                        if (ip->protocol == IPPROTO_TCP) {
                                struct tcphdr *tcp;

                                tcp = (struct tcphdr *) ((u32 *) ip + ip->ihl);
                                chksum = &(tcp->check);
                                tcb->tcbu.ipcb.ip_schedule |= IPCB_TCP_PACKET;
                        } else {
                                struct udphdr *udp;

                                udp = (struct udphdr *) ((u32 *) ip + ip->ihl);
                                chksum = &(udp->check);
                        }

                        *chksum = e100_pseudo_hdr_csum(ip);
                }
        }

        if (!skb_shinfo(skb)->nr_frags) {
                (tcb->tbd_ptr)->tbd_buf_addr =
                        cpu_to_le32(pci_map_single(bdp->pdev, skb->data,
                                                   skb->len, PCI_DMA_TODEVICE));
                (tcb->tbd_ptr)->tbd_buf_cnt = cpu_to_le16(skb->len);
                tcb->tcb_tbd_num = 1;
                tcb->tcb_tbd_ptr = tcb->tcb_tbd_dflt_ptr;
        } else {
                int i;
                void *addr;
                tbd_t *tbd_arr_ptr = &(tcb->tbd_ptr[1]);
                skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

                (tcb->tbd_ptr)->tbd_buf_addr =
                        cpu_to_le32(pci_map_single(bdp->pdev, skb->data,
                                                   (skb->len - skb->data_len),
                                                   PCI_DMA_TODEVICE));
                (tcb->tbd_ptr)->tbd_buf_cnt =
                        cpu_to_le16(skb->len - skb->data_len);

                for (i = 0; i < skb_shinfo(skb)->nr_frags;
                     i++, tbd_arr_ptr++, frag++) {
                        addr = ((void *) page_address(frag->page) +
                                frag->page_offset);

                        tbd_arr_ptr->tbd_buf_addr =
                                cpu_to_le32(pci_map_single(bdp->pdev,
                                                           addr, frag->size,
                                                           PCI_DMA_TODEVICE));
                        tbd_arr_ptr->tbd_buf_cnt = cpu_to_le16(frag->size);
                }
                tcb->tcb_tbd_num = skb_shinfo(skb)->nr_frags + 1;
                tcb->tcb_tbd_ptr = tcb->tcb_tbd_expand_ptr;
        }

        /* clear the S-BIT on the previous tcb */
        prev_tcb = bdp->tcb_pool.data;
        prev_tcb += PREV_TCB_USED(bdp->tcb_pool);
        prev_tcb->tcb_hdr.cb_cmd &= __constant_cpu_to_le16((u16) ~CB_S_BIT);

        bdp->tcb_pool.tail = NEXT_TCB_TOUSE(bdp->tcb_pool.tail);

        wmb();

        e100_start_cu(bdp, tcb);

        return tcb;
}
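/*
 * A note on the CB_S_BIT / CB_I_BIT handling in e100_prepare_xmit_buff():
 * the S (suspend) bit is set on the freshly prepared TCB and cleared on the
 * previous one, so the CU only suspends at the end of the currently chained
 * work, and the I bit is set roughly once every TX_FRAME_CNT frames so that
 * a completion interrupt fires and e100_tx_srv() gets a chance to reclaim
 * transmitted buffers.
 */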
/* Changed for 82558 enhancement */
/**
 * e100_start_cu - start the adapter's CU
 * @bdp: adapter's private data struct
 * @tcb: TCB to be transmitted
 *
 * This routine issues a CU Start or CU Resume command to the 82558/9.
 * This routine was added because prepare_ext_xmit_buff takes advantage
 * of the 82558/9's Dynamic TBD chaining feature and has to start the CU as
 * soon as the first TBD is ready.
 *
 * e100_start_cu must be called while holding the tx_lock !
 */
void
e100_start_cu(struct e100_private *bdp, tcb_t *tcb)
{
        unsigned long lock_flag;

        spin_lock_irqsave(&(bdp->bd_lock), lock_flag);
        switch (bdp->next_cu_cmd) {
        case RESUME_NO_WAIT:
                /* the last cu command was a CU_RESUME. if this is a 558 or
                 * newer we don't need to wait for the command word to clear;
                 * we reach here only if we are a bachelor
                 */
                e100_exec_cmd(bdp, SCB_CUC_RESUME);
                break;

        case RESUME_WAIT:
                if ((bdp->flags & IS_ICH) &&
                    (bdp->cur_line_speed == 10) &&
                    (bdp->cur_dplx_mode == HALF_DUPLEX)) {
                        e100_wait_exec_simple(bdp, SCB_CUC_NOOP);
                        udelay(1);
                }
                if ((e100_wait_exec_simple(bdp, SCB_CUC_RESUME)) &&
                    (bdp->flags & IS_BACHELOR) && (!(bdp->flags & IS_ICH))) {
                        bdp->next_cu_cmd = RESUME_NO_WAIT;
                }
                break;

        case START_WAIT:
                // the last command was a non_tx CU command
                if (!e100_wait_cus_idle(bdp))
                        printk(KERN_DEBUG
                               "e100: %s: cu_start: timeout waiting for cu\n",
                               bdp->device->name);
                if (!e100_wait_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
                                          SCB_CUC_START)) {
                        printk(KERN_DEBUG
                               "e100: %s: cu_start: timeout waiting for scb\n",
                               bdp->device->name);
                        e100_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
                                        SCB_CUC_START);
                }
                bdp->next_cu_cmd = RESUME_WAIT;

                break;
        }

        /* save the last tcb */
        bdp->last_tcb = tcb;

        spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
}

/* ====================================================================== */
/* hw                                                                     */
/* ====================================================================== */

/**
 * e100_selftest - perform H/W self test
 * @bdp: adapter's private data struct
 * @st_timeout: address to return timeout value, if fails
 * @st_result: address to return self
