
📄 e100_main.c

📁 An embedded Linux kernel, version 2.4.21
💻 C
📖 Page 1 of 5
        }

        if ((transmits / 32 < collisions)
            && (transmits > min_transmits)) {
            state = state_table[state][0];  /* increment */
        } else if (transmits < min_transmits) {
            state = state_table[state][1];  /* decrement */
        }
        bdp->ifs_value = state_table[state][next_col];
        bdp->ifs_state = state;
    }

    /* If the IFS value has changed, configure the device */
    if (bdp->ifs_value != old_value) {
        e100_config_ifs(bdp);
        e100_config(bdp);
    }
}

/**
 * e100intr - interrupt handler
 * @irq: the IRQ number
 * @dev_inst: the net_device struct
 * @regs: registers (unused)
 *
 * This routine is the ISR for the e100 board. It services
 * the RX & TX queues & starts the RU if it has stopped due
 * to no resources.
 */
void
e100intr(int irq, void *dev_inst, struct pt_regs *regs)
{
    struct net_device *dev;
    struct e100_private *bdp;
    u16 intr_status;

    dev = dev_inst;
    bdp = dev->priv;

    intr_status = readw(&bdp->scb->scb_status);
    /* If not my interrupt, just return */
    if (!(intr_status & SCB_STATUS_ACK_MASK) || (intr_status == 0xffff)) {
        return;
    }

    /* disable and ack intr */
    e100_disable_clear_intr(bdp);

    /* the device is closed, don't continue or else bad things may happen. */
    if (!netif_running(dev)) {
        e100_set_intr_mask(bdp);
        return;
    }

    /* SWI intr (triggered by the watchdog) is the signal to allocate new skb buffers */
    if (intr_status & SCB_STATUS_ACK_SWI) {
        e100_alloc_skbs(bdp);
    }

    /* do recv work if any */
    if (intr_status &
        (SCB_STATUS_ACK_FR | SCB_STATUS_ACK_RNR | SCB_STATUS_ACK_SWI))
        bdp->drv_stats.rx_intr_pkts += e100_rx_srv(bdp);

    /* clean up after tx'ed packets */
    if (intr_status & (SCB_STATUS_ACK_CNA | SCB_STATUS_ACK_CX))
        e100_tx_srv(bdp);

    e100_set_intr_mask(bdp);
}

/**
 * e100_tx_skb_free - free TX skb resources
 * @bdp: adapter's private data struct
 * @tcb: associated tcb of the freed skb
 *
 * This routine frees the resources of TX skbs.
 */
static inline void
e100_tx_skb_free(struct e100_private *bdp, tcb_t *tcb)
{
    if (tcb->tcb_skb) {
        int i;
        tbd_t *tbd_arr = tcb->tbd_ptr;
        int frags = skb_shinfo(tcb->tcb_skb)->nr_frags;

        for (i = 0; i <= frags; i++, tbd_arr++) {
            pci_unmap_single(bdp->pdev,
                             le32_to_cpu(tbd_arr->tbd_buf_addr),
                             le16_to_cpu(tbd_arr->tbd_buf_cnt),
                             PCI_DMA_TODEVICE);
        }
        dev_kfree_skb_irq(tcb->tcb_skb);
        tcb->tcb_skb = NULL;
    }
}

/**
 * e100_tx_srv - service TX queues
 * @bdp: adapter's private data struct
 *
 * This routine services the TX queues. It reclaims the TCBs, TBDs and other
 * resources used during the transmit of this buffer. It is called from the ISR.
 * We don't need a tx_lock since we always access buffers which were already
 * prepared.
 */
void
e100_tx_srv(struct e100_private *bdp)
{
    tcb_t *tcb;
    int i;

    /* go over at most TxDescriptors buffers */
    for (i = 0; i < bdp->params.TxDescriptors; i++) {
        tcb = bdp->tcb_pool.data;
        tcb += bdp->tcb_pool.head;

        rmb();

        /* if the buffer at 'head' is not complete, break */
        if (!(tcb->tcb_hdr.cb_status &
              __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
            break;

        /* service next buffer, clear the out-of-resource condition */
        e100_tx_skb_free(bdp, tcb);

        if (netif_running(bdp->device))
            netif_wake_queue(bdp->device);

        /* if we've caught up with 'tail', break */
        if (NEXT_TCB_TOUSE(bdp->tcb_pool.head) == bdp->tcb_pool.tail) {
            break;
        }

        bdp->tcb_pool.head = NEXT_TCB_TOUSE(bdp->tcb_pool.head);
    }
}
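/*
 * Editor's note: a minimal, self-contained sketch of the circular
 * head/tail reclaim pattern used by e100_tx_srv() above.  It is NOT part
 * of e100_main.c; NEXT_TCB_TOUSE() is assumed to be a wrap-around
 * increment defined elsewhere in the driver, and the names ring_next(),
 * ring_reclaim(), struct ring and RING_SIZE below are illustrative only.
 */
#include <stdbool.h>

#define RING_SIZE 64                    /* illustrative descriptor count */

struct ring_entry {
    bool complete;                      /* set when the device finishes the buffer */
};

struct ring {
    struct ring_entry entries[RING_SIZE];
    unsigned int head;                  /* oldest entry not yet reclaimed */
    unsigned int tail;                  /* next entry the producer will use */
};

/* wrap-around increment, in the spirit of NEXT_TCB_TOUSE() */
unsigned int ring_next(unsigned int idx)
{
    return (idx + 1) % RING_SIZE;
}

/* Reclaim completed entries from 'head' toward 'tail', as e100_tx_srv() does. */
unsigned int ring_reclaim(struct ring *r)
{
    unsigned int reclaimed = 0;
    unsigned int i;

    for (i = 0; i < RING_SIZE; i++) {
        struct ring_entry *e = &r->entries[r->head];

        if (!e->complete)               /* entry at 'head' not finished yet */
            break;

        e->complete = false;            /* "free" the entry */
        reclaimed++;

        if (ring_next(r->head) == r->tail)      /* caught up with the producer */
            break;

        r->head = ring_next(r->head);
    }
    return reclaimed;
}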
/**
 * e100_rx_srv - service RX queue
 * @bdp: adapter's private data struct
 * @max_number_of_rfds: max number of RFDs to process
 * @rx_congestion: flag pointer, to inform the calling function of congestion.
 *
 * This routine processes the RX interrupt & services the RX queues.
 * For each successful RFD, it allocates a new msg block, links that
 * into the RFD list, and sends the old msg upstream.
 * The new RFD is then put at the end of the free list of RFDs.
 * It returns the number of serviced RFDs.
 */
u32
e100_rx_srv(struct e100_private *bdp)
{
    rfd_t *rfd;         /* new rfd, received rfd */
    int i;
    u16 rfd_status;
    struct sk_buff *skb;
    struct net_device *dev;
    unsigned int data_sz;
    struct rx_list_elem *rx_struct;
    u32 rfd_cnt = 0;

    dev = bdp->device;

    /* The current design of rx is as follows:
     * 1. a socket buffer (skb) is used to pass the network packet to the upper layer
     * 2. all HW host memory structures (like RFDs, RBDs and data buffers)
     *    are placed in the skb's data room
     * 3. when rx processing is complete, we change the skb internal pointers to
     *    exclude from the data area all unrelated things (RFD, RBD) and to leave
     *    just the rx'ed packet itself
     * 4. for each skb passed to the upper layer, a new one is allocated instead.
     * 5. if no skbs are left, another attempt to allocate skbs will be made in 2 sec
     *    (the watchdog triggers an SWI intr and the ISR should allocate new skbs)
     */
    for (i = 0; i < bdp->params.RxDescriptors; i++) {
        if (list_empty(&(bdp->active_rx_list))) {
            break;
        }

        rx_struct = list_entry(bdp->active_rx_list.next,
                               struct rx_list_elem, list_elem);
        skb = rx_struct->skb;

        rfd = RFD_POINTER(skb, bdp);    /* locate RFD within skb */

        /* sync only the RFD header */
        pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
                            bdp->rfd_size, PCI_DMA_FROMDEVICE);
        rfd_status = le16_to_cpu(rfd->rfd_header.cb_status);   /* get RFD's status */
        if (!(rfd_status & RFD_STATUS_COMPLETE))        /* does not contain data yet - exit */
            break;

        /* to allow manipulation of the current skb we need to unlink it */
        list_del(&(rx_struct->list_elem));

        /* do not free & unmap a badly received packet;
         * move it to the end of the skb list for reuse */
        if (!(rfd_status & RFD_STATUS_OK)) {
            e100_add_skb_to_end(bdp, rx_struct);
            continue;
        }

        data_sz = min_t(u16, (le16_to_cpu(rfd->rfd_act_cnt) & 0x3fff),
                        (sizeof (rfd_t) - bdp->rfd_size));

        /* now sync all the data */
        pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
                            (data_sz + bdp->rfd_size),
                            PCI_DMA_FROMDEVICE);

        pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
                         sizeof (rfd_t), PCI_DMA_FROMDEVICE);

        list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
        /* end of dma access to rfd */

        bdp->skb_req++;         /* incr number of requested skbs */
        e100_alloc_skbs(bdp);   /* and get them */

        /* set packet size, excluding the checksum (last 2 bytes) if it is present */
        if ((bdp->flags & DF_CSUM_OFFLOAD)
            && (bdp->rev_id < D102_REV_ID))
            skb_put(skb, (int) data_sz - 2);
        else
            skb_put(skb, (int) data_sz);

        /* set the protocol */
        skb->protocol = eth_type_trans(skb, dev);

        /* set the checksum info */
        if (bdp->flags & DF_CSUM_OFFLOAD) {
            if (bdp->rev_id >= D102_REV_ID) {
                skb->ip_summed = e100_D102_check_checksum(rfd);
            } else {
                skb->ip_summed = e100_D101M_checksum(bdp, skb);
            }
        } else {
            skb->ip_summed = CHECKSUM_NONE;
        }

        if (bdp->vlgrp && (rfd_status & CB_STATUS_VLAN)) {
            vlan_hwaccel_rx(skb, bdp->vlgrp, be16_to_cpu(rfd->vlanid));
        } else {
            netif_rx(skb);
        }
        dev->last_rx = jiffies;

        bdp->drv_stats.net_stats.rx_bytes += skb->len;

        rfd_cnt++;
    }                   /* end of rfd loop */

    /* restart the RU if it has stopped */
    if ((readw(&bdp->scb->scb_status) & SCB_RUS_MASK) != SCB_RUS_READY) {
        e100_start_ru(bdp);
    }

    return rfd_cnt;
}
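/*
 * Editor's note: a minimal sketch (not part of the driver) of the length
 * handling seen in e100_rx_srv() above.  The RFD "actual count" field keeps
 * the received byte count in its low 14 bits (hence the & 0x3fff mask), and
 * on pre-D102 parts with checksum offload the hardware appends a 2-byte
 * checksum that is excluded before the skb is passed upstream.
 * rx_payload_len(), RFD_ACT_CNT_MASK and RX_CSUM_TRAILER are illustrative
 * names, not e100 identifiers.
 */
#include <stdint.h>
#include <stdbool.h>

#define RFD_ACT_CNT_MASK  0x3fff        /* low 14 bits hold the byte count */
#define RX_CSUM_TRAILER   2             /* bytes appended by older checksum HW */

unsigned int rx_payload_len(uint16_t rfd_act_cnt, bool csum_offload,
                            bool pre_d102)
{
    unsigned int len = rfd_act_cnt & RFD_ACT_CNT_MASK;

    /* older parts append the checksum after the frame; drop it */
    if (csum_offload && pre_d102 && len >= RX_CSUM_TRAILER)
        len -= RX_CSUM_TRAILER;

    return len;
}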
void
e100_refresh_txthld(struct e100_private *bdp)
{
    basic_cntr_t *pstat = &(bdp->stats_counters->basic_stats);

    /* As long as tx_per_underrun is not 0, we can go about dynamically
     * adjusting the xmit threshold. We stop doing that & resort to defaults
     * once the adjustments become meaningless. The value is adjusted by
     * dumping the error counters & checking the # of xmit underrun errors
     * we've had. */
    if (bdp->tx_per_underrun) {
        /* We are going by the last values dumped from the dump statistics
         * command */
        if (le32_to_cpu(pstat->xmt_gd_frames)) {
            if (le32_to_cpu(pstat->xmt_uruns)) {
                /*
                 * if we have had more than one underrun per "DEFAULT #
                 * OF XMITS ALLOWED PER UNDERRUN" good xmits, raise the
                 * THRESHOLD.
                 */
                if ((le32_to_cpu(pstat->xmt_gd_frames) /
                     le32_to_cpu(pstat->xmt_uruns)) <
                    bdp->tx_per_underrun) {
                    bdp->tx_thld += 3;
                }
            }

            /*
             * if we've had less than one underrun per the DEFAULT number
             * of good xmits allowed, lower the THOLD, but never below its
             * floor of 6.
             */
            if (le32_to_cpu(pstat->xmt_gd_frames) >
                bdp->tx_per_underrun) {
                bdp->tx_thld--;

                if (bdp->tx_thld < 6)
                    bdp->tx_thld = 6;
            }
        }               /* end good xmits */

        /*
         * if our adjustments are becoming unreasonable, stop adjusting &
         * resort to defaults & pray. A THOLD value > 190 means that the
         * adapter will wait for 190*8=1520 bytes in the TX FIFO before it
         * starts to xmit. Since the MTU is 1514, further increases don't
         * make any sense.
         */
        if (bdp->tx_thld >= 190) {
            bdp->tx_per_underrun = 0;
            bdp->tx_thld = 189;
        }
    }                   /* end underrun check */
}
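/*
 * Editor's note: illustrative arithmetic only, not part of the driver.
 * As the comment in e100_refresh_txthld() above explains, the transmit
 * threshold is expressed in 8-byte units: the adapter waits for
 * tx_thld * 8 bytes in its TX FIFO before it starts sending.  Since
 * 190 * 8 = 1520 bytes already exceeds a full 1514-byte Ethernet frame,
 * the driver caps the value at 189 and stops adjusting.
 * tx_thld_bytes() is an illustrative helper name.
 */
unsigned int tx_thld_bytes(unsigned int tx_thld)
{
    /* e.g. tx_thld_bytes(190) == 1520, which is > 1514 */
    return tx_thld * 8;
}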
/**
 * e100_prepare_xmit_buff - prepare a buffer for transmission
 * @bdp: adapter's private data struct
 * @skb: skb to send
 *
 * This routine prepares a buffer for transmission. It checks
 * the message length for the appropriate size. It picks up a
 * free tcb from the TCB pool and sets up the corresponding
 * TBDs. If the number of fragments is more than the number
 * of TBDs per TCB, it copies all the fragments into a coalesce buffer.
 * It returns a pointer to the prepared TCB.
 */
static inline tcb_t *
e100_prepare_xmit_buff(struct e100_private *bdp, struct sk_buff *skb)
{
    tcb_t *tcb, *prev_tcb;

    tcb = bdp->tcb_pool.data;
    tcb += TCB_TO_USE(bdp->tcb_pool);

    if (bdp->flags & USE_IPCB) {
        tcb->tcbu.ipcb.ip_activation_high = IPCB_IP_ACTIVATION_DEFAULT;
        tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCP_PACKET;
        tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCPUDP_CHECKSUM_ENABLE;
    }

    if (bdp->vlgrp && vlan_tx_tag_present(skb)) {
        (tcb->tcbu).ipcb.ip_activation_high |= IPCB_INSERTVLAN_ENABLE;
        (tcb->tcbu).ipcb.vlan = cpu_to_be16(vlan_tx_tag_get(skb));
    }

    tcb->tcb_hdr.cb_status = 0;
    tcb->tcb_thrshld = bdp->tx_thld;
    tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_S_BIT);

    /* Set the I (Interrupt) bit on every (TX_FRAME_CNT)th packet */
    if (!(++bdp->tx_count % TX_FRAME_CNT))
        tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_I_BIT);
    else
        /* Clear the I bit on other packets */
        tcb->tcb_hdr.cb_cmd &= ~__constant_cpu_to_le16(CB_I_BIT);

    tcb->tcb_skb = skb;

    if (skb->ip_summed == CHECKSUM_HW) {
        const struct iphdr *ip = skb->nh.iph;

        if ((ip->protocol == IPPROTO_TCP) ||
            (ip->protocol == IPPROTO_UDP)) {

            tcb->tcbu.ipcb.ip_activation_high |=
                IPCB_HARDWAREPARSING_ENABLE;
            tcb->tcbu.ipcb.ip_schedule |=
                IPCB_TCPUDP_CHECKSUM_ENABLE;

            if (ip->protocol == IPPROTO_TCP)
                tcb->tcbu.ipcb.ip_schedule |= IPCB_TCP_PACKET;
        }
    }

    if (!skb_shinfo(skb)->nr_frags) {
        (tcb->tbd_ptr)->tbd_buf_addr =
            cpu_to_le32(pci_map_single(bdp->pdev, skb->data,
                                       skb->len, PCI_DMA_TODEVICE));
        (tcb->tbd_ptr)->tbd_buf_cnt = cpu_to_le16(skb->len);
        tcb->tcb_tbd_num = 1;
        tcb->tcb_tbd_ptr = tcb->tcb_tbd_dflt_ptr;
    } else {
        int i;
        void *addr;
        tbd_t *tbd_arr_ptr = &(tcb->tbd_ptr[1]);
        skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

        (tcb->tbd_ptr)->tbd_buf_addr =
            cpu_to_le32(pci_map_single(bdp->pdev, skb->data,
                                       (skb->len - skb->data_len),
                                       PCI_DMA_TODEVICE));
        (tcb->tbd_ptr)->tbd_buf_cnt =
            cpu_to_le16(skb->len - skb->data_len);

        for (i = 0; i < skb_shinfo(skb)->nr_frags;
             i++, tbd_arr_ptr++, frag++) {
            addr = ((void *) page_address(frag->page) +
                    frag->page_offset);
            tbd_arr_ptr->tbd_buf_addr =
                cpu_to_le32(pci_map_single(bdp->pdev,
                                           addr, frag->size,
                                           PCI_DMA_TODEVICE));
            tbd_arr_ptr->tbd_buf_cnt = cpu_to_le16(frag->size);
        }
        tcb->tcb_tbd_num = skb_shinfo(skb)->nr_frags + 1;
        tcb->tcb_tbd_ptr = tcb->tcb_tbd_expand_ptr;
    }

    /* clear the S-BIT on the previous tcb */
    prev_tcb = bdp->tcb_pool.data;
    prev_tcb += PREV_TCB_USED(bdp->tcb_pool);
    prev_tcb->tcb_hdr.cb_cmd &= __constant_cpu_to_le16((u16) ~CB_S_BIT);

    bdp->tcb_pool.tail = NEXT_TCB_TOUSE(bdp->tcb_pool.tail);

    wmb();

    e100_start_cu(bdp, tcb);

    return tcb;
}

/* Changed for 82558 enhancement */
/**
 * e100_start_cu - start the adapter's CU
 * @bdp: adapter's private data struct
 * @tcb: TCB to be transmitted
 *
 * This routine issues a CU Start or CU Resume command to the 82558/9.
 * It was added because prepare_ext_xmit_buff takes advantage of the
 * 82558/9's dynamic TBD chaining feature and has to start the CU as
 * soon as the first TBD is ready.
 *
 * e100_start_cu must be called while holding the tx_lock!
 */
u8
e100_start_cu(struct e100_private *bdp, tcb_t *tcb)
{
    unsigned long lock_flag;
    u8 ret = true;

    spin_lock_irqsave(&(bdp->bd_lock), lock_flag);
    switch (bdp->n
