
e100_main.c
Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
Language: C
Page 1 of 5
			       "Half" : "Full");			e100_config_fc(bdp);			e100_config(bdp);  		} else {			printk(KERN_ERR "e100: %s NIC Link is Down\n",			       bdp->device->name);		}	}	// toggle the tx queue according to link status	// this also resolves a race condition between tx & non-cu cmd flows	if (netif_carrier_ok(dev)) {		if (netif_running(dev))			netif_wake_queue(dev);	} else {		if (netif_running(dev))			netif_stop_queue(dev);		/* When changing to non-autoneg, device may lose  */		/* link with some switches. e100 will try to      */		/* revover link by sending command to PHY layer   */		if (bdp->params.e100_speed_duplex != E100_AUTONEG)			e100_force_speed_duplex_to_phy(bdp);	}	rmb();	if (e100_update_stats(bdp)) {		/* Check if a change in the IFS parameter is needed,		   and configure the device accordingly */		if (bdp->params.b_params & PRM_IFS)			e100_manage_adaptive_ifs(bdp);		/* Now adjust our dynamic tx threshold value */		e100_refresh_txthld(bdp);		/* Now if we are on a 557 and we havn't received any frames then we		 * should issue a multicast command to reset the RU */		if (bdp->rev_id < D101A4_REV_ID) {			if (!(bdp->stats_counters->basic_stats.rcv_gd_frames)) {				e100_set_multi(dev);			}		}	}	/* Issue command to dump statistics from device.        */	/* Check for command completion on next watchdog timer. */	e100_dump_stats_cntrs(bdp);	wmb();	/* relaunch watchdog timer in 2 sec */	mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));	if (list_empty(&bdp->active_rx_list))		e100_trigger_SWI(bdp);}/** * e100_manage_adaptive_ifs * @bdp: atapter's private data struct * * This routine manages the adaptive Inter-Frame Spacing algorithm * using a state machine. */voide100_manage_adaptive_ifs(struct e100_private *bdp){	static u16 state_table[9][4] = {	// rows are states		{2, 0, 0, 0},	// state0   // column0: next state if increasing		{2, 0, 5, 30},	// state1   // column1: next state if decreasing		{5, 1, 5, 30},	// state2   // column2: IFS value for 100 mbit		{5, 3, 0, 0},	// state3   // column3: IFS value for 10 mbit		{5, 3, 10, 60},	// state4		{8, 4, 10, 60},	// state5		{8, 6, 0, 0},	// state6		{8, 6, 20, 60},	// state7		{8, 7, 20, 60}	// state8	};	u32 transmits =		le32_to_cpu(bdp->stats_counters->basic_stats.xmt_gd_frames);	u32 collisions =		le32_to_cpu(bdp->stats_counters->basic_stats.xmt_ttl_coll);	u32 state = bdp->ifs_state;	u32 old_value = bdp->ifs_value;	int next_col;	u32 min_transmits;	if (bdp->cur_dplx_mode == FULL_DUPLEX) {		bdp->ifs_state = 0;		bdp->ifs_value = 0;	} else {		/* Half Duplex */		/* Set speed specific parameters */		if (bdp->cur_line_speed == 100) {			next_col = 2;			min_transmits = MIN_NUMBER_OF_TRANSMITS_100;		} else {	/* 10 Mbps */			next_col = 3;			min_transmits = MIN_NUMBER_OF_TRANSMITS_10;		}		if ((transmits / 32 < collisions)		    && (transmits > min_transmits)) {			state = state_table[state][0];	/* increment */		} else if (transmits < min_transmits) {			state = state_table[state][1];	/* decrement */		}		bdp->ifs_value = state_table[state][next_col];		bdp->ifs_state = state;	}	/* If the IFS value has changed, configure the device */	if (bdp->ifs_value != old_value) {		e100_config_ifs(bdp);		e100_config(bdp);	}}/** * e100intr - interrupt handler * @irq: the IRQ number * @dev_inst: the net_device struct * @regs: registers (unused) * * This routine is the ISR for the e100 board. It services * the RX & TX queues & starts the RU if it has stopped due * to no resources. 
/**
 * e100intr - interrupt handler
 * @irq: the IRQ number
 * @dev_inst: the net_device struct
 * @regs: registers (unused)
 *
 * This routine is the ISR for the e100 board. It services
 * the RX & TX queues & starts the RU if it has stopped due
 * to no resources.
 */
irqreturn_t
e100intr(int irq, void *dev_inst, struct pt_regs *regs)
{
        struct net_device *dev;
        struct e100_private *bdp;
        u16 intr_status;

        dev = dev_inst;
        bdp = dev->priv;

        intr_status = readw(&bdp->scb->scb_status);
        /* If not my interrupt, just return */
        if (!(intr_status & SCB_STATUS_ACK_MASK) || (intr_status == 0xffff)) {
                return IRQ_NONE;
        }

        /* disable and ack intr */
        e100_disable_clear_intr(bdp);

        /* the device is closed, don't continue or else bad things may happen. */
        if (!netif_running(dev)) {
                e100_set_intr_mask(bdp);
                return IRQ_NONE;
        }

        /* SWI intr (triggered by watchdog) is signal to allocate new skb buffers */
        if (intr_status & SCB_STATUS_ACK_SWI) {
                e100_alloc_skbs(bdp);
        }

        /* do recv work if any */
        if (intr_status &
            (SCB_STATUS_ACK_FR | SCB_STATUS_ACK_RNR | SCB_STATUS_ACK_SWI))
                bdp->drv_stats.rx_intr_pkts += e100_rx_srv(bdp);

        /* clean up after tx'ed packets */
        if (intr_status & (SCB_STATUS_ACK_CNA | SCB_STATUS_ACK_CX))
                e100_tx_srv(bdp);

        e100_set_intr_mask(bdp);
        return IRQ_HANDLED;
}

/**
 * e100_tx_skb_free - free TX skb resources
 * @bdp: adapter's private data struct
 * @tcb: associated tcb of the freed skb
 *
 * This routine frees the resources of TX skbs.
 */
static inline void
e100_tx_skb_free(struct e100_private *bdp, tcb_t *tcb)
{
        if (tcb->tcb_skb) {
#ifdef MAX_SKB_FRAGS
                int i;
                tbd_t *tbd_arr = tcb->tbd_ptr;
                int frags = skb_shinfo(tcb->tcb_skb)->nr_frags;

                for (i = 0; i <= frags; i++, tbd_arr++) {
                        pci_unmap_single(bdp->pdev,
                                         le32_to_cpu(tbd_arr->tbd_buf_addr),
                                         le16_to_cpu(tbd_arr->tbd_buf_cnt),
                                         PCI_DMA_TODEVICE);
                }
#else
                pci_unmap_single(bdp->pdev,
                                 le32_to_cpu((tcb->tbd_ptr)->tbd_buf_addr),
                                 tcb->tcb_skb->len, PCI_DMA_TODEVICE);
#endif
                dev_kfree_skb_irq(tcb->tcb_skb);
                tcb->tcb_skb = NULL;
        }
}

/**
 * e100_tx_srv - service TX queues
 * @bdp: adapter's private data struct
 *
 * This routine services the TX queues. It reclaims the TCB's & TBD's & other
 * resources used during the transmit of this buffer. It is called from the ISR.
 * We don't need a tx_lock since we always access buffers which were already
 * prepared.
 */
void
e100_tx_srv(struct e100_private *bdp)
{
        tcb_t *tcb;
        int i;

        /* go over at most TxDescriptors buffers */
        for (i = 0; i < bdp->params.TxDescriptors; i++) {
                tcb = bdp->tcb_pool.data;
                tcb += bdp->tcb_pool.head;

                rmb();

                /* if the buffer at 'head' is not complete, break */
                if (!(tcb->tcb_hdr.cb_status &
                      __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
                        break;

                /* service next buffer, clear the out of resource condition */
                e100_tx_skb_free(bdp, tcb);

                if (netif_running(bdp->device))
                        netif_wake_queue(bdp->device);

                /* if we've caught up with 'tail', break */
                if (NEXT_TCB_TOUSE(bdp->tcb_pool.head) == bdp->tcb_pool.tail) {
                        break;
                }

                bdp->tcb_pool.head = NEXT_TCB_TOUSE(bdp->tcb_pool.head);
        }
}
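e100_tx_srv() above reclaims completed TCBs by walking the ring from 'head' toward 'tail' with NEXT_TCB_TOUSE(), a macro whose definition does not appear on this page. The sketch below shows a plausible wrap-around index advance consistent with how the macro is used here; it is an assumption for illustration, not the driver's actual definition.

/* Sketch of a wrap-around index advance consistent with how NEXT_TCB_TOUSE()
 * is used in e100_tx_srv() above. This is an assumption for illustration;
 * the driver's actual macro is defined elsewhere in the source. */
#include <stdio.h>

static unsigned next_tcb_index(unsigned idx, unsigned tx_descriptors)
{
        /* advance one slot, wrapping back to 0 at the end of the TCB pool */
        return (idx + 1 >= tx_descriptors) ? 0 : idx + 1;
}

int main(void)
{
        unsigned head = 0, tail = 5, tx_descriptors = 8;

        /* reclaim completed slots until head catches up with tail,
         * the same stop condition e100_tx_srv() uses */
        while (next_tcb_index(head, tx_descriptors) != tail)
                head = next_tcb_index(head, tx_descriptors);

        printf("head stops at %u (tail=%u)\n", head, tail);     /* head=4 */
        return 0;
}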
/**
 * e100_rx_srv - service RX queue
 * @bdp: adapter's private data struct
 * @max_number_of_rfds: max number of RFDs to process
 * @rx_congestion: flag pointer, to inform the calling function of congestion.
 *
 * This routine processes the RX interrupt & services the RX queues.
 * For each successful RFD, it allocates a new msg block, links that
 * into the RFD list, and sends the old msg upstream.
 * The new RFD is then put at the end of the free list of RFD's.
 * It returns the number of serviced RFDs.
 */
u32
e100_rx_srv(struct e100_private *bdp)
{
        rfd_t *rfd;             /* new rfd, received rfd */
        int i;
        u16 rfd_status;
        struct sk_buff *skb;
        struct net_device *dev;
        unsigned int data_sz;
        struct rx_list_elem *rx_struct;
        u32 rfd_cnt = 0;

        dev = bdp->device;

        /* current design of rx is as follows:
         * 1. socket buffer (skb) used to pass network packet to upper layer
         * 2. all HW host memory structures (like RFDs, RBDs and data buffers)
         *    are placed in a skb's data room
         * 3. when rx process is complete, we change skb internal pointers to exclude
         *    from data area all unrelated things (RFD, RBD) and to leave
         *    just the rx'ed packet itself
         * 4. for each skb passed to upper layer, a new one is allocated instead.
         * 5. if no skb is left, in 2 sec another attempt to allocate skbs will be made
         *    (watchdog triggers SWI intr and ISR should allocate new skbs)
         */
        for (i = 0; i < bdp->params.RxDescriptors; i++) {
                if (list_empty(&(bdp->active_rx_list))) {
                        break;
                }

                rx_struct = list_entry(bdp->active_rx_list.next,
                                       struct rx_list_elem, list_elem);
                skb = rx_struct->skb;

                rfd = RFD_POINTER(skb, bdp);    /* locate RFD within skb */

                // sync only the RFD header
                pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
                                    bdp->rfd_size, PCI_DMA_FROMDEVICE);
                rfd_status = le16_to_cpu(rfd->rfd_header.cb_status);    /* get RFD's status */
                if (!(rfd_status & RFD_STATUS_COMPLETE))        /* does not contain data yet - exit */
                        break;

                /* to allow manipulation with current skb we need to unlink it */
                list_del(&(rx_struct->list_elem));

                /* do not free & unmap badly received packet.
                 * move it to the end of skb list for reuse */
                if (!(rfd_status & RFD_STATUS_OK)) {
                        e100_add_skb_to_end(bdp, rx_struct);
                        continue;
                }

                data_sz = min_t(u16, (le16_to_cpu(rfd->rfd_act_cnt) & 0x3fff),
                                (sizeof (rfd_t) - bdp->rfd_size));

                /* now sync all the data */
                pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
                                    (data_sz + bdp->rfd_size),
                                    PCI_DMA_FROMDEVICE);

                pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
                                 sizeof (rfd_t), PCI_DMA_FROMDEVICE);

                list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
                /* end of dma access to rfd */

                bdp->skb_req++;         /* incr number of requested skbs */
                e100_alloc_skbs(bdp);   /* and get them */

                /* set packet size, excluding checksum (2 last bytes) if it is present */
                if ((bdp->flags & DF_CSUM_OFFLOAD)
                    && (bdp->rev_id < D102_REV_ID))
                        skb_put(skb, (int) data_sz - 2);
                else
                        skb_put(skb, (int) data_sz);

                /* set the protocol */
                skb->protocol = eth_type_trans(skb, dev);

                /* set the checksum info */
                if (bdp->flags & DF_CSUM_OFFLOAD) {
                        if (bdp->rev_id >= D102_REV_ID) {
                                skb->ip_summed = e100_D102_check_checksum(rfd);
                        } else {
                                skb->ip_summed = e100_D101M_checksum(bdp, skb);
                        }
                } else {
                        skb->ip_summed = CHECKSUM_NONE;
                }

#ifdef E100_IA64_DMA_FIX
                // Free low-memory skb buffer without passing it up to the IP stack
                if (non_DMA32_memory_present) {
                        skb_linearize(skb, GFP_ATOMIC);
                }
#endif
                bdp->drv_stats.net_stats.rx_bytes += skb->len;

#ifdef NETIF_F_HW_VLAN_TX
                if (bdp->vlgrp && (rfd_status & CB_STATUS_VLAN)) {
                        vlan_hwaccel_rx(skb, bdp->vlgrp, be16_to_cpu(rfd->vlanid));
                } else {
                        netif_rx(skb);
                }
#else
                netif_rx(skb);
#endif
                dev->last_rx = jiffies;

                rfd_cnt++;
        }                       /* end of rfd loop */

        /* restart the RU if it has stopped */
        if ((readw(&bdp->scb->scb_status) & SCB_RUS_MASK) != SCB_RUS_READY) {
                e100_start_ru(bdp);
        }

        return rfd_cnt;
}
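The design comment inside e100_rx_srv() describes how the hardware RFD sits at the front of each skb's data room and is trimmed away before the packet is handed upstream. The sketch below illustrates that slicing with plain buffers; the sizes and field names are made up for the example and do not match the driver's rfd_t layout or its RFD_POINTER() macro.

/* Sketch of the "RFD lives inside the skb data room" layout described in the
 * e100_rx_srv() design comment. Sizes and names are illustrative only; the
 * driver's real rfd_t and RFD_POINTER() are defined elsewhere in the source. */
#include <stdio.h>
#include <string.h>

#define SKETCH_RFD_SIZE 16      /* pretend RFD header size */

struct sketch_buf {
        unsigned char room[SKETCH_RFD_SIZE + 1518];     /* RFD header + frame */
        unsigned char *data;    /* points at the packet, like skb->data */
        unsigned int len;       /* packet length, like skb->len */
};

/* After DMA completes, skip past the RFD header and expose only the frame,
 * which is the effect the driver achieves with RFD_POINTER()/skb_put(). */
static void expose_packet(struct sketch_buf *b, unsigned int actual_count)
{
        b->data = b->room + SKETCH_RFD_SIZE;    /* exclude the RFD header */
        b->len = actual_count;                  /* bytes the NIC wrote */
}

int main(void)
{
        struct sketch_buf b;

        /* pretend the NIC wrote a 4-byte frame just after the RFD header */
        memcpy(b.room + SKETCH_RFD_SIZE, "\x01\x02\x03\x04", 4);
        expose_packet(&b, 4);
        printf("packet starts at offset %ld, len=%u\n",
               (long) (b.data - b.room), b.len);
        return 0;
}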
void
e100_refresh_txthld(struct e100_private *bdp)
{
        basic_cntr_t *pstat = &(bdp->stats_counters->basic_stats);

        /* as long as tx_per_underrun is not 0, we can go about dynamically
         * adjusting the xmit threshold. we stop doing that & resort to defaults
         * once the adjustments become meaningless. the value is adjusted by
         * dumping the error counters & checking the # of xmit underrun errors
         * we've had. */
        if (bdp->tx_per_underrun) {
                /* We are going to use the last values dumped from the dump
                 * statistics command */
                if (le32_to_cpu(pstat->xmt_gd_frames)) {
                        if (le32_to_cpu(pstat->xmt_uruns)) {
                                /*
                                 * if we have had more than one underrun per "DEFAULT #
                                 * OF XMITS ALLOWED PER UNDERRUN" good xmits, raise the
                                 * THRESHOLD.
                                 */
                                if ((le32_to_cpu(pstat->xmt_gd_frames) /
                                     le32_to_cpu(pstat->xmt_uruns)) <
                                    bdp->tx_per_underrun) {
                                        bdp->tx_thld += 3;
                                }
                        }

                        /*
                         * if we've had less than one underrun per the DEFAULT number
                         * of good xmits allowed, lower the THOLD, but not below its
                         * minimum of 6
                         */
                        if (le32_to_cpu(pstat->xmt_gd_frames) >
                            bdp->tx_per_underrun) {
                                bdp->tx_thld--;

                                if (bdp->tx_thld < 6)
                                        bdp->tx_thld = 6;
                        }
                }               /* end good xmits */

                /*
                 * if our adjustments are becoming unreasonable, stop adjusting &
                 * resort to defaults & pray. A THOLD value > 190 means that the
                 * adapter will wait for 190*8=1520 bytes in TX FIFO before it
                 * starts xmit. Since MTU is 1514, it doesn't make any sense for
                 * further increase. */
                if (bdp->tx_thld >= 190) {
                        bdp->tx_per_underrun = 0;
                        bdp->tx_thld = 189;
                }
        }                       /* end underrun check */
}

/**
 * e100_prepare_xmit_buff - prepare a buffer for transmission
 * @bdp: adapter's private data struct
 * @skb: skb to send
 *
 * This routine prepares a buffer for transmission. It checks
 * the message length for the appropriate size. It picks up a
 * free tcb from the TCB pool and sets up the corresponding
 * TBD's. If the number of fragments is more than the number
 * of TBD/TCB it copies all the fragments into a coalesce buffer.
 * It returns a pointer to the prepared TCB.
 */
static inline tcb_t *
e100_prepare_xmit_buff(struct e100_private *bdp, struct sk_buff *skb)
{
        tcb_t *tcb, *prev_tcb;

        tcb = bdp->t
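The listing is cut off here at the page boundary (page 1 of 5); e100_prepare_xmit_buff() continues on the next page. As a rough illustration of the decision its doc comment describes, namely one TBD per fragment when they all fit and a copy into a single coalesce buffer otherwise, the sketch below uses made-up names and a hypothetical TBD count; it is not the driver's code and not a continuation of the function above.

/* Illustrative sketch of the coalesce-vs-scatter-gather decision described in
 * the e100_prepare_xmit_buff() doc comment above. NOT the continuation of the
 * driver function; all names and the TBD count are made up for the example. */
#include <stdio.h>

#define SKETCH_TBDS_PER_TCB 4   /* hypothetical number of TBDs per TCB */

/* Decide how a packet with 'nr_frags' fragments would be sent: one TBD per
 * fragment if they fit, otherwise copy everything into one coalesce buffer. */
static const char *tx_strategy(int nr_frags)
{
        if (nr_frags <= SKETCH_TBDS_PER_TCB)
                return "scatter-gather (one TBD per fragment)";
        return "coalesce (copy all fragments into one buffer)";
}

int main(void)
{
        for (int frags = 0; frags <= 5; frags++)
                printf("nr_frags=%d -> %s\n", frags, tx_strategy(frags));
        return 0;
}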
