
sge.c, from the linux-2.6.15.6 source tree (C)
}

/*
 * Main interrupt handler, optimized assuming that we took a 'DATA'
 * interrupt.
 *
 * 1. Clear the interrupt
 * 2. Loop while we find valid descriptors and process them; accumulate
 *    information that can be processed after the loop
 * 3. Tell the SGE at which index we stopped processing descriptors
 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
 *    outstanding TX buffers waiting, replenish RX buffers, potentially
 *    reenable upper layers if they were turned off due to lack of TX
 *    resources which are available again.
 * 5. If we took an interrupt but no valid respQ descriptors were found, we
 *    let the slow_intr_handler run and do error handling.
 */
static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
{
	int work_done;
	struct respQ_e *e;
	struct adapter *adapter = cookie;
	struct respQ *Q = &adapter->sge->respQ;

	spin_lock(&adapter->async_lock);
	e = &Q->entries[Q->cidx];
	prefetch(e);

	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

	if (likely(e->GenerationBit == Q->genbit))
		work_done = process_responses(adapter, -1);
	else
		work_done = t1_slow_intr_handler(adapter);

	/*
	 * The unconditional clearing of the PL_CAUSE above may have raced
	 * with DMA completion and the corresponding generation of a response
	 * to cause us to miss the resulting data interrupt.  The next write
	 * is also unconditional to recover the missed interrupt and render
	 * this race harmless.
	 */
	writel(Q->cidx, adapter->regs + A_SG_SLEEPING);

	if (!work_done)
		adapter->sge->stats.unhandled_irqs++;
	spin_unlock(&adapter->async_lock);
	return IRQ_RETVAL(work_done != 0);
}

intr_handler_t t1_select_intr_handler(adapter_t *adapter)
{
	return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
}

/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed. Then, it doesn't access the global structure anymore, but
 * uses the corresponding fields on the stack. In conjunction with a spinlock
 * around that code, we can make the function reentrant without holding the
 * lock when we actually enqueue (which might be expensive, especially on
 * architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		       unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count;

	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;

	{	/* Ethernet packet */
		if (unlikely(credits < count)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[3]++;
			spin_unlock(&q->lock);
			CH_ERR("%s: Tx ring full while queue awake!\n",
			       adapter->name);
			return 1;
		}
		if (unlikely(credits - count < q->stop_thres)) {
			sge->stats.cmdQ_full[3]++;
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
		}
	}
	q->in_use += count;
	genbit = q->genbit;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race, where
	 * the hardware is going to sleep just after we checked; however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
	return 0;
}
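/*
 * Illustrative standalone sketch, not part of the original sge.c: the
 * producer-index/generation-bit arithmetic from t1_sge_tx above, pulled out
 * so the wraparound behaviour is easy to check in userspace.  The names
 * ring_state/ring_advance and the ring size are made up for illustration.
 */
#include <assert.h>

#define DEMO_Q_SIZE 8			/* hypothetical ring size */

struct ring_state {
	unsigned int pidx;		/* producer index */
	unsigned int genbit;		/* current generation bit */
};

/* Advance the producer by 'count' entries, as t1_sge_tx does. */
static void ring_advance(struct ring_state *q, unsigned int count)
{
	q->pidx += count;
	if (q->pidx >= DEMO_Q_SIZE) {
		q->pidx -= DEMO_Q_SIZE;	/* wrap to the start of the ring */
		q->genbit ^= 1;		/* new lap, so flip the generation */
	}
}

int main(void)
{
	struct ring_state q = { 6, 0 };

	ring_advance(&q, 3);		/* 6 + 3 = 9 wraps to index 1 */
	assert(q.pidx == 1 && q.genbit == 1);
	return 0;
}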
#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))

/*
 *	eth_hdr_len - return the length of an Ethernet header
 *	@data: pointer to the start of the Ethernet header
 *
 *	Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}

/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
	struct sge *sge = adapter->sge;
	struct cpl_tx_pkt *cpl;

#ifdef NETIF_F_TSO
	if (skb_shinfo(skb)->tso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		st->tso++;

		eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = skb->nh.iph->ihl;
		hdr->tcp_hdr_words = skb->h.th->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
						skb_shinfo(skb)->tso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
		sge->stats.tx_lso_pkts++;
	} else
#endif
	{
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early.  Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			dev_kfree_skb_any(skb);
			return NET_XMIT_SUCCESS;
		}

		/*
		 * We are using a non-standard hard_header_len and some kernel
		 * components, such as pktgen, do not handle it right.
		 * Complain when this happens but try to fix things up.
		 */
		if (unlikely(skb_headroom(skb) <
			     dev->hard_header_len - ETH_HLEN)) {
			struct sk_buff *orig_skb = skb;

			if (net_ratelimit())
				printk(KERN_ERR "%s: inadequate headroom in "
				       "Tx packet\n", dev->name);
			skb = skb_realloc_headroom(skb, sizeof(*cpl));
			dev_kfree_skb_any(orig_skb);
			if (!skb)
				return -ENOMEM;
		}

		if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
		    skb->ip_summed == CHECKSUM_HW &&
		    skb->nh.iph->protocol == IPPROTO_UDP)
			if (unlikely(skb_checksum_help(skb, 0))) {
				dev_kfree_skb_any(skb);
				return -ENOMEM;
			}

		/* Hmmm, assuming we catch the gratuitous ARP... and we'll use
		 * it to flush out stuck espi packets...
		 */
		if (unlikely(!adapter->sge->espibug_skb)) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;    /* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
		sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
		sge->stats.tx_reg_pkts++;
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;

	dev->trans_start = jiffies;
	return t1_sge_tx(skb, adapter, 0, dev);
}
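/*
 * Illustrative standalone sketch, not part of the original sge.c: the bit
 * layout produced by MK_ETH_TYPE_MSS above.  The MSS sits in the low 14 bits
 * and the Ethernet type code in the top two bits of the 16-bit field; the
 * concrete values below are made up for the demonstration.
 */
#include <assert.h>
#include <stdint.h>

#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))

int main(void)
{
	uint16_t v = MK_ETH_TYPE_MSS(1, 1460);	/* type 1, MSS 1460 */

	assert((v & 0x3FFF) == 1460);		/* bits 13:0 hold the MSS */
	assert((v >> 14) == 1);			/* bits 15:14 hold the type */
	return 0;
}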
/*
 * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use)   /* flush pending credits */
			writel(F_CMDQ0_ENABLE,
				sge->adapter->regs + A_SG_DOORBELL);

		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->netdev->poll = t1_poll;
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}

/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists, calculate the payload
	 * capacity of the large buffers.  Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	writel(0, sge->adapter->regs + A_SG_CONTROL);
	(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround.
 */
static void espibug_workaround(void *data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb;

		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] =
				    {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
				    ch_mac_addr, ETH_ALEN);
				memcpy(skb->data + skb->len - 10, ch_mac_addr,
				    ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
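/*
 * Illustrative kernel-module sketch, not part of the original sge.c: the
 * self-rearming timer pattern that sge_tx_reclaim_cb and espibug_workaround
 * both use, shown with the 2.6-era timer API this file is written against.
 * The module name, callback name, and one-second period are made up.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

/* Runs in softirq context and re-arms itself, like the callbacks above. */
static void demo_cb(unsigned long data)
{
	/* ... periodic work would go here ... */
	mod_timer(&demo_timer, jiffies + HZ);	/* fire again in ~1 second */
}

static int __init demo_init(void)
{
	init_timer(&demo_timer);
	demo_timer.function = demo_cb;
	demo_timer.data = 0;
	mod_timer(&demo_timer, jiffies + HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_timer);	/* wait out any running callback */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");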
/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);

	if (!sge)
		return NULL;
	memset(sge, 0, sizeof(*sge));

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);
		sge->espibug_timer.function = (void *)&espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;
		sge->espibug_timeout = 1;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	p->rx_coalesce_usecs = 50;
	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;
	p->polling = 0;

	return sge;
}
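/*
 * Illustrative sketch, not part of the original sge.c: a plausible probe-time
 * call order inferred from the comments above (t1_sge_configure allocates
 * resources but leaves the hardware off; t1_sge_start enables DMA).  The
 * function name setup_sge and its error handling are assumptions, not the
 * driver's actual probe path.
 */
static int setup_sge(struct adapter *adapter, struct sge_params *p)
{
	struct sge *sge = t1_sge_create(adapter, p);

	if (!sge)
		return -ENOMEM;
	if (t1_sge_configure(sge, p)) {
		kfree(sge);	/* configure freed its own RX/TX resources */
		return -ENOMEM;
	}
	t1_sge_start(sge);	/* enable the DMA engine and arm the timers */
	return 0;
}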
