
sge.c
linux-2.6.15.6
C
Page 1 of 4
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

	pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
			    pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	CH_ERR("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
}

/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb->len - skb->data_len, PCI_DMA_TODEVICE);
	ce = &q->centries[pidx];
	ce->skb = NULL;
	pci_unmap_addr_set(ce, dma_addr, mapping);
	pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);

	flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
		V_CMD_GEN2(gen);
	e = &q->entries[pidx];
	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
	for (e1 = e, i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ce++;
		e1++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			ce = q->centries;
			e1 = q->entries;
		}

		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
				       PCI_DMA_TODEVICE);
		ce->skb = NULL;
		pci_unmap_addr_set(ce, dma_addr, mapping);
		pci_unmap_len_set(ce, dma_len, frag->size);

		e1->addr_lo = (u32)mapping;
		e1->addr_hi = (u64)mapping >> 32;
		e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
		e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
			    V_CMD_GEN2(gen);
	}

	ce->skb = skb;
	wmb();
	e->flags = flags;
}
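/*
 * Note on write_tx_descs() above: each descriptor carries the current
 * generation value in two places, V_CMD_GEN1() inside len_gen and
 * V_CMD_GEN2() inside flags, which lets the hardware detect a completely
 * written entry.  The flags word of the first (SOP) descriptor is held back
 * and written only after the wmb(), once every other descriptor of the
 * packet is in memory, so the chip can never pick up a partially written
 * descriptor chain as valid.
 */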
/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}

#ifndef SET_ETHTOOL_OPS
# define __netif_rx_complete(dev) netif_rx_complete(dev)
#endif

/*
 * We cannot use the standard netif_rx_schedule_prep() because we have multiple
 * ports plus the TOE all multiplexing onto a single response queue, therefore
 * accepting new responses cannot depend on the state of any particular port.
 * So define our own equivalent that omits the netif_running() test.
 */
static inline int napi_schedule_prep(struct net_device *dev)
{
	return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/**
 *	sge_rx - process an ingress ethernet packet
 *	@sge: the sge structure
 *	@fl: the free list that contains the packet buffer
 *	@len: the packet length
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;

	sge->stats.ethernet_pkts++;
	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
			 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
			 SGE_RX_DROP_THRES);
	if (!skb) {
		sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
		return 0;
	}

	p = (struct cpl_rx_pkt *)skb->data;
	skb_pull(skb, sizeof(*p));
	skb->dev = adapter->port[p->iff].dev;
	skb->dev->last_rx = jiffies;
	skb->protocol = eth_type_trans(skb, skb->dev);
	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		sge->port_stats[p->iff].rx_cso_good++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
		sge->port_stats[p->iff].vlan_xtract++;
		if (adapter->params.sge.polling)
			vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
						 ntohs(p->vlan));
		else
			vlan_hwaccel_rx(skb, adapter->vlan_grp,
					ntohs(p->vlan));
	} else if (adapter->params.sge.polling)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
	return 0;
}

/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;

	if (enough_free_Tx_descs(&sge->cmdQ[0])) {
		int i;

		for_each_port(adap, i) {
			struct net_device *nd = adap->port[i].dev;

			if (test_and_clear_bit(nd->if_port,
					       &sge->stopped_tx_queues) &&
			    netif_running(nd)) {
				sge->stats.cmdQ_restarted[3]++;
				netif_wake_queue(nd);
			}
		}
	}
}

/*
 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
 * information.
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;

	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}
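/*
 * The response queue processed below is a ring that the hardware fills.
 * Each entry carries a generation bit; the driver tracks the value it
 * expects in q->genbit and toggles it whenever cidx wraps, so an entry is
 * new exactly when e->GenerationBit == q->genbit.  Credits written back to
 * A_SG_RSPQUEUECREDIT tell the chip how many consumed entries it may reuse.
 */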
/*
 * Process SGE responses, up to the supplied budget.  Returns the number of
 * responses processed.  A negative budget is effectively unlimited.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int budget_left = budget;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (likely(budget_left && e->GenerationBit == q->genbit)) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}
		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}
		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			if (unlikely(!e->Sop || !e->Eop))
				BUG();
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		--budget_left;
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	budget -= budget_left;
	return budget;
}

/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses.  Such responses are too light-weight to
 * justify calling a softirq when using NAPI, so we handle them specially in
 * hard interrupt context.  The function is called with a pointer to a
 * response, which the caller must ensure is a valid pure response.  Returns
 * 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}
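/*
 * t1_poll() below follows the original 2.6.x NAPI contract: the RX softirq
 * hands the handler a budget through *budget and dev->quota, the handler
 * decrements both by the amount of work actually done, and it returns 1 to
 * stay scheduled (work remaining) or 0 after completing the poll with
 * netif_rx_complete() once the queue has been drained.
 */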
/*
 * Handler for new data events when using NAPI.  This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
static int t1_poll(struct net_device *dev, int *budget)
{
	struct adapter *adapter = dev->priv;
	int effective_budget = min(*budget, dev->quota);

	int work_done = process_responses(adapter, effective_budget);
	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= effective_budget)
		return 1;

	__netif_rx_complete(dev);

	/*
	 * Because we don't atomically flush the following write it is
	 * possible that in very rare cases it can reach the device in a way
	 * that races with a new response being written plus an error interrupt
	 * causing the NAPI interrupt handler below to return unhandled status
	 * to the OS.  To protect against this would require flushing the write
	 * and doing both the write and the flush with interrupts off.  Way too
	 * expensive and unjustifiable given the rarity of the race.
	 */
	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
	return 0;
}

/*
 * Returns true if the device is already scheduled for polling.
 */
static inline int napi_is_scheduled(struct net_device *dev)
{
	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/*
 * NAPI version of the main interrupt handler.
 */
static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
{
	int handled;
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	struct respQ *q = &adapter->sge->respQ;

	/*
	 * Clear the SGE_DATA interrupt first thing.  Normally the NAPI
	 * handler has control of the response queue and the interrupt handler
	 * can look at the queue reliably only once it knows NAPI is off.
	 * We can't wait that long to clear the SGE_DATA interrupt because we
	 * could race with t1_poll rearming the SGE interrupt, so we need to
	 * clear the interrupt speculatively and really early on.
	 */
	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

	spin_lock(&adapter->async_lock);
	if (!napi_is_scheduled(sge->netdev)) {
		struct respQ_e *e = &q->entries[q->cidx];

		if (e->GenerationBit == q->genbit) {
			if (e->DataValid ||
			    process_pure_responses(adapter, e)) {
				if (likely(napi_schedule_prep(sge->netdev)))
					__netif_rx_schedule(sge->netdev);
				else
					printk(KERN_CRIT
					       "NAPI schedule failure!\n");
			} else
				writel(q->cidx, adapter->regs + A_SG_SLEEPING);
			handled = 1;
			goto unlock;
		} else
			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
	} else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
		printk(KERN_ERR "data interrupt while NAPI running\n");

	handled = t1_slow_intr_handler(adapter);
	if (!handled)
		sge->stats.unhandled_irqs++;
unlock:
	spin_unlock(&adapter->async_lock);
	return IRQ_RETVAL(handled != 0);
}
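For context, t1_poll() and t1_interrupt_napi() only take effect once the driver wires them into the kernel; that registration happens in the driver's setup code, which is not part of this page. The sketch below is only an illustration of the 2.6.15-era interfaces involved (dev->poll and dev->weight for NAPI, request_irq() with SA_SHIRQ for a shared PCI interrupt); the function name setup_sge_napi() is hypothetical, not taken from this driver.

/* Illustrative sketch only -- setup_sge_napi() is a hypothetical name; the
 * driver's real registration code lives in another file, outside this page. */
static int setup_sge_napi(struct adapter *adapter, struct net_device *dev)
{
	dev->poll = t1_poll;	/* run from the RX softirq with a quota */
	dev->weight = 64;	/* typical per-poll packet budget */

	/* SA_SHIRQ is the 2.6.15 spelling of the later IRQF_SHARED */
	return request_irq(adapter->pdev->irq, t1_interrupt_napi, SA_SHIRQ,
			   adapter->name, adapter);
}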
