sge.c

来自「linux 内核源代码」· C语言 代码 · 共 2,209 行 · 第 1/5 页

C
2,209
字号
/***************************************************************************** *                                                                           * * File: sge.c                                                               * * $Revision: 1.26 $                                                         * * $Date: 2005/06/21 18:29:48 $                                              * * Description:                                                              * *  DMA engine.                                                              * *  part of the Chelsio 10Gb Ethernet Driver.                                * *                                                                           * * This program is free software; you can redistribute it and/or modify      * * it under the terms of the GNU General Public License, version 2, as       * * published by the Free Software Foundation.                                * *                                                                           * * You should have received a copy of the GNU General Public License along   * * with this program; if not, write to the Free Software Foundation, Inc.,   * * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 * *                                                                           * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    * * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      * * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     * *                                                                           * * http://www.chelsio.com                                                    * *                                                                           * * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    * * All rights reserved.                                                      
*
 * Maintainers: maintainers@chelsio.com
 *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>
 *          Tina Yang               <tainay@chelsio.com>
 *          Felix Marti             <felix@chelsio.com>
 *          Scott Bardone           <sbardone@chelsio.com>
 *          Kurt Ottaway            <kottaway@chelsio.com>
 *          Frank DiMambro          <frank@chelsio.com>
 *
 * History:
 *
 ****************************************************************************/

#include "common.h"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf

/*
 * Queue sizing constants.  SGE_CMDQ_N and SGE_FREELQ_N size the cmdQ[] and
 * freelQ[] arrays in struct sge below; the *_E_N / *_SIZE values are entry
 * counts used at queue allocation (allocation code is not in this chunk).
 */
#define SGE_CMDQ_N		2	/* # of TX command queues */
#define SGE_FREELQ_N		2	/* # of RX free-buffer lists */
#define SGE_CMDQ0_E_N		1024
#define SGE_CMDQ1_E_N		128
#define SGE_FREEL_SIZE		4096
#define SGE_JUMBO_FREEL_SIZE	512
#define SGE_FREEL_REFILL_THRESH	16
#define SGE_RESPQ_E_N		1024
#define SGE_INTRTIMER_NRES	1000
#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_TX_DESC_MAX_PLEN	16384
#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer. 
This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* Field accessors for the cmdQ_e len_gen/flags words. */
#define M_CMD_LEN       0x7fffffff
#define V_CMD_LEN(v)    (v)
#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)   ((v) << 31)
#define V_CMD_GEN2(v)   (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP       (1 << 2)
#define V_CMD_EOP(v)    ((v) << 3)

/*
 * Command queue, receive buffer list, and response queue descriptors.
 *
 * These mirror the hardware descriptor layout, hence the two variants
 * selected on bitfield endianness.  Do not reorder fields.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 flags;
	u32 addr_hi;
};

struct freelQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 gen2;
	u32 addr_hi;
};

struct respQ_e {
	u32 Qsleeping		: 4;
	u32 Cmdq1CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq0DmaComplete	: 5;
	u32 FreelistQid		: 2;
	u32 CreditValid		: 1;
	u32 DataValid		: 1;
	u32 Offload		: 1;
	u32 Eop			: 1;
	u32 Sop			: 1;
	u32 GenerationBit	: 1;
	u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 flags;
};

struct freelQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 gen2;
};

struct respQ_e {
	u32 BufferLength;
	u32 GenerationBit	: 1;
	u32 Sop			: 1;
	u32 Eop			: 1;
	u32 Offload		: 1;
	u32 DataValid		: 1;
	u32 CreditValid		: 1;
	u32 FreelistQid		: 2;
	u32 Cmdq0DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq1CreditReturn	: 5;
	u32 Qsleeping		: 4;
} ;
#endif

/*
 * SW Context Command and Freelist Queue Descriptors
 *
 * One per HW descriptor; holds the skb plus the PCI unmap info needed
 * when the buffer is reclaimed.
 */
struct cmdQ_ce {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DECLARE_PCI_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DECLARE_PCI_UNMAP_LEN(dma_len);
};

/*
 * SW command, freelist and response rings
 */
struct cmdQ {
	unsigned long   status;         /* HW DMA fetch status */
	unsigned int    in_use;         /* # of in-use command descriptors */
	unsigned int	size;	        /* # of descriptors */
	unsigned int    processed;      /* total # of descs HW has processed */
unsigned int    cleaned;        /* total # of descs SW has reclaimed */
	unsigned int    stop_thres;     /* SW TX queue suspend threshold */
	u16		pidx;           /* producer index (SW) */
	u16		cidx;           /* consumer index (HW) */
	u8		genbit;         /* current generation (=valid) bit */
	u8              sop;            /* is next entry start of packet? */
	struct cmdQ_e  *entries;        /* HW command descriptor Q */
	struct cmdQ_ce *centries;       /* SW command context descriptor Q */
	dma_addr_t	dma_addr;       /* DMA addr HW command descriptor Q */
	spinlock_t	lock;           /* Lock to protect cmdQ enqueuing */
};

struct freelQ {
	unsigned int	credits;        /* # of available RX buffers */
	unsigned int	size;	        /* free list capacity */
	u16		pidx;           /* producer index (SW) */
	u16		cidx;           /* consumer index (HW) */
	u16		rx_buffer_size; /* Buffer size on this free list */
	u16             dma_offset;     /* DMA offset to align IP headers */
	u16             recycleq_idx;   /* skb recycle q to use */
	u8		genbit;	        /* current generation (=valid) bit */
	struct freelQ_e	*entries;       /* HW freelist descriptor Q */
	struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
	dma_addr_t	dma_addr;       /* DMA addr HW freelist descriptor Q */
};

struct respQ {
	unsigned int	credits;        /* credits to be returned to SGE */
	unsigned int	size;	        /* # of response Q descriptors */
	u16		cidx;	        /* consumer index (SW) */
	u8		genbit;	        /* current generation(=valid) bit */
	struct respQ_e *entries;        /* HW response descriptor Q */
	dma_addr_t	dma_addr;       /* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
};

/* T204 TX SW scheduler */

/* Per T204 TX port */
struct sched_port {
	unsigned int	avail;		/* available bits - quota */
	unsigned int	drain_bits_per_1024ns; /* drain
rate */	unsigned int	speed;		/* drain rate, mbps */	unsigned int	mtu;		/* mtu size */	struct sk_buff_head skbq;	/* pending skbs */};/* Per T204 device */struct sched {	ktime_t         last_updated;   /* last time quotas were computed */	unsigned int	max_avail;	/* max bits to be sent to any port */	unsigned int	port;		/* port index (round robin ports) */	unsigned int	num;		/* num skbs in per port queues */	struct sched_port p[MAX_NPORTS];	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */};static void restart_sched(unsigned long);/* * Main SGE data structure * * Interrupts are handled by a single CPU and it is likely that on a MP system * the application is migrated to another CPU. In that scenario, we try to * seperate the RX(in irq context) and TX state in order to decrease memory * contention. */struct sge {	struct adapter *adapter;	/* adapter backpointer */	struct net_device *netdev;      /* netdevice backpointer */	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */	struct respQ	respQ;		/* response Q */	unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */	unsigned int	rx_pkt_pad;     /* RX padding for L2 packets */	unsigned int	jumbo_fl;       /* jumbo freelist Q index */	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */	unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */	struct timer_list espibug_timer;	unsigned long	espibug_timeout;	struct sk_buff	*espibug_skb[MAX_NPORTS];	u32		sge_control;	/* shadow value of sge control reg */	struct sge_intr_counts stats;	struct sge_port_stats *port_stats[MAX_NPORTS];	struct sched	*tx_sched;	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;};/* * stop tasklet and free all pending skb's */static void tx_sched_stop(struct sge *sge){	struct sched *s = sge->tx_sched;	int i;	tasklet_kill(&s->sched_tsk);	for (i = 0; i < MAX_NPORTS; i++)		__skb_queue_purge(&s->p[s->port].skbq);}/* * 
t1_sched_update_parms() is called when the MTU or link speed changes. It * re-computes scheduler parameters to scope with the change. */unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,				   unsigned int mtu, unsigned int speed){	struct sched *s = sge->tx_sched;	struct sched_port *p = &s->p[port];	unsigned int max_avail_segs;	pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);	if (speed)		p->speed = speed;	if (mtu)		p->mtu = mtu;	if (speed || mtu) {		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);		do_div(drain, (p->mtu + 50) * 1000);		p->drain_bits_per_1024ns = (unsigned int) drain;		if (p->speed < 1000)			p->drain_bits_per_1024ns =				90 * p->drain_bits_per_1024ns / 100;	}	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {		p->drain_bits_per_1024ns -= 16;		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);		max_avail_segs = max(1U, 4096 / (p->mtu - 40));	} else {		s->max_avail = 16384;		max_avail_segs = max(1U, 9000 / (p->mtu - 40));	}	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,		 p->speed, s->max_avail, max_avail_segs,		 p->drain_bits_per_1024ns);	return max_avail_segs * (p->mtu - 40);}/* * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of * data that can be pushed per port. */void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val){	struct sched *s = sge->tx_sched;	unsigned int i;	s->max_avail = val;	for (i = 0; i < MAX_NPORTS; i++)		t1_sched_update_parms(sge, i, 0, 0);}/* * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port * is draining. 
*/
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
					 unsigned int val)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];

	/* convert bits/us to the internal bits-per-1024ns unit */
	p->drain_bits_per_1024ns = val * 1024 / 1000;
	t1_sched_update_parms(sge, port, 0, 0);
}

/*
 * get_clock() implements a ns clock (see ktime_get)
 */
static inline ktime_t get_clock(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ktime(ts);
}

/*
 * tx_sched_init() allocates resources and does basic initialization.
 *
 * Allocates the per-device struct sched, wires up the restart tasklet and
 * seeds every port with mtu=1500, speed=1000.  Returns 0 or -ENOMEM.
 */
static int tx_sched_init(struct sge *sge)
{
	struct sched *s;
	int i;

	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	pr_debug("tx_sched_init\n");

	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
	sge->tx_sched = s;

	for (i = 0; i < MAX_NPORTS; i++) {
		skb_queue_head_init(&s->p[i].skbq);
		t1_sched_update_parms(sge, i, 1500, 1000);
	}

	return 0;
}

/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per port quota (number of bits that can be sent to the any
 * port).
 *
 * Returns 0 (and changes nothing) if less than 15us has elapsed since the
 * last update; otherwise refreshes every port's quota and returns 1.
 */
static inline int sched_update_avail(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	ktime_t now = get_clock();
	unsigned int i;
	long long delta_time_ns;

	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);

	if (delta_time_ns < 15000)
		return 0;

	for (i = 0; i < MAX_NPORTS; i++) {
		struct sched_port *p = &s->p[i];
		unsigned int delta_avail;

		/* >> 13 converts bits-per-1024ns * ns to bits: /1024 for the
		 * rate unit and /8... NOTE(review): factor looks like
		 * 1024*8 = 2^13 -- confirm intended units. */
		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
		/* quota is capped at the device-wide max_avail */
		p->avail = min(p->avail + delta_avail, s->max_avail);
	}

	s->last_updated = now;

	return 1;
}

/*
 * sched_skb() is called from two different places. In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
*/static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,				unsigned int credits){	struct sched *s = sge->tx_sched;	struct sk_buff_head *skbq;	unsigned int i, len, update = 1;	pr_debug("sched_skb %p\n", skb);

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?