⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 he.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			/* filled one SDRAM row; continue at the start of the next */
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		/* pool 0 uses every other 2-word descriptor slot (even indices);
		   the interleaved odd slots belong to pool 1, hence += 4 */
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);		/* tail = last descriptor linked */
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);	/* buffer count for rx pool 0 */
}

/*
 * Initialize the group-0 receive local-buffer free pool 1 (RLBF1).
 *
 * Builds the linked list of free-buffer descriptors in RCM local-buffer
 * memory for the second receive pool.  Mirrors he_init_rx_lbfp0() but
 * starts at lbufd_index 1 (the odd descriptor slots, interleaved with
 * pool 0's even slots), places buffers starting at row r1_startrow, and
 * programs the RLBF1_* head/tail/count registers.
 */
static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	/* each descriptor is two RCM words: buffer address and next index */
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);	/* head of the free list */

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;	/* next odd slot = this descriptor's "next" link */
		/* buffer addresses appear to be programmed in 32-byte units
		   — TODO confirm against the HE manual */
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;	/* skip the interleaved pool-0 slot */
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);		/* tail = last descriptor linked */
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);	/* buffer count for rx pool 1 */
}

/*
 * Initialize the transmit local-buffer free pool (TLBF).
 *
 * Same descriptor-list construction as the receive pools, but tx
 * descriptors are NOT interleaved: they occupy consecutive 2-word slots
 * starting right after the rx descriptors (index r0_numbuffs +
 * r1_numbuffs), and buffers start at row tx_startrow.
 */
static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	/* tx descriptors follow the rx pool 0 and pool 1 descriptors */
	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);	/* head of the free list */

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;	/* consecutive slots — no interleave on tx */
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;	/* next 2-word descriptor slot */
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);	/* tail = last descriptor linked */
}

/*
 * Allocate and initialize the transmit packet descriptor ready queue
 * (TPDRQ).
 *
 * Allocates a DMA-consistent array of CONFIG_TPDRQ_SIZE he_tpdrq
 * entries, zeroes it, points the driver's head/tail at the base, and
 * programs the queue base/tail/size registers.
 *
 * Returns 0 on success, -ENOMEM if the consistent allocation fails.
 */
static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);	/* queue base (bus address) */
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

/*
 * Initialize the cell scheduler (CS) block per section 5.1.7 of the
 * hardware manual.
 *
 * Clears the scheduler timers, loads the first row of the rate grid
 * (timer periods derived from the link rate), then programs the ABR
 * explicit-rate engine and work/output timers with the per-speed magic
 * values from the manual's tables (622 Mb/s vs 155 Mb/s variants).
 */
static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;	/* scheduler clock in Hz */
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}

/*
 * Initialize the CS block's tables in RCM memory.
 *
 * Clears the rate-grid group and rate-controller-group tables, then
 * builds a host-side copy of the scheduler's 16x16 rate grid and uses
 * it to fill the rate-to-group lookup table: for each 10-bit ATM Forum
 * rate code (5-bit exponent, upper 5 bits of mantissa), find the
 * nearest grid entry at or above that rate and pair it with a buffer
 * limit.  Entries are 16 bits each, packed two per 32-bit RCM word.
 *
 * Returns 0 on success, -ENOMEM if the scratch grid cannot be
 * allocated.
 */
static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */
	/* NOTE(review): bounds 0x0..0xfe and 0x100..0x1fe below leave the
	   last word of each 0x100-word region untouched — confirm whether
	   '< 0xff' / '< 0x1ff' should be '<= ' (or '< 0x100' / '< 0x200') */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */
	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	/* row 0: link rate stepping down by delta across 16 columns */
	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	/* each subsequent row halves the previous (quarters for the
	   last row, i == 15) */
	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		/* decode ATM Forum float: rate = 2^exp * (1 + man/512) cells/s */
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		/* linear scan from the slowest grid entry up to the first
		   one that can carry rate_cps */
		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		/* shift the previous 16-bit entry into the high half and OR
		   in the new one; on the first pass the high half holds a
		   stale value, but writes only happen on odd rate_atmf, by
		   which point both halves are valid */
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

/*
 * Set up the receive/transmit queues for one connection group.
 *
 * Allocates and registers, per group: the small (RBPS, if USE_RBPS)
 * and large (RBPL) receive buffer pools — each either carved from a
 * pci_pool or from one consistent page block, with every buffer marked
 * RBP_LOANED and indexed — plus the receive buffer ready queue (RBRQ)
 * and transmit buffer ready queue (TBRQ).
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */

#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	/* status/phys descriptor array handed to the hardware */
	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	/* NOTE(review): kmalloc result is not checked before use below */
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL 
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		/* carve buffer i out of the single consistent block */
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;

	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	/* program the group's small-pool start/tail/bufsize/queue-info
	   registers (32-byte register stride per group) */
	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	/* small pool disabled: zero the registers, minimal queue size */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */

#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	/* NOTE(review): kmalloc result is not checked before use below */
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		/* no RBP_SMALLBUF flag here — this is the large pool */
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	/* ready-queue registers use a 16-byte stride per group */
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		/* delay the interrupt until 7 entries or the time limit */
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		/* interrupt on every entry, no delay */
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -