
📄 pci_dma.c

📁 Source of the Microwindows port to the S3C44B0
💻 C
	PPCDBG(PPCDBG_TCE, "\thwdev = 0x%16.16lx, size = 0x%16.16lx, direction = 0x%16.16lx, dma_handle = 0x%16.16lx\n", hwdev, size, direction, dma_handle);

	if ( direction == PCI_DMA_NONE )
		BUG();

	nPages = PAGE_ALIGN( dma_handle + size ) - ( dma_handle & PAGE_MASK );
	order = get_order( nPages & PAGE_MASK );
	nPages >>= PAGE_SHIFT;

	/* Client asked for way too much space.  This is checked later anyway */
	/* It is easier to debug here for the drivers than in the tce tables. */
	if (order >= NUM_TCE_LEVELS) {
		printk("PCI_DMA: pci_unmap_single size too large: 0x%lx \n", size);
		return;
	}

	tbl = get_tce_table(hwdev);

	if ( tbl )
		tce_free(tbl, dma_handle, order, nPages);
}

/* Figure out how many TCEs are actually going to be required
 * to map this scatterlist.  This code is not optimal.  It
 * takes into account the case where entry n ends in the same
 * page in which entry n+1 starts.  It does not handle the
 * general case of entry n ending in the same page in which
 * entry m starts.
 */
static unsigned long num_tces_sg( struct scatterlist *sg, int nents )
{
	unsigned long nTces, numPages, startPage, endPage, prevEndPage;
	unsigned i;

	prevEndPage = 0;
	nTces = 0;

	for (i=0; i<nents; ++i) {
		/* Compute the starting page number and
		 * the ending page number for this entry
		 */
		startPage = (unsigned long)sg->address >> PAGE_SHIFT;
		endPage = ((unsigned long)sg->address + sg->length - 1) >> PAGE_SHIFT;
		numPages = endPage - startPage + 1;

		/* Simple optimization: if the previous entry ended
		 * in the same page in which this entry starts
		 * then we can reduce the required pages by one.
		 * This matches assumptions in fill_scatterlist_sg and
		 * create_tces_sg
		 */
		if ( startPage == prevEndPage )
			--numPages;

		nTces += numPages;

		prevEndPage = endPage;
		sg++;
	}
	return nTces;
}

/* Fill in the dma data in the scatterlist
 * return the number of dma sg entries created
 */
static unsigned fill_scatterlist_sg( struct scatterlist *sg, int nents,
				 dma_addr_t dma_addr, unsigned long numTces)
{
	struct scatterlist *dma_sg;
	u32 cur_start_dma;
	unsigned long cur_len_dma, cur_end_virt, uaddr;
	unsigned num_dma_ents;

	dma_sg = sg;
	num_dma_ents = 1;

	/* Process the first sg entry */
	cur_start_dma = dma_addr + ((unsigned long)sg->address & (~PAGE_MASK));
	cur_len_dma = sg->length;
	/* cur_end_virt holds the address of the byte immediately after the
	 * end of the current buffer.
	 */
	cur_end_virt = (unsigned long)sg->address + cur_len_dma;
	/* Later code assumes that unused sg->dma_address and sg->dma_length
	 * fields will be zero.  Other archs seem to assume that the user
	 * (device driver) guarantees that...I don't want to depend on that
	 */
	sg->dma_address = sg->dma_length = 0;

	/* Process the rest of the sg entries */
	while (--nents) {
		++sg;
		/* Clear possibly unused fields. Note: sg >= dma_sg so
		 * this can't be clearing a field we've already set
		 */
		sg->dma_address = sg->dma_length = 0;

		/* Check if it is possible to make this next entry
		 * contiguous (in dma space) with the previous entry.
		 */

		/* The entries can be contiguous in dma space if
		 * the previous entry ends immediately before the
		 * start of the current entry (in virtual space)
		 * or if the previous entry ends at a page boundary
		 * and the current entry starts at a page boundary.
		 */
		uaddr = (unsigned long)sg->address;
		if ( ( uaddr != cur_end_virt ) &&
		     ( ( ( uaddr | cur_end_virt ) & (~PAGE_MASK) ) ||
		       ( ( uaddr & PAGE_MASK ) == ( ( cur_end_virt-1 ) & PAGE_MASK ) ) ) ) {
			/* This entry cannot be contiguous in dma space.
			 * Save the previous dma entry and start a new one.
			 */
			dma_sg->dma_address = cur_start_dma;
			dma_sg->dma_length  = cur_len_dma;

			++dma_sg;
			++num_dma_ents;

			cur_start_dma += cur_len_dma-1;
			/* If the previous entry ends and this entry starts
			 * in the same page then they share a tce.  In that
			 * case don't bump cur_start_dma to the next page
			 * in dma space.  This matches assumptions made in
			 * num_tces_sg and create_tces_sg.
			 */
			if ((uaddr & PAGE_MASK) == ((cur_end_virt-1) & PAGE_MASK))
				cur_start_dma &= PAGE_MASK;
			else
				cur_start_dma = PAGE_ALIGN(cur_start_dma+1);
			cur_start_dma += ( uaddr & (~PAGE_MASK) );
			cur_len_dma = 0;
		}
		/* Accumulate the length of this entry for the next
		 * dma entry
		 */
		cur_len_dma += sg->length;
		cur_end_virt = uaddr + sg->length;
	}
	/* Fill in the last dma entry */
	dma_sg->dma_address = cur_start_dma;
	dma_sg->dma_length  = cur_len_dma;

	if ((((cur_start_dma + cur_len_dma - 1) >> PAGE_SHIFT) - (dma_addr >> PAGE_SHIFT) + 1) != numTces) {
		PPCDBG(PPCDBG_TCE, "fill_scatterlist_sg: numTces %ld, used tces %d\n",
		       numTces,
		       (unsigned)(((cur_start_dma + cur_len_dma - 1) >> PAGE_SHIFT) - (dma_addr >> PAGE_SHIFT) + 1));
	}

	return num_dma_ents;
}

/* Call the hypervisor to create the TCE entries.
 * return the number of TCEs created
 */
static dma_addr_t create_tces_sg( struct TceTable *tbl, struct scatterlist *sg,
		   int nents, unsigned numTces, int direction )
{
	unsigned order, i, j;
	unsigned long startPage, endPage, prevEndPage, numPages, uaddr;
	long tcenum, starttcenum;
	dma_addr_t dmaAddr;

	dmaAddr = NO_TCE;

	order = get_order( numTces << PAGE_SHIFT );
	/* Client asked for way too much space.  This is checked later anyway */
	/* It is easier to debug here for the drivers than in the tce tables. */
	if (order >= NUM_TCE_LEVELS) {
		printk("PCI_DMA: create_tces_sg size too large: 0x%x \n", (numTces << PAGE_SHIFT));
		return NO_TCE;
	}

	/* allocate a block of tces */
	tcenum = alloc_tce_range( tbl, order );
	if ( tcenum != -1 ) {
		tcenum += tbl->startOffset;
		starttcenum = tcenum;
		dmaAddr = tcenum << PAGE_SHIFT;
		prevEndPage = 0;
		for (j=0; j<nents; ++j) {
			startPage = (unsigned long)sg->address >> PAGE_SHIFT;
			endPage = ((unsigned long)sg->address + sg->length - 1) >> PAGE_SHIFT;
			numPages = endPage - startPage + 1;

			uaddr = (unsigned long)sg->address;

			/* If the previous entry ended in the same page that
			 * the current page starts then they share that
			 * tce and we reduce the number of tces we need
			 * by one.  This matches assumptions made in
			 * num_tces_sg and fill_scatterlist_sg
			 */
			if ( startPage == prevEndPage ) {
				--numPages;
				uaddr += PAGE_SIZE;
			}

			for (i=0; i<numPages; ++i) {
				ppc_md.tce_build(tbl, tcenum, uaddr, direction);
				++tcenum;
				uaddr += PAGE_SIZE;
			}

			prevEndPage = endPage;
			sg++;
		}
		/* Make sure the update is visible to hardware.
		   sync required to synchronize the update to
		   the TCE table with the MMIO that will send
		   the bus address to the IOA */
		__asm__ __volatile__ ("sync" : : : "memory");

		if ((tcenum - starttcenum) != numTces)
			PPCDBG(PPCDBG_TCE, "create_tces_sg: numTces %d, tces used %d\n",
			       numTces, (unsigned)(tcenum - starttcenum));
	}

	return dmaAddr;
}

int pci_map_sg( struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction )
{
	struct TceTable * tbl;
	unsigned numTces;
	int num_dma = 0;	/* initialized so a missing TCE table maps zero entries */
	dma_addr_t dma_handle;

	PPCDBG(PPCDBG_TCE, "pci_map_sg:\n");
	PPCDBG(PPCDBG_TCE, "\thwdev = 0x%16.16lx, sg = 0x%16.16lx, direction = 0x%16.16lx, nents = 0x%16.16lx\n", hwdev, sg, direction, nents);

	/* Fast path for a single entry scatterlist */
	if ( nents == 1 ) {
		sg->dma_address = pci_map_single( hwdev, sg->address,
					sg->length, direction );
		sg->dma_length = sg->length;
		return 1;
	}

	if ( direction == PCI_DMA_NONE )
		BUG();

	tbl = get_tce_table(hwdev);

	if ( tbl ) {
		/* Compute the number of tces required */
		numTces = num_tces_sg( sg, nents );
		/* Create the tces and get the dma address */
		dma_handle = create_tces_sg( tbl, sg, nents, numTces, direction );

		/* Fill in the dma scatterlist */
		num_dma = fill_scatterlist_sg( sg, nents, dma_handle, numTces );
	}

	return num_dma;
}

void pci_unmap_sg( struct pci_dev *hwdev, struct scatterlist *sg, int nelms, int direction )
{
	struct TceTable * tbl;
	unsigned order, numTces, i;
	dma_addr_t dma_end_page, dma_start_page;

	PPCDBG(PPCDBG_TCE, "pci_unmap_sg:\n");
	PPCDBG(PPCDBG_TCE, "\thwdev = 0x%16.16lx, sg = 0x%16.16lx, direction = 0x%16.16lx, nelms = 0x%16.16lx\n", hwdev, sg, direction, nelms);

	if ( direction == PCI_DMA_NONE )
		BUG();

	dma_start_page = sg->dma_address & PAGE_MASK;
	for ( i=nelms; i>0; --i ) {
		unsigned k = i - 1;
		if ( sg[k].dma_length ) {
			dma_end_page = ( sg[k].dma_address +
					 sg[k].dma_length - 1 ) & PAGE_MASK;
			break;
		}
	}

	numTces = ((dma_end_page - dma_start_page ) >> PAGE_SHIFT) + 1;
	order = get_order( numTces << PAGE_SHIFT );

	/* Client asked for way too much space.  This is checked later anyway */
	/* It is easier to debug here for the drivers than in the tce tables. */
	if (order >= NUM_TCE_LEVELS) {
		printk("PCI_DMA: pci_unmap_sg size too large: 0x%x \n", (numTces << PAGE_SHIFT));
		return;
	}

	tbl = get_tce_table(hwdev);

	if ( tbl )
		tce_free( tbl, dma_start_page, order, numTces );
}

/*
 * phb_tce_table_init
 *
 * Function: Display TCE config registers.  Could be easily changed
 *           to initialize the hardware to use TCEs.
 */
unsigned long phb_tce_table_init(struct pci_controller *phb)
{
	unsigned int r, cfg_rw, i;
	unsigned long r64;
	phandle node;

	PPCDBG(PPCDBG_TCE, "phb_tce_table_init: start.\n");

	node = ((struct device_node *)(phb->arch_data))->node;

	PPCDBG(PPCDBG_TCEINIT, "\tphb            = 0x%lx\n", phb);
	PPCDBG(PPCDBG_TCEINIT, "\tphb->type      = 0x%lx\n", phb->type);
	PPCDBG(PPCDBG_TCEINIT, "\tphb->phb_regs  = 0x%lx\n", phb->phb_regs);
	PPCDBG(PPCDBG_TCEINIT, "\tphb->chip_regs = 0x%lx\n", phb->chip_regs);
	PPCDBG(PPCDBG_TCEINIT, "\tphb: node      = 0x%lx\n", node);
	PPCDBG(PPCDBG_TCEINIT, "\tphb->arch_data = 0x%lx\n", phb->arch_data);

	i = 0;
	while(of_tce_table[i].node) {
		if(of_tce_table[i].node == node) {
			if(phb->type == phb_type_python) {
				r = *(((unsigned int *)phb->phb_regs) + (0xf10>>2));
				PPCDBG(PPCDBG_TCEINIT, "\tTAR(low)    = 0x%x\n", r);
				r = *(((unsigned int *)phb->phb_regs) + (0xf00>>2));
				PPCDBG(PPCDBG_TCEINIT, "\tTAR(high)   = 0x%x\n", r);
				r = *(((unsigned int *)phb->phb_regs) + (0xfd0>>2));
				PPCDBG(PPCDBG_TCEINIT, "\tPHB cfg(rw) = 0x%x\n", r);
				break;
			} else if(phb->type == phb_type_speedwagon) {
				r64 = *(((unsigned long *)phb->chip_regs) +
					(0x800>>3));
				PPCDBG(PPCDBG_TCEINIT, "\tNCFG    = 0x%lx\n", r64);
				r64 = *(((unsigned long *)phb->chip_regs) +
					(0x580>>3));
				PPCDBG(PPCDBG_TCEINIT, "\tTAR0    = 0x%lx\n", r64);
				r64 = *(((unsigned long *)phb->chip_regs) +
					(0x588>>3));
				PPCDBG(PPCDBG_TCEINIT, "\tTAR1    = 0x%lx\n", r64);
				r64 = *(((unsigned long *)phb->chip_regs) +
					(0x590>>3));
				PPCDBG(PPCDBG_TCEINIT, "\tTAR2    = 0x%lx\n", r64);
				r64 = *(((unsigned long *)phb->chip_regs) +
					(0x598>>3));
				PPCDBG(PPCDBG_TCEINIT, "\tTAR3    = 0x%lx\n", r64);
				cfg_rw = *(((unsigned int *)phb->chip_regs) +
					   ((0x160 +
					     (((phb->local_number)+8)<<12))>>2));
				PPCDBG(PPCDBG_TCEINIT, "\tcfg_rw = 0x%x\n", cfg_rw);
			}
		}
		i++;
	}

	PPCDBG(PPCDBG_TCEINIT, "phb_tce_table_init: done\n");

	return(0);
}

/* These are called very early. */
void tce_init_pSeries(void)
{
	ppc_md.tce_build = tce_build_pSeries;
	ppc_md.tce_free_one = tce_free_one_pSeries;
}

void tce_init_iSeries(void)
{
	ppc_md.tce_build = tce_build_iSeries;
	ppc_md.tce_free_one = tce_free_one_iSeries;
}
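The listing above implements the 2.4-era pci_map_*/pci_unmap_* entry points on top of TCE tables. For orientation only, here is a minimal, hedged sketch of how a driver would typically call this API; the device pointer, buffers, and function names (my_pdev, my_buf, my_send_buffer, my_send_sg) are hypothetical, not part of this file.

/* Hypothetical driver-side usage sketch for the API implemented above. */
#include <linux/pci.h>
#include <asm/scatterlist.h>

static int my_send_buffer(struct pci_dev *my_pdev, void *my_buf, size_t len)
{
	/* Single buffer: pci_map_single() builds the TCEs and returns the
	 * bus address the device should use for its DMA.
	 */
	dma_addr_t bus = pci_map_single(my_pdev, my_buf, len, PCI_DMA_TODEVICE);

	/* ... program the device with "bus", start the DMA, wait for it ... */

	/* Tear the mapping down; this is the pci_unmap_single() path at the
	 * top of the listing, which frees the TCE range again.
	 */
	pci_unmap_single(my_pdev, bus, len, PCI_DMA_TODEVICE);
	return 0;
}

static int my_send_sg(struct pci_dev *my_pdev, struct scatterlist *my_sg, int nents)
{
	int i, n;

	/* pci_map_sg() may coalesce entries, so the driver must iterate over
	 * its return value rather than the original nents.
	 */
	n = pci_map_sg(my_pdev, my_sg, nents, PCI_DMA_TODEVICE);

	for (i = 0; i < n; ++i) {
		/* sg_dma_address()/sg_dma_len() read the dma_address and
		 * dma_length fields that fill_scatterlist_sg() filled in.
		 */
		dma_addr_t bus = sg_dma_address(&my_sg[i]);
		unsigned int l = sg_dma_len(&my_sg[i]);
		/* ... hand (bus, l) to the device ... */
		(void)bus; (void)l;
	}

	/* Unmap with the original entry count, per the PCI DMA API. */
	pci_unmap_sg(my_pdev, my_sg, nents, PCI_DMA_TODEVICE);
	return 0;
}

Note that iterating over the return value of pci_map_sg matters here precisely because fill_scatterlist_sg merges virtually contiguous entries into fewer DMA segments.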
