
pci-gart.c

From the Linux 2.6.17.4 source tree (the x86-64 GART IOMMU driver)

Language: C
/* Tail of dma_map_cont(); the beginning of that function is in the
   part of the file not shown in this excerpt. */
    return __dma_map_cont(sg, start, stopat, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
    int i;
    int out;
    int start;
    unsigned long pages = 0;
    int need = 0, nextneed;

    BUG_ON(dir == DMA_NONE);
    if (nents == 0)
        return 0;

    if (!dev)
        dev = &fallback_dev;

    out = 0;
    start = 0;
    for (i = 0; i < nents; i++) {
        struct scatterlist *s = &sg[i];
        dma_addr_t addr = page_to_phys(s->page) + s->offset;
        s->dma_address = addr;
        BUG_ON(s->length == 0);

        nextneed = need_iommu(dev, addr, s->length);

        /* Handle the previous, not yet processed entries */
        if (i > start) {
            struct scatterlist *ps = &sg[i-1];
            /* Can only merge when the last chunk ends on a page
               boundary and the new one doesn't have an offset. */
            if (!iommu_merge || !nextneed || !need || s->offset ||
                (ps->offset + ps->length) % PAGE_SIZE) {
                if (dma_map_cont(sg, start, i, sg+out, pages,
                                 need) < 0)
                    goto error;
                out++;
                pages = 0;
                start = i;
            }
        }

        need = nextneed;
        pages += to_pages(s->offset, s->length);
    }
    if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
        goto error;
    out++;
    flush_gart(dev);
    if (out < nents)
        sg[out].dma_length = 0;
    return out;

error:
    flush_gart(NULL);
    gart_unmap_sg(dev, sg, nents, dir);
    /* When it was forced or merged try again in a dumb way */
    if (force_iommu || iommu_merge) {
        out = dma_map_sg_nonforce(dev, sg, nents, dir);
        if (out > 0)
            return out;
    }
    if (panic_on_overflow)
        panic("dma_map_sg: overflow on %lu pages\n", pages);
    iommu_full(dev, pages << PAGE_SHIFT, dir);
    for (i = 0; i < nents; i++)
        sg[i].dma_address = bad_dma_address;
    return 0;
}

/*
 * Free a DMA mapping.
 */
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                       size_t size, int direction)
{
    unsigned long iommu_page;
    int npages;
    int i;

    if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
        dma_addr >= iommu_bus_base + iommu_size)
        return;
    iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
    npages = to_pages(dma_addr, size);
    for (i = 0; i < npages; i++) {
        iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
        CLEAR_LEAK(iommu_page + i);
    }
    free_iommu(iommu_page, npages);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
    unsigned long a;

    if (!iommu_size) {
        iommu_size = aper_size;
        if (!no_agp)
            iommu_size /= 2;
    }

    a = aper + iommu_size;
    iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

    if (iommu_size < 64*1024*1024)
        printk(KERN_WARNING
               "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
               iommu_size>>20);

    return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
    unsigned aper_size = 0, aper_base_32;
    u64 aper_base;
    unsigned aper_order;

    pci_read_config_dword(dev, 0x94, &aper_base_32);
    pci_read_config_dword(dev, 0x90, &aper_order);
    aper_order = (aper_order >> 1) & 7;

    aper_base = aper_base_32 & 0x7fff;
    aper_base <<= 25;

    aper_size = (32 * 1024 * 1024) << aper_order;
    if (aper_base + aper_size >= 0xffffffff || !aper_size)
        aper_base = 0;

    *size = aper_size;
    return aper_base;
}
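read_aperture() above decodes two northbridge config registers: bits 3:1 of offset 0x90 give the aperture order (size = 32MB << order), and the low bits of offset 0x94 give the physical base in 32MB units (hence the << 25). A standalone sanity check of that arithmetic, with the register contents invented for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical raw config-space values, not read from hardware. */
    uint32_t reg_90 = 0x4;  /* bits 3:1 == 2, i.e. aperture order 2 */
    uint32_t reg_94 = 0x40; /* base field, in units of 32MB (1 << 25) */

    unsigned order = (reg_90 >> 1) & 7;
    uint64_t base  = (uint64_t)(reg_94 & 0x7fff) << 25;
    uint32_t size  = (32 * 1024 * 1024) << order;

    /* Prints: aperture base 0x80000000 size 128MB */
    printf("aperture base %#llx size %uMB\n",
           (unsigned long long)base, size >> 20);
    return 0;
}

An order of 2 thus means a 128MB aperture, and a base field of 0x40 places it at the 2GB mark, matching what read_aperture() would return for those register values.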
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
    struct pci_dev *dev;
    void *gatt;
    unsigned aper_base, new_aper_base;
    unsigned aper_size, gatt_size, new_aper_size;

    printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
    aper_size = aper_base = info->aper_size = 0;
    for_all_nb(dev) {
        new_aper_base = read_aperture(dev, &new_aper_size);
        if (!new_aper_base)
            goto nommu;

        if (!aper_base) {
            aper_size = new_aper_size;
            aper_base = new_aper_base;
        }
        if (aper_size != new_aper_size || aper_base != new_aper_base)
            goto nommu;
    }
    if (!aper_base)
        goto nommu;

    info->aper_base = aper_base;
    info->aper_size = aper_size>>20;

    gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
    gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
    if (!gatt)
        panic("Cannot allocate GATT table");
    memset(gatt, 0, gatt_size);
    agp_gatt_table = gatt;

    for_all_nb(dev) {
        u32 ctl;
        u32 gatt_reg;

        gatt_reg = __pa(gatt) >> 12;
        gatt_reg <<= 4;
        pci_write_config_dword(dev, 0x98, gatt_reg);
        pci_read_config_dword(dev, 0x90, &ctl);

        ctl |= 1;
        ctl &= ~((1<<4) | (1<<5));

        pci_write_config_dword(dev, 0x90, ctl);
    }
    flush_gart(NULL);

    printk("PCI-DMA: aperture base @ %x size %u KB\n", aper_base, aper_size>>10);
    return 0;

nommu:
    /* Should not happen anymore */
    printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
           KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
    return -1;
}

extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
    .mapping_error = NULL,
    .map_single = gart_map_single,
    .map_simple = gart_map_simple,
    .unmap_single = gart_unmap_single,
    .sync_single_for_cpu = NULL,
    .sync_single_for_device = NULL,
    .sync_single_range_for_cpu = NULL,
    .sync_single_range_for_device = NULL,
    .sync_sg_for_cpu = NULL,
    .sync_sg_for_device = NULL,
    .map_sg = gart_map_sg,
    .unmap_sg = gart_unmap_sg,
};

static int __init pci_iommu_init(void)
{
    struct agp_kern_info info;
    unsigned long aper_size;
    unsigned long iommu_start;
    struct pci_dev *dev;
    unsigned long scratch;
    long i;

#ifndef CONFIG_AGP_AMD64
    no_agp = 1;
#else
    /* Makefile puts PCI initialization via subsys_initcall first. */
    /* Add other K8 AGP bridge drivers here */
    no_agp = no_agp ||
        (agp_amd64_init() < 0) ||
        (agp_copy_info(agp_bridge, &info) < 0);
#endif

    if (swiotlb)
        return -1;

    if (no_iommu ||
        (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
        !iommu_aperture ||
        (no_agp && init_k8_gatt(&info) < 0)) {
        printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
        if (end_pfn > MAX_DMA32_PFN) {
            printk(KERN_ERR "WARNING more than 4GB of memory "
                            "but IOMMU not available.\n"
                   KERN_ERR "WARNING 32bit PCI may malfunction.\n");
        }
        return -1;
    }

    i = 0;
    for_all_nb(dev)
        i++;
    if (i > MAX_NB) {
        printk(KERN_ERR "PCI-GART: Too many northbridges (%ld). Disabled\n", i);
        return -1;
    }

    printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
    aper_size = info.aper_size * 1024 * 1024;
    iommu_size = check_iommu_size(info.aper_base, aper_size);
    iommu_pages = iommu_size >> PAGE_SHIFT;

    iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
                                                 get_order(iommu_pages/8));
    if (!iommu_gart_bitmap)
        panic("Cannot allocate iommu bitmap\n");
    memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
    if (leak_trace) {
        iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
                          get_order(iommu_pages*sizeof(void *)));
        if (iommu_leak_tab)
            memset(iommu_leak_tab, 0, iommu_pages * 8);
        else
            printk("PCI-DMA: Cannot allocate leak trace area\n");
    }
#endif
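    /*
     * A note on the sizing above: the allocation bitmap tracks the
     * aperture with one bit per IOMMU page, hence the iommu_pages/8
     * bytes allocated for it; a 64MB IOMMU area with 4KB pages is
     * 16384 pages, i.e. a 2KB bitmap. The optional leak-trace table
     * is much larger because it keeps one pointer (8 bytes on x86-64)
     * per IOMMU page.
     */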
    /*
     * Out of IOMMU space handling.
     * Reserve some invalid pages at the beginning of the GART.
     */
    set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

    agp_memory_reserved = iommu_size;
    printk(KERN_INFO
           "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
           iommu_size>>20);

    iommu_start = aper_size - iommu_size;
    iommu_bus_base = info.aper_base + iommu_start;
    bad_dma_address = iommu_bus_base;
    iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

    /*
     * Unmap the IOMMU part of the GART. The alias of the page is
     * always mapped with cache enabled and there is no full cache
     * coherency across the GART remapping. The unmapping avoids
     * automatic prefetches from the CPU allocating cache lines in
     * there. All CPU accesses are done via the direct mapping to
     * the backing memory. The GART address is only used by PCI
     * devices.
     */
    clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

    /*
     * Try to work around a bug (thanks to BenH):
     * Set unmapped entries to a scratch page instead of 0.
     * Any prefetches that hit unmapped entries won't get a bus
     * abort then.
     */
    scratch = get_zeroed_page(GFP_KERNEL);
    if (!scratch)
        panic("Cannot allocate iommu scratch page");
    gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
    for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
        iommu_gatt_base[i] = gart_unmapped_entry;

    for_all_nb(dev) {
        u32 flag;
        int cpu = PCI_SLOT(dev->devfn) - 24;
        if (cpu >= MAX_NB)
            continue;
        northbridges[cpu] = dev;
        pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
        northbridge_flush_word[cpu] = flag;
    }

    flush_gart(NULL);
    dma_ops = &gart_dma_ops;

    return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

void gart_parse_options(char *p)
{
    int arg;

#ifdef CONFIG_IOMMU_LEAK
    if (!strncmp(p, "leak", 4)) {
        leak_trace = 1;
        p += 4;
        if (*p == '=')
            ++p;
        if (isdigit(*p) && get_option(&p, &arg))
            iommu_leak_pages = arg;
    }
#endif
    if (isdigit(*p) && get_option(&p, &arg))
        iommu_size = arg;
    if (!strncmp(p, "fullflush", 8))
        iommu_fullflush = 1;
    if (!strncmp(p, "nofullflush", 11))
        iommu_fullflush = 0;
    if (!strncmp(p, "noagp", 5))
        no_agp = 1;
    if (!strncmp(p, "noaperture", 10))
        fix_aperture = 0;
    /* duplicated from pci-dma.c */
    if (!strncmp(p, "force", 5))
        iommu_aperture_allowed = 1;
    if (!strncmp(p, "allowed", 7))
        iommu_aperture_allowed = 1;
    if (!strncmp(p, "memaper", 7)) {
        fallback_aper_force = 1;
        p += 7;
        if (*p == '=') {
            ++p;
            if (get_option(&p, &arg))
                fallback_aper_order = arg;
        }
    }
}
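gart_parse_options() matches option prefixes rather than whole words and expects to be called once per comma-separated token of the iommu= boot parameter; the splitting is done elsewhere (the generic handler that the "duplicated from pci-dma.c" comment refers to). A minimal userspace sketch of that token walk, assuming the handler uses strcspn-style splitting; the stub function and its printouts are hypothetical stand-ins for the real parser and the kernel globals it sets:

#include <stdio.h>
#include <string.h>

/* Hypothetical stub for gart_parse_options(); the real function sets
 * kernel globals such as iommu_fullflush and no_agp instead. */
static void gart_parse_options_stub(const char *p)
{
    if (!strncmp(p, "fullflush", 8))   /* same prefix length as the source */
        printf("  -> iommu_fullflush = 1\n");
    if (!strncmp(p, "noagp", 5))
        printf("  -> no_agp = 1\n");
}

int main(void)
{
    /* As if the machine had booted with iommu=fullflush,noagp,memaper=3 */
    const char *p = "fullflush,noagp,memaper=3";

    while (*p) {
        printf("token: %.*s\n", (int)strcspn(p, ","), p);
        gart_parse_options_stub(p); /* the kernel calls the real parser here */
        p += strcspn(p, ",");
        if (*p == ',')
            ++p;
    }
    return 0;
}

Note that each token is handed over without being NUL-terminated first, which is why the parser matches prefixes with strncmp() and relies on get_option() to stop on its own at the end of a digit run.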
