
📄 pci_iommu.c

📁 Linux 2.4.20 kernel source; can be patched with RTLinux 3.2 to build a real-time Linux system (kernel compilation material)
💻 C
📖 Page 1 of 2
/*
 *      linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)          printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)         printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0
#define DEBUG_FORCEDAC 0

/* Most Alphas support 32-bit ISA DMA.  Exceptions are XL, Ruffian and
   Nautilus (see asm/dma.h for details).  */
#define ISA_DMA_MASK    (MAX_DMA_ADDRESS - IDENT_ADDR - 1 < 0xffffffff ? \
                         MAX_DMA_ADDRESS - IDENT_ADDR - 1 : 0xffffffff)

static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
        return (paddr >> (PAGE_SHIFT-1)) | 1;
}

static inline long
calc_npages(long bytes)
{
        return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
        unsigned long mem = max_low_pfn << PAGE_SHIFT;
        if (mem < max)
                max = 1UL << ceil_log2(mem);
        return max;
}

struct pci_iommu_arena *
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
                unsigned long window_size, unsigned long align)
{
        unsigned long mem_size;
        struct pci_iommu_arena *arena;

        mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

        /* Note that the TLB lookup logic uses bitwise concatenation,
           not addition, so the required arena alignment is based on
           the size of the window.  Retain the align parameter so that
           particular systems can over-align the arena.  */
        if (align < mem_size)
                align = mem_size;

        arena = alloc_bootmem(sizeof(*arena));
        arena->ptes = __alloc_bootmem(mem_size, align, 0);

        spin_lock_init(&arena->lock);
        arena->hose = hose;
        arena->dma_base = base;
        arena->size = window_size;
        arena->next_entry = 0;

        /* Align allocations to a multiple of a page size.  Not needed
           unless there are chip bugs.  */
        arena->align_entry = 1;

        return arena;
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
{
        unsigned long *ptes;
        long i, p, nent;

        /* Search forward for the first mask-aligned sequence of N free ptes */
        ptes = arena->ptes;
        nent = arena->size >> PAGE_SHIFT;
        p = (arena->next_entry + mask) & ~mask;
        i = 0;
        while (i < n && p+i < nent) {
                if (ptes[p+i])
                        p = (p + i + 1 + mask) & ~mask, i = 0;
                else
                        i = i + 1;
        }

        if (i < n) {
                /* Reached the end.  Flush the TLB and restart the
                   search from the beginning.  */
                alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

                p = 0, i = 0;
                while (i < n && p+i < nent) {
                        if (ptes[p+i])
                                p = (p + i + 1 + mask) & ~mask, i = 0;
                        else
                                i = i + 1;
                }

                if (i < n)
                        return -1;
        }

        /* Success.  It's the responsibility of the caller to mark them
           in use before releasing the lock */
        return p;
}

long
iommu_arena_alloc(struct pci_iommu_arena *arena, long n)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, p, mask;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes */
        ptes = arena->ptes;
        mask = arena->align_entry - 1;
        p = iommu_arena_find_pages(arena, n, mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
        }

        /* Success.  Mark them all in use, i.e. not zero and invalid
           for the iommu tlb that could load them from under us.
           The chip specific bits will fill this in with something
           kosher when we return.  */
        for (i = 0; i < n; ++i)
                ptes[p+i] = IOMMU_INVALID_PTE;

        arena->next_entry = p + n;
        spin_unlock_irqrestore(&arena->lock, flags);

        return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
        unsigned long *p;
        long i;

        p = arena->ptes + ofs;
        for (i = 0; i < n; ++i)
                p[i] = 0;
}

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
                 int dac_allowed)
{
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        struct pci_iommu_arena *arena;
        long npages, dma_ofs, i;
        unsigned long paddr;
        dma_addr_t ret;

        paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
        /* First check to see if we can use the direct map window.  */
        if (paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                ret = paddr + __direct_map_base;

                DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
                      cpu_addr, size, ret, __builtin_return_address(0));

                return ret;
        }
#endif

        /* Next, use DAC if selected earlier.  */
        if (dac_allowed) {
                ret = paddr + alpha_mv.pci_dac_offset;

                DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
                      cpu_addr, size, ret, __builtin_return_address(0));

                return ret;
        }

        /* If the machine doesn't define a pci_tbi routine, we have to
           assume it doesn't support sg mapping.  */
        if (! alpha_mv.mv_pci_tbi) {
                printk(KERN_WARNING "pci_map_single failed: no hw sg\n");
                return 0;
        }

        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        npages = calc_npages((paddr & ~PAGE_MASK) + size);
        dma_ofs = iommu_arena_alloc(arena, npages);
        if (dma_ofs < 0) {
                printk(KERN_WARNING "pci_map_single failed: "
                       "could not allocate dma page tables\n");
                return 0;
        }

        paddr &= PAGE_MASK;
        for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
                arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

        ret = arena->dma_base + dma_ofs * PAGE_SIZE;
        ret += (unsigned long)cpu_addr & ~PAGE_MASK;

        DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
              cpu_addr, size, npages, ret, __builtin_return_address(0));

        return ret;
}

dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
        int dac_allowed;

        if (dir == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}

dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
             size_t size, int dir)
{
        int dac_allowed;

        if (dir == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, (char *)page_address(page) + offset,
                                size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
                 int direction)
{
        unsigned long flags;
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        struct pci_iommu_arena *arena;
        long dma_ofs, npages;

        if (direction == PCI_DMA_NONE)
                BUG();

        if (dma_addr >= __direct_map_base
            && dma_addr < __direct_map_base + __direct_map_size) {
                /* Nothing to do.  */

                DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
                      dma_addr, size, __builtin_return_address(0));

                return;
        }

        if (dma_addr > 0xffffffff) {
                DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
                      dma_addr, size, __builtin_return_address(0));
                return;
        }

        arena = hose->sg_pci;
        if (!arena || dma_addr < arena->dma_base)
                arena = hose->sg_isa;

        dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
        if (dma_ofs * PAGE_SIZE >= arena->size) {
                printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
                       " base %lx size %x\n", dma_addr, arena->dma_base,
                       arena->size);
                return;
                BUG();
        }

        npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

        spin_lock_irqsave(&arena->lock, flags);

        iommu_arena_free(arena, dma_ofs, npages);

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter.  */
        if (dma_ofs >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

        spin_unlock_irqrestore(&arena->lock, flags);

        DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
              dma_addr, size, npages, __builtin_return_address(0));
}

void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
               size_t size, int direction)
{
        pci_unmap_single(pdev, dma_addr, size, direction);
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
        void *cpu_addr;
        long order = get_order(size);

        cpu_addr = (void *)__get_free_pages(GFP_ATOMIC, order);
        if (! cpu_addr) {
                printk(KERN_INFO "pci_alloc_consistent: "
                       "get_free_pages failed from %p\n",
                        __builtin_return_address(0));
                /* ??? Really atomic allocation?  Otherwise we could play
                   with vmalloc and sg if we can't find contiguous memory.  */
                return NULL;
        }
        memset(cpu_addr, 0, size);

        *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
        if (*dma_addrp == 0) {
                free_pages((unsigned long)cpu_addr, order);
                return NULL;
        }

        DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
              size, cpu_addr, *dma_addrp, __builtin_return_address(0));

        return cpu_addr;
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
                    dma_addr_t dma_addr)
{
        pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
        free_pages((unsigned long)cpu_addr, get_order(size));

        DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
              dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
        0   : Followers all physically adjacent.
        1   : Followers all virtually adjacent.
        -1  : Not leader, physically adjacent to previous.
        -2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG)                         \
        ((SG)->address                                  \
         ? (SG)->address                                \
         : page_address((SG)->page) + (SG)->offset)

#define SG_ENT_PHYS_ADDRESS(SG) \
        __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
{
        unsigned long next_paddr;
        struct scatterlist *leader;
        long leader_flag, leader_length;

        leader = sg;
        leader_flag = 0;
        leader_length = leader->length;
        next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

        for (++sg; sg < end; ++sg) {
                unsigned long addr, len;
                addr = SG_ENT_PHYS_ADDRESS(sg);
                len = sg->length;

                if (next_paddr == addr) {
                        sg->dma_address = -1;
                        leader_length += len;
                } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
                        sg->dma_address = -2;
                        leader_flag = 1;
                        leader_length += len;
                } else {
                        leader->dma_address = leader_flag;
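The exported entry points above (pci_map_single, pci_unmap_single, pci_alloc_consistent, pci_free_consistent) are the Linux 2.4 PCI DMA mapping API for Alpha; the listing breaks off inside sg_classify and continues on page 2. As a rough illustration of how a driver consumes this API, here is a minimal, hypothetical sketch of streaming DMA for a receive buffer. The function names, buffer size, and the register-programming step are invented for the example; only the pci_* calls, the PCI_DMA_* constants, and the return-0-on-failure convention come from the code above.

/* Hypothetical driver-side sketch; not part of pci_iommu.c. */
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>

#define MY_RX_BUF_SIZE 4096             /* illustrative buffer size */

static void *rx_buf;                    /* CPU (kernel virtual) address */
static dma_addr_t rx_dma;               /* bus address handed to the device */

static int my_card_setup_rx(struct pci_dev *pdev)
{
        rx_buf = kmalloc(MY_RX_BUF_SIZE, GFP_KERNEL);
        if (!rx_buf)
                return -ENOMEM;

        /* On Alpha this ends up in pci_map_single_1 above: the direct-map
           window, DAC addressing, or a scatter-gather arena entry.  */
        rx_dma = pci_map_single(pdev, rx_buf, MY_RX_BUF_SIZE,
                                PCI_DMA_FROMDEVICE);
        if (rx_dma == 0) {              /* this implementation returns 0 on failure */
                kfree(rx_buf);
                return -ENOMEM;
        }

        /* ... program rx_dma into the (hypothetical) device's DMA address
           register here; the device owns the buffer until it is unmapped ... */
        return 0;
}

static void my_card_teardown_rx(struct pci_dev *pdev)
{
        /* After the unmap, CPU reads of rx_buf see what the device wrote. */
        pci_unmap_single(pdev, rx_dma, MY_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
        kfree(rx_buf);
}

For long-lived structures shared coherently with the device (descriptor rings and the like), a driver would instead use pci_alloc_consistent/pci_free_consistent, which allocate the buffer and establish the mapping in one step, as implemented in the listing above.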
