/* pci-dma.c */
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-mapping.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>   /* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>

/* Debug-only assertion: print location and panic when the condition fails.
** Compiles away entirely unless DEBUG_PCI is defined. */
#ifdef DEBUG_PCI
#undef ASSERT
#define ASSERT(expr) \
	if(!(expr)) { \
		printk("\n%s:%d: Assertion " #expr " failed!\n", \
			__FILE__, __LINE__); \
		panic(#expr); \
	}
#else
#define ASSERT(expr)
#endif

/* /proc reporting hooks for the pcxl DMA resource map. */
static struct proc_dir_entry * proc_gsc_root = NULL;
static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length);
static unsigned long pcxl_used_bytes = 0;
static unsigned long pcxl_used_pages = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */

/* Resource bitmap tracking which pages of the pcxl DMA area are in use.
** pcxl_res_hint is a rolling start offset (in bytes) for the free search.
** NOTE(review): pcxl_res_lock appears to be initialized elsewhere (init
** code is outside this chunk) — confirm before relying on it. */
static spinlock_t pcxl_res_lock;
static char *pcxl_res_map;
static int pcxl_res_hint;
static int pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

/* PA 1.1 has no DMA address translation hardware to constrain the mask,
** so every mask is acceptable. */
static int pa11_dma_supported( struct device *dev, u64 mask)
{
	return 1;
}

/* Fill PTEs within one pmd with uncached kernel mappings (PAGE_KERNEL_UNC)
** of the physical range starting at *paddr_ptr, advancing *paddr_ptr as it
** goes.  Each new mapping is followed by a kernel data-TLB purge
** (pdtlb_kernel) for the virtual address, so stale cached translations
** cannot survive.  Size is clamped to the end of the current pmd. */
static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		/* A live PTE here means a double-map; report but carry on. */
		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb_kernel(orig_vaddr);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

/* Walk the pmds covering [vaddr, vaddr+size) within one pgd, allocating
** PTE tables as needed and delegating each pmd's span to
** map_pte_uncached().  Returns -ENOMEM on allocation failure. */
static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

/* Top of the mapping walk: create uncached kernel mappings for the
** physical range [paddr, paddr+size) at virtual [vaddr, vaddr+size),
** allocating pmd/pte levels as needed.  Returns 0 or -ENOMEM.
** The `vaddr &&` test guards against wraparound at the top of the
** address space. */
static inline int map_uncached_pages(unsigned long vaddr, unsigned long size, unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

/* Clear the PTEs within one pmd for [vaddr, vaddr+size), purging the
** kernel TLB entry for each page.  A swapped-out (non-present, non-none)
** entry in a kernel page table should be impossible — hence the
** KERN_CRIT complaint. */
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page = *pte;

		pte_clear(pte);
		pdtlb_kernel(orig_vaddr);

		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

/* Walk the pmds covering [vaddr, vaddr+size) within one pgd and clear
** each pmd's span via unmap_uncached_pte().  Bad entries are reported
** and cleared rather than followed. */
static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

/* Top of the unmapping walk: tear down the uncached kernel mappings for
** [vaddr, vaddr+size), mirroring map_uncached_pages(). */
static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

/*
** Scan the resource bitmap in `size`-bit chunks for a chunk whose bits
** under `mask` are all free; claim it, record the byte index in `idx`,
** advance the search hint past the claimed chunk, and jump to the
** caller's resource_found label.  Falls through if nothing is free.
*/
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) \
       { \
               if(0 == ((*res_ptr) & mask)) { \
                       *res_ptr |= mask; \
		       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
		       pcxl_res_hint = idx + (size >> 3); \
                       goto resource_found; \
               } \
       } \

/*
** Two-pass search over pcxl_res_map with u8/u16/u32 granularity chosen
** by `size`: first from the (chunk-aligned) rolling hint to the end,
** then — if that failed — from the start of the map.
*/
#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
       res_ptr = (u##size *)&pcxl_res_map[0]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
}

/* Allocate `size` bytes worth of pages (one bit per page) from the pcxl
** DMA resource map and return the corresponding mapping.  The bitmap is
** searched in 8/16/32-bit chunks depending on how many pages are needed;
** requests too large for a 32-bit chunk, or exhaustion of the map, panic.
** NOTE(review): this function is truncated at the end of this chunk —
** the return-value computation continues beyond the visible source. */
unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	ASSERT(pages_needed);
	ASSERT((pages_needed * PAGE_SIZE) < DMA_CHUNK_SIZE);
	ASSERT(pages_needed < (BITS_PER_LONG - PAGE_SHIFT));

	/* Build a contiguous run of `pages_needed` low-order one bits. */
	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n", __FILE__);
	}

	/* Both search passes fell through: the map is full. */
	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n", __FILE__);

resource_found:	/* jumped to from inside PCXL_SEARCH_LOOP */

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map