
📄 iommu.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}

static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

static unsigned long iommu_range_alloc(struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, i, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;

		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i+1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
		       unsigned int npages, enum dma_data_direction direction,
		       unsigned long mask, unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
			 direction);

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);

	for (i = 0; i < npages; i++)
		__clear_bit(free_entry+i, tbl->it_map);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		 int nelems, unsigned long mask,
		 enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i;
	unsigned int align;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
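
For reference, below is a minimal standalone sketch of the page-counting and bitmap-alignment arithmetic that iommu_num_pages() and iommu_range_alloc() rely on. It assumes a 4 KiB IOMMU page size (IOMMU_PAGE_SHIFT = 12) and redefines the kernel macros locally so it compiles as an ordinary user-space C program; it is an illustration of the arithmetic only, not part of the kernel file above.

/*
 * Standalone sketch: reproduces the page-counting math of iommu_num_pages()
 * and the alignment step of iommu_range_alloc().  The macros mirror the
 * kernel names but are defined here (assuming 4 KiB IOMMU pages) so the
 * example builds with a plain C compiler.
 */
#include <stdio.h>

#define IOMMU_PAGE_SHIFT	12
#define IOMMU_PAGE_SIZE		(1UL << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK		(~(IOMMU_PAGE_SIZE - 1))
#define IOMMU_PAGE_ALIGN(a)	(((a) + IOMMU_PAGE_SIZE - 1) & IOMMU_PAGE_MASK)

/* Same arithmetic as iommu_num_pages(): round the buffer end up to an
 * IOMMU page boundary, round the start down, and count whole pages. */
static unsigned long num_pages(unsigned long vaddr, unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;
	return npages;
}

int main(void)
{
	/* A 5000-byte buffer starting 0x64 bytes into a page straddles
	 * two 4 KiB IOMMU pages. */
	printf("npages = %lu\n", num_pages(0x10000064UL, 5000));	/* 2 */

	/* The alignment step in iommu_range_alloc(): align_order = 3
	 * gives a mask of 0x7, so a candidate bit number of 13 is
	 * rounded up to the next multiple of 8, i.e. 16. */
	unsigned int align_order = 3;
	unsigned long align_mask = 0xffffffffffffffffUL >> (64 - align_order);
	unsigned long n = 13;
	printf("aligned n = %lu\n", (n + align_mask) & ~align_mask);	/* 16 */

	return 0;
}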
