/*
 * pci_dma.c — Linux 2.5.1 kernel source, extracted from a web code
 * viewer (page 1 of 3; the full file is 1,482 lines and continues
 * beyond this excerpt).
 */
/*
 * pci_dma.c
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Dynamic DMA mapping support.
 *
 * Manages the TCE space assigned to this partition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/ppcdebug.h>
#include <asm/iSeries/HvCallXm.h>
#include <asm/iSeries/LparData.h>
#include <asm/pci_dma.h>
#include <asm/pci-bridge.h>
#include <asm/iSeries/iSeries_pci.h>
#include <asm/machdep.h>
#include "pci.h"

// #define DEBUG_TCE 1

/* Initialize so this guy does not end up in the BSS section.
 * Only used to pass OF initialization data set in prom.c into the main
 * kernel code -- data ultimately copied into tceTables[].
 */
extern struct _of_tce_table of_tce_table[];

extern struct pci_controller* hose_head;
extern struct pci_controller** hose_tail;

struct TceTable virtBusVethTceTable;	/* Tce table for virtual ethernet */
struct TceTable virtBusVioTceTable;	/* Tce table for virtual I/O */

/* Fake device nodes and pci_devs for the virtual bus, so virtual
 * devices can be looked up through the normal device-node TCE path
 * (old GCC "label:" designated-initializer syntax).
 */
struct device_node iSeries_veth_dev_node = { tce_table: &virtBusVethTceTable };
struct device_node iSeries_vio_dev_node = { tce_table: &virtBusVioTceTable };
struct pci_dev iSeries_veth_dev_st = { sysdata: &iSeries_veth_dev_node };
struct pci_dev iSeries_vio_dev_st = { sysdata: &iSeries_vio_dev_node };
struct pci_dev * iSeries_veth_dev = &iSeries_veth_dev_st;
struct pci_dev * iSeries_vio_dev = &iSeries_vio_dev_st;

/* Tce tables for 256 busses.
 * Bus 255 is the virtual bus; zero indicates no bus defined.
 */
struct TceTable * tceTables[256];

/* allocates a contiguous range of tces (power-of-2 size) */
static inline long alloc_tce_range(struct TceTable *, unsigned order );

/* allocates a contiguous range of tces (power-of-2 size)
 * assumes lock already held
 */
static long alloc_tce_range_nolock(struct TceTable *, unsigned order );

/* frees a contiguous range of tces (power-of-2 size) */
static inline void free_tce_range(struct TceTable *, long tcenum, unsigned order );

/* frees a contiguous range of tces (power-of-2 size)
 * assumes lock already held
 */
void free_tce_range_nolock(struct TceTable *, long tcenum, unsigned order );

/* allocates a range of tces and sets them to the pages */
static inline dma_addr_t get_tces( struct TceTable *, unsigned order, void *page, unsigned numPages, int direction );

static long test_tce_range( struct TceTable *, long tcenum, unsigned order );

static unsigned fill_scatterlist_sg(struct scatterlist *sg, int nents, dma_addr_t dma_addr, unsigned long numTces );

static unsigned long num_tces_sg( struct scatterlist *sg, int nents );

static dma_addr_t create_tces_sg( struct TceTable *tbl, struct scatterlist *sg, int nents, unsigned numTces, int direction );

static void getTceTableParmsPSeries(
struct pci_controller *phb,
	struct device_node *dn,
	struct TceTable *tce_table_parms );

static void getTceTableParmsPSeriesLP(struct pci_controller *phb,
	struct device_node *dn,
	struct TceTable *newTceTable );

void create_pci_bus_tce_table( unsigned long token );

/* iSeries bus-number stub: always reports bus 0 for the given device. */
u8 iSeries_Get_Bus( struct pci_dev * dv )
{
	return 0;
}

/* Look up the TCE table governing DMA for a device.  A NULL dev falls
 * back to the ISA bridge device.  On iSeries the table is indexed by
 * bus number; otherwise it hangs off the device's OF device node.
 */
static inline struct TceTable *get_tce_table(struct pci_dev *dev)
{
	if (!dev)
		dev = ppc64_isabridge_dev;
	if (!dev)
		return NULL;
	if ( ( _machine == _MACH_iSeries ) && ( dev->bus ) )
		return tceTables[dev->bus->number];
	/* On the iSeries, the virtual bus will take this path.  There is a
	 * fake pci_dev and dev_node built and used.
	 */
	return PCI_GET_DN(dev)->tce_table;
}

/* Count leading zero bits of a 64-bit value via the PowerPC cntlzd
 * instruction (used to find the first set bit in a bitmap word).
 */
static unsigned long __inline__ count_leading_zeros64( unsigned long x )
{
	unsigned long lz;
	asm("cntlzd %0,%1" : "=r"(lz) : "r"(x));
	return lz;
}

/* Write one TCE on iSeries through the hypervisor (HvCallXm_setTce).
 * Virtual-bus (TCE_VB) entries and PCI entries use different permission
 * bits; in both cases the store/write permission is granted only when
 * direction is not PCI_DMA_TODEVICE.
 */
static void tce_build_iSeries(struct TceTable *tbl, long tcenum,
		unsigned long uaddr, int direction )
{
	u64 setTceRc;
	union Tce tce;

	PPCDBG(PPCDBG_TCE, "build_tce: uaddr = 0x%lx\n", uaddr);
	PPCDBG(PPCDBG_TCE, "\ttcenum = 0x%lx, tbl = 0x%lx, index=%lx\n",
	       tcenum, tbl, tbl->index);

	tce.wholeTce = 0;
	/* Real page number of the page backing uaddr. */
	tce.tceBits.rpn = (virt_to_absolute(uaddr)) >> PAGE_SHIFT;

	/* If for virtual bus */
	if ( tbl->tceType == TCE_VB ) {
		tce.tceBits.valid = 1;
		tce.tceBits.allIo = 1;
		if ( direction != PCI_DMA_TODEVICE )
			tce.tceBits.readWrite = 1;
	} else {
		/* If for PCI bus */
		tce.tceBits.readWrite = 1; // Read allowed
		if ( direction != PCI_DMA_TODEVICE )
			tce.tceBits.pciWrite = 1;
	}

	setTceRc = HvCallXm_setTce((u64)tbl->index, (u64)tcenum, tce.wholeTce );
	if(setTceRc) {
		printk("PCI: tce_build failed 0x%lx tcenum: 0x%lx\n",
		       setTceRc, (u64)tcenum);
		//PPCDBG(PPCDBG_TCE, "setTce failed. rc=%ld\n", setTceRc);
		//PPCDBG(PPCDBG_TCE, "\tindex = 0x%lx\n", (u64)tbl->index);
		//PPCDBG(PPCDBG_TCE, "\ttce num = 0x%lx\n", (u64)tcenum);
		//PPCDBG(PPCDBG_TCE, "\ttce val = 0x%lx\n", tce.wholeTce );
	}
}

/* Write one TCE directly into the in-memory pSeries TCE table, then
 * issue a sync so the update is visible to I/O hardware before any
 * DMA that depends on it.
 */
static void tce_build_pSeries(struct TceTable *tbl, long tcenum,
		unsigned long uaddr, int direction )
{
	union Tce tce;
	union Tce *tce_addr;

	PPCDBG(PPCDBG_TCE, "build_tce: uaddr = 0x%lx\n", uaddr);
	PPCDBG(PPCDBG_TCE, "\ttcenum = 0x%lx, tbl = 0x%lx, index=%lx\n",
	       tcenum, tbl, tbl->index);

	tce.wholeTce = 0;
	tce.tceBits.rpn = (virt_to_absolute(uaddr)) >> PAGE_SHIFT;
	tce.tceBits.readWrite = 1; // Read allowed
	if ( direction != PCI_DMA_TODEVICE )
		tce.tceBits.pciWrite = 1;

	tce_addr = ((union Tce *)tbl->base) + tcenum;
	*tce_addr = (union Tce)tce.wholeTce;

	/* Make sure the update is visible to hardware. */
	__asm__ __volatile__ ("sync" : : : "memory");
}

/*
 * Build a TceTable structure.  This contains a multi-level bit map which
 * is used to manage allocation of the tce space.
 */
static struct TceTable *build_tce_table( struct TceTable * tbl )
{
	unsigned long bits, bytes, totalBytes;
	unsigned long numBits[NUM_TCE_LEVELS], numBytes[NUM_TCE_LEVELS];
	unsigned i, k, m;
	unsigned char * pos, * p, b;

	PPCDBG(PPCDBG_TCEINIT, "build_tce_table: tbl = 0x%lx\n", tbl);
	spin_lock_init( &(tbl->lock) );

	tbl->mlbm.maxLevel = 0;

	/* Compute number of bits and bytes for each level of the
	 * multi-level bit map.  Each level halves the bit count;
	 * byte counts are rounded up to whole 64-bit words.
	 */
	totalBytes = 0;
	bits = tbl->size * (PAGE_SIZE / sizeof( union Tce ));
	for ( i=0; i<NUM_TCE_LEVELS; ++i ) {
		bytes = ((bits+63)/64) * 8;
		PPCDBG(PPCDBG_TCEINIT, "build_tce_table: level %d bits=%ld, bytes=%ld\n", i, bits, bytes );
		numBits[i] = bits;
		numBytes[i] = bytes;
		bits /= 2;
		totalBytes += bytes;
	}
	PPCDBG(PPCDBG_TCEINIT, "build_tce_table: totalBytes=%ld\n", totalBytes );

	/* One contiguous allocation holds every level's bitmap. */
	pos = (char *)__get_free_pages( GFP_ATOMIC, get_order( totalBytes ));
	if ( !pos )
		return NULL;
	memset( pos, 0, totalBytes );

	/* For each level, fill in the pointer to the bit map,
	 * and turn on the last bit in the bit map (if
the
	 * number of bits in the map is odd).  The highest
	 * level will get all of its bits turned on.
	 */
	for (i=0; i<NUM_TCE_LEVELS; ++i) {
		if ( numBytes[i] ) {
			tbl->mlbm.level[i].map = pos;
			tbl->mlbm.maxLevel = i;
			if ( numBits[i] & 1 ) {
				p = pos + numBytes[i] - 1;
				m = (( numBits[i] % 8) - 1) & 7;
				*p = 0x80 >> m;
				PPCDBG(PPCDBG_TCEINIT, "build_tce_table: level %d last bit %x\n", i, 0x80>>m );
			}
		}
		else
			tbl->mlbm.level[i].map = 0;
		pos += numBytes[i];
		tbl->mlbm.level[i].numBits = numBits[i];
		tbl->mlbm.level[i].numBytes = numBytes[i];
	}

	/* For the highest level, turn on all the bits */
	i = tbl->mlbm.maxLevel;
	p = tbl->mlbm.level[i].map;
	m = numBits[i];
	PPCDBG(PPCDBG_TCEINIT, "build_tce_table: highest level (%d) has all bits set\n", i);
	for (k=0; k<numBytes[i]; ++k) {
		if ( m >= 8 ) {
			/* handle full bytes */
			*p++ = 0xff;
			m -= 8;
		}
		else if(m>0) {
			/* handle the last partial byte */
			b = 0x80;
			*p = 0;
			while (m) {
				*p |= b;
				b >>= 1;
				--m;
			}
		}
		else {
			break;
		}
	}

	return tbl;
}

/* Allocate a contiguous power-of-2-sized range of TCEs, taking the
 * table's allocation-bitmap lock around the real work.
 */
static inline long alloc_tce_range( struct TceTable *tbl, unsigned order )
{
	long retval;
	unsigned long flags;

	/* Lock the tce allocation bitmap */
	spin_lock_irqsave( &(tbl->lock), flags );

	/* Do the actual work */
	retval = alloc_tce_range_nolock( tbl, order );

	/* Unlock the tce allocation bitmap */
	spin_unlock_irqrestore( &(tbl->lock), flags );

	return retval;
}

/* Buddy-style allocation from the multi-level bitmap: scan this order's
 * map for a set bit (a free block); if none is found, recursively split
 * a block from the next larger order and free the unused half.  Returns
 * the index of the first tce in the block, or -1 on failure.  The caller
 * must already hold tbl->lock.
 */
static long alloc_tce_range_nolock( struct TceTable *tbl, unsigned order )
{
	unsigned long numBits, numBytes;
	unsigned long i, bit, block, mask;
	long tcenum;
	u64 * map;

	/* If the order (power of 2 size) requested is larger than our
	 * biggest, indicate failure
	 */
	if(order >= NUM_TCE_LEVELS) {
		PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: invalid order: %d\n", order );
		return -1;
	}

	numBits = tbl->mlbm.level[order].numBits;
	numBytes = tbl->mlbm.level[order].numBytes;
	map = (u64 *)tbl->mlbm.level[order].map;

	/* Initialize return value to -1 (failure) */
	tcenum = -1;

	/* Loop through the bytes of the bitmap, one 64-bit word at a time */
	for (i=0; i<numBytes/8; ++i) {
		if ( *map ) {
			/* A free block is found, compute the block
			 * number (of this size)
			 */
			bit = count_leading_zeros64( *map );
			block = (i * 64) + bit;
			/* turn off the bit in the map to indicate
			 * that the block is now in use
			 */
			mask = 0x1UL << (63 - bit);
			*map &= ~mask;
			/* compute the index into our tce table for
			 * the first tce in the block
			 */
			PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: allocating block %ld, (byte=%ld, bit=%ld) order %d\n", block, i, bit, order );
			tcenum = block << order;
			return tcenum;
		}
		++map;
	}

#ifdef DEBUG_TCE
	if ( tcenum == -1 ) {
		PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: no available blocks of order = %d\n", order );
		if ( order < tbl->mlbm.maxLevel )
			PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: trying next bigger size\n" );
		else
			PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: maximum size reached...failing\n");
	}
#endif

	/* If no block of the requested size was found, try the next
	 * size bigger.  If one of those is found, return the second
	 * half of the block to freespace and keep the first half
	 */
	if((tcenum == -1) && (order < (NUM_TCE_LEVELS - 1))) {
		tcenum = alloc_tce_range_nolock( tbl, order+1 );
		if ( tcenum != -1 ) {
			free_tce_range_nolock( tbl, tcenum+(1<<order), order );
		}
	}

	/* Return the index of the first tce in the block
	 * (or -1 if we failed)
	 */
	return tcenum;
}

/* Free a contiguous power-of-2-sized range of TCEs, taking the table's
 * allocation-bitmap lock around the real work.
 */
static inline void free_tce_range(struct TceTable *tbl, long tcenum, unsigned order )
{
	unsigned long flags;

	/* Lock the tce allocation bitmap */
	spin_lock_irqsave( &(tbl->lock), flags );

	/* Do the actual work */
	free_tce_range_nolock( tbl, tcenum, order );

	/* Unlock the tce allocation bitmap */
	spin_unlock_irqrestore( &(tbl->lock), flags );
}

/* Return a block to this order's bitmap and, where possible, coalesce
 * it with its buddy into a block of the next larger order.  The caller
 * must already hold tbl->lock.
 * NOTE(review): this function is truncated at the end of this excerpt
 * (page break in the export); the remainder is on the following page.
 */
void free_tce_range_nolock(struct TceTable *tbl, long tcenum, unsigned order )
{
	unsigned long block;
	unsigned byte, bit, mask, b;
	unsigned char * map, * bytep;

	if (order >= NUM_TCE_LEVELS) {
		PPCDBG(PPCDBG_TCE, "free_tce_range: invalid order: %d, tcenum = %d\n", order, tcenum );
		return;
	}

	block = tcenum >> order;

#ifdef DEBUG_TCE
	if ( tcenum != (block << order ) ) {
		PPCDBG(PPCDBG_TCE, "free_tce_range: tcenum %lx misaligned for order %x\n", tcenum, order );
		return;
	}
	if ( block >= tbl->mlbm.level[order].numBits ) {
		PPCDBG(PPCDBG_TCE, "free_tce_range: tcenum %lx is outside the range of this map (order %x, numBits %lx\n", tcenum, order, tbl->mlbm.level[order].numBits );
		return;
	}
	if ( test_tce_range( tbl, tcenum, order ) ) {
		PPCDBG(PPCDBG_TCE, "free_tce_range: freeing range not allocated.\n");
		PPCDBG(PPCDBG_TCE, "\tTceTable %p, tcenum %lx, order %x\n", tbl, tcenum, order );
	}
#endif

	map = tbl->mlbm.level[order].map;
	byte = block / 8;
	bit = block % 8;
	mask = 0x80 >> bit;
	bytep = map + byte;

#ifdef DEBUG_TCE
	PPCDBG(PPCDBG_TCE, "free_tce_range_nolock: freeing block %ld (byte=%d, bit=%d) of order %d\n", block, byte, bit, order);
	if ( *bytep & mask )
		PPCDBG(PPCDBG_TCE, "free_tce_range: already free: TceTable %p, tcenum %lx, order %x\n", tbl, tcenum, order );
#endif

	/* Mark the block free at this level. */
	*bytep |= mask;

	/* If there is a higher level in the bit map than this we may be
	 * able to buddy up this block with its partner.
	 *   If this is the highest level we can't buddy up
	 *   If this level has an odd number of bits and
	 *   we are freeing the last block we can't buddy up
	 * Don't buddy up if it's in the first 1/4 of the level
	 */
	if (( block > (tbl->mlbm.level[order].numBits/4) ) &&
	    (( block < tbl->mlbm.level[order].numBits-1 ) ||
	     ( 0 == ( tbl->mlbm.level[order].numBits & 1)))) {
		/* See if we can buddy up the block we just freed */
		bit &= 6;		/* get to the first of the buddy bits */
		mask = 0xc0 >> bit;	/* build two bit mask */
		b = *bytep & mask;	/* Get the two bits */
		if ( 0 == (b ^ mask) ) { /* If both bits are on */
			/* both of the buddy blocks are free we can combine them */
			*bytep ^= mask;	/* turn off the two bits */
			block = ( byte * 8 ) + bit; /* block of first of buddies */
/* (Code-viewer UI chrome removed here: keyboard-shortcut help text
 * from the export tool.  The source file itself continues on page 2
 * of the viewer export, mid-way through free_tce_range_nolock.)
 */