pci_dma.c
	if ( tcenum == -1 ) {
		PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: no available blocks of order = %d\n", order );
		if ( order < tbl->mlbm.maxLevel ) {
			PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: trying next bigger size\n" );
		} else {
			panic("PCI_DMA: alloc_tce_range_nolock: maximum size reached...failing\n");
		}
	}
#endif

	/* If no block of the requested size was found, try the next
	 * size bigger.  If one of those is found, return the second
	 * half of the block to freespace and keep the first half.
	 */
	if ((tcenum == -1) && (order < (NUM_TCE_LEVELS - 1))) {
		tcenum = alloc_tce_range_nolock( tbl, order+1 );
		if ( tcenum != -1 ) {
			free_tce_range_nolock( tbl, tcenum+(1<<order), order );
		}
	}

	/* Return the index of the first tce in the block
	 * (or -1 if we failed).
	 */
	return tcenum;
}

static inline void free_tce_range(struct TceTable *tbl, long tcenum, unsigned order)
{
	unsigned long flags;

	/* Lock the tce allocation bitmap */
	spin_lock_irqsave( &(tbl->lock), flags );

	/* Do the actual work */
	free_tce_range_nolock( tbl, tcenum, order );

	/* Unlock the tce allocation bitmap */
	spin_unlock_irqrestore( &(tbl->lock), flags );
}

void free_tce_range_nolock(struct TceTable *tbl, long tcenum, unsigned order)
{
	unsigned long block;
	unsigned byte, bit, mask, b;
	unsigned char *map, *bytep;

	if (order >= NUM_TCE_LEVELS) {
		panic("PCI_DMA: free_tce_range: invalid order: 0x%x\n", order);
		return;
	}

	block = tcenum >> order;

#ifdef MONITOR_TCE
	if ( tcenum != (block << order) ) {
		printk("PCI_DMA: free_tce_range: tcenum %lx misaligned for order %x\n",
		       tcenum, order);
		return;
	}
	if ( block >= tbl->mlbm.level[order].numBits ) {
		printk("PCI_DMA: free_tce_range: tcenum %lx is outside the range of this map (order %x, numBits %lx)\n",
		       tcenum, order, tbl->mlbm.level[order].numBits );
		return;
	}
	if ( test_tce_range( tbl, tcenum, order ) ) {
		printk("PCI_DMA: Freeing range not allocated: TceTable %p, tcenum %lx, order %x\n",
		       tbl, tcenum, order );
		return;
	}
#endif

	map = tbl->mlbm.level[order].map;
	byte = block / 8;
	bit  = block % 8;
	mask = 0x80 >> bit;
	bytep = map + byte;

#ifdef DEBUG_TCE
	PPCDBG(PPCDBG_TCE, "free_tce_range_nolock: freeing block %ld (byte=%d, bit=%d) of order %d\n",
	       block, byte, bit, order);
#endif
#ifdef MONITOR_TCE
	if ( *bytep & mask ) {
		panic("PCI_DMA: Tce already free: TceTable %p, tcenum %lx, order %x\n",
		      tbl, tcenum, order);
	}
#endif

	*bytep |= mask;	/* Mark the block free at this level */

	/* If there is a higher level in the bit map than this we may be
	 * able to buddy up this block with its partner.
	 *   If this is the highest level we can't buddy up.
	 *   If this level has an odd number of bits and we are freeing
	 *   the last block we can't buddy up.
	 *   Don't buddy up if it's in the first 1/4 of the level.
	 */
	if (( order < tbl->mlbm.maxLevel ) &&
	    ( block > (tbl->mlbm.level[order].numBits/4) ) &&
	    (( block < tbl->mlbm.level[order].numBits-1 ) ||
	     ( 0 == ( tbl->mlbm.level[order].numBits & 1 )))) {
		/* See if we can buddy up the block we just freed */
		bit &= 6;		/* get to the first of the buddy bits */
		mask = 0xc0 >> bit;	/* build two bit mask */
		b = *bytep & mask;	/* get the two bits */
		if ( 0 == (b ^ mask) ) {	/* if both bits are on */
			/* both of the buddy blocks are free, so combine them */
			*bytep ^= mask;			/* turn off the two bits */
			block = ( byte * 8 ) + bit;	/* block of first of buddies */
			tcenum = block << order;
			/* free the buddied block at the next level up */
			PPCDBG(PPCDBG_TCE, "free_tce_range: buddying blocks %ld & %ld\n",
			       block, block+1);
			free_tce_range_nolock( tbl, tcenum, order+1 );
		}
	}
}
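/*
 * Worked example of the buddy coalescing above (added note, not from
 * the original source): freeing block 5 at some order gives byte = 0,
 * bit = 5.  "bit &= 6" rounds down to the even bit of the pair, so
 * bit = 4 and mask = 0xc0 >> 4 = 0x0c covers blocks 4 and 5.  If both
 * bits are set, both buddies are free; the pair is cleared and block
 * 4's tcenum (4 << order) is re-freed at order+1, where it is block 2.
 */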
static long test_tce_range( struct TceTable *tbl, long tcenum, unsigned order )
{
	unsigned long block;
	unsigned byte, bit, mask, b;
	long retval, retLeft, retRight;
	unsigned char *map;

	map = tbl->mlbm.level[order].map;
	block = tcenum >> order;
	byte = block / 8;		/* Byte within bitmap */
	bit  = block % 8;		/* Bit within byte */
	mask = 0x80 >> bit;
	b = (*(map+byte) & mask);	/* 0 if block is allocated, else free */
	if ( b )
		retval = 1;		/* 1 == block is free */
	else
		retval = 0;		/* 0 == block is allocated */

	/* Test bits at all levels below this to ensure that all agree */
	if (order) {
		retLeft  = test_tce_range( tbl, tcenum, order-1 );
		retRight = test_tce_range( tbl, tcenum+(1<<(order-1)), order-1 );
		if ( retLeft || retRight ) {
			retval = 2;	/* 2 == levels disagree (inconsistent map) */
		}
	}

	return retval;
}

static inline dma_addr_t get_tces( struct TceTable *tbl, unsigned order, void *page,
				   unsigned numPages, int direction )
{
	long tcenum;
	unsigned long uaddr;
	unsigned i;
	dma_addr_t retTce = NO_TCE;

	uaddr = (unsigned long)page & PAGE_MASK;

	/* Allocate a range of tces */
	tcenum = alloc_tce_range( tbl, order );
	if ( tcenum != -1 ) {
		/* We got the tces we wanted */
		tcenum += tbl->startOffset;	/* Offset into real TCE table */
		retTce = tcenum << PAGE_SHIFT;	/* Set the return dma address */

		/* Set up a tce for each page */
		for (i = 0; i < numPages; ++i) {
			ppc_md.tce_build(tbl, tcenum, uaddr, direction);
			++tcenum;
			uaddr += PAGE_SIZE;
		}

		/* Make sure the update is visible to hardware.  The sync is
		 * required to synchronize the update to the TCE table with
		 * the MMIO that will send the bus address to the IOA.
		 */
		__asm__ __volatile__ ("sync" : : : "memory");
	} else {
		panic("PCI_DMA: Tce Allocation failure in get_tces. 0x%p\n", tbl);
	}

	return retTce;
}
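/*
 * Illustrative round trip (added note, not from the original source),
 * assuming 4K pages (PAGE_SHIFT == 12): if a table has startOffset
 * 0x100 and alloc_tce_range() returns tcenum 8, get_tces() hands the
 * device dma_addr (8 + 0x100) << 12 == 0x108000.  tce_free() below
 * inverts this: 0x108000 >> 12 == 0x108, minus startOffset gives the
 * bitmap index 8 that is passed back to free_tce_range().
 */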
static void tce_free_one_iSeries( struct TceTable *tbl, long tcenum )
{
	u64 set_tce_rc;
	union Tce tce;

	tce.wholeTce = 0;
	set_tce_rc = HvCallXm_setTce((u64)tbl->index, (u64)tcenum, tce.wholeTce);
	if ( set_tce_rc )
		panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n", set_tce_rc);
}

static void tce_free_one_pSeries( struct TceTable *tbl, long tcenum )
{
	union Tce tce;
	union Tce *tce_addr;

	tce.wholeTce = 0;
	tce_addr = ((union Tce *)tbl->base) + tcenum;
	*tce_addr = (union Tce)tce.wholeTce;
}

static void tce_free(struct TceTable *tbl, dma_addr_t dma_addr,
		     unsigned order, unsigned num_pages)
{
	long tcenum, total_tces, free_tce;
	unsigned i;

	total_tces = (tbl->size * (PAGE_SIZE / sizeof(union Tce)));

	tcenum = dma_addr >> PAGE_SHIFT;
	free_tce = tcenum - tbl->startOffset;

	if ( ((free_tce + num_pages) > total_tces) ||
	     (tcenum < tbl->startOffset) ) {
		printk("tce_free: invalid tcenum\n");
		printk("\ttcenum    = 0x%lx\n", tcenum);
		printk("\tTCE Table = 0x%lx\n", (u64)tbl);
		printk("\tbus#      = 0x%lx\n", (u64)tbl->busNumber);
		printk("\tsize      = 0x%lx\n", (u64)tbl->size);
		printk("\tstartOff  = 0x%lx\n", (u64)tbl->startOffset);
		printk("\tindex     = 0x%lx\n", (u64)tbl->index);
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		ppc_md.tce_free_one(tbl, tcenum);
		++tcenum;
	}

	/* No sync (to make the TCE change visible) is required here.
	 * The lwsync when acquiring the lock in free_tce_range is
	 * sufficient to synchronize with the bitmap.
	 */
	free_tce_range( tbl, free_tce, order );
}

void __init create_virtual_bus_tce_table(void)
{
	struct TceTable *t;
	struct TceTableManagerCB virtBusTceTableParms;
	u64 absParmsPtr;

	virtBusTceTableParms.busNumber = 255;		/* Bus 255 is the virtual bus */
	virtBusTceTableParms.virtualBusFlag = 0xff;	/* Ask for virtual bus */

	absParmsPtr = virt_to_absolute( (u64)&virtBusTceTableParms );
	HvCallXm_getTceTableParms( absParmsPtr );

	/* The virtual ethernet table takes the first half of the window */
	virtBusVethTceTable.size = virtBusTceTableParms.size / 2;
	virtBusVethTceTable.busNumber = virtBusTceTableParms.busNumber;
	virtBusVethTceTable.startOffset = virtBusTceTableParms.startOffset;
	virtBusVethTceTable.index = virtBusTceTableParms.index;
	virtBusVethTceTable.tceType = TCE_VB;

	/* The VIO table takes the remainder, starting after the VETH entries */
	virtBusVioTceTable.size = virtBusTceTableParms.size - virtBusVethTceTable.size;
	virtBusVioTceTable.busNumber = virtBusTceTableParms.busNumber;
	virtBusVioTceTable.startOffset = virtBusTceTableParms.startOffset +
		virtBusVethTceTable.size * (PAGE_SIZE / sizeof(union Tce));
	virtBusVioTceTable.index = virtBusTceTableParms.index;
	virtBusVioTceTable.tceType = TCE_VB;

	t = build_tce_table( &virtBusVethTceTable );
	if ( t ) {
		/* tceTables[255] = t; */
		/* VirtBusVethTceTable = t; */
		printk( "Virtual Bus VETH TCE table built successfully.\n");
		printk( "  TCE table size        = %ld entries\n",
			(unsigned long)t->size * (PAGE_SIZE / sizeof(union Tce)) );
		printk( "  TCE table token       = %d\n", (unsigned)t->index );
		printk( "  TCE table start entry = 0x%lx\n", (unsigned long)t->startOffset );
	} else
		printk( "Virtual Bus VETH TCE table failed.\n");

	t = build_tce_table( &virtBusVioTceTable );
	if ( t ) {
		/* VirtBusVioTceTable = t; */
		printk( "Virtual Bus VIO TCE table built successfully.\n");
		printk( "  TCE table size        = %ld entries\n",
			(unsigned long)t->size * (PAGE_SIZE / sizeof(union Tce)) );
		printk( "  TCE table token       = %d\n", (unsigned)t->index );
		printk( "  TCE table start entry = 0x%lx\n", (unsigned long)t->startOffset );
	} else
		printk( "Virtual Bus VIO TCE table failed.\n");
}
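/*
 * Illustrative example (added note, not from the original source) of
 * the slot rounding in create_tce_tables_for_buses() below: a PHB with
 * num_slots = 6 children gives __ilog2(6) = 2; since 1 << 2 != 6 the
 * exponent is bumped to 3 (round up to 8 slots), so each slot's share
 * of the 2GB being carved up is sized as 1 << (22 - 3).
 */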
void create_tce_tables_for_buses(struct list_head *bus_list)
{
	struct pci_controller *phb;
	struct device_node *dn, *first_dn;
	int num_slots, num_slots_ilog2;
	int first_phb = 1;

	for (phb = hose_head; phb; phb = phb->next) {
		first_dn = ((struct device_node *)phb->arch_data)->child;

		/* Carve 2GB into the largest dma_window_size possible,
		 * rounding the slot count up to the next power of two.
		 */
		for (dn = first_dn, num_slots = 0; dn != NULL; dn = dn->sibling)
			num_slots++;
		num_slots_ilog2 = __ilog2(num_slots);
		if ((1 << num_slots_ilog2) != num_slots)
			num_slots_ilog2++;
		phb->dma_window_size = 1 << (22 - num_slots_ilog2);

		/* Reserve 16MB of DMA space on the first PHB.
		 * We should probably be more careful and use firmware props.
		 * In reality this space is remapped, not lost.  But we don't
		 * want to get that smart to handle it -- too much work.
		 */
		phb->dma_window_base_cur = first_phb ? (1 << 12) : 0;
		first_phb = 0;

		for (dn = first_dn, num_slots = 0; dn != NULL; dn = dn->sibling) {
			create_pci_bus_tce_table((unsigned long)dn);
		}
	}
}

void create_tce_tables_for_busesLP(struct list_head *bus_list)
{
	struct list_head *ln;
	struct pci_bus *bus;
	struct device_node *busdn;
	u32 *dma_window;

	for (ln = bus_list->next; ln != bus_list; ln = ln->next) {
		bus = pci_bus_b(ln);
		busdn = PCI_GET_DN(bus);

		/* NOTE: there should never be a window declared on a bus when
		 * child devices also have a window.  If this should ever be
		 * architected, we probably want children to have priority.
		 * In reality, the PHB containing ISA has the property, but
		 * otherwise it is the pci-bridges that have the property.
		 */
		dma_window = (u32 *)get_property(busdn, "ibm,dma-window", 0);
		if (dma_window) {
			/* Busno hasn't been copied yet.
			 * Do it now because getTceTableParmsPSeriesLP needs it.
			 */
			busdn->busno = bus->number;
			create_pci_bus_tce_table((unsigned long)busdn);
		} else
			create_tce_tables_for_busesLP(&bus->children);
	}
}

void create_tce_tables(void)
{
	struct pci_dev *dev;
	struct device_node *dn, *mydn;

	if (naca->platform == PLATFORM_PSERIES_LPAR) {
		create_tce_tables_for_busesLP(&pci_root_buses);
	} else {
		create_tce_tables_for_buses(&pci_root_buses);
	}

	/* Now copy the tce_table ptr from the bus devices down to every
	 * pci device_node.  This means get_tce_table() won't need to
	 * search up the device tree to find it.
	 */
	pci_for_each_dev(dev) {
		mydn = dn = PCI_GET_DN(dev);
		while (dn && dn->tce_table == NULL)
			dn = dn->parent;
		if (dn)
			mydn->tce_table = dn->tce_table;
	}
}
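/*
 * Minimal sketch (added, not from the original file): with the
 * tce_table pointer cached on every device node above, a lookup
 * helper can read it directly and only fall back to walking up the
 * tree if the cache is absent.  The helper name lookup_tce_table()
 * is a hypothetical illustration, not this file's get_tce_table().
 */
static inline struct TceTable *lookup_tce_table(struct pci_dev *dev)
{
	struct device_node *dn = PCI_GET_DN(dev);

	/* Walk toward the root only if the cached pointer is absent */
	while (dn && dn->tce_table == NULL)
		dn = dn->parent;
	return dn ? dn->tce_table : NULL;
}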