/*
 *	pci_iommu.c
 */
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

/* Given a scatterlist leader, choose an allocation method
   and fill in the blanks.  */

static inline int
sg_fill(struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(arena, npages);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(leader, end, 0);
		return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
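/* Illustrative sketch, not part of the original file: after
   sg_classify() runs, each entry's dma_address serves as a temporary
   tag that sg_fill() consumes -- a leader holds its leader_flag
   (0 when the whole run is physically contiguous), while a negative
   value (-1 for a physically contiguous continuation) marks an entry
   folded into the preceding leader.  Assuming that encoding, a
   minimal pass that visits only the leaders could look like the
   following; count_sg_leaders() is a hypothetical helper, not a
   kernel API.  */
#if 0
static long
count_sg_leaders(struct scatterlist *sg, struct scatterlist *end)
{
	long n = 0;

	for (; sg < end; ++sg)
		if ((int) sg->dma_address >= 0)
			/* Leader: starts a new DMA segment.  */
			n++;
	return n;
}
#endif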
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
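/* Illustrative sketch, not part of the original file: a caller is
   expected to pair pci_map_sg() with pci_unmap_sg(), program the
   device from the returned entry count (which can be smaller than
   nents once contiguous entries are merged), and pass the original
   nents back to pci_unmap_sg().  example_do_dma() and
   program_hw_descriptor() are hypothetical names.  */
#if 0
static int
example_do_dma(struct pci_dev *pdev, struct scatterlist *sg, int nents)
{
	int count, i;

	count = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
	if (count == 0)
		return -ENOMEM;		/* no dma page tables available */

	for (i = 0; i < count; ++i)
		program_hw_descriptor(sg[i].dma_address, sg[i].dma_length);

	/* ... start the transfer and wait for completion ... */

	pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
	return 0;
}
#endif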
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory
	   as shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask
		|| __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	return 0;
}

/*
 * AGP GART extensions to the IOMMU
 */

int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena)
		return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind().  */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena)
		return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first...  */
	for (i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena)
		return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for (j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for (i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena)
		return -EINVAL;

	p = arena->ptes + pg_start;
	for (i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}

/* True if the machine supports DAC addressing, and DEV can make use
   of it given MASK.  */

int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine.  */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

dma64_addr_t
pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page,
		    unsigned long offset, int direction)
{
	return (alpha_mv.pci_dac_offset
		+ __pa(page_address(page))
		+ (dma64_addr_t) offset);
}

struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	unsigned long paddr = (dma_addr & PAGE_MASK) - alpha_mv.pci_dac_offset;
	return virt_to_page(__va(paddr));
}

unsigned long
pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return (dma_addr & ~PAGE_MASK);
}
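/* Illustrative sketch, not part of the original file: the AGP GART
   entry points above are meant to be used in the order
   iommu_reserve() -> iommu_bind() -> iommu_unbind() ->
   iommu_release().  Reserve marks the ptes IOMMU_RESERVED_PTE so the
   iommu tlb cannot load stale entries, bind installs the real
   translations, and release insists every pte is back in the
   reserved state before freeing.  example_gart_map() is a
   hypothetical caller.  */
#if 0
static int
example_gart_map(struct pci_iommu_arena *arena, unsigned long *physaddrs,
		 long pg_count)
{
	long pg_start;

	pg_start = iommu_reserve(arena, pg_count, 0);
	if (pg_start < 0)
		return -ENOMEM;

	if (iommu_bind(arena, pg_start, pg_count, physaddrs) < 0)
		return -EBUSY;

	/* Device may now use arena->dma_base + pg_start*PAGE_SIZE.  */

	iommu_unbind(arena, pg_start, pg_count);
	return iommu_release(arena, pg_start, pg_count);
}
#endif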