📄 pci_iommu.c

📁 Describes the Linux initialization process
💻 C
📖 Page 1 of 2
        pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
        free_pages((unsigned long)cpu_addr, get_order(size));

        DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
              dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
        0   : Followers all physically adjacent.
        1   : Followers all virtually adjacent.
        -1  : Not leader, physically adjacent to previous.
        -2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

static inline void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
{
        unsigned long next_vaddr;
        struct scatterlist *leader;
        long leader_flag, leader_length;

        leader = sg;
        leader_flag = 0;
        leader_length = leader->length;
        next_vaddr = (unsigned long)leader->address + leader_length;

        for (++sg; sg < end; ++sg) {
                unsigned long addr, len;
                addr = (unsigned long) sg->address;
                len = sg->length;

                if (next_vaddr == addr) {
                        sg->dma_address = -1;
                        leader_length += len;
                } else if (((next_vaddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
                        sg->dma_address = -2;
                        leader_flag = 1;
                        leader_length += len;
                } else {
                        leader->dma_address = leader_flag;
                        leader->dma_length = leader_length;
                        leader = sg;
                        leader_flag = 0;
                        leader_length = len;
                }

                next_vaddr = addr + len;
        }

        leader->dma_address = leader_flag;
        leader->dma_length = leader_length;
}

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static inline int
sg_fill(struct scatterlist *leader, struct scatterlist *end,
        struct scatterlist *out, struct pci_iommu_arena *arena,
        dma_addr_t max_dma)
{
        unsigned long paddr = virt_to_phys(leader->address);
        long size = leader->dma_length;
        struct scatterlist *sg;
        unsigned long *ptes;
        long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
        /* If everything is physically contiguous, and the addresses
           fall into the direct-map window, use it.  */
        if (leader->dma_address == 0
            && paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                out->dma_address = paddr + __direct_map_base;
                out->dma_length = size;

                DBGA("    sg_fill: [%p,%lx] -> direct %x\n",
                     leader->address, size, out->dma_address);

                return 0;
        }
#endif

        /* Otherwise, we'll use the iommu to make the pages virtually
           contiguous.  */

        paddr &= ~PAGE_MASK;
        npages = calc_npages(paddr + size);
        dma_ofs = iommu_arena_alloc(arena, npages);
        if (dma_ofs < 0)
                return -1;

        out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
        out->dma_length = size;

        DBGA("    sg_fill: [%p,%lx] -> sg %x np %ld\n",
             leader->address, size, out->dma_address, npages);

        /* All virtually contiguous.  We need to find the length of each
           physically contiguous subsegment to fill in the ptes.  */
        ptes = &arena->ptes[dma_ofs];
        sg = leader;
        do {
#if DEBUG_ALLOC > 0
                struct scatterlist *last_sg = sg;
#endif

                size = sg->length;
                paddr = virt_to_phys(sg->address);

                while (sg+1 < end && (int) sg[1].dma_address == -1) {
                        size += sg[1].length;
                        sg++;
                }

                npages = calc_npages((paddr & ~PAGE_MASK) + size);

                paddr &= PAGE_MASK;
                for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
                        *ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
                DBGA("    (%ld) [%p,%x] np %ld\n",
                     last_sg - leader, last_sg->address,
                     last_sg->length, npages);
                while (++last_sg <= sg) {
                        DBGA("        (%ld) [%p,%x] cont\n",
                             last_sg - leader, last_sg->address,
                             last_sg->length);
                }
#endif
        } while (++sg < end && (int) sg->dma_address < 0);

        return 1;
}

int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
           int direction)
{
        struct scatterlist *start, *end, *out;
        struct pci_controler *hose;
        struct pci_iommu_arena *arena;
        dma_addr_t max_dma;

        if (direction == PCI_DMA_NONE)
                BUG();

        /* Fast path single entry scatterlists.  */
        if (nents == 1) {
                sg->dma_length = sg->length;
                sg->dma_address
                  = pci_map_single(pdev, sg->address, sg->length, direction);
                return sg->dma_address != 0;
        }

        start = sg;
        end = sg + nents;

        /* First, prepare information about the entries.  */
        sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);

        /* Second, figure out where we're going to map things.  */
        if (alpha_mv.mv_pci_tbi) {
                hose = pdev ? pdev->sysdata : pci_isa_hose;
                max_dma = pdev ? pdev->dma_mask : 0x00ffffff;
                arena = hose->sg_pci;
                if (!arena || arena->dma_base + arena->size > max_dma)
                        arena = hose->sg_isa;
        } else {
                max_dma = -1;
                arena = NULL;
                hose = NULL;
        }

        /* Third, iterate over the scatterlist leaders and allocate
           dma space as needed.  */
        for (out = sg; sg < end; ++sg) {
                if ((int) sg->dma_address < 0)
                        continue;
                if (sg_fill(sg, end, out, arena, max_dma) < 0)
                        goto error;
                out++;
        }

        /* Mark the end of the list for pci_unmap_sg.  */
        if (out < end)
                out->dma_length = 0;

        if (out - start == 0)
                printk(KERN_INFO "pci_map_sg failed: no entries?\n");
        DBGA("pci_map_sg: %ld entries\n", out - start);

        return out - start;

error:
        printk(KERN_INFO "pci_map_sg failed: "
               "could not allocate dma page tables\n");

        /* Some allocation failed while mapping the scatterlist
           entries.  Unmap them now.  */
        if (out > start)
                pci_unmap_sg(pdev, start, out - start, direction);
        return 0;
}

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
             int direction)
{
        struct pci_controler *hose;
        struct pci_iommu_arena *arena;
        struct scatterlist *end;
        dma_addr_t max_dma;
        dma_addr_t fbeg, fend;

        if (direction == PCI_DMA_NONE)
                BUG();

        if (! alpha_mv.mv_pci_tbi)
                return;

        hose = pdev ? pdev->sysdata : pci_isa_hose;
        max_dma = pdev ? pdev->dma_mask : 0x00ffffff;
        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size > max_dma)
                arena = hose->sg_isa;

        fbeg = -1, fend = 0;

        for (end = sg + nents; sg < end; ++sg) {
                unsigned long addr, size;
                long npages, ofs;
                dma_addr_t tend;

                addr = sg->dma_address;
                size = sg->dma_length;
                if (!size)
                        break;

#if !DEBUG_NODIRECT
                if (addr >= __direct_map_base
                    && addr < __direct_map_base + __direct_map_size) {
                        /* Nothing to do.  */
                        DBGA("    (%ld) direct [%lx,%lx]\n",
                              sg - end + nents, addr, size);
                        continue;
                }
#endif

                DBGA("    (%ld) sg [%lx,%lx]\n",
                     sg - end + nents, addr, size);

                npages = calc_npages((addr & ~PAGE_MASK) + size);
                ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
                iommu_arena_free(arena, ofs, npages);

                tend = addr + size - 1;
                if (fbeg > addr) fbeg = addr;
                if (fend < tend) fend = tend;
        }

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter.  */
        if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, fbeg, fend);

        DBGA("pci_unmap_sg: %d entries\n", nents - (end - sg));
}

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, dma_addr_t mask)
{
        struct pci_controler *hose;
        struct pci_iommu_arena *arena;

#if !DEBUG_NODIRECT
        /* If there exists a direct map, and the mask fits either
           MAX_DMA_ADDRESS defined such that GFP_DMA does something
           useful, or the total system memory as shifted by the
           map base.  */
        if (__direct_map_size != 0
            && (__direct_map_base + MAX_DMA_ADDRESS-IDENT_ADDR-1 <= mask
                || __direct_map_base + (max_low_pfn<<PAGE_SHIFT)-1 <= mask))
                return 1;
#endif

        /* Check that we have a scatter-gather arena that fits.  */
        hose = pdev ? pdev->sysdata : pci_isa_hose;
        arena = hose->sg_isa;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;
        arena = hose->sg_pci;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;

        return 0;
}
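For context, here is a minimal, hypothetical sketch of how a 2.4-era driver might call the scatter-gather routines above. The pci_map_sg()/pci_unmap_sg() calls and the scatterlist dma_address/dma_length fields are the interface shown in this file; the function names example_start_dma() and program_hw_descriptor(), and the device-programming step itself, are made-up placeholders, not part of pci_iommu.c.

#include <linux/pci.h>
#include <linux/errno.h>

/* Hypothetical placeholder: write one merged segment into the device's
   DMA descriptor ring.  A real driver would do device-specific work here. */
static void program_hw_descriptor(int idx, dma_addr_t bus_addr, unsigned int len)
{
        /* device-specific descriptor/register writes would go here */
}

/* Hypothetical sketch: map a scatterlist for a device-to-memory transfer,
   program the merged segments, then unmap when the transfer is done. */
static int example_start_dma(struct pci_dev *pdev, struct scatterlist *sg,
                             int nents)
{
        int i, mapped;

        /* Classify and merge the entries, allocating IOMMU ptes if needed.
           Returns the number of merged entries, or 0 on failure. */
        mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
        if (mapped == 0)
                return -ENOMEM;

        /* Program only the 'mapped' leaders; each carries a bus address
           and the combined length of its merged followers. */
        for (i = 0; i < mapped; i++)
                program_hw_descriptor(i, sg[i].dma_address, sg[i].dma_length);

        /* ... start the device and wait for the transfer to complete ... */

        /* Unmap with the ORIGINAL nents, not the value returned by
           pci_map_sg(); pci_unmap_sg() stops at the entry whose
           dma_length was zeroed as the end-of-list marker. */
        pci_unmap_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
        return 0;
}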
