
📄 pci_iommu.c

📁 Source code for the microwindows port to S3C44B0
💻 C
📖 Page 1 of 2
			do {
				pci_iommu_write(flushreg, ctx);
			} while (((long)pci_iommu_read(matchreg)) < 0L);
		} else {
			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
		}

		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
		(void) pci_iommu_read(iommu->write_complete_reg);
		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
			membar("#LoadLoad");
	}

	/* Step 2: Clear out first TSB entry. */
	iopte_val(*base) = IOPTE_INVALID;

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	((SG)->address ? \
	 __pa((SG)->address) : \
	 (__pa(page_address((SG)->page)) + (SG)->offset))

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}

/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output.  I was having
 * a hard time keeping this routine from using stack slots to hold variables.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_map_single(pdev,
				       (sglist->address ?
					sglist->address :
					(page_address(sglist->page) + sglist->offset)),
				       sglist->length, direction);
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster. */
	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Choose a context if necessary. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;

	/* Step 5: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	fill_sg(base, sglist, used, nelems, iopte_protection);
#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	BUG();
	return 0;
}

/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (iopte_val(*base) == IOPTE_INVALID)
		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n",
		       sglist->dma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled) {
		u32 vaddr = (u32) bus_addr;

		PCI_STC_FLUSHFLAG_INIT(strbuf);
		if (strbuf->strbuf_ctxflush &&
		    iommu->iommu_ctxflush) {
			unsigned long matchreg, flushreg;

			flushreg = strbuf->strbuf_ctxflush;
			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
			do {
				pci_iommu_write(flushreg, ctx);
			} while (((long)pci_iommu_read(matchreg)) < 0L);
		} else {
			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
		}

		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
		(void) pci_iommu_read(iommu->write_complete_reg);
		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
			membar("#LoadLoad");
	}

	/* Step 2: Clear out first TSB entry. */
	iopte_val(*base) = IOPTE_INVALID;

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	PCI_STC_FLUSHFLAG_INIT(strbuf);
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		unsigned long matchreg, flushreg;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
		do {
			pci_iommu_write(flushreg, ctx);
		} while (((long)pci_iommu_read(matchreg)) < 0L);
	} else {
		unsigned long i;

		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
	}

	/* Step 3: Perform flush synchronization sequence. */
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);
	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
		membar("#LoadLoad");

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	PCI_STC_FLUSHFLAG_INIT(strbuf);
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		unsigned long matchreg, flushreg;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
		do {
			pci_iommu_write(flushreg, ctx);
		} while (((long)pci_iommu_read(matchreg)) < 0L);
	} else {
		unsigned long i, npages;
		u32 bus_addr;

		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;

		for (i = 1; i < nelems; i++)
			if (!sglist[i].dma_length)
				break;
		i--;
		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
	}

	/* Step 3: Perform flush synchronization sequence.
	 */
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);
	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
		membar("#LoadLoad");

	spin_unlock_irqrestore(&iommu->lock, flags);
}

int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		/* pdev may legitimately be NULL here, so only dereference
		 * sysdata inside this branch. */
		struct pcidev_cookie *pcp = pdev->sysdata;
		struct pci_iommu *iommu = pcp->pbm->iommu;

		dma_addr_mask = iommu->dma_addr_mask;
	}

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}
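
Usage note: below is a minimal, hypothetical sketch of how a 2.4-era driver might drive the scatter-gather API above. It is not part of pci_iommu.c; demo_dma(), NBUF and program_device_descriptor() are invented for illustration. The device is programmed with the 'used' count returned by pci_map_sg(), since adjacent entries may be coalesced, while pci_dma_sync_sg() and pci_unmap_sg() take the original element count and find the coalesced tail themselves.

#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <asm/scatterlist.h>

#define NBUF 4	/* hypothetical fragment count */

/* Hypothetical device-specific helper that writes one DMA descriptor. */
extern void program_device_descriptor(int idx, dma_addr_t addr, u32 len);

static int demo_dma(struct pci_dev *pdev, void *bufs[NBUF], size_t len)
{
	struct scatterlist sg[NBUF];
	int i, used;

	/* Refuse devices whose addressing mask cannot cover the DVMA window. */
	if (!pci_dma_supported(pdev, 0xffffffffUL))
		return -EINVAL;

	/* Describe each kernel-virtual fragment; pci_map_sg() fills in
	 * dma_address/dma_length and may coalesce adjacent entries. */
	memset(sg, 0, sizeof(sg));
	for (i = 0; i < NBUF; i++) {
		sg[i].address = bufs[i];
		sg[i].length = len;
	}
	used = pci_map_sg(pdev, sg, NBUF, PCI_DMA_FROMDEVICE);

	for (i = 0; i < used; i++)
		program_device_descriptor(i, sg_dma_address(&sg[i]),
					  sg_dma_len(&sg[i]));

	/* ... the device performs the transfer here ... */

	/* Flush streaming buffers before the CPU reads the data. */
	pci_dma_sync_sg(pdev, sg, NBUF, PCI_DMA_FROMDEVICE);

	/* Release the DVMA mappings once the buffers are retired. */
	pci_unmap_sg(pdev, sg, NBUF, PCI_DMA_FROMDEVICE);
	return 0;
}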
