iommu.c

From the Linux kernel source · C · 817 lines · page 1 of 2

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
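/*
 * Illustrative note (not part of the original file): the npages
 * arithmetic above rounds a [bus_addr, bus_addr + sz) range out to
 * whole IOMMU pages.  Assuming the usual sparc64 definitions
 * (IO_PAGE_SHIFT == 13, so IO_PAGE_SIZE == 8K), a 0x100-byte buffer
 * that straddles an 8K boundary covers two IOMMU pages:
 *
 *	bus_addr = 0x1ff0, sz = 0x100
 *	IO_PAGE_ALIGN(0x1ff0 + 0x100) = 0x4000
 *	bus_addr & IO_PAGE_MASK       = 0x0000
 *	npages = (0x4000 - 0x0000) >> 13 = 2
 */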
#define SG_ENT_PHYS_ADDRESS(SG)	(__pa(sg_virt((SG))))

static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
		    int nused, int nelems,
		    unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg = sg_next(sg);
				nelems--;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg = sg_next(sg);
			nelems--;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (nelems &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg = sg_next(sg);
				nelems--;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg = sg_next(dma_sg);
	}
}

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4u_map_single(dev, sg_virt(sglist),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (base == NULL)
		goto bad;

	dma_base = iommu->page_table_map_base +
		((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp = sg_next(sgtmp);
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	return used;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out the TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
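/*
 * Illustrative sketch (not part of the original file): how a driver of
 * this kernel era would reach dma_4u_map_sg()/dma_4u_unmap_sg() through
 * the generic DMA API (<linux/dma-mapping.h>, <linux/scatterlist.h>).
 * The two-page scatterlist and the function name are assumptions made
 * up for the example; only the DMA API calls themselves are real.
 */
static int example_map_two_pages(struct device *dev,
				 struct page *p0, struct page *p1)
{
	struct scatterlist sg[2];
	int count;

	sg_init_table(sg, 2);
	sg_set_page(&sg[0], p0, PAGE_SIZE, 0);
	sg_set_page(&sg[1], p1, PAGE_SIZE, 0);

	/* Dispatches through dma_ops->map_sg, i.e. dma_4u_map_sg() here;
	 * the return value is the number of coalesced DMA segments.  */
	count = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
	return 0;
}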
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}
	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif

	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);
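/*
 * Illustrative sketch (not part of the original file): a typical
 * single-buffer round trip through sun4u_dma_ops.  The function name
 * and buffer are assumptions for the example; the DMA API calls and
 * the one-argument dma_mapping_error() match this kernel era.
 */
static int example_single_mapping(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* dma_set_mask() above succeeds only for PCI devices here. */
	if (dma_set_mask(dev, DMA_32BIT_MASK))
		return -EIO;

	/* Dispatches through dma_ops->map_single, i.e. dma_4u_map_single(). */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(handle))
		return -ENOMEM;

	/* ... let the device DMA into buf ... */

	/* Drain the streaming cache before the CPU looks at the data;
	 * this ends up in dma_4u_sync_single_for_cpu() above.  */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}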
