
📄 sbus.c

📁 An embedded Linux kernel, version 2.4.21
💻 C
📖 Page 1 of 3
/* $Id: sbus.c,v 1.17.2.1 2002/03/03 10:31:56 davem Exp $
 * sbus.c: UltraSparc SBUS controller support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <asm/page.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/upa.h>
#include <asm/cache.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/starfire.h>

#include "iommu_common.h"

/* These should be allocated on an SMP_CACHE_BYTES
 * aligned boundary for optimal performance.
 *
 * On SYSIO, using an 8K page size we have 1GB of SBUS
 * DMA space mapped.  We divide this space into equally
 * sized clusters.  Currently we allow clusters up to a
 * size of 1MB.  If anything begins to generate DMA
 * mapping requests larger than this we will need to
 * increase things a bit.
 */

#define NCLUSTERS	8UL
#define ONE_GIG		(1UL * 1024UL * 1024UL * 1024UL)
#define CLUSTER_SIZE	(ONE_GIG / NCLUSTERS)
#define CLUSTER_MASK	(CLUSTER_SIZE - 1)
#define CLUSTER_NPAGES	(CLUSTER_SIZE >> IO_PAGE_SHIFT)
#define MAP_BASE	((u32)0xc0000000)
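/* A worked example of the arithmetic above (added for illustration, not in
 * the original file), assuming the 8K IO page size (IO_PAGE_SHIFT == 13)
 * that the header comment mentions:
 *
 *   CLUSTER_SIZE   = 1GB / 8 clusters = 128MB per cluster
 *   CLUSTER_NPAGES = 128MB >> 13      = 16384 IOPTEs per cluster
 *
 * As alloc_streaming_cluster() below shows, cluster cnum serves
 * power-of-two allocations of 2^cnum pages, so the largest cluster
 * (cnum == 7) hands out 128-page slots: 128 * 8K = 1MB, the request
 * ceiling the comment above refers to.
 */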
struct sbus_iommu {
/*0x00*/spinlock_t		lock;

/*0x08*/iopte_t			*page_table;
/*0x10*/unsigned long		strbuf_regs;
/*0x18*/unsigned long		iommu_regs;
/*0x20*/unsigned long		sbus_control_reg;

/*0x28*/volatile unsigned long	strbuf_flushflag;

	/* If NCLUSTERS is ever decreased to 4 or lower,
	 * you must increase the size of the type of
	 * these counters.  You have been duly warned. -DaveM
	 */
/*0x30*/struct {
		u16	next;
		u16	flush;
	} alloc_info[NCLUSTERS];

	/* The lowest used consistent mapping entry.  Since
	 * we allocate consistent maps out of cluster 0 this
	 * is relative to the beginning of cluster 0.
	 */
/*0x50*/u32		lowest_consistent_map;
};

/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE	0x2400UL
#define IOMMU_CONTROL	(0x2400UL - 0x2400UL)	/* IOMMU control register */
#define IOMMU_TSBBASE	(0x2408UL - 0x2400UL)	/* TSB base address register */
#define IOMMU_FLUSH	(0x2410UL - 0x2400UL)	/* IOMMU flush register */
#define IOMMU_VADIAG	(0x4400UL - 0x2400UL)	/* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP	(0x4408UL - 0x2400UL)	/* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG	(0x4500UL - 0x2400UL)	/* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG	(0x4580UL - 0x2400UL)	/* TLB tag diagnostics */
#define IOMMU_DRAMDIAG	(0x4600UL - 0x2400UL)	/* TLB data RAM diagnostics */

#define IOMMU_DRAM_VALID	(1UL << 30UL)

static void __iommu_flushall(struct sbus_iommu *iommu)
{
	unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
	int entry;

	for (entry = 0; entry < 16; entry++) {
		upa_writeq(0, tag);
		tag += 8UL;
	}
	upa_readq(iommu->sbus_control_reg);

	for (entry = 0; entry < NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}

static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	while (npages--)
		upa_writeq(base + (npages << IO_PAGE_SHIFT),
			   iommu->iommu_regs + IOMMU_FLUSH);
	upa_readq(iommu->sbus_control_reg);
}

/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE	0x2800UL
#define STRBUF_CONTROL	(0x2800UL - 0x2800UL)	/* Control */
#define STRBUF_PFLUSH	(0x2808UL - 0x2800UL)	/* Page flush/invalidate */
#define STRBUF_FSYNC	(0x2810UL - 0x2800UL)	/* Flush synchronization */
#define STRBUF_DRAMDIAG	(0x5000UL - 0x2800UL)	/* data RAM diagnostic */
#define STRBUF_ERRDIAG	(0x5400UL - 0x2800UL)	/* error status diagnostics */
#define STRBUF_PTAGDIAG	(0x5800UL - 0x2800UL)	/* Page tag diagnostics */
#define STRBUF_LTAGDIAG	(0x5900UL - 0x2800UL)	/* Line tag diagnostics */

#define STRBUF_TAG_VALID	0x02UL

static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	iommu->strbuf_flushflag = 0UL;
	while (npages--)
		upa_writeq(base + (npages << IO_PAGE_SHIFT),
			   iommu->strbuf_regs + STRBUF_PFLUSH);

	/* Whoopee cushion! */
	upa_writeq(__pa(&iommu->strbuf_flushflag),
		   iommu->strbuf_regs + STRBUF_FSYNC);
	upa_readq(iommu->sbus_control_reg);
	while (iommu->strbuf_flushflag == 0UL)
		membar("#LoadLoad");
}

static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first;
	unsigned long cnum, ent, flush_point;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	iopte  = iommu->page_table + (cnum * CLUSTER_NPAGES);

	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte + CLUSTER_NPAGES);

	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	for (;;) {
		if (iopte_val(*iopte) == 0UL) {
			if ((iopte + (1 << cnum)) >= limit)
				ent = 0;
			else
				ent = ent + 1;
			iommu->alloc_info[cnum].next = ent;
			if (ent == flush_point)
				__iommu_flushall(iommu);
			break;
		}
		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
			ent = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		if (iopte == first)
			goto bad;
	}

	/* I've got your streaming cluster right here buddy boy... */
	return iopte;

bad:
	printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}

static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	unsigned long cnum, ent;
	iopte_t *iopte;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
	iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
	iopte_val(*iopte) = 0UL;

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
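/* A worked example of the between() test above (added for illustration,
 * not in the original file).  The subtraction trick effectively asks
 * whether X lies in the circular interval [Y, Z]: with next == 10 and
 * flush == 14, freeing ent == 12 gives (14 - 10) >= (12 - 10), i.e.
 * 4 >= 2, so the flush point is pulled back to 12 and the freed entry
 * will be flushed before the allocation cursor can hand it out again.
 */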
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
	while (iopte > iommu->page_table) {
		iopte--;
		if (!(iopte_val(*iopte) & IOPTE_VALID)) {
			unsigned long tmp = npages;

			while (--tmp) {
				iopte--;
				if (iopte_val(*iopte) & IOPTE_VALID)
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}

static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);

	if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = iommu->page_table + CLUSTER_NPAGES;
		while (walk < limit) {
			if (iopte_val(*walk) != 0UL)
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	while (npages--)
		*iopte++ = __iopte(0UL);
}

void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
	unsigned long order, first_page, flags;
	struct sbus_iommu *iommu;
	iopte_t *iopte;
	void *ret;
	int npages;

	if (size <= 0 || sdev == NULL || dvma_addr == NULL)
		return NULL;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;
	first_page = __get_free_pages(GFP_KERNEL, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = sdev->bus->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	/* Ok, we're committed at this point. */
	*dvma_addr = MAP_BASE +
		((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	while (npages--) {
		*iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
				   (__pa(first_page) & IOPTE_PAGE));
		first_page += IO_PAGE_SIZE;
	}
	iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
{
	unsigned long order, npages;
	struct sbus_iommu *iommu;

	if (size <= 0 || sdev == NULL || cpu == NULL)
		return;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = sdev->bus->iommu;

	spin_lock_irq(&iommu->lock);
	free_consistent_cluster(iommu, dvma, npages);
	iommu_flush(iommu, dvma, npages);
	spin_unlock_irq(&iommu->lock);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
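/* An illustrative, hypothetical caller of the consistent-DMA pair above;
 * it is not part of the original file, which is why it is compiled out.
 * A 2.4-era SBUS driver would allocate a descriptor ring this way and
 * program the returned DVMA address into its device.
 */
#if 0	/* example only */
static int example_setup_ring(struct sbus_dev *sdev)
{
	dma_addr_t ring_dvma;
	void *ring;

	/* One IO page, zeroed, mapped for coherent CPU/device sharing. */
	ring = sbus_alloc_consistent(sdev, IO_PAGE_SIZE, &ring_dvma);
	if (ring == NULL)
		return -ENOMEM;

	/* ... hand ring_dvma to the device, use 'ring' from the CPU ... */

	sbus_free_consistent(sdev, IO_PAGE_SIZE, ring, ring_dvma);
	return 0;
}
#endif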
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long npages, pbase, flags;
	iopte_t *iopte;
	u32 dma_base, offset;
	unsigned long iopte_bits;

	if (dir == SBUS_DMA_NONE)
		BUG();

	pbase = (unsigned long) ptr;
	offset = (u32) (pbase & ~IO_PAGE_MASK);
	size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
	pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);

	spin_lock_irqsave(&iommu->lock, flags);
	npages = size >> IO_PAGE_SHIFT;
	iopte = alloc_streaming_cluster(iommu, npages);
	if (iopte == NULL)
		goto bad;
	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	npages = size >> IO_PAGE_SHIFT;
	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (dir != SBUS_DMA_TODEVICE)
		iopte_bits |= IOPTE_WRITE;
	while (npages--) {
		*iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
		pbase += IO_PAGE_SIZE;
	}
	npages = size >> IO_PAGE_SHIFT;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return (dma_base | offset);

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	BUG();
	return 0;
}

void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	u32 dma_base = dma_addr & IO_PAGE_MASK;
	unsigned long flags;

	size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);

	spin_lock_irqsave(&iommu->lock, flags);
	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	((SG)->address ? \
	 __pa((SG)->address) : \
	 (__pa(page_address((SG)->page)) + (SG)->offset))

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;
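The streaming API on this page pairs sbus_map_single() with sbus_unmap_single(). Below is a minimal, hypothetical sketch of how a driver would use them for a one-shot CPU-to-device transfer; the function, buffer, and length names are invented for illustration and do not appear in the original file. Note that sbus_map_single() never returns failure here (it BUGs instead), so the caller performs no error check.

static void example_tx(struct sbus_dev *sdev, void *buf, size_t len)
{
	dma_addr_t dvma;

	/* Map the CPU buffer for a device-bound (read-only) transfer. */
	dvma = sbus_map_single(sdev, buf, len, SBUS_DMA_TODEVICE);

	/* ... point the device at 'dvma', start DMA, wait for completion ... */

	/* Tear down the mapping; this also flushes the streaming buffer. */
	sbus_unmap_single(sdev, dvma, len, SBUS_DMA_TODEVICE);
}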
