
📄 directpool.c

📁 1. 8623L platform
💻 C
📖 Page 1 of 2
	clear_bit(dmapool_id, &kdmapool_mmap_mask);
#endif
	clear_bit(dmapool_id, &kdmapool_usage_mask);
	return 0;
}

/* Rebuild the free list from scratch; refuses to run while buffers are still acquired. */
int kdmapool_reset(struct llad *h, unsigned long dmapool_id)
{
	int i, count;
	struct kdmapool *pool;

	if (!test_bit(dmapool_id, &(kdmapool_usage_mask))) {
		printk("dmapool index %lu is not opened\n", dmapool_id);
		return -EINVAL;
	}

	pool = &(bufferpools[dmapool_id]);

	if ((count = atomic_read(&(pool->available_buffer_count))) != pool->buffercount) {
		printk("cannot reset dmapool index %lu since %lu buffers are still acquired\n", dmapool_id, pool->buffercount - count);
		return pool->buffercount - count;
	}

	spin_lock_bh(&(pool->lock));
	for (i = 0; i < pool->buffercount; i++) {
		atomic_set(&(pool->buf_info[i].ref_count), 0);
		/* the last link is never dereferenced, so it behaves like NULL */
		pool->buf_info[i].next_free = i + 1;
	}
	pool->first_free = 0;
	pool->last_free = pool->buffercount - 1;
	spin_unlock_bh(&(pool->lock));

	return 0;
}

/* Report the total pool size (buffer count times buffer size) in bytes. */
int kdmapool_getinfo(struct llad *h, unsigned long dmapool_id, unsigned long *size)
{
	struct kdmapool *pool;

	if (!test_bit(dmapool_id, &(kdmapool_usage_mask))) {
		printk("dmapool index %lu is not opened\n", dmapool_id);
		return -EINVAL;
	}

	pool = &(bufferpools[dmapool_id]);

	*size = pool->buffercount * pool->buffersize;

	return 0;
}

EXPORT_SYMBOL(kdmapool_getbuffer);
/* Take one buffer off the head of the free list, sleeping up to *timeout_us when the pool is empty. */
unsigned char *kdmapool_getbuffer(struct llad *h, unsigned long dmapool_id, unsigned long *timeout_us)
{
	struct kdmapool *pool;
	long timeout_jiffies = US_TO_JIFFIES(*timeout_us);

	pool = &(bufferpools[dmapool_id]);

	while (1) {
		if (atomic_add_negative(-1, &(pool->available_buffer_count))) {
			atomic_inc(&(pool->available_buffer_count));
			timeout_jiffies = interruptible_sleep_on_timeout(&(pool->queue), timeout_jiffies);
			*timeout_us = JIFFIES_TO_US(timeout_jiffies);

			// handle signals gently (esp. Control-C...)
			if ((timeout_jiffies == 0) || signal_pending(current))
				break;
		}
		else {
			unsigned long i;

			/* once here, there is a buffer available for sure */
			spin_lock_bh(&(pool->lock));
			i = pool->first_free;
			pool->first_free = pool->buf_info[i].next_free;
			if (pool->first_free == pool->buffercount)
				pool->last_free = pool->buffercount;
			spin_unlock_bh(&(pool->lock));
			atomic_set(&(pool->buf_info[i].ref_count), 1);

#if (EM86XX_CHIP==EM86XX_CHIPID_TANGO2)
			if (pool->user_addr != NULL)
				return (pool->user_addr + pool->buffersize*i);
			else
				return pool->buf_info[i].addr;
#else
			return (unsigned char *) pool->buf_info[i].addr;
#endif // EM86XX_CHIP
		}
	}
	return NULL;
}

EXPORT_SYMBOL(kdmapool_get_available_buffer_count);
unsigned long kdmapool_get_available_buffer_count(struct llad *h, unsigned long dmapool_id)
{
	struct kdmapool *pool;

	pool = &(bufferpools[dmapool_id]);
	return atomic_read(&(pool->available_buffer_count));
}

EXPORT_SYMBOL(kdmapool_get_bus_address);
/* Translate a virtual address inside the pool (userland mapping or kernel buffer) into a bus address. */
unsigned long kdmapool_get_bus_address(struct llad *h, unsigned long dmapool_id, unsigned char *ptr, unsigned long size)
{
#if (EM86XX_CHIP==EM86XX_CHIPID_TANGO2)
	struct kdmapool *pool;
	unsigned long user_offset, buffer_index, buffer_offset, i;
#endif // EM86XX_CHIP

#if (EM86XX_CHIP >= EM86XX_CHIPID_TANGO2)
	pool = &(bufferpools[dmapool_id]);
	if (pool->user_addr != NULL) {
		if ((ptr < pool->user_addr) || (ptr + size > pool->user_addr + pool->buffersize * pool->buffercount)) {
			printk("user address out of range : 0x%p (%lu) [0x%p,0x%p[\n",
			       ptr, size, pool->user_addr, pool->user_addr + pool->buffersize * pool->buffercount);
			return 0;
		}

		user_offset = (unsigned long) (ptr - pool->user_addr);

		buffer_index = user_offset >> pool->log2_buffersize;
		if ((size > 0) && (((user_offset + size - 1) >> pool->log2_buffersize) != buffer_index)) {
			printk("cannot send data belonging to two different dma buffers : %lu %lu\n", user_offset, size);
			return 0;
		}
		/* buffersize is a power of 2 */
		buffer_offset = user_offset & (pool->buffersize - 1);
		return pool->buf_info[buffer_index].bus_addr + buffer_offset;
	}
	else {
		// RMuint32 buffersize = 1 << pool->log2_buffersize;

		for (i = 0; i < pool->buffercount; i++) {
			if ((ptr >= (RMuint8 *) pool->buf_info[i].addr) && (ptr + size <= (RMuint8 *) pool->buf_info[i].addr + pool->buffersize))
				break;
		}

		if (i == pool->buffercount) {
			printk("virtual address out of range 0x%p\n", ptr);
			return 0;
		}

		buffer_offset = ptr - (RMuint8 *) pool->buf_info[i].addr;
		return pool->buf_info[i].bus_addr + buffer_offset;
	}
#else
	/* mask bit 31 in order to convert a pt110 address to a mbus address */
	return UNCACHED(ptr);
#endif // EM86XX_CHIP
}

EXPORT_SYMBOL(kdmapool_get_virt_address);
/* Translate a bus address back into a virtual address (the userland mapping when one exists). */
unsigned char *kdmapool_get_virt_address(struct llad *h, unsigned long dmapool_id, unsigned long bus_addr, unsigned long size)
{
#if (EM86XX_CHIP==EM86XX_CHIPID_TANGO2)
	struct kdmapool *pool;
	unsigned long i;
#endif // EM86XX_CHIP

#if (EM86XX_CHIP >= EM86XX_CHIPID_TANGO2)
	pool = &(bufferpools[dmapool_id]);
	for (i = 0; i < pool->buffercount; i++) {
		if ((bus_addr >= pool->buf_info[i].bus_addr) && (bus_addr + size <= pool->buf_info[i].bus_addr + pool->buffersize))
			break;
	}
	if (i == pool->buffercount) {
		printk("kdmapool_get_virt_address bus_address out of range 0x%08lx\n", bus_addr);
		return NULL;
	}

	if (pool->user_addr != NULL)
		return pool->user_addr + i * pool->buffersize + (bus_addr - pool->buf_info[i].bus_addr);
	else
		return pool->buf_info[i].addr + (bus_addr - pool->buf_info[i].bus_addr);
#else
	/* physical address is already uncached */
	return (unsigned char *) bus_addr;
#endif // EM86XX_CHIP
}

/* Find the index of the buffer owning bus_addr; returns buffercount when the address is out of range. */
static inline unsigned long get_index_from_address(struct kdmapool *pool, unsigned long bus_addr)
{
	unsigned long i;

	for (i = 0; i < pool->buffercount; i++) {
#if (EM86XX_CHIP >= EM86XX_CHIPID_TANGO2)
		unsigned long addr = pool->buf_info[i].bus_addr;
#else
		unsigned long addr = UNCACHED(pool->buf_info[i].addr);
#endif // EM86XX_CHIP

		if ((bus_addr >= addr) && (bus_addr < addr + pool->buffersize))
			break;
	}
	return i;
}

/* Take an extra reference on a buffer that is already acquired. */
int kdmapool_acquire(struct llad *h, unsigned long dmapool_id, unsigned long bus_addr)
{
	struct kdmapool *pool;
	unsigned long i;

	pool = &(bufferpools[dmapool_id]);
	i = get_index_from_address(pool, bus_addr);
	if (i == pool->buffercount) {
		printk("kdmapool_acquire bus_address out of range 0x%08lx\n", bus_addr);
		return -EINVAL;
	}
	if (atomic_read(&(pool->buf_info[i].ref_count)) == 0) {
		printk("kdmapool_acquire Buffer 0x%08lx already released\n", bus_addr);
		return -EINVAL;
	}
	atomic_inc(&(pool->buf_info[i].ref_count));

	return 0;
}

EXPORT_SYMBOL(kdmapool_release);
/* Drop a reference; the last release puts the buffer back on the free-list tail and wakes waiters. */
int kdmapool_release(struct llad *h, unsigned long dmapool_id, unsigned long bus_addr)
{
	struct kdmapool *pool;
	unsigned long i;

	pool = &(bufferpools[dmapool_id]);
	i = get_index_from_address(pool, bus_addr);
	if (i == pool->buffercount) {
		printk("kdmapool_release bus_address out of range 0x%08lx\n", bus_addr);
		return -EINVAL;
	}
	if (atomic_read(&(pool->buf_info[i].ref_count)) == 0) {
		printk("kdmapool_release Buffer 0x%08lx already released\n", bus_addr);
		return -EINVAL;
	}
	if (atomic_dec_and_test(&(pool->buf_info[i].ref_count))) {
		spin_lock_bh(&(pool->lock));
		pool->buf_info[i].next_free = pool->buffercount;
		if (pool->last_free == pool->buffercount)
			pool->first_free = i;
		else
			pool->buf_info[pool->last_free].next_free = i;
		pool->last_free = i;
		spin_unlock_bh(&(pool->lock));
		atomic_inc(&(pool->available_buffer_count));
		wake_up_interruptible(&(pool->queue));
	}
	return 0;
}

#if (EM86XX_CHIP==EM86XX_CHIPID_TANGO2)
#define DIRECT_MMAP_MASK          0xff000000
#define DIRECT_MMAP_DMAPOOL       0x02000000
#define DIRECT_MMAP_REGION        0x03000000
#define DIRECT_MMAP_DMAPOOL_SHIFT 12
#define DIRECT_MMAP_REGION_SHIFT  12

/* nopage handler backing the direct userland mapping of a pool. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
struct page *kdmapool_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
#else
struct page *kdmapool_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int unused)
#endif
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct page *page = NULL;
	unsigned long dmapool_id, vaddr, i;
	struct kdmapool *pool;

	dmapool_id = (offset - DIRECT_MMAP_DMAPOOL) >> DIRECT_MMAP_DMAPOOL_SHIFT;
	pool = &(bufferpools[dmapool_id]);

	offset = (addr - vma->vm_start); /* absolute offset */
	i = offset / pool->buffersize;
	offset -= (i * pool->buffersize);  /* pool offset */

	vaddr = ((unsigned long)(pool->buf_info[i].addr + offset)) & PAGE_MASK;
	if ((page = virt_to_page((void *)vaddr)) == NULL)
		return page;
	get_page(page);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
	if (type)
		*type = VM_FAULT_MINOR;
#endif
	return page;
}

/* Record the userland mapping address and force a noncached mapping of the whole pool. */
int kdmapool_mmap(struct llad *h, struct kc_vm_area_struct *kc_vma, unsigned long dmapool_id, unsigned long start, int size, struct kc_pgprot_t *kc_prot)
{
	struct kdmapool *pool;
	struct vm_area_struct *vma = (struct vm_area_struct *) kc_vma;
	pgprot_t *prot = (pgprot_t *) kc_prot;

	if (!test_bit(dmapool_id, &kdmapool_usage_mask)) {
		printk("dmapool index %lu is not opened\n", dmapool_id);
		return -EINVAL;
	}
	if (test_and_set_bit(dmapool_id, &kdmapool_mmap_mask)) {
		printk("dmapool index %lu is already mmaped\n", dmapool_id);
		return -EINVAL;
	}

	pool = &(bufferpools[dmapool_id]);
	if (size != pool->buffercount * pool->buffersize) {
		printk("wrong size to map: %u instead of %lu\n", size, pool->buffercount * pool->buffersize);
		clear_bit(dmapool_id, &kdmapool_mmap_mask);
		return -EINVAL;
	}

	/* because we run on MIPS */
	vma->vm_page_prot = pgprot_noncached(*prot);
	pool->user_addr = (unsigned char *) start;
#ifdef _DEBUG
	printk("process %d maps %d bytes at userland 0x%p\n", kc_currentpid(), size, (void *)start);
#endif
	return 0;
}
#endif // EM86XX_CHIP
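
The listing above exposes a simple acquire / translate / release cycle. Below is a minimal kernel-side sketch of that cycle, for illustration only: the header name "directpool.h", the helper name example_send_one_buffer, the 64-byte transfer size and the 100 ms timeout are assumptions, and the llad handle plus dmapool id are expected to come from the driver's open/initialization path, which is not shown on this page.

/*
 * Illustrative sketch only: how a kernel-side caller might drive the
 * acquire / translate / release cycle above. Header name, helper name and
 * sizes are assumptions; `h` and `dmapool_id` come from the driver's
 * open/initialization path (not shown on this page).
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include "directpool.h"                 /* hypothetical header declaring the kdmapool_* API */

static int example_send_one_buffer(struct llad *h, unsigned long dmapool_id)
{
	unsigned long timeout_us = 100000;  /* wait up to 100 ms for a free buffer */
	unsigned char *buf;
	unsigned long bus_addr;

	/* blocks on the pool's wait queue until a buffer is free or the timeout expires */
	buf = kdmapool_getbuffer(h, dmapool_id, &timeout_us);
	if (buf == NULL)
		return -ETIMEDOUT;

	/* ... fill `buf` with at most one buffersize worth of data ... */

	/* a DMA engine needs the bus address, not the virtual pointer */
	bus_addr = kdmapool_get_bus_address(h, dmapool_id, buf, 64);
	if (bus_addr == 0)
		return -EFAULT;                 /* not expected for a pointer returned by kdmapool_getbuffer */

	/* ... program the DMA transfer with bus_addr here ... */

	/* drop our reference; the last release puts the buffer back on the free list */
	return kdmapool_release(h, dmapool_id, bus_addr);
}

If another component needs to keep the buffer alive while a transfer is in flight, it would take its own reference with kdmapool_acquire on the same bus address and release it independently.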
