
📄 pciba.c

📁 Source package: microwindows port to S3C44B0
💻 C
📖 Page 1 of 2
			if (devfs_register(device_dir_handle, "io",					   DEVFS_FL_NONE, 0, 0,					   S_IFREG | S_IRUSR | S_IWUSR,					   &base_fops, 					   &dev->resource[ri]) == NULL)				return failure;			break;		}	}	/* register a node corresponding to the device's ROM resource,           if present */	if (pci_resource_len(dev, PCI_ROM_RESOURCE) != 0) {		nd = new_node();		if (nd == NULL)			return failure;		node_devfs_handle = devfs_register(device_dir_handle, "rom",						   DEVFS_FL_NONE, 0, 0,						   S_IFREG | S_IRUSR,						   &rom_fops, nd);		if (node_devfs_handle == NULL)			return failure;		init_rom_node(nd, dev, node_devfs_handle);	}	/* register a node that allows ioctl's to read and write to           the device's config space */	if (devfs_register(device_dir_handle, "config", DEVFS_FL_NONE,			   0, 0, S_IFREG | S_IRUSR | S_IWUSR,			   &config_fops, dev) == NULL)		return failure;	/* finally, register a node that allows ioctl's to allocate           and free DMA buffers, as well as memory map those           buffers. */	nd = new_node();	if (nd == NULL)		return failure;	node_devfs_handle =		devfs_register(device_dir_handle, "dma", DEVFS_FL_NONE,			       0, 0, S_IFREG | S_IRUSR | S_IWUSR,			       &dma_fops, nd);	if (node_devfs_handle == NULL)		return failure;	init_dma_node(nd, dev, node_devfs_handle);#ifdef DEBUG_PCIBA	dump_nodes(&global_node_list);#endif		return success;}static intgeneric_open(struct inode * inode, struct file * file){	TRACE();	/* FIXME: should check that they're not trying to open the ROM           writable */	return 0; /* success */}static introm_mmap(struct file * file, struct vm_area_struct * vma){	unsigned long pci_pa;	struct node_data * nd;	TRACE();	nd = (struct node_data * )file->private_data;	pci_pa = pci_resource_start(nd->u.rom.dev, PCI_ROM_RESOURCE);	if (!nd->u.rom.mmapped) {		nd->u.rom.mmapped = true;		DPRINTF("Enabling ROM address decoder.\n");		DPRINTF("rom_mmap: FIXME: some cards do not allow both ROM and memory addresses to\n""rom_mmap: FIXME: be enabled simultaneously, as they share a decoder.\n");		pci_read_config_dword(nd->u.rom.dev, PCI_ROM_ADDRESS,				      &nd->u.rom.saved_rom_base_reg);		DPRINTF("ROM base address contains %x\n",			nd->u.rom.saved_rom_base_reg);		pci_write_config_dword(nd->u.rom.dev, PCI_ROM_ADDRESS,				       nd->u.rom.saved_rom_base_reg |				       PCI_ROM_ADDRESS_ENABLE);	}		return mmap_pci_address(vma, pci_pa);}static introm_release(struct inode * inode, struct file * file){	struct node_data * nd;	TRACE();	nd = (struct node_data * )file->private_data;	if (nd->u.rom.mmapped) {		nd->u.rom.mmapped = false;		DPRINTF("Disabling ROM address decoder.\n");		pci_write_config_dword(nd->u.rom.dev, PCI_ROM_ADDRESS,				       nd->u.rom.saved_rom_base_reg);	}	return 0; /* indicate success */}static intbase_mmap(struct file * file, struct vm_area_struct * vma){	struct resource * resource;	TRACE();	resource = (struct resource *)file->private_data;	return mmap_pci_address(vma, resource->start);}static intconfig_ioctl(struct inode * inode, struct file * file, 	     unsigned int cmd, 	     unsigned long arg){	struct pci_dev * dev;	union cfg_data {		uint8_t byte;		uint16_t word;		uint32_t dword;	} read_data, write_data;	int dir, size, offset;	TRACE();	DPRINTF("cmd = %x (DIR = %x, TYPE = %x, NR = %x, SIZE = %x)\n", 		cmd, 		_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));	DPRINTF("arg = %lx\n", arg);	dev = (struct pci_dev *)file->private_data;	/* PCIIOCCFG{RD,WR}: read and/or write PCI configuration	   space. 
If both, the read happens first (this becomes a swap	   operation, atomic with respect to other updates through	   this path).  */	dir = _IOC_DIR(cmd);#define do_swap(suffix, type)	 					\	do {								\		if (dir & _IOC_READ) {					\			pci_read_config_##suffix(dev, _IOC_NR(cmd), 	\						 &read_data.suffix);	\		}							\		if (dir & _IOC_WRITE) {					\			get_user(write_data.suffix, (type)arg);		\			pci_write_config_##suffix(dev, _IOC_NR(cmd), 	\						  write_data.suffix);	\		}							\		if (dir & _IOC_READ) {					\			put_user(read_data.suffix, (type)arg);		\		}							\	} while (0)	size = _IOC_SIZE(cmd);	offset = _IOC_NR(cmd);	DPRINTF("sanity check\n");	if (((size > 0) || (size <= 4)) &&	    ((offset + size) <= 256) &&	    (dir & (_IOC_READ | _IOC_WRITE))) {		switch (size)		{		case 1:			do_swap(byte, uint8_t *);			break;		case 2:			do_swap(word, uint16_t *);			break;		case 4:			do_swap(dword, uint32_t *);			break;		default:			DPRINTF("invalid ioctl\n");			return -EINVAL;		}	} else		return -EINVAL;			return 0;}#ifdef DEBUG_PCIBAstatic voiddump_allocations(struct list_head * dalp){	struct dma_allocation * dap;	struct list_head * p;		printk("{\n");	list_for_each(p, dalp) {		dap = list_entry(p, struct dma_allocation, 				 list);		printk("  handle = %lx, va = %p\n",		       dap->handle, dap->va);	}	printk("}\n");}static voiddump_nodes(struct list_head * nodes){	struct node_data * ndp;	struct list_head * p;		printk("{\n");	list_for_each(p, nodes) {		ndp = list_entry(p, struct node_data, 				 global_node_list);		printk("  %p\n", (void *)ndp);	}	printk("}\n");}#if 0#define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))static voidtest_list(void){	u64 i;	LIST_HEAD(the_list);	for (i = 0; i < 5; i++) {		struct dma_allocation * new_alloc;		NEW(new_alloc);		new_alloc->va = (void *)i;		new_alloc->handle = 5*i;		printk("%d - the_list->next = %lx\n", i, the_list.next);		list_add(&new_alloc->list, &the_list);	}	dump_allocations(&the_list);}#endif#endifstatic LIST_HEAD(dma_buffer_list);static intdma_ioctl(struct inode * inode, struct file * file, 	  unsigned int cmd, 	  unsigned long arg){	struct node_data * nd;	uint64_t argv;	int result;	struct dma_allocation * dma_alloc;	struct list_head * iterp;	TRACE();	DPRINTF("cmd = %x\n", cmd);	DPRINTF("arg = %lx\n", arg);	nd = (struct node_data *)file->private_data;#ifdef DEBUG_PCIBA	DPRINTF("at dma_ioctl entry\n");	dump_allocations(&nd->u.dma.dma_allocs);#endif	switch (cmd) {	case PCIIOCDMAALLOC:		/* PCIIOCDMAALLOC: allocate a chunk of physical memory		   and set it up for DMA. Return the PCI address that		   gets to it.  
*/		DPRINTF("case PCIIOCDMAALLOC (%lx)\n", PCIIOCDMAALLOC);				if ( (result = get_user(argv, (uint64_t *)arg)) )			return result;		DPRINTF("argv (size of buffer) = %lx\n", argv);		dma_alloc = (struct dma_allocation *)			kmalloc(sizeof(struct dma_allocation), GFP_KERNEL);		if (dma_alloc == NULL)			return -ENOMEM;		dma_alloc->size = (size_t)argv;		dma_alloc->va = pci_alloc_consistent(nd->u.dma.dev,						     dma_alloc->size,						     &dma_alloc->handle);		DPRINTF("dma_alloc->va = %p, dma_alloc->handle = %lx\n",			dma_alloc->va, dma_alloc->handle);		if (dma_alloc->va == NULL) {			kfree(dma_alloc);			return -ENOMEM;		}		list_add(&dma_alloc->list, &nd->u.dma.dma_allocs);		if ( (result = put_user((uint64_t)dma_alloc->handle, 				      (uint64_t *)arg)) ) {			DPRINTF("put_user failed\n");			pci_free_consistent(nd->u.dma.dev, (size_t)argv,					    dma_alloc->va, dma_alloc->handle);			kfree(dma_alloc);			return result;		}#ifdef DEBUG_PCIBA		DPRINTF("after insertion\n");		dump_allocations(&nd->u.dma.dma_allocs);#endif		break;	case PCIIOCDMAFREE:		DPRINTF("case PCIIOCDMAFREE (%lx)\n", PCIIOCDMAFREE);		if ( (result = get_user(argv, (uint64_t *)arg)) ) {			DPRINTF("get_user failed\n");			return result;		}		DPRINTF("argv (physical address of DMA buffer) = %lx\n", argv);		list_for_each(iterp, &nd->u.dma.dma_allocs) {			struct dma_allocation * da =				list_entry(iterp, struct dma_allocation, list);			if (da->handle == argv) {				pci_free_consistent(nd->u.dma.dev, da->size,						    da->va, da->handle);				list_del(&da->list);				kfree(da);#ifdef DEBUG_PCIBA				DPRINTF("after deletion\n");				dump_allocations(&nd->u.dma.dma_allocs);#endif				return 0; /* success */			}		}		/* previously allocated dma buffer wasn't found */		DPRINTF("attempt to free invalid dma handle\n");		return -EINVAL;	default:		DPRINTF("undefined ioctl\n");		return -EINVAL;	}	DPRINTF("success\n");	return 0;}		static intdma_mmap(struct file * file, struct vm_area_struct * vma){	struct node_data * nd;	struct list_head * iterp;	int result;		TRACE();	nd = (struct node_data *)file->private_data;		DPRINTF("vma->vm_start is %lx\n", vma->vm_start);	DPRINTF("vma->vm_end is %lx\n", vma->vm_end);	DPRINTF("offset = %lx\n", vma->vm_pgoff);	/* get kernel virtual address for the dma buffer (necessary	 * for the mmap). */	list_for_each(iterp, &nd->u.dma.dma_allocs) {		struct dma_allocation * da =			list_entry(iterp, struct dma_allocation, list);		/* why does mmap shift its offset argument? */		if (da->handle == vma->vm_pgoff << PAGE_SHIFT) {			DPRINTF("found dma handle\n");			if ( (result = mmap_kernel_address(vma,							   da->va)) ) {				return result; /* failure */			} else {				/* it seems like at least one of these				   should show up in user land....				   I'm missing something */				*(char *)da->va = 0xaa;				strncpy(da->va, "        Toastie!", da->size);				if (put_user(0x18badbeeful,					     (u64 *)vma->vm_start))					DPRINTF("put_user failed?!\n");				return 0; /* success */			}		}	}	DPRINTF("attempt to mmap an invalid dma handle\n");	return -EINVAL;}static intmmap_pci_address(struct vm_area_struct * vma, unsigned long pci_va){	unsigned long pci_pa;	TRACE();	DPRINTF("vma->vm_start is %lx\n", vma->vm_start);	DPRINTF("vma->vm_end is %lx\n", vma->vm_end);	/* the size of the vma doesn't necessarily correspond to the           size specified in the mmap call.  So we can't really do any           kind of sanity check here.  This is a dangerous driver, and           it's very easy for a user process to kill the machine.  
*/	DPRINTF("PCI base at virtual address %lx\n", pci_va);	/* the __pa macro is intended for region 7 on IA64, so it	   doesn't work for region 6 */  	/* pci_pa = __pa(pci_va); */	/* should be replaced by __tpa or equivalent (preferably a	   generic equivalent) */	pci_pa = pci_va & ~0xe000000000000000ul;	DPRINTF("PCI base at physical address %lx\n", pci_pa);	/* there are various arch-specific versions of this function           defined in linux/drivers/char/mem.c, but it would be nice           if all architectures put it in pgtable.h.  it's defined           there for ia64.... */	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);	vma->vm_flags |= VM_NONCACHED | VM_RESERVED | VM_IO;	return io_remap_page_range(vma->vm_start, pci_pa, 				   vma->vm_end-vma->vm_start,				   vma->vm_page_prot);}static intmmap_kernel_address(struct vm_area_struct * vma, void * kernel_va){	unsigned long kernel_pa;	TRACE();	DPRINTF("vma->vm_start is %lx\n", vma->vm_start);	DPRINTF("vma->vm_end is %lx\n", vma->vm_end);	/* the size of the vma doesn't necessarily correspond to the           size specified in the mmap call.  So we can't really do any           kind of sanity check here.  This is a dangerous driver, and           it's very easy for a user process to kill the machine.  */	DPRINTF("mapping virtual address %p\n", kernel_va);	kernel_pa = __pa(kernel_va);	DPRINTF("mapping physical address %lx\n", kernel_pa);	vma->vm_flags |= VM_NONCACHED | VM_RESERVED | VM_IO;	return remap_page_range(vma->vm_start, kernel_pa, 				vma->vm_end-vma->vm_start,				vma->vm_page_prot);}	
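The driver exposes its functionality through the per-device devfs nodes registered above ("rom", "config", "dma" and the BAR nodes) rather than through a programmatic API. The following is a minimal user-space sketch of how the "config" and "dma" nodes might be driven, written to match the way config_ioctl(), dma_ioctl() and dma_mmap() decode their arguments. The device paths, the ioctl type byte 'P', and the PCIIOCDMAALLOC/PCIIOCDMAFREE request numbers are assumptions for illustration only; the real encodings come from the driver's header, which is not part of this listing.

/* Hypothetical user-space sketch.  The device paths and ioctl request
 * encodings below are placeholders chosen to match how config_ioctl()
 * and dma_ioctl() decode their arguments; the real values come from the
 * driver's header, which is not shown in this listing. */
#define _FILE_OFFSET_BITS 64

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/ioctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

/* config_ioctl() uses _IOC_NR as the config-space offset and _IOC_SIZE as
 * the access width, and does not check the type byte, so a 2-byte read of
 * the Vendor ID at offset 0 can be encoded like this ('P' is arbitrary). */
#define CFG_READ_VENDOR	_IOC(_IOC_READ, 'P', 0x00, 2)

/* Placeholder request numbers; dma_ioctl() expects a uint64_t in/out
 * argument for both of these. */
#define PCIIOCDMAALLOC	_IOWR('P', 1, uint64_t)
#define PCIIOCDMAFREE	_IOW('P', 2, uint64_t)

int main(void)
{
	/* Illustrative paths; the actual layout is created by the devfs
	 * registration code earlier in pciba.c. */
	int cfg = open("/dev/pciba/01.0/config", O_RDWR);
	int dma = open("/dev/pciba/01.0/dma", O_RDWR);
	if (cfg < 0 || dma < 0) {
		perror("open");
		return 1;
	}

	/* Read the Vendor ID through the config node; the driver writes
	 * the value back through the arg pointer. */
	uint16_t vendor = 0;
	if (ioctl(cfg, CFG_READ_VENDOR, &vendor) == 0)
		printf("vendor id = 0x%04x\n", vendor);

	/* Allocate a DMA buffer: pass the size in, get the bus handle
	 * back in the same uint64_t (see the PCIIOCDMAALLOC case). */
	uint64_t arg = 4096;
	if (ioctl(dma, PCIIOCDMAALLOC, &arg) == 0) {
		uint64_t handle = arg;

		/* dma_mmap() matches the buffer by vm_pgoff << PAGE_SHIFT,
		 * so the bus handle is passed as the mmap offset. */
		void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				 MAP_SHARED, dma, (off_t)handle);
		if (buf != MAP_FAILED) {
			memset(buf, 0, 4096);
			munmap(buf, 4096);
		}

		/* Free the buffer by handing the same handle back. */
		arg = handle;
		ioctl(dma, PCIIOCDMAFREE, &arg);
	}

	close(dma);
	close(cfg);
	return 0;
}

Note that dma_mmap() looks up the allocation by comparing vm_pgoff << PAGE_SHIFT against the stored handle, which is why the bus handle returned by PCIIOCDMAALLOC is passed as the mmap offset in this sketch.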
