⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 pci_dma.c

📁 这份 Linux 源代码很全面，基本完整，使用 C 编译。由于时间问题我没有亲自测试，但即使仅用作参考资料也是非常好的。
💻 C
📖 第 1 页 / 共 2 页
字号:
/* * This file is subject to the terms and conditions of the GNU General Public * License.  See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000,2002 Silicon Graphics, Inc. All rights reserved. * * Routines for PCI DMA mapping.  See Documentation/DMA-mapping.txt for * a description of how these routines should be used. */#include <linux/types.h>#include <linux/mm.h>#include <linux/string.h>#include <linux/pci.h>#include <linux/slab.h>#include <linux/devfs_fs_kernel.h>#include <linux/module.h>#include <asm/delay.h>#include <asm/io.h>#include <asm/sn/sgi.h>#include <asm/sn/io.h>#include <asm/sn/invent.h>#include <asm/sn/hcl.h>#include <asm/sn/pci/pcibr.h>#include <asm/sn/pci/pcibr_private.h>#include <asm/sn/driver.h>#include <asm/sn/types.h>#include <asm/sn/alenlist.h>#include <asm/sn/pci/pci_bus_cvlink.h>#include <asm/sn/nag.h>/* DMA, PIO, and memory allocation flags */#ifdef CONFIG_IA64_SGI_SN1#define DMA_DATA_FLAGS		( PCIIO_BYTE_STREAM | PCIIO_DMA_DATA )#define DMA_CONTROL_FLAGS	( PCIIO_BYTE_STREAM | PCIBR_BARRIER | \				  PCIIO_DMA_CMD )#elif defined(CONFIG_IA64_SGI_SN2)#define DMA_DATA_FLAGS		( PCIIO_DMA_DATA )#define DMA_CONTROL_FLAGS	( PCIBR_BARRIER | PCIIO_DMA_CMD )#else#error need to define DMA mapping flags for this platform#endif/* * For ATE allocations */pciio_dmamap_t get_free_pciio_dmamap(devfs_handle_t);void free_pciio_dmamap(pcibr_dmamap_t);static struct sn_dma_maps_s *find_sn_dma_map(dma_addr_t, unsigned char);/* * Toplogy stuff */extern devfs_handle_t busnum_to_pcibr_vhdl[];extern nasid_t busnum_to_nid[];extern void * busnum_to_atedmamaps[];/** * get_free_pciio_dmamap - find and allocate an ATE * @pci_bus: PCI bus to get an entry for * * Finds and allocates an ATE on the PCI bus specified * by @pci_bus. */pciio_dmamap_tget_free_pciio_dmamap(devfs_handle_t pci_bus){	int i;	struct sn_dma_maps_s *sn_dma_map = NULL;	/*	 * Darn, we need to get the maps allocated for this bus.	 
*/	for (i = 0; i < MAX_PCI_XWIDGET; i++) {		if (busnum_to_pcibr_vhdl[i] == pci_bus) {			sn_dma_map = busnum_to_atedmamaps[i];		}	}	/*	 * Now get a free dmamap entry from this list.	 */	for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {		if (!sn_dma_map->dma_addr) {			sn_dma_map->dma_addr = -1;			return( (pciio_dmamap_t) sn_dma_map );		}	}	return NULL;}/** * free_pciio_dmamap - free an ATE * @dma_map: ATE to free * * Frees the ATE specified by @dma_map. */voidfree_pciio_dmamap(pcibr_dmamap_t dma_map){	struct sn_dma_maps_s *sn_dma_map;	sn_dma_map = (struct sn_dma_maps_s *) dma_map;	sn_dma_map->dma_addr = 0;}/** * find_sn_dma_map - find an ATE associated with @dma_addr and @busnum * @dma_addr: DMA address to look for * @busnum: PCI bus to look on * * Finds the ATE associated with @dma_addr and @busnum. */static struct sn_dma_maps_s *find_sn_dma_map(dma_addr_t dma_addr, unsigned char busnum){	struct sn_dma_maps_s *sn_dma_map = NULL;	int i;	sn_dma_map = busnum_to_atedmamaps[busnum];	for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {		if (sn_dma_map->dma_addr == dma_addr) {			return sn_dma_map;		}	}	printk(KERN_WARNING "find_sn_dma_map: Unable find the corresponding "	       "dma map\n");	return NULL;}/** * sn_dma_sync - try to flush DMA buffers into the coherence domain * @hwdev: device to flush * * This routine flushes all DMA buffers for the device into the II of * the destination hub. * * NOTE!: this does not mean that the data is in the "coherence domain", * but it is very close.  In other words, this routine *does not work* * as advertised due to hardware bugs.  That said, it should be good enough for * most situations. */voidsn_dma_sync(struct pci_dev *hwdev){	struct sn_device_sysdata *device_sysdata;	volatile unsigned long dummy;	/*	 * It is expected that on an IA64 platform, a DMA sync ensures that 	 * all the DMA from a particular device is complete and coherent.  We	 * try to do this by	 *	1. flushing the write wuffers from Bridge	 *	2. flushing the Xbow port.	
 * Unfortunately, this only gets the DMA transactions 'very close' to	 * the coherence domain, but not quite in it.	 */	device_sysdata = (struct sn_device_sysdata *)hwdev->sysdata;	dummy = (volatile unsigned long ) *device_sysdata->dma_buf_sync;	/*	 * For the Xbow port flush, we maybe denied the request because 	 * someone else may be flushing the port .. try again.	 */	while((volatile unsigned long ) *device_sysdata->xbow_buf_sync) {		udelay(2);	}}/** * sn_pci_alloc_consistent - allocate memory for coherent DMA * @hwdev: device to allocate for * @size: size of the region * @dma_handle: DMA (bus) address * * pci_alloc_consistent() returns a pointer to a memory region suitable for * coherent DMA traffic to/from a PCI device.  On SN platforms, this means * that @dma_handle will have the PCIBR_BARRIER and PCIIO_DMA_CMD flags * set. * * This interface is usually used for "command" streams (e.g. the command * queue for a SCSI controller).  See Documentation/DMA-mapping.txt for * more information.  Note that this routine should always put a 32 bit * DMA address into @dma_handle.  This is because most other platforms * that are capable of 64 bit PCI DMA transactions can't do 64 bit _coherent_ * DMAs, and unfortunately this interface has to cater to the LCD.  Oh well. * * Also known as platform_pci_alloc_consistent() by the IA64 machvec code. */void *sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle){        void *cpuaddr;	devfs_handle_t vhdl;	struct sn_device_sysdata *device_sysdata;	unsigned long phys_addr;	pciio_dmamap_t dma_map = 0;	struct sn_dma_maps_s *sn_dma_map;	*dma_handle = 0;	/* We can't easily support < 32 bit devices */	if (IS_PCI32L(hwdev))		return NULL;	/*	 * Get hwgraph vertex for the device	 */	device_sysdata = (struct sn_device_sysdata *) hwdev->sysdata;	vhdl = device_sysdata->vhdl;	/*	 * Allocate the memory.  
FIXME: if we're allocating for	 * two devices on the same bus, we should at least try to	 * allocate memory in the same 2 GB window to avoid using	 * ATEs for the translation.  See the comment above about the	 * 32 bit requirement for this function.	 */	if(!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))		return NULL;	memset(cpuaddr, 0, size); /* have to zero it out */	/* physical addr. of the memory we just got */	phys_addr = __pa(cpuaddr);	*dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,					  DMA_CONTROL_FLAGS);	/*	 * It is a 32 bit card and we cannot do direct mapping,	 * so we use an ATE.	 */	if (!(*dma_handle)) {		dma_map = pciio_dmamap_alloc(vhdl, NULL, size,					     DMA_CONTROL_FLAGS | PCIIO_FIXED);		if (!dma_map) {			printk(KERN_ERR "sn_pci_alloc_consistent: Unable to "			       "allocate anymore 32 bit page map entries.\n");			BUG();		}		*dma_handle = (dma_addr_t) pciio_dmamap_addr(dma_map,phys_addr,							     size);		sn_dma_map = (struct sn_dma_maps_s *)dma_map;		sn_dma_map->dma_addr = *dma_handle;		printk(KERN_INFO "%s: PMU mapping: %p\n", __FUNCTION__,		       (void *)*dma_handle);	}	else		printk(KERN_INFO "%s: direct mapping: %p\n", __FUNCTION__,		       (void *)*dma_handle);	        return cpuaddr;}/** * sn_pci_free_consistent - free memory associated with coherent DMAable region * @hwdev: device to free for * @size: size to free * @vaddr: kernel virtual address to free * @dma_handle: DMA address associated with this region * * Frees the memory allocated by pci_alloc_consistent().  Also known * as platform_pci_free_consistent() by the IA64 machvec code. */voidsn_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle){	struct sn_dma_maps_s *sn_dma_map = NULL;	/*	 * Get the sn_dma_map entry.	 */	if (IS_PCI32_MAPPED(dma_handle))		sn_dma_map = find_sn_dma_map(dma_handle, hwdev->bus->number);	/*	 * and free it if necessary...	 
*/	if (sn_dma_map) {		pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);		pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);		sn_dma_map->dma_addr = (dma_addr_t)NULL;	}	free_pages((unsigned long) vaddr, get_order(size));}/** * sn_pci_map_sg - map a scatter-gather list for DMA * @hwdev: device to map for * @sg: scatterlist to map * @nents: number of entries * @direction: direction of the DMA transaction * * Maps each entry of @sg for DMA.  Also known as platform_pci_map_sg by the * IA64 machvec code. */intsn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction){	int i;	devfs_handle_t vhdl;	unsigned long phys_addr;	struct sn_device_sysdata *device_sysdata;	pciio_dmamap_t dma_map;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -