/* pci_dma.c */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping.  See Documentation/DMA-mapping.txt for
 * a description of how these routines should be used.
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/module.h>

#include <asm/delay.h>
#include <asm/io.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/driver.h>
#include <asm/sn/types.h>
#include <asm/sn/alenlist.h>
#include <asm/sn/pci/pci_bus_cvlink.h>
#include <asm/sn/nag.h>

/*
 * For ATE allocations
 */
pciio_dmamap_t get_free_pciio_dmamap(devfs_handle_t);
void free_pciio_dmamap(pcibr_dmamap_t);
static struct sn_dma_maps_s *find_sn_dma_map(dma_addr_t, unsigned char);

/*
 * Topology stuff
 */
extern devfs_handle_t busnum_to_pcibr_vhdl[];
extern nasid_t busnum_to_nid[];
extern void * busnum_to_atedmamaps[];

/**
 * get_free_pciio_dmamap - find and allocate an ATE
 * @pci_bus: PCI bus to get an entry for
 *
 * Finds and allocates an ATE on the PCI bus specified
 * by @pci_bus.  Returns NULL if the bus has no map array
 * registered or if every entry is already in use.
 *
 * NOTE(review): there is no locking here; concurrent callers can race
 * on the same free entry.  Left as-is — the callers' locking discipline
 * is not visible from this file.
 */
pciio_dmamap_t
get_free_pciio_dmamap(devfs_handle_t pci_bus)
{
	int i;
	struct sn_dma_maps_s *sn_dma_map = NULL;

	/*
	 * Darn, we need to get the maps allocated for this bus.
	 */
	for (i = 0; i < MAX_PCI_XWIDGET; i++) {
		if (busnum_to_pcibr_vhdl[i] == pci_bus) {
			sn_dma_map = busnum_to_atedmamaps[i];
		}
	}

	/*
	 * Bug fix: if no slot matched @pci_bus, sn_dma_map is still NULL
	 * and the scan below would dereference it.  Fail gracefully instead.
	 */
	if (!sn_dma_map)
		return NULL;

	/*
	 * Now get a free dmamap entry from this list.
	 */
	for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
		if (!sn_dma_map->dma_addr) {
			sn_dma_map->dma_addr = -1;	/* mark entry in use */
			return( (pciio_dmamap_t) sn_dma_map );
		}
	}

	return NULL;
}

/**
 * free_pciio_dmamap - free an ATE
 * @dma_map: ATE to free
 *
 * Frees the ATE specified by @dma_map by clearing its in-use marker.
 */
void
free_pciio_dmamap(pcibr_dmamap_t dma_map)
{
	struct sn_dma_maps_s *sn_dma_map;

	sn_dma_map = (struct sn_dma_maps_s *) dma_map;
	sn_dma_map->dma_addr = 0;
}

/**
 * find_sn_dma_map - find an ATE associated with @dma_addr and @busnum
 * @dma_addr: DMA address to look for
 * @busnum: PCI bus to look on
 *
 * Finds the ATE associated with @dma_addr and @busnum, or returns
 * NULL if no entry on that bus carries this address.
 */
static struct sn_dma_maps_s *
find_sn_dma_map(dma_addr_t dma_addr, unsigned char busnum)
{
	struct sn_dma_maps_s *sn_dma_map = NULL;
	int i;

	sn_dma_map = busnum_to_atedmamaps[busnum];

	for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
		if (sn_dma_map->dma_addr == dma_addr) {
			return sn_dma_map;
		}
	}

	return NULL;
}

/**
 * sn_dma_sync - try to flush DMA buffers into the coherence domain
 * @hwdev: device to flush
 *
 * This routine flushes all DMA buffers for the device into the II of
 * the destination hub.
 *
 * NOTE!: this does not mean that the data is in the "coherence domain",
 * but it is very close.  In other words, this routine *does not work*
 * as advertised due to hardware bugs.  That said, it should be good
 * enough for most situations.
 */
void
sn_dma_sync(struct pci_dev *hwdev)
{
#ifdef SN_DMA_SYNC
	struct sn_device_sysdata *device_sysdata;
	volatile unsigned long dummy;

	/*
	 * A DMA sync is supposed to ensure that
	 * all the DMA from a particular device
	 * is complete and coherent.  We try to do this by
	 *	1. flushing the write buffers from Bridge
	 *	2. flushing the Xbow port.
	 * Unfortunately, this only gets the DMA transactions 'very close'
	 * to the coherence domain, but not quite in it.
	 */
	device_sysdata = (struct sn_device_sysdata *)hwdev->sysdata;
	dummy = (volatile unsigned long ) *device_sysdata->dma_buf_sync;

	/*
	 * For the Xbow port flush, we may be denied the request because
	 * someone else may be flushing the port .. try again.
	 */
	while((volatile unsigned long ) *device_sysdata->xbow_buf_sync) {
		udelay(2);
	}
#endif
}

/**
 * sn_pci_alloc_consistent - allocate memory for coherent DMA
 * @hwdev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 *
 * pci_alloc_consistent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-mapping.txt for
 * more information.  Note that this routine will always put a 32 bit
 * DMA address into @dma_handle.  This is because most devices
 * that are capable of 64 bit PCI DMA transactions can't do 64 bit
 * _coherent_ DMAs, and unfortunately this interface has to cater to the
 * LCD.  Oh well.
 *
 * Also known as platform_pci_alloc_consistent() by the IA64 machvec code.
 */
void *
sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
	void *cpuaddr;
	devfs_handle_t vhdl;
	struct sn_device_sysdata *device_sysdata;
	unsigned long phys_addr;
	pciio_dmamap_t dma_map = 0;
	struct sn_dma_maps_s *sn_dma_map;

	*dma_handle = 0;

	/* We can't easily support < 32 bit devices */
	if (IS_PCI32L(hwdev))
		return NULL;

	/*
	 * Get hwgraph vertex for the device
	 */
	device_sysdata = (struct sn_device_sysdata *) hwdev->sysdata;
	vhdl = device_sysdata->vhdl;

	/*
	 * Allocate the memory.  FIXME: if we're allocating for
	 * two devices on the same bus, we should at least try to
	 * allocate memory in the same 2 GB window to avoid using
	 * ATEs for the translation.  See the comment above about the
	 * 32 bit requirement for this function.
	 */
	if(!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
		return NULL;

	memset(cpuaddr, 0, size); /* have to zero it out */

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * This will try to use a Direct Map register to do the
	 * 32 bit DMA mapping, but it may not succeed if another
	 * device on the same bus is already mapped with different
	 * attributes or to a different memory region.
	 */
#ifdef CONFIG_IA64_SGI_SN1
	*dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
					  PCIIO_BYTE_STREAM |
					  PCIIO_DMA_CMD);
#elif defined(CONFIG_IA64_SGI_SN2)
	*dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
					  ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
					  PCIIO_DMA_CMD);
#else
#error unsupported platform
#endif

	/*
	 * It is a 32 bit card and we cannot do direct mapping,
	 * so we try to use an ATE.
	 */
	if (!(*dma_handle)) {
#ifdef CONFIG_IA64_SGI_SN1
		dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
					     PCIIO_BYTE_STREAM |
					     PCIIO_DMA_CMD);
#elif defined(CONFIG_IA64_SGI_SN2)
		dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
					     ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
					     PCIIO_DMA_CMD);
#else
#error unsupported platform
#endif
		if (!dma_map) {
			printk(KERN_ERR "sn_pci_alloc_consistent: Unable to "
			       "allocate anymore 32 bit page map entries.\n");
			BUG();
		}

		*dma_handle = (dma_addr_t) pciio_dmamap_addr(dma_map,phys_addr,
							     size);
		sn_dma_map = (struct sn_dma_maps_s *)dma_map;
		sn_dma_map->dma_addr = *dma_handle;
	}

	return cpuaddr;
}

/**
 * sn_pci_free_consistent - free memory associated with coherent DMAable region
 * @hwdev: device to free for
 * @size: size to free
 * @vaddr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by pci_alloc_consistent().  Also known
 * as platform_pci_free_consistent() by the IA64 machvec code.
 */
void
sn_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	struct sn_dma_maps_s *sn_dma_map = NULL;

	/*
	 * Get the sn_dma_map entry.
	 */
	if (IS_PCI32_MAPPED(dma_handle))
		sn_dma_map = find_sn_dma_map(dma_handle, hwdev->bus->number);

	/*
	 * and free it if necessary...
	 */
	if (sn_dma_map) {
		pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
		pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
		sn_dma_map->dma_addr = (dma_addr_t)NULL;
	}
	free_pages((unsigned long) vaddr, get_order(size));
}

/**
 * sn_pci_map_sg - map a scatter-gather list for DMA
 * @hwdev: device to map for
 * @sg: scatterlist to map
 * @nents: number of entries
 * @direction: direction of the DMA transaction
 *
 * Maps each entry of @sg for DMA.  Also known as platform_pci_map_sg by the
 * IA64 machvec code.
 *
 * NOTE(review): this function is truncated in the source being reviewed;
 * the visible portion is reproduced as-is below.
 */
int
sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
{
	int i;
	devfs_handle_t vhdl;
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct sn_device_sysdata *device_sysdata;
	pciio_dmamap_t dma_map;

	/* can't go anywhere w/o a direction in life */
	if (direction == PCI_DMA_NONE)
		BUG();

	/*
	 * Get the hwgraph vertex for the device
	 */
	device_sysdata = (struct sn_device_sysdata *) hwdev->sysdata;
	vhdl = device_sysdata->vhdl;

	/*
	 * Setup a DMA address for each entry in the
	 * scatterlist.
	 */
	for (i = 0; i < nents; i++, sg++) {
		/* this catches incorrectly written drivers that attempt to map
		   scatterlists that they have previously mapped.  we print a
		   warning and continue, but the driver should be fixed */
		switch (((u64)sg->address) >> 60) {
		case 0xa:
		case 0xb:
#ifdef DEBUG
/* This needs to be cleaned up at some point. */
			NAG("A PCI driver (for device at%8s) has attempted to "
			    "map a scatterlist that was previously mapped at "
			    "%p - this is currently being worked around.\n",
/*
 * NOTE(review): the remainder of this capture was code-viewer UI chrome
 * (keyboard-shortcut help), not part of the original source file.  The
 * source itself is truncated above, inside sn_pci_map_sg().
 */