📄 ioport.c
		return 0;
	}
	return mmu_get_scsi_one(va, len, sdev->bus);
#endif
}

void sbus_unmap_single(struct sbus_dev *sdev, u32 ba, long n, int direction)
{
#if 0 /* This is the version that abuses consistent space */
	struct resource *res;
	unsigned long va;

	if ((res = _sparc_find_resource(&_sparc_dvma, ba)) == NULL) {
		printk("sbus_unmap_single: cannot find %08x\n", (unsigned)ba);
		return;
	}
	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_unmap_single: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	va = (unsigned long) res->name;	/* XXX Ouch */
	mmu_inval_dma_area(va, n);	/* in all contexts, mm's?... */
	mmu_unmap_dma_area(ba, n);	/* iounit cache flush is here */
	release_resource(res);
	kfree(res);
#endif
#if 1 /* "trampoline" version */
	mmu_release_scsi_one(ba, n, sdev->bus);
#endif
}

int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
	mmu_get_scsi_sgl(sg, n, sdev->bus);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}

void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
	mmu_release_scsi_sgl(sg, n, sdev->bus);
}

/*
 */
void sbus_dma_sync_single(struct sbus_dev *sdev, u32 ba, long size, int direction)
{
#if 0
	unsigned long va;
	struct resource *res;

	/* We do not need the resource, just print a message if invalid. */
	res = _sparc_find_resource(&_sparc_dvma, ba);
	if (res == NULL)
		panic("sbus_dma_sync_single: 0x%x\n", ba);

	va = (unsigned long) phys_to_virt(mmu_translate_dvma(ba));
	/*
	 * XXX This bogosity will be fixed with the iommu rewrite coming soon
	 * to a kernel near you. - Anton
	 */
	/* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
#endif
}

void sbus_dma_sync_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
	printk("sbus_dma_sync_sg: not implemented yet\n");
}
#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}
	memset((char*)res, 0, sizeof(struct resource));

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}

	mmu_inval_dma_area(va, len_total);
#if 1 /* P3 */
	printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %x\n",
	    (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	{
		unsigned long xva, xpa;
		xva = res->start;
		xpa = virt_to_phys(va);
		while (len_total != 0) {
			len_total -= PAGE_SIZE;
			(*_sparc_mapioaddr)(xpa, xva, 0, 0);
			xva += PAGE_SIZE;
			xpa += PAGE_SIZE;
		}
	}

	*pba = virt_to_bus(va);
	return (void *) res->start;
}

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) bus_to_virt(ba);
	mmu_inval_dma_area(pgp, n);
	{
		int x;
		for (x = 0; x < n; x += PAGE_SIZE) {
			/* Unmap the DVMA alias one page at a time. */
			(*_sparc_unmapioaddr)((unsigned long)p + x);
		}
	}

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single is performed.
 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
    int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	/* IIep is write-through, not flushing. */
	return virt_to_bus(ptr);
}

/* Unmap a single streaming mode DMA translation. The dma_addr and size
 * must match what was provided for in a previous pci_map_single call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)bus_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
    int direction)
{
	int n;

	if (direction == PCI_DMA_NONE)
		BUG();
	/* IIep is write-through, not flushing. */
	for (n = 0; n < nents; n++) {
		sg->dvma_address = virt_to_bus(sg->address);
		sg->dvma_length = sg->length;
		sg++;
	}
	return nents;
}

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
    int direction)
{
	int n;

	if (direction == PCI_DMA_NONE)
		BUG();
	if (direction != PCI_DMA_TODEVICE) {
		for (n = 0; n < nents; n++) {
			mmu_inval_dma_area((unsigned long)sg->address,
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
			sg++;
		}
	}
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so. At the
 * next point you give the PCI dma address back to the card, the
 * device again owns the buffer.
 */
void pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)bus_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single but for a scatter-gather list,
 * same rules and usage.
 */
void pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
    int direction)
{
	int n;

	if (direction == PCI_DMA_NONE)
		BUG();
	if (direction != PCI_DMA_TODEVICE) {
		for (n = 0; n < nents; n++) {
			mmu_inval_dma_area((unsigned long)sg->address,
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
			sg++;
		}
	}
}
#endif /* CONFIG_PCI */

#ifdef CONFIG_PROC_FS

static int
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
    void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == 0)
			nm = "???";
		p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
	}

	return p-buf;
}

#endif /* CONFIG_PROC_FS */

/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
struct resource *
_sparc_find_resource(struct resource *root, unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

/*
 * Necessary boot time initializations.
 */
void ioport_init(void)
{
	extern void sun4c_mapioaddr(unsigned long, unsigned long, int, int);
	extern void srmmu_mapioaddr(unsigned long, unsigned long, int, int);
	extern void sun4c_unmapioaddr(unsigned long);
	extern void srmmu_unmapioaddr(unsigned long);

	switch(sparc_cpu_model) {
	case sun4c:
	case sun4:
	case sun4e:
		_sparc_mapioaddr = sun4c_mapioaddr;
		_sparc_unmapioaddr = sun4c_unmapioaddr;
		break;

	case sun4m:
	case sun4d:
		_sparc_mapioaddr = srmmu_mapioaddr;
		_sparc_unmapioaddr = srmmu_unmapioaddr;
		break;

	default:
		printk("ioport_init: cpu type %d is unknown.\n",
		    sparc_cpu_model);
		halt();
	};
}

void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, 0, _sparc_io_get_info, &sparc_iomap);
	create_proc_read_entry("dvma_map", 0, 0, _sparc_io_get_info, &_sparc_dvma);
#endif
}
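For orientation, the sketch below shows the calling pattern these interfaces expect from a driver: a consistent allocation for a descriptor ring, a streaming mapping of an ordinary kernel buffer, a sync before the CPU reads the result, and teardown in reverse order. It is a minimal illustration, not part of ioport.c: the mydev_* names and the two buffer sizes are made-up assumptions, and only the pci_* calls (the 2.4-era PCI DMA API implemented above) come from this file.

/* Hypothetical driver fragment -- mydev_* identifiers and sizes are
 * illustrative assumptions; the pci_* interfaces are the ones defined
 * in ioport.c above. */
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>

#define MYDEV_RING_BYTES	4096	/* assumed descriptor-ring size */
#define MYDEV_BUF_BYTES		1024	/* assumed receive-buffer size */

static int mydev_setup_dma(struct pci_dev *pdev)
{
	void *ring_cpu, *buf;
	dma_addr_t ring_dma, buf_dma;

	/* Consistent allocation: the CPU pointer is returned, the bus
	 * address comes back through the third argument. */
	ring_cpu = pci_alloc_consistent(pdev, MYDEV_RING_BYTES, &ring_dma);
	if (ring_cpu == NULL)
		return -ENOMEM;

	/* Streaming mapping of a kernel buffer: the device owns it until
	 * it is unmapped or synced back to the CPU. */
	buf = kmalloc(MYDEV_BUF_BYTES, GFP_KERNEL);
	if (buf == NULL) {
		pci_free_consistent(pdev, MYDEV_RING_BYTES, ring_cpu, ring_dma);
		return -ENOMEM;
	}
	buf_dma = pci_map_single(pdev, buf, MYDEV_BUF_BYTES, PCI_DMA_FROMDEVICE);

	/* ... hand ring_dma and buf_dma to the device, run the transfer ... */

	/* Give the buffer back to the CPU before reading what the device wrote. */
	pci_dma_sync_single(pdev, buf_dma, MYDEV_BUF_BYTES, PCI_DMA_FROMDEVICE);

	/* Tear down in reverse order. */
	pci_unmap_single(pdev, buf_dma, MYDEV_BUF_BYTES, PCI_DMA_FROMDEVICE);
	kfree(buf);
	pci_free_consistent(pdev, MYDEV_RING_BYTES, ring_cpu, ring_dma);
	return 0;
}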