/*
 * ipath_file_ops.c
 * Extracted from the Linux kernel source tree; C, 2,267 lines total.
 * This listing is page 1 of 5 of the original file.
 */
gfp_flags); if (!pd->port_rcvegrbuf[e]) { ret = -ENOMEM; goto bail_rcvegrbuf_phys; } } pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0]; for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) { dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk]; unsigned i; for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { dd->ipath_f_put_tid(dd, e + egroff + (u64 __iomem *) ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase), RCVHQ_RCV_TYPE_EAGER, pa); pa += egrsize; } cond_resched(); /* don't hog the cpu */ } ret = 0; goto bail;bail_rcvegrbuf_phys: for (e = 0; e < pd->port_rcvegrbuf_chunks && pd->port_rcvegrbuf[e]; e++) { dma_free_coherent(&dd->pcidev->dev, size, pd->port_rcvegrbuf[e], pd->port_rcvegrbuf_phys[e]); } kfree(pd->port_rcvegrbuf_phys); pd->port_rcvegrbuf_phys = NULL;bail_rcvegrbuf: kfree(pd->port_rcvegrbuf); pd->port_rcvegrbuf = NULL;bail: return ret;}/* common code for the mappings on dma_alloc_coherent mem */static int ipath_mmap_mem(struct vm_area_struct *vma, struct ipath_portdata *pd, unsigned len, int write_ok, void *kvaddr, char *what){ struct ipath_devdata *dd = pd->port_dd; unsigned long pfn; int ret; if ((vma->vm_end - vma->vm_start) > len) { dev_info(&dd->pcidev->dev, "FAIL on %s: len %lx > %x\n", what, vma->vm_end - vma->vm_start, len); ret = -EFAULT; goto bail; } if (!write_ok) { if (vma->vm_flags & VM_WRITE) { dev_info(&dd->pcidev->dev, "%s must be mapped readonly\n", what); ret = -EPERM; goto bail; } /* don't allow them to later change with mprotect */ vma->vm_flags &= ~VM_MAYWRITE; } pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT; ret = remap_pfn_range(vma, vma->vm_start, pfn, len, vma->vm_page_prot); if (ret) dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x " "bytes r%c failed: %d\n", what, pd->port_port, pfn, len, write_ok?'w':'o', ret); else ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes " "r%c\n", what, pd->port_port, pfn, len, write_ok?'w':'o');bail: return ret;}static int mmap_ureg(struct vm_area_struct *vma, struct 
ipath_devdata *dd, u64 ureg){ unsigned long phys; int ret; /* * This is real hardware, so use io_remap. This is the mechanism * for the user process to update the head registers for their port * in the chip. */ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen " "%lx > PAGE\n", vma->vm_end - vma->vm_start); ret = -EFAULT; } else { phys = dd->ipath_physaddr + ureg; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); } return ret;}static int mmap_piobufs(struct vm_area_struct *vma, struct ipath_devdata *dd, struct ipath_portdata *pd, unsigned piobufs, unsigned piocnt){ unsigned long phys; int ret; /* * When we map the PIO buffers in the chip, we want to map them as * writeonly, no read possible. This prevents access to previous * process data, and catches users who might try to read the i/o * space due to a bug. */ if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) { dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: " "reqlen %lx > PAGE\n", vma->vm_end - vma->vm_start); ret = -EINVAL; goto bail; } phys = dd->ipath_physaddr + piobufs; /* * Don't mark this as non-cached, or we don't get the * write combining behavior we want on the PIO buffers! 
*/#if defined(__powerpc__) /* There isn't a generic way to specify writethrough mappings */ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU; pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;#endif /* * don't allow them to later change to readable with mprotect (for when * not initially mapped readable, as is normally the case) */ vma->vm_flags &= ~VM_MAYREAD; vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot);bail: return ret;}static int mmap_rcvegrbufs(struct vm_area_struct *vma, struct ipath_portdata *pd){ struct ipath_devdata *dd = pd->port_dd; unsigned long start, size; size_t total_size, i; unsigned long pfn; int ret; size = pd->port_rcvegrbuf_size; total_size = pd->port_rcvegrbuf_chunks * size; if ((vma->vm_end - vma->vm_start) > total_size) { dev_info(&dd->pcidev->dev, "FAIL on egr bufs: " "reqlen %lx > actual %lx\n", vma->vm_end - vma->vm_start, (unsigned long) total_size); ret = -EINVAL; goto bail; } if (vma->vm_flags & VM_WRITE) { dev_info(&dd->pcidev->dev, "Can't map eager buffers as " "writable (flags=%lx)\n", vma->vm_flags); ret = -EPERM; goto bail; } /* don't allow them to later change to writeable with mprotect */ vma->vm_flags &= ~VM_MAYWRITE; start = vma->vm_start; for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) { pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT; ret = remap_pfn_range(vma, start, pfn, size, vma->vm_page_prot); if (ret < 0) goto bail; } ret = 0;bail: return ret;}/* * ipath_file_vma_nopage - handle a VMA page fault. */static struct page *ipath_file_vma_nopage(struct vm_area_struct *vma, unsigned long address, int *type){ unsigned long offset = address - vma->vm_start; struct page *page = NOPAGE_SIGBUS; void *pageptr; /* * Convert the vmalloc address into a struct page. 
*/ pageptr = (void *)(offset + (vma->vm_pgoff << PAGE_SHIFT)); page = vmalloc_to_page(pageptr); if (!page) goto out; /* Increment the reference count. */ get_page(page); if (type) *type = VM_FAULT_MINOR;out: return page;}static struct vm_operations_struct ipath_file_vm_ops = { .nopage = ipath_file_vma_nopage,};static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, struct ipath_portdata *pd, unsigned subport){ unsigned long len; struct ipath_devdata *dd; void *addr; size_t size; int ret = 0; /* If the port is not shared, all addresses should be physical */ if (!pd->port_subport_cnt) goto bail; dd = pd->port_dd; size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; /* * Each process has all the subport uregbase, rcvhdrq, and * rcvegrbufs mmapped - as an array for all the processes, * and also separately for this process. */ if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) { addr = pd->subport_uregbase; size = PAGE_SIZE * pd->port_subport_cnt; } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) { addr = pd->subport_rcvhdr_base; size = pd->port_rcvhdrq_size * pd->port_subport_cnt; } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) { addr = pd->subport_rcvegrbuf; size *= pd->port_subport_cnt; } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase + PAGE_SIZE * subport)) { addr = pd->subport_uregbase + PAGE_SIZE * subport; size = PAGE_SIZE; } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base + pd->port_rcvhdrq_size * subport)) { addr = pd->subport_rcvhdr_base + pd->port_rcvhdrq_size * subport; size = pd->port_rcvhdrq_size; } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf + size * subport)) { addr = pd->subport_rcvegrbuf + size * subport; /* rcvegrbufs are read-only on the slave */ if (vma->vm_flags & VM_WRITE) { dev_info(&dd->pcidev->dev, "Can't map eager buffers as " "writable (flags=%lx)\n", vma->vm_flags); ret = -EPERM; goto bail; } /* * Don't allow permission to later change to writeable * with mprotect. 
*/ vma->vm_flags &= ~VM_MAYWRITE; } else { goto bail; } len = vma->vm_end - vma->vm_start; if (len > size) { ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size); ret = -EINVAL; goto bail; } vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; vma->vm_ops = &ipath_file_vm_ops; vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; ret = 1;bail: return ret;}/** * ipath_mmap - mmap various structures into user space * @fp: the file pointer * @vma: the VM area * * We use this to have a shared buffer between the kernel and the user code * for the rcvhdr queue, egr buffers, and the per-port user regs and pio * buffers in the chip. We have the open and close entries so we can bump * the ref count and keep the driver from being unloaded while still mapped. */static int ipath_mmap(struct file *fp, struct vm_area_struct *vma){ struct ipath_portdata *pd; struct ipath_devdata *dd; u64 pgaddr, ureg; unsigned piobufs, piocnt; int ret; pd = port_fp(fp); if (!pd) { ret = -EINVAL; goto bail; } dd = pd->port_dd; /* * This is the ipath_do_user_init() code, mapping the shared buffers * into the user process. The address referred to by vm_pgoff is the * file offset passed via mmap(). For shared ports, this is the * kernel vmalloc() address of the pages to share with the master. * For non-shared or master ports, this is a physical address. * We only do one mmap for each space mapped. */ pgaddr = vma->vm_pgoff << PAGE_SHIFT; /* * Check for 0 in case one of the allocations failed, but user * called mmap anyway. */ if (!pgaddr) { ret = -EINVAL; goto bail; } ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n", (unsigned long long) pgaddr, vma->vm_start, vma->vm_end - vma->vm_start, dd->ipath_unit, pd->port_port, subport_fp(fp)); /* * Physical addresses must fit in 40 bits for our hardware. * Check for kernel virtual addresses first, anything else must * match a HW or memory address. 
*/ ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); if (ret) { if (ret > 0) ret = 0; goto bail; } ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; if (!pd->port_subport_cnt) { /* port is not shared */ piocnt = dd->ipath_pbufsport; piobufs = pd->port_piobufs; } else if (!subport_fp(fp)) { /* caller is the master */ piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) + (dd->ipath_pbufsport % pd->port_subport_cnt); piobufs = pd->port_piobufs + dd->ipath_palign * (dd->ipath_pbufsport - piocnt); } else { unsigned slave = subport_fp(fp) - 1; /* caller is a slave */ piocnt = dd->ipath_pbufsport / pd->port_subport_cnt; piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; } if (pgaddr == ureg) ret = mmap_ureg(vma, dd, ureg); else if (pgaddr == piobufs) ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt); else if (pgaddr == dd->ipath_pioavailregs_phys) /* in-memory copy of pioavail registers */ ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, (void *) dd->ipath_pioavailregs_dma, "pioavail registers"); else if (pgaddr == pd->port_rcvegr_phys) ret = mmap_rcvegrbufs(vma, pd); else if (pgaddr == (u64) pd->port_rcvhdrq_phys) /* * The rcvhdrq itself; readonly except on HT (so have * to allow writable mapping), multiple pages, contiguous * from an i/o perspective. 
*/ ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1, pd->port_rcvhdrq, "rcvhdrq"); else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys) /* in-memory copy of rcvhdrq tail register */ ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, pd->port_rcvhdrtail_kvaddr, "rcvhdrq tail"); else ret = -EINVAL; vma->vm_private_data = NULL; if (ret < 0) dev_info(&dd->pcidev->dev, "Failure %d on off %llx len %lx\n", -ret, (unsigned long long)pgaddr, vma->vm_end - vma->vm_start);bail: return ret;}static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd){ unsigned pollflag = 0; if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) && pd->port_hdrqfull != pd->port_hdrqfull_poll) { pollflag |= POLLIN | POLLRDNORM; pd->port_hdrqfull_poll = pd->port_hdrqfull; } return pollflag;}static unsigned int ipath_poll_urgent(struct ipath_portdata *pd, struct file *fp, struct poll_table_struct *pt){ unsigned pollflag = 0;
/*
 * NOTE(review): the following lines were viewer-UI residue from the
 * code-hosting page (keyboard-shortcut help: copy Ctrl+C, search Ctrl+F,
 * fullscreen F11, larger font Ctrl+=, smaller font Ctrl+-, show shortcuts
 * with '?') and are not part of the original source file.
 */