/*
 * ipath_file_ops.c — excerpt from the Linux 2.6.17.4 kernel source
 * (drivers/infiniband/hw/ipath/).  Original file is 1,915 lines; this
 * is page 1 of 4 and is truncated midway through find_best_unit().
 */
return ret;}static int mmap_piobufs(struct vm_area_struct *vma, struct ipath_devdata *dd, struct ipath_portdata *pd){ unsigned long phys; int ret; /* * When we map the PIO buffers, we want to map them as writeonly, no * read possible. */ if ((vma->vm_end - vma->vm_start) > (dd->ipath_pbufsport * dd->ipath_palign)) { dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: " "reqlen %lx > PAGE\n", vma->vm_end - vma->vm_start); ret = -EFAULT; goto bail; } phys = dd->ipath_physaddr + pd->port_piobufs; /* * Do *NOT* mark this as non-cached (PWT bit), or we don't get the * write combining behavior we want on the PIO buffers! * vma->vm_page_prot = * pgprot_noncached(vma->vm_page_prot); */ if (vma->vm_flags & VM_READ) { dev_info(&dd->pcidev->dev, "Can't map piobufs as readable (flags=%lx)\n", vma->vm_flags); ret = -EPERM; goto bail; } /* don't allow them to later change to readable with mprotect */ vma->vm_flags &= ~VM_MAYWRITE; vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot);bail: return ret;}static int mmap_rcvegrbufs(struct vm_area_struct *vma, struct ipath_portdata *pd){ struct ipath_devdata *dd = pd->port_dd; unsigned long start, size; size_t total_size, i; dma_addr_t *phys; int ret; if (!pd->port_rcvegrbuf) { ret = -EFAULT; goto bail; } size = pd->port_rcvegrbuf_size; total_size = pd->port_rcvegrbuf_chunks * size; if ((vma->vm_end - vma->vm_start) > total_size) { dev_info(&dd->pcidev->dev, "FAIL on egr bufs: " "reqlen %lx > actual %lx\n", vma->vm_end - vma->vm_start, (unsigned long) total_size); ret = -EFAULT; goto bail; } if (vma->vm_flags & VM_WRITE) { dev_info(&dd->pcidev->dev, "Can't map eager buffers as " "writable (flags=%lx)\n", vma->vm_flags); ret = -EPERM; goto bail; } start = vma->vm_start; phys = pd->port_rcvegrbuf_phys; /* don't allow them to later change to writeable with mprotect */ vma->vm_flags &= ~VM_MAYWRITE; for (i = 0; i < 
pd->port_rcvegrbuf_chunks; i++, start += size) { ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT, size, vma->vm_page_prot); if (ret < 0) goto bail; } ret = 0;bail: return ret;}static int mmap_rcvhdrq(struct vm_area_struct *vma, struct ipath_portdata *pd){ struct ipath_devdata *dd = pd->port_dd; size_t total_size; int ret; /* * kmalloc'ed memory, physically contiguous; this is from * spi_rcvhdr_base; we allow user to map read-write so they can * write hdrq entries to allow protocol code to directly poll * whether a hdrq entry has been written. */ total_size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * sizeof(u32), PAGE_SIZE); if ((vma->vm_end - vma->vm_start) > total_size) { dev_info(&dd->pcidev->dev, "FAIL on rcvhdrq: reqlen %lx > actual %lx\n", vma->vm_end - vma->vm_start, (unsigned long) total_size); ret = -EFAULT; goto bail; } ret = remap_pfn_range(vma, vma->vm_start, pd->port_rcvhdrq_phys >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot);bail: return ret;}static int mmap_pioavailregs(struct vm_area_struct *vma, struct ipath_portdata *pd){ struct ipath_devdata *dd = pd->port_dd; int ret; /* * when we map the PIO bufferavail registers, we want to map them as * readonly, no write possible. 
* * kmalloc'ed memory, physically contiguous, one page only, readonly */ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { dev_info(&dd->pcidev->dev, "FAIL on pioavailregs_dma: " "reqlen %lx > actual %lx\n", vma->vm_end - vma->vm_start, (unsigned long) PAGE_SIZE); ret = -EFAULT; goto bail; } if (vma->vm_flags & VM_WRITE) { dev_info(&dd->pcidev->dev, "Can't map pioavailregs as writable (flags=%lx)\n", vma->vm_flags); ret = -EPERM; goto bail; } /* don't allow them to later change with mprotect */ vma->vm_flags &= ~VM_MAYWRITE; ret = remap_pfn_range(vma, vma->vm_start, dd->ipath_pioavailregs_phys >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot);bail: return ret;}/** * ipath_mmap - mmap various structures into user space * @fp: the file pointer * @vma: the VM area * * We use this to have a shared buffer between the kernel and the user code * for the rcvhdr queue, egr buffers, and the per-port user regs and pio * buffers in the chip. We have the open and close entries so we can bump * the ref count and keep the driver from being unloaded while still mapped. */static int ipath_mmap(struct file *fp, struct vm_area_struct *vma){ struct ipath_portdata *pd; struct ipath_devdata *dd; u64 pgaddr, ureg; int ret; pd = port_fp(fp); dd = pd->port_dd; /* * This is the ipath_do_user_init() code, mapping the shared buffers * into the user process. The address referred to by vm_pgoff is the * virtual, not physical, address; we only do one mmap for each * space mapped. */ pgaddr = vma->vm_pgoff << PAGE_SHIFT; /* * note that ureg does *NOT* have the kregvirt as part of it, to be * sure that for 32 bit programs, we don't end up trying to map a > * 44 address. 
Has to match ipath_get_base_info() code that sets * __spi_uregbase */ ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n", (unsigned long long) pgaddr, vma->vm_start, vma->vm_end - vma->vm_start); if (pgaddr == ureg) ret = mmap_ureg(vma, dd, ureg); else if (pgaddr == pd->port_piobufs) ret = mmap_piobufs(vma, dd, pd); else if (pgaddr == (u64) pd->port_rcvegr_phys) ret = mmap_rcvegrbufs(vma, pd); else if (pgaddr == (u64) pd->port_rcvhdrq_phys) ret = mmap_rcvhdrq(vma, pd); else if (pgaddr == dd->ipath_pioavailregs_phys) ret = mmap_pioavailregs(vma, pd); else ret = -EINVAL; vma->vm_private_data = NULL; if (ret < 0) dev_info(&dd->pcidev->dev, "Failure %d on addr %lx, off %lx\n", -ret, vma->vm_start, vma->vm_pgoff); return ret;}static unsigned int ipath_poll(struct file *fp, struct poll_table_struct *pt){ struct ipath_portdata *pd; u32 head, tail; int bit; struct ipath_devdata *dd; pd = port_fp(fp); dd = pd->port_dd; bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT; set_bit(bit, &dd->ipath_rcvctrl); /* * Before blocking, make sure that head is still == tail, * reading from the chip, so we can be sure the interrupt * enable has made it to the chip. If not equal, disable * interrupt again and return immediately. This avoids races, * and the overhead of the chip read doesn't matter much at * this point, since we are waiting for something anyway. 
*/ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port); tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); if (tail == head) { set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ (void)ipath_write_ureg(dd, ur_rcvhdrhead, dd->ipath_rhdrhead_intr_off | head, pd->port_port); poll_wait(fp, &pd->port_wait, pt); if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) { /* timed out, no packets received */ clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); pd->port_rcvwait_to++; } } else { /* it's already happened; don't do wait_event overhead */ pd->port_rcvnowait++; } clear_bit(bit, &dd->ipath_rcvctrl); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); return 0;}static int try_alloc_port(struct ipath_devdata *dd, int port, struct file *fp){ int ret; if (!dd->ipath_pd[port]) { void *p, *ptmp; p = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL); /* * Allocate memory for use in ipath_tid_update() just once * at open, not per call. Reduces cost of expected send * setup. 
*/ ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) + dd->ipath_rcvtidcnt * sizeof(struct page **), GFP_KERNEL); if (!p || !ptmp) { ipath_dev_err(dd, "Unable to allocate portdata " "memory, failing open\n"); ret = -ENOMEM; kfree(p); kfree(ptmp); goto bail; } dd->ipath_pd[port] = p; dd->ipath_pd[port]->port_port = port; dd->ipath_pd[port]->port_dd = dd; dd->ipath_pd[port]->port_tid_pg_list = ptmp; init_waitqueue_head(&dd->ipath_pd[port]->port_wait); } if (!dd->ipath_pd[port]->port_cnt) { dd->ipath_pd[port]->port_cnt = 1; fp->private_data = (void *) dd->ipath_pd[port]; ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n", current->comm, current->pid, dd->ipath_unit, port); dd->ipath_pd[port]->port_pid = current->pid; strncpy(dd->ipath_pd[port]->port_comm, current->comm, sizeof(dd->ipath_pd[port]->port_comm)); ipath_stats.sps_ports++; ret = 0; goto bail; } ret = -EBUSY;bail: return ret;}static inline int usable(struct ipath_devdata *dd){ return dd && (dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase && dd->ipath_lid && !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED | IPATH_LINKUNK));}static int find_free_port(int unit, struct file *fp){ struct ipath_devdata *dd = ipath_lookup(unit); int ret, i; if (!dd) { ret = -ENODEV; goto bail; } if (!usable(dd)) { ret = -ENETDOWN; goto bail; } for (i = 0; i < dd->ipath_cfgports; i++) { ret = try_alloc_port(dd, i, fp); if (ret != -EBUSY) goto bail; } ret = -EBUSY;bail: return ret;}static int find_best_unit(struct file *fp){ int ret = 0, i, prefunit = -1, devmax; int maxofallports, npresent, nup; int ndev; (void) ipath_count_units(&npresent, &nup, &maxofallports); /* * This code is present to allow a knowledgeable person to * specify the layout of processes to processors before opening * this driver, and then we'll assign the process to the "closest" * HT-400 to that processor (we assume reasonable connectivity, * for now). 
This code assumes that if affinity has been set * before this point, that at most one cpu is set; for now this * is reasonable. I check for both cpus_empty() and cpus_full(), * in case some kernel variant sets none of the bits when no * affinity is set. 2.6.11 and 12 kernels have all present * cpus set. Some day we'll have to fix it up further to handle * a cpu subset. This algorithm fails for two HT-400's connected * in tunnel fashion. Eventually this needs real topology * information. There may be some issues with dual core numbering * as well. This needs more work prior to release. */ if (!cpus_empty(current->cpus_allowed) && !cpus_full(current->cpus_allowed)) { int ncpus = num_online_cpus(), curcpu = -1; for (i = 0; i < ncpus; i++) if (cpu_isset(i, current->cpus_allowed)) { ipath_cdbg(PROC, "%s[%u] affinity set for " "cpu %d\n", current->comm, current->pid, i); curcpu = i; } if (curcpu != -1) { if (npresent) { prefunit = curcpu / (ncpus / npresent); ipath_dbg("%s[%u] %d chips, %d cpus, " "%d cpus/chip, select unit %d\n", current->comm, current->pid, npresent, ncpus, ncpus / npresent, prefunit); } } } /* * user ports start at 1, kernel port is 0 * For now, we do round-robin access across all chips */ if (prefunit != -1) devmax = prefunit + 1; else devmax = ipath_count_units(NULL, NULL, NULL);recheck: for (i = 1; i < maxofallports; i++) { for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax; ndev++) { struct ipath_devdata *dd = ipath_lookup(ndev); if (!usable(dd)) continue; /* can't use this unit */ if (i >= dd->ipath_cfgports) /* * Maxed out on users of this unit. Try * next. 
*/ continue; ret = try_alloc_port(dd, i, fp); if (!ret) goto done; } } if (npresent) { if (nup == 0) { ret = -ENETDOWN; ipath_dbg("No ports available (none initialized " "and ready)\n"); } else { if (prefunit > 0) { /* if started above 0, retry from 0 */ ipath_cdbg(PROC, "%s[%u] no ports on prefunit " "%d, clear and re-check\n", current->comm, current->pid, prefunit); devmax = ipath_count_units(NULL, NULL, NULL); prefunit = -1; goto recheck; } ret = -EBUSY; ipath_dbg("No ports available\n"); } } else { ret = -ENXIO; ipath_dbg("No boards found\n"); }
/*
 * (code-viewer UI residue removed here — keyboard-shortcut help text from
 * the page this excerpt was copied from; the source file continues in the
 * next chunk.)
 */