ipath_file_ops.c
来自「linux 内核源代码」· C语言 代码 · 共 2,267 行 · 第 1/5 页
C
2,267 行
/* * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

/*
 * Character-device (/dev/ipath*) file operations for the QLogic
 * InfiniPath HCA driver: open/close/write/poll/mmap entry points plus
 * the helpers that export per-port chip resources to user space.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

#include "ipath_kernel.h"
#include "ipath_common.h"

static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *,
			   size_t, loff_t *);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);

static const struct file_operations ipath_file_ops = {
	.owner = THIS_MODULE,
	.write = ipath_write,
	.open = ipath_open,
	.release = ipath_close,
	.poll = ipath_poll,
	.mmap = ipath_mmap
};

/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 *
 * Returns 0 if @p is not a vmalloc address that maps to a page
 * (callers treat 0 as "no address").
 */
static u64 cvt_kvaddr(void *p)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(p);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}

/*
 * ipath_get_base_info - copy the per-port base info out to user space
 * @fp: the open ipath device file (identifies port and subport)
 * @ubase: user buffer to receive a struct ipath_base_info
 * @ubase_size: size of the user buffer; used for version tolerance
 *
 * Fills a kernel-side struct ipath_base_info with the chip/port
 * parameters and mmap target addresses user code needs, then copies
 * min(ubase_size, sizeof(info)) bytes to @ubase.  Older user code that
 * does not use port sharing may pass a structure 7*sizeof(u64) smaller.
 *
 * Returns 0 on success, -EINVAL if the user buffer is too small,
 * -ENOMEM on allocation failure, -EFAULT on copy failure, or a
 * negative value from the chip-specific ipath_f_get_base_info hook.
 */
static int ipath_get_base_info(struct file *fp,
			       void __user *ubase, size_t ubase_size)
{
	struct ipath_portdata *pd = port_fp(fp);
	int ret = 0;
	struct ipath_base_info *kinfo = NULL;
	struct ipath_devdata *dd = pd->port_dd;
	unsigned subport_cnt;
	int shared, master;
	size_t sz;

	/*
	 * subport_cnt == 0 means no port sharing was requested; subport 0
	 * of a shared port is the "master" which owns the leftover
	 * (remainder) TIDs and PIO buffers.
	 */
	subport_cnt = pd->port_subport_cnt;
	if (!subport_cnt) {
		shared = 0;
		master = 0;
		subport_cnt = 1;
	} else {
		shared = 1;
		master = !subport_fp(fp);
	}

	sz = sizeof(*kinfo);
	/* If port sharing is not requested, allow the old size structure */
	if (!shared)
		sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {
		ipath_cdbg(PROC,
			   "Base size %zu, need %zu (version mismatch?)\n",
			   ubase_size, sz);
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	/* let the chip-specific code fill in its portion first */
	ret = dd->ipath_f_get_base_info(pd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		pd->port_rcvegrbuf_chunks;
	/* TIDs are split evenly among subports; master gets the remainder */
	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
	if (master)
		kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
	/*
	 * for this use, may be ipath_cfgports summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nports = dd->ipath_cfgports;
	/* unit (chip/board) our port is on */
	kinfo->spi_unit = dd->ipath_unit;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per port, and based on the skip value, etc. This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only. For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual. Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's). So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll be
	 * on to yet another method of dealing with this.
	 */
	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
	/* status word lives at a fixed offset within the pioavail page */
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(void *) dd->ipath_statusp -
		(void *) dd->ipath_pioavailregs_dma;
	if (!shared) {
		kinfo->spi_piocnt = dd->ipath_pbufsport;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_palign * pd->port_port;
	} else if (master) {
		kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) +
				    (dd->ipath_pbufsport % subport_cnt);
		/* Master's PIO buffers are after all the slave's */
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign *
			(dd->ipath_pbufsport - kinfo->spi_piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign * kinfo->spi_piocnt * slave;
	}
	if (shared) {
		/*
		 * Preserve the real (hardware) addresses in the spi_port_*
		 * fields, then redirect the primary fields at this
		 * subport's slice of the shared kernel buffers (converted
		 * to physical addresses via cvt_kvaddr for mmap).
		 */
		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_palign * pd->port_port;
		kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
		kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
		kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;

		kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
			PAGE_SIZE * subport_fp(fp));

		kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * subport_fp(fp));
		kinfo->spi_rcvhdr_tailaddr = 0;
		kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
			pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
			subport_fp(fp));

		kinfo->spi_subport_uregbase =
			cvt_kvaddr(pd->subport_uregbase);
		kinfo->spi_subport_rcvegrbuf =
			cvt_kvaddr(pd->subport_rcvegrbuf);
		kinfo->spi_subport_rcvhdr_base =
			cvt_kvaddr(pd->subport_rcvhdr_base);
		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
			kinfo->spi_port, kinfo->spi_runtime_flags,
			(unsigned long long) kinfo->spi_subport_uregbase,
			(unsigned long long) kinfo->spi_subport_rcvegrbuf,
			(unsigned long long) kinfo->spi_subport_rcvhdr_base);
	}

	kinfo->spi_pioindex = (kinfo->spi_piobufbase -
		dd->ipath_piobufbase) / dd->ipath_palign;
	kinfo->spi_pioalign = dd->ipath_palign;

	kinfo->spi_qpair = IPATH_KD_QP;
	kinfo->spi_piosize = dd->ipath_ibmaxlen;
	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
	kinfo->spi_port = pd->port_port;
	kinfo->spi_subport = subport_fp(fp);
	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
	kinfo->spi_hw_version = dd->ipath_revision;

	if (master) {
		kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
	}

	/* copy only as much as the (possibly older/smaller) user buffer holds */
	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
	if (copy_to_user(ubase, kinfo, sz))
		ret = -EFAULT;

bail:
	kfree(kinfo);
	return ret;
}

/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @fp: the ipath device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller. To make it easier to
 * catch bugs, and to reduce search time, we keep a cursor for
 * each port, walking the shadow tid array to find one that's not
 * in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
*/
static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
			    const struct ipath_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, porttid, cnt, i, tidcnt, tidoff;
	u16 *tidlist;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;
	unsigned subport = subport_fp(fp);

	/* no shadow array means the chip was never fully initialized */
	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);
		/*
		 * Should we treat as success? likely a bug
		 */
		ret = -EFAULT;
		goto done;
	}
	/* index of this port's first TID in the chip-wide shadow arrays */
	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	/*
	 * Partition the port's TID range: unshared ports get all of them;
	 * subport 0 (master) gets its share plus the remainder at the end
	 * of the range, other subports get equal slices.
	 */
	if (!pd->port_subport_cnt) {
		tidcnt = dd->ipath_rcvtidcnt;
		tid = pd->port_tidcursor;
		tidoff = 0;
	} else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		tidoff = dd->ipath_rcvtidcnt - tidcnt;
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		tidoff = tidcnt * (subport - 1);
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	}
	if (cnt > tidcnt) {
		/* make sure it all fits in port_tid_pg_list */
		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
		cnt = tidcnt;
	}
	/*
	 * port_tid_pg_list holds the page pointer array followed by the
	 * u16 tidlist array; offset both by this subport's slice.
	 */
	pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
	tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];

	memset(tidmap, 0, sizeof(tidmap));
	/* before decrement; chip actual # */
	ntids = tidcnt;
	/* chip-mapped base of this port's TID registers */
	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
		   pd->port_port, cnt, tid, tidbase);

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
			  (void *)vaddr, cnt);
		ret = -EFAULT;
		goto done;
	}
	/* pin the user pages so the chip can DMA into them */
	ret = ipath_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		if (ret == -EBUSY) {
			ipath_dbg("Failed to lock addr %p, %u pages "
				  "(already locked)\n",
				  (void *) vaddr, cnt);
			/*
			 * for now, continue, and see what happens but with
			 * the new implementation, this should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves (something we need to test)
			 */
			ret = 0;
		} else {
			dev_info(&dd->pcidev->dev,
				 "Failed to lock addr %p, %u pages: "
				 "errno %d\n", (void *) vaddr, cnt, -ret);
			goto done;
		}
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		/* walk the shadow array from the cursor to find a free TID */
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->ipath_pageshadow[porttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			break;
		}
		tidlist[i] = tid + tidoff;
		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
			   "vaddr %lx\n", i, tid + tidoff, vaddr);
		/* we "know" system pages and TID pages are same size */
		dd->ipath_pageshadow[porttid + tid] = pagep[i];
		dd->ipath_physshadow[porttid + tid] = ipath_map_page(
			dd->pcidev, pagep[i], 0, PAGE_SIZE,
			PCI_DMA_FROMDEVICE);
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = dd->ipath_physshadow[porttid + tid];
		ipath_stats.sps_pagelocks++;
		ipath_cdbg(VERBOSE,
			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,
			   pagep[i]);
		/* program the TID into the chip via the chip-specific hook */
		dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
				    physaddr);
		/*
		 * don't check this tid in ipath_portshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
	cleanup:
		/* jump here if copy out of updated info failed... */
		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
			  -ret, i, cnt);
		/* same code that's in ipath_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->ipath_pageshadow[porttid + tid]) {
				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
					   tid);
				/* invalidate the chip entry, unmap, unshadow */
				dd->ipath_f_put_tid(dd, &tidbase[tid],
						    RCVHQ_RCV_TYPE_EXPECTED,
						    dd->ipath_tidinvalid);
				pci_unmap_page(dd->pcidev,
					dd->ipath_physshadow[porttid + tid],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				dd->ipath_pageshadow[porttid + tid] = NULL;
				ipath_stats.sps_pageunlocks++;
			}
		}
		ipath_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with ipath_tid's filled in, back
		 * to user. Since we did the copy in already, this "should
		 * never fail" If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *) (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		/* NOTE(review): source is truncated here (page 1 of 5) */
		if (tid == tidcnt)
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?