/* ipath_file_ops.c — from the Linux 2.6.17.4 source tree (1,915 lines total; page 1 of 4 of this excerpt). */
/* * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"

static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
			   loff_t *);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);

/* Character-device entry points for the ipath user interface. */
static struct file_operations ipath_file_ops = {
	.owner = THIS_MODULE,
	.write = ipath_write,
	.open = ipath_open,
	.release = ipath_close,
	.poll = ipath_poll,
	.mmap = ipath_mmap
};

/**
 * ipath_get_base_info - fill in and copy out the per-port base info
 * @pd: the port
 * @ubase: user buffer to copy the info into
 * @ubase_size: size of the user buffer; must be at least
 *	sizeof(struct ipath_base_info), otherwise -EINVAL (likely a
 *	user/kernel version mismatch)
 *
 * Builds a struct ipath_base_info from device and port state (after
 * letting the chip-specific ipath_f_get_base_info hook fill in its
 * part) and copies it to user space.  Returns 0 on success or a
 * negative errno.
 */
static int ipath_get_base_info(struct ipath_portdata *pd,
			       void __user *ubase, size_t ubase_size)
{
	int ret = 0;
	struct ipath_base_info *kinfo = NULL;
	struct ipath_devdata *dd = pd->port_dd;

	if (ubase_size < sizeof(*kinfo)) {
		ipath_cdbg(PROC,
			   "Base size %lu, need %lu (version mismatch?)\n",
			   (unsigned long) ubase_size,
			   (unsigned long) sizeof(*kinfo));
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	/* chip-specific fields first; bail on any error from the hook */
	ret = dd->ipath_f_get_base_info(pd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		pd->port_rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt;
	/*
	 * for this use, may be ipath_cfgports summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nports = dd->ipath_cfgports;
	/* unit (chip/board) our port is on */
	kinfo->spi_unit = dd->ipath_unit;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per port, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll
	 * be on to yet another method of dealing with this.
	 */
	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
	/* spi_status lives at a fixed offset inside the pioavail page */
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(void *) dd->ipath_statusp -
		(void *) dd->ipath_pioavailregs_dma;
	kinfo->spi_piobufbase = (u64) pd->port_piobufs;
	kinfo->__spi_uregbase =
		dd->ipath_uregbase + dd->ipath_palign * pd->port_port;

	kinfo->spi_pioindex = dd->ipath_pbufsport * (pd->port_port - 1);
	kinfo->spi_piocnt = dd->ipath_pbufsport;
	kinfo->spi_pioalign = dd->ipath_palign;

	kinfo->spi_qpair = IPATH_KD_QP;
	kinfo->spi_piosize = dd->ipath_ibmaxlen;
	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
	kinfo->spi_port = pd->port_port;
	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
	kinfo->spi_hw_version = dd->ipath_revision;

	if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
		ret = -EFAULT;

bail:
	kfree(kinfo);
	return ret;
}

/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.
 * To make it easier to catch bugs, and to reduce search time, we keep
 * a cursor for each port, walking the shadow tid array to find one
 * that's not in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int ipath_tid_update(struct ipath_portdata *pd,
			    const struct ipath_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, porttid, cnt, i, tidcnt;
	u16 *tidlist;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);
		/*
		 * Should we treat as success?  likely a bug
		 */
		ret = -EFAULT;
		goto done;
	}
	tidcnt = dd->ipath_rcvtidcnt;
	if (cnt >= tidcnt) {
		/* make sure it all fits in port_tid_pg_list */
		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
		cnt = tidcnt;
	}
	/* port_tid_pg_list holds the page pointers, then the tid list */
	pagep = (struct page **)pd->port_tid_pg_list;
	tidlist = (u16 *) (&pagep[cnt]);

	memset(tidmap, 0, sizeof(tidmap));
	tid = pd->port_tidcursor;
	/* before decrement; chip actual # */
	porttid = pd->port_port * tidcnt;
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
		   pd->port_port, cnt, tid, tidbase);

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
			  (void *)vaddr, cnt);
		ret = -EFAULT;
		goto done;
	}
	ret = ipath_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		if (ret == -EBUSY) {
			ipath_dbg("Failed to lock addr %p, %u pages "
				  "(already locked)\n",
				  (void *) vaddr, cnt);
			/*
			 * for now, continue, and see what happens but with
			 * the new implementation, this should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves (something we need to test)
			 */
			ret = 0;
		} else {
			dev_info(&dd->pcidev->dev,
				 "Failed to lock addr %p, %u pages: "
				 "errno %d\n", (void *) vaddr, cnt, -ret);
			goto done;
		}
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		/* advance cursor to the next free TID, wrapping at tidcnt */
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->ipath_pageshadow[porttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			break;
		}
		tidlist[i] = tid;
		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
			   "vaddr %lx\n", i, tid, vaddr);
		/* we "know" system pages and TID pages are same size */
		dd->ipath_pageshadow[porttid + tid] = pagep[i];
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = page_to_phys(pagep[i]);
		ipath_stats.sps_pagelocks++;
		ipath_cdbg(VERBOSE,
			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,
			   pagep[i]);
		dd->ipath_f_put_tid(dd, &tidbase[tid], 1, physaddr);
		/*
		 * don't check this tid in ipath_portshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
		/* NOTE: cleanup label is inside this if block; the
		 * copy_to_user failure paths below goto here. */
	cleanup:
		/* jump here if copy out of updated info failed... */
		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
			  -ret, i, cnt);
		/* same code that's in ipath_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->ipath_pageshadow[porttid + tid]) {
				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
					   tid);
				dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
						    dd->ipath_tidinvalid);
				dd->ipath_pageshadow[porttid + tid] = NULL;
				ipath_stats.sps_pageunlocks++;
			}
		}
		ipath_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with ipath_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail".  If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		/* remember where to start the next search */
		pd->port_tidcursor = tid;
	}

done:
	if (ret)
		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}

/**
 * ipath_tid_free - free a port TID
 * @pd: the port
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this port
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int ipath_tid_free(struct ipath_portdata *pd,
			  const struct ipath_tid_info *ti)
{
	int ret = 0;
	u32 tid, porttid, cnt, limit, tidcnt;
	struct ipath_devdata *dd = pd->port_dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	/* the user tells us which TIDs to free via a bitmap */
	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
			   sizeof tidmap)) {
		ret = -EFAULT;
		goto done;
	}

	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	tidcnt = dd->ipath_rcvtidcnt;
	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
		   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
		   limit, tid, porttid);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so
		 * without any set, use find_first_bit again.  That's
		 * mainly to accelerate the case where we wrapped, so we
		 * have some at the beginning, and some at the end, and a
		 * big gap in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->ipath_pageshadow[porttid + tid]) {
			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
				   pd->port_pid, tid);
			/* invalidate the chip entry before releasing the
			 * page backing it */
			dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
					    dd->ipath_tidinvalid);
			ipath_release_user_pages(
				&dd->ipath_pageshadow[porttid + tid], 1);
			dd->ipath_pageshadow[porttid + tid] = NULL;
			ipath_stats.sps_pageunlocks++;
		} else
			ipath_dbg("Unused tid %u, ignoring\n", tid);
	}
	if (cnt != ti->tidcnt)
		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
			  ti->tidcnt, cnt);
done:
	if (ret)
		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}

/**
 * ipath_set_part_key - set a partition key
 * @pd: the port
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).
This is somewhat tricky, since multiple ports may set * the same key, so we reference count them, and clean up at exit. All 4 * partition keys are packed into a single infinipath register. It's an * error for a process to set the same pkey multiple times. We provide no * mechanism to de-allocate a pkey at this time, we may eventually need to * do that. I've used the atomic operations, and no locking, and only make * a single pass through what's available. This should be more than * adequate for some time. I'll think about spinlocks or the like if and as * it's necessary. */static int ipath_set_part_key(struct ipath_portdata *pd, u16 key){ struct ipath_devdata *dd = pd->port_dd; int i, any = 0, pidx = -1; u16 lkey = key & 0x7FFF; int ret; if (lkey == (IPS_DEFAULT_P_KEY & 0x7FFF)) { /* nothing to do; this key always valid */ ret = 0; goto bail; } ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys " "%hx:%x %hx:%x %hx:%x %hx:%x\n", pd->port_port, key, dd->ipath_pkeys[0], atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1], atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2], atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3], atomic_read(&dd->ipath_pkeyrefs[3])); if (!lkey) { ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n", pd->port_port); ret = -EINVAL; goto bail; }
/* (end of excerpt — page 1 of 4; code-viewer UI text removed) */