/*
 * ipath_layer.c -- extracted from Linux 2.6.17.4 sources.
 * (This chunk is page 1 of 3 of the original 1,522-line file.)
 */
/* * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. *//* * These are the routines used by layered drivers, currently just the * layered ethernet driver and verbs layer. */#include <linux/io.h>#include <linux/pci.h>#include <asm/byteorder.h>#include "ipath_kernel.h"#include "ips_common.h"#include "ipath_layer.h"/* Acquire before ipath_devs_lock. 
 */
static DEFINE_MUTEX(ipath_layer_mutex);

/* Nonzero once a verbs layer has registered (set elsewhere in this file). */
static int ipath_verbs_registered;

/* Receive opcode claimed by the layered driver; set in ipath_layer_register(). */
u16 ipath_layer_rcv_opcode;

/*
 * Callbacks supplied by the layered (ethernet) driver via
 * ipath_layer_register(), and by the verbs layer via its own
 * registration path (not visible in this chunk).  They are invoked
 * only through the wrapper functions below, which check that both the
 * per-device layer argument and the callback pointer are non-NULL.
 */
static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);
static void (*verbs_timer_cb)(void *);

/*
 * Dispatch an interrupt notification to the layered driver attached to
 * this device.  Returns the callback's result, or -ENODEV when no layer
 * (or no callback) is registered.  Does not take ipath_layer_mutex;
 * the ipath_layer_intr() wrapper below does that for callers that need it.
 */
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_intr)
		ret = layer_intr(dd->ipath_layer.l_arg, arg);

	return ret;
}

/*
 * Locked wrapper around __ipath_layer_intr(): serializes against
 * layer (un)registration via ipath_layer_mutex.
 */
int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret;

	mutex_lock(&ipath_layer_mutex);

	ret = __ipath_layer_intr(dd, arg);

	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

/*
 * Hand a received packet (header + skb) to the layered driver.
 * Returns the callback's result, or -ENODEV when no layer is attached.
 */
int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
		      struct sk_buff *skb)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv)
		ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

	return ret;
}

/*
 * Hand a received LID-addressed header to the layered driver.
 * Returns the callback's result, or -ENODEV when no layer is attached.
 */
int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv_lid)
		ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

	return ret;
}

/*
 * Notify the verbs layer that PIO buffers have become available.
 * Returns the callback's result, or -ENODEV when no verbs layer is
 * attached.
 */
int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_piobufavail)
		ret = verbs_piobufavail(dd->verbs_layer.l_arg);

	return ret;
}

/*
 * Hand a received packet (rcv context, payload buffer, total length)
 * to the verbs layer.  The callback returns void, so this reports 0
 * when a verbs layer was attached and -ENODEV otherwise.
 */
int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
		      u32 tlen)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_rcv) {
		verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
		ret = 0;
	}

	return ret;
}

/*
 * ipath_layer_set_linkstate - request an IB link state change
 * @dd: the infinipath device
 * @newstate: one of the IPATH_IB_LINK* request codes
 *
 * The three LINKDOWN variants (poll/sleep/disable) are issued to the
 * hardware and return immediately without waiting.  INIT/ARM/ACTIVE
 * requests are first validated against the current dd->ipath_flags
 * (e.g. ARM requires INIT or ACTIVE, ACTIVE requires ARMED), then
 * issued and waited on via ipath_wait_linkstate() with a 2000 ms
 * timeout.  Returns 0 on success or when already in the requested
 * state, -EINVAL for invalid requests or transitions.
 */
int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);

/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * we can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		/* same as current */
		ret = 0;
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			/*
			 * NOTE(review): piosize still holds the current
			 * dd->ipath_ibmaxlen at this point, so this
			 * assignment looks like a no-op; presumably
			 * dd->ipath_init_ibmaxlen was meant to be
			 * restored here -- verify against later kernels.
			 */
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		/* Shrink ibmaxlen to the new MTU plus max header size. */
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);	/* in words */
		/*
		 * for ICRC, which we only send in diag test pkt mode, and
		 * we don't need to worry about that for mtu
		 */
		piosize += 1;
		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		/* chip-specific TID template must track ibmaxlen */
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);

/*
 * Record a new LID (and LMC) for the device in the driver stats and
 * per-device state, then notify the layered driver -- if one is
 * attached -- via its interrupt callback with IPATH_LAYER_INT_LID.
 * Always returns 0.  Note the dd fields are updated before taking
 * ipath_layer_mutex; only the callback dispatch is serialized.
 */
int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
	ipath_stats.sps_lid[dd->ipath_unit] = arg;
	dd->ipath_lid = arg;
	dd->ipath_lmc = lmc;

	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_intr)
		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_set_sps_lid);

/* Store a new node GUID for the device. */
int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
{
	/* XXX - need to inform anyone who cares this just happened. */
	dd->ipath_guid = guid;
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_guid);

/* Return the device's node GUID (big-endian). */
__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
{
	return dd->ipath_guid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_guid);

/* Return the number of GUIDs the device supports. */
u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
{
	return dd->ipath_nguid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);

/*
 * Report vendor/board/revision identification through the four output
 * pointers.  Always returns 0.
 */
int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
			     u32 * boardrev, u32 * majrev, u32 * minrev)
{
	*vendor = dd->ipath_vendorid;
	*boardrev = dd->ipath_boardrev;
	*majrev = dd->ipath_majrev;
	*minrev = dd->ipath_minrev;

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_query_device);

/* Return the current IPATH_* flag word for the device. */
u32 ipath_layer_get_flags(struct ipath_devdata *dd)
{
	return dd->ipath_flags;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_flags);

/* Return the underlying PCI device's generic struct device. */
struct device *ipath_layer_get_device(struct ipath_devdata *dd)
{
	return &dd->pcidev->dev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_device);

/* Return the PCI device ID of the adapter. */
u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
{
	return dd->ipath_deviceid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);

/* Return the last IBC status value recorded for the device. */
u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
{
	return dd->ipath_lastibcstat;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);

/* Return the currently configured IB MTU. */
u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
{
	return dd->ipath_ibmtu;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);

/*
 * Attach a newly probed device to whichever layers have already
 * registered, storing each layer's per-device cookie in l_arg.
 * Serialized against (un)registration by ipath_layer_mutex.
 */
void ipath_layer_add(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (layer_add_one)
		dd->ipath_layer.l_arg =
			layer_add_one(dd->ipath_unit, dd);

	if (verbs_add_one)
		dd->verbs_layer.l_arg =
			verbs_add_one(dd->ipath_unit, dd);

	mutex_unlock(&ipath_layer_mutex);
}

/*
 * Detach a device from both layers (if attached), clearing the
 * per-device cookies.  Serialized by ipath_layer_mutex.
 */
void ipath_layer_del(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_remove_one) {
		layer_remove_one(dd->ipath_layer.l_arg);
		dd->ipath_layer.l_arg = NULL;
	}

	if (dd->verbs_layer.l_arg &&
	    verbs_remove_one) {
		verbs_remove_one(dd->verbs_layer.l_arg);
		dd->verbs_layer.l_arg = NULL;
	}

	mutex_unlock(&ipath_layer_mutex);
}

/*
 * ipath_layer_register - install the layered (ethernet) driver callbacks
 * @l_add: per-device attach callback, returns the layer's cookie
 * @l_remove: per-device detach callback
 * @l_intr: interrupt/event notification callback
 * @l_rcv: packet receive callback
 * @l_rcv_opcode: receive opcode claimed by this layer
 * @l_rcv_lid: LID-addressed receive callback
 *
 * Records the callbacks, then walks the existing device list and calls
 * l_add() for every already-initialized device that has no layer cookie
 * yet.  The devs spinlock is dropped around each l_add() call (it may
 * sleep), then re-taken; the list walk uses the _safe iterator to
 * tolerate concurrent removal during that window.  Always returns 0.
 */
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->ipath_layer.l_arg)
			continue;

		/* Flag that an OpenIB SMA is present if no SMA is yet. */
		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);

/*
 * ipath_layer_unregister - tear down the layered driver callbacks
 *
 * Walks every device, invoking the remove callback for each attached
 * one (again dropping the devs spinlock around the callback), then
 * clears all layer callback pointers.  Serialized by ipath_layer_mutex.
 */
void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);

/*
 * Verbs-layer timer callback.  Body continues past the end of this
 * chunk (page 1 of 3); only the port-0 receive polling is visible here.
 */
static void __ipath_verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
/*
 * NOTE: source truncated here (page 1 of 3 of the original file);
 * __ipath_verbs_timer and the remainder of ipath_layer.c continue in
 * the next chunk.
 */