/*
 * ipath_init_chip.c — InfiniPath HCA chip initialization.
 * Source: Linux 2.6.17.4, drivers/infiniband/hw/ipath/ (page 1 of 2
 * of a 959-line file; the remainder continues past this chunk).
 */
/* * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */#include <linux/pci.h>#include <linux/netdevice.h>#include <linux/vmalloc.h>#include "ipath_kernel.h"#include "ips_common.h"/* * min buffers we want to have per port, after driver */#define IPATH_MIN_USER_PORT_BUFCNT 8/* * Number of ports we are configured to use (to allow for more pio * buffers per port, etc.) Zero means use chip value. */static ushort ipath_cfgports;module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);MODULE_PARM_DESC(cfgports, "Set max number of ports to use");/* * Number of buffers reserved for driver (layered drivers and SMA * send). Reserved at end of buffer list. 
Initialized based on * number of PIO buffers if not set via module interface. * The problem with this is that it's global, but we'll use different * numbers for different chip types. So the default value is not * very useful. I've redefined it for the 1.3 release so that it's * zero unless set by the user to something else, in which case we * try to respect it. */static ushort ipath_kpiobufs;static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort, &ipath_kpiobufs, S_IWUSR | S_IRUGO);MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");/** * create_port0_egr - allocate the eager TID buffers * @dd: the infinipath device * * This code is now quite different for user and kernel, because * the kernel uses skb's, for the accelerated network performance. * This is the kernel (port0) version. * * Allocate the eager TID buffers and program them into infinipath. * We use the network layer alloc_skb() allocator to allocate the * memory, and either use the buffers as is for things like SMA * packets, or pass the buffers up to the ipath layered driver and * thence the network layer, replacing them as we do so (see * ipath_rcv_layer()). */static int create_port0_egr(struct ipath_devdata *dd){ unsigned e, egrcnt; struct sk_buff **skbs; int ret; egrcnt = dd->ipath_rcvegrcnt; skbs = vmalloc(sizeof(*dd->ipath_port0_skbs) * egrcnt); if (skbs == NULL) { ipath_dev_err(dd, "allocation error for eager TID " "skb array\n"); ret = -ENOMEM; goto bail; } for (e = 0; e < egrcnt; e++) { /* * This is a bit tricky in that we allocate extra * space for 2 bytes of the 14 byte ethernet header. * These two bytes are passed in the ipath header so * the rest of the data is word aligned. We allocate * 4 bytes so that the data buffer stays word aligned. * See ipath_kreceive() for more details. 
*/ skbs[e] = ipath_alloc_skb(dd, GFP_KERNEL); if (!skbs[e]) { ipath_dev_err(dd, "SKB allocation error for " "eager TID %u\n", e); while (e != 0) dev_kfree_skb(skbs[--e]); ret = -ENOMEM; goto bail; } } /* * After loop above, so we can test non-NULL to see if ready * to use at receive, etc. */ dd->ipath_port0_skbs = skbs; for (e = 0; e < egrcnt; e++) { unsigned long phys = virt_to_phys(dd->ipath_port0_skbs[e]->data); dd->ipath_f_put_tid(dd, e + (u64 __iomem *) ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase), 0, phys); } ret = 0;bail: return ret;}static int bringup_link(struct ipath_devdata *dd){ u64 val, ibc; int ret = 0; /* hold IBC in reset */ dd->ipath_control &= ~INFINIPATH_C_LINKENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); /* * Note that prior to try 14 or 15 of IB, the credit scaling * wasn't working, because it was swapped for writes with the * 1 bit default linkstate field */ /* ignore pbc and align word */ val = dd->ipath_piosize2k - 2 * sizeof(u32); /* * for ICRC, which we only send in diag test pkt mode, and we * don't need to worry about that for mtu */ val += 1; /* * Set the IBC maxpktlength to the size of our pio buffers the * maxpktlength is in words. This is *not* the IB data MTU. */ ibc = (val / sizeof(u32)) << INFINIPATH_IBCC_MAXPKTLEN_SHIFT; /* in KB */ ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT; /* * How often flowctrl sent. More or less in usecs; balance against * watermark value, so that in theory senders always get a flow * control update in time to not let the IB link go idle. */ ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT; /* max error tolerance */ ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT; /* use "real" buffer space for */ ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT; /* IB credit flow control. */ ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT; /* initially come up waiting for TS1, without sending anything. 
*/ dd->ipath_ibcctrl = ibc; /* * Want to start out with both LINKCMD and LINKINITCMD in NOP * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that * to stay a NOP */ ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE << INFINIPATH_IBCC_LINKINITCMD_SHIFT; ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n", (unsigned long long) ibc); ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc); // be sure chip saw it val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); ret = dd->ipath_f_bringup_serdes(dd); if (ret) dev_info(&dd->pcidev->dev, "Could not initialize SerDes, " "not usable\n"); else { /* enable IBC */ dd->ipath_control |= INFINIPATH_C_LINKENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); } return ret;}static int init_chip_first(struct ipath_devdata *dd, struct ipath_portdata **pdp){ struct ipath_portdata *pd = NULL; int ret = 0; u64 val; /* * skip cfgports stuff because we are not allocating memory, * and we don't want problems if the portcnt changed due to * cfgports. We do still check and report a difference, if * not same (should be impossible). 
*/ dd->ipath_portcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt); if (!ipath_cfgports) dd->ipath_cfgports = dd->ipath_portcnt; else if (ipath_cfgports <= dd->ipath_portcnt) { dd->ipath_cfgports = ipath_cfgports; ipath_dbg("Configured to use %u ports out of %u in chip\n", dd->ipath_cfgports, dd->ipath_portcnt); } else { dd->ipath_cfgports = dd->ipath_portcnt; ipath_dbg("Tried to configured to use %u ports; chip " "only supports %u\n", ipath_cfgports, dd->ipath_portcnt); } dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_cfgports, GFP_KERNEL); if (!dd->ipath_pd) { ipath_dev_err(dd, "Unable to allocate portdata array, " "failing\n"); ret = -ENOMEM; goto done; } dd->ipath_lastegrheads = kzalloc(sizeof(*dd->ipath_lastegrheads) * dd->ipath_cfgports, GFP_KERNEL); dd->ipath_lastrcvhdrqtails = kzalloc(sizeof(*dd->ipath_lastrcvhdrqtails) * dd->ipath_cfgports, GFP_KERNEL); if (!dd->ipath_lastegrheads || !dd->ipath_lastrcvhdrqtails) { ipath_dev_err(dd, "Unable to allocate head arrays, " "failing\n"); ret = -ENOMEM; goto done; } dd->ipath_pd[0] = kzalloc(sizeof(*pd), GFP_KERNEL); if (!dd->ipath_pd[0]) { ipath_dev_err(dd, "Unable to allocate portdata for port " "0, failing\n"); ret = -ENOMEM; goto done; } pd = dd->ipath_pd[0]; pd->port_dd = dd; pd->port_port = 0; pd->port_cnt = 1; /* The port 0 pkey table is used by the layer interface. 
*/ pd->port_pkeys[0] = IPS_DEFAULT_P_KEY; dd->ipath_rcvtidcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); dd->ipath_rcvtidbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase); dd->ipath_rcvegrcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); dd->ipath_rcvegrbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase); dd->ipath_palign = ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign); dd->ipath_piobufbase = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase); val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize); dd->ipath_piosize2k = val & ~0U; dd->ipath_piosize4k = val >> 32; dd->ipath_ibmtu = 4096; /* default to largest legal MTU */ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt); dd->ipath_piobcnt2k = val & ~0U; dd->ipath_piobcnt4k = val >> 32; dd->ipath_pio2kbase = (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) + (dd->ipath_piobufbase & 0xffffffff)); if (dd->ipath_piobcnt4k) { dd->ipath_pio4kbase = (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) + (dd->ipath_piobufbase >> 32)); /* * 4K buffers take 2 pages; we use roundup just to be * paranoid; we calculate it once here, rather than on * ever buf allocate */ dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k, dd->ipath_palign); ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p " "(%x aligned)\n", dd->ipath_piobcnt2k, dd->ipath_piosize2k, dd->ipath_pio2kbase, dd->ipath_piobcnt4k, dd->ipath_piosize4k, dd->ipath_pio4kbase, dd->ipath_4kalign); } else ipath_dbg("%u 2k piobufs @ %p\n", dd->ipath_piobcnt2k, dd->ipath_pio2kbase); spin_lock_init(&dd->ipath_tid_lock);done: *pdp = pd; return ret;}/** * init_chip_reset - re-initialize after a reset, or enable * @dd: the infinipath device * @pdp: output for port data * * sanity check at least some of the values after reset, and * ensure no receive or transmit (explictly, in case reset * failed */static int init_chip_reset(struct ipath_devdata *dd, struct ipath_portdata **pdp){ struct ipath_portdata 
*pd; u32 rtmp; *pdp = pd = dd->ipath_pd[0]; /* ensure chip does no sends or receives while we re-initialize */ dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U; ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt); if (dd->ipath_portcnt != rtmp) dev_info(&dd->pcidev->dev, "portcnt was %u before " "reset, now %u, using original\n", dd->ipath_portcnt, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); if (rtmp != dd->ipath_rcvtidcnt) dev_info(&dd->pcidev->dev, "tidcnt was %u before " "reset, now %u, using original\n", dd->ipath_rcvtidcnt, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase); if (rtmp != dd->ipath_rcvtidbase) dev_info(&dd->pcidev->dev, "tidbase was %u before " "reset, now %u, using original\n", dd->ipath_rcvtidbase, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); if (rtmp != dd->ipath_rcvegrcnt) dev_info(&dd->pcidev->dev, "egrcnt was %u before " "reset, now %u, using original\n", dd->ipath_rcvegrcnt, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase); if (rtmp != dd->ipath_rcvegrbase) dev_info(&dd->pcidev->dev, "egrbase was %u before " "reset, now %u, using original\n", dd->ipath_rcvegrbase, rtmp); return 0;}static int init_pioavailregs(struct ipath_devdata *dd){ int ret; dd->ipath_pioavailregs_dma = dma_alloc_coherent( &dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys, GFP_KERNEL); if (!dd->ipath_pioavailregs_dma) { ipath_dev_err(dd, "failed to allocate PIOavail reg area " "in memory\n"); ret = -ENOMEM; goto done; } /* * we really want L2 cache aligned, but for current CPUs of * interest, they are the same. 
*/ dd->ipath_statusp = (u64 *) ((char *)dd->ipath_pioavailregs_dma + ((2 * L1_CACHE_BYTES + dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES)); /* copy the current value now that it's really allocated */ *dd->ipath_statusp = dd->_ipath_status; /* * setup buffer to hold freeze msg, accessible to apps, * following statusp */ dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1]; /* and its length */ dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]); if (dd->ipath_unit * 64 > (IPATH_PORT0_RCVHDRTAIL_SIZE - 64)) { ipath_dev_err(dd, "unit %u too large for port 0 " "rcvhdrtail buffer size\n", dd->ipath_unit); ret = -ENODEV; } else ret = 0; /* so we can get current tail in ipath_kreceive(), per chip */ dd->ipath_hdrqtailptr = &ipath_port0_rcvhdrtail[ dd->ipath_unit * (64 / sizeof(*ipath_port0_rcvhdrtail))];done: return ret;}/** * init_shadow_tids - allocate the shadow TID array * @dd: the infinipath device * * allocate the shadow TID array, so we can ipath_munlock previous * entries. It may make more sense to move the pageshadow to the * port data structure, so we only allocate memory for ports actually * in use, since we at 8k per port, now. */static void init_shadow_tids(struct ipath_devdata *dd){ dd->ipath_pageshadow = (struct page **) vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * sizeof(struct page *)); if (!dd->ipath_pageshadow) ipath_dev_err(dd, "failed to allocate shadow page * " "array, no expected sends!\n"); else memset(dd->ipath_pageshadow, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt * sizeof(struct page *));}static void enable_chip(struct ipath_devdata *dd, struct ipath_portdata *pd, int reinit){ u32 val; int i; if (!reinit) { init_waitqueue_head(&ipath_sma_state_wait); } ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); /* Enable PIO send, and update of PIOavail regs to memory. 
*/ dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE | INFINIPATH_S_PIOBUFAVAILUPD; ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); /* * enable port 0 receive, and receive interrupt. other ports * done as user opens and inits them. */ dd->ipath_rcvctrl = INFINIPATH_R_TAILUPD | (1ULL << INFINIPATH_R_PORTENABLE_SHIFT) | (1ULL << INFINIPATH_R_INTRAVAIL_SHIFT); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); /* * now ready for use. this should be cleared whenever we * detect a reset, or initiate one.
 * [NOTE: code-viewer UI residue (keyboard-shortcut help) removed here;
 *  the source file continues on page 2 of the extraction, picking up
 *  inside this comment in enable_chip().]