/*
 * ipath_layer.c
 * From the Linux 2.6.17.4 kernel sources — C code, 1,522 lines total;
 * this excerpt is page 1 of 3.
 */
cntrs->link_error_recovery_counter = ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); cntrs->link_downed_counter = ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt); cntrs->port_rcv_errors = ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) + ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) + ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) + ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) + ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) + ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) + ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) + ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) + ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) + ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) + ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt); cntrs->port_rcv_remphys_errors = ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt); cntrs->port_xmit_discards = ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt); cntrs->port_xmit_data = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt); cntrs->port_rcv_data = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt); cntrs->port_xmit_packets = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); cntrs->port_rcv_packets = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); ret = 0;bail: return ret;}EXPORT_SYMBOL_GPL(ipath_layer_get_counters);int ipath_layer_want_buffer(struct ipath_devdata *dd){ set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); return 0;}EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr){ int ret = 0; u32 __iomem *piobuf; u32 plen, *uhdr; size_t count; __be16 vlsllnh; if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) { ipath_dbg("send while not open\n"); ret = -EINVAL; } else if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) || dd->ipath_lid == 0) { /* * lid check is for when sma hasn't yet configured 
*/ ret = -ENETDOWN; ipath_cdbg(VERBOSE, "send while not ready, " "mylid=%u, flags=0x%x\n", dd->ipath_lid, dd->ipath_flags); } vlsllnh = *((__be16 *) hdr); if (vlsllnh != htons(IPS_LRH_BTH)) { ipath_dbg("Warning: lrh[0] wrong (%x, not %x); " "not sending\n", be16_to_cpu(vlsllnh), IPS_LRH_BTH); ret = -EINVAL; } if (ret) goto done; /* Get a PIO buffer to use. */ piobuf = ipath_getpiobuf(dd, NULL); if (piobuf == NULL) { ret = -EBUSY; goto done; } plen = (sizeof(*hdr) >> 2); /* actual length */ ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf); writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */ ipath_flush_wc(); piobuf += 2; uhdr = (u32 *)hdr; count = plen-1; /* amount we can copy before trigger word */ __iowrite32_copy(piobuf, uhdr, count); ipath_flush_wc(); __raw_writel(uhdr[count], piobuf + count); ipath_flush_wc(); /* ensure it's sent, now */ ipath_stats.sps_ether_spkts++; /* ether packet sent */done: return ret;}EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd){ set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); return 0;}EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);int ipath_layer_enable_timer(struct ipath_devdata *dd){ /* * HT-400 has a design flaw where the chip and kernel idea * of the tail register don't always agree, and therefore we won't * get an interrupt on the next packet received. * If the board supports per packet receive interrupts, use it. * Otherwise, the timer function periodically checks for packets * to cover this case. * Either way, the timer is needed for verbs layer related * processing. 
*/ if (dd->ipath_flags & IPATH_GPIO_INTR) { ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, 0x2074076542310ULL); /* Enable GPIO bit 2 interrupt */ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, (u64) (1 << 2)); } init_timer(&dd->verbs_layer.l_timer); dd->verbs_layer.l_timer.function = __ipath_verbs_timer; dd->verbs_layer.l_timer.data = (unsigned long)dd; dd->verbs_layer.l_timer.expires = jiffies + 1; add_timer(&dd->verbs_layer.l_timer); return 0;}EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);int ipath_layer_disable_timer(struct ipath_devdata *dd){ /* Disable GPIO bit 2 interrupt */ if (dd->ipath_flags & IPATH_GPIO_INTR) ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0); del_timer_sync(&dd->verbs_layer.l_timer); return 0;}EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);/** * ipath_layer_set_verbs_flags - set the verbs layer flags * @dd: the infinipath device * @flags: the flags to set */int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags){ struct ipath_devdata *ss; unsigned long lflags; spin_lock_irqsave(&ipath_devs_lock, lflags); list_for_each_entry(ss, &ipath_dev_list, ipath_list) { if (!(ss->ipath_flags & IPATH_INITTED)) continue; if ((flags & IPATH_VERBS_KERNEL_SMA) && !(*ss->ipath_statusp & IPATH_STATUS_SMA)) *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA; else *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA; } spin_unlock_irqrestore(&ipath_devs_lock, lflags); return 0;}EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);/** * ipath_layer_get_npkeys - return the size of the PKEY table for port 0 * @dd: the infinipath device */unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd){ return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);}EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);/** * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table * @dd: the infinipath device * @index: the PKEY index */unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index){ unsigned ret; if (index >= 
ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) ret = 0; else ret = dd->ipath_pd[0]->port_pkeys[index]; return ret;}EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);/** * ipath_layer_get_pkeys - return the PKEY table for port 0 * @dd: the infinipath device * @pkeys: the pkey table is placed here */int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys){ struct ipath_portdata *pd = dd->ipath_pd[0]; memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); return 0;}EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);/** * rm_pkey - decrecment the reference count for the given PKEY * @dd: the infinipath device * @key: the PKEY index * * Return true if this was the last reference and the hardware table entry * needs to be changed. */static int rm_pkey(struct ipath_devdata *dd, u16 key){ int i; int ret; for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { if (dd->ipath_pkeys[i] != key) continue; if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) { dd->ipath_pkeys[i] = 0; ret = 1; goto bail; } break; } ret = 0;bail: return ret;}/** * add_pkey - add the given PKEY to the hardware table * @dd: the infinipath device * @key: the PKEY * * Return an error code if unable to add the entry, zero if no change, * or 1 if the hardware PKEY register needs to be updated. */static int add_pkey(struct ipath_devdata *dd, u16 key){ int i; u16 lkey = key & 0x7FFF; int any = 0; int ret; if (lkey == 0x7FFF) { ret = 0; goto bail; } /* Look for an empty slot or a matching PKEY. */ for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { if (!dd->ipath_pkeys[i]) { any++; continue; } /* If it matches exactly, try to increment the ref count */ if (dd->ipath_pkeys[i] == key) { if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) { ret = 0; goto bail; } /* Lost the race. Look for an empty slot below. */ atomic_dec(&dd->ipath_pkeyrefs[i]); any++; } /* * It makes no sense to have both the limited and unlimited * PKEY set at the same time since the unlimited one will * disable the limited one. 
*/ if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) { ret = -EEXIST; goto bail; } } if (!any) { ret = -EBUSY; goto bail; } for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { if (!dd->ipath_pkeys[i] && atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) { /* for ipathstats, etc. */ ipath_stats.sps_pkeys[i] = lkey; dd->ipath_pkeys[i] = key; ret = 1; goto bail; } } ret = -EBUSY;bail: return ret;}/** * ipath_layer_set_pkeys - set the PKEY table for port 0 * @dd: the infinipath device * @pkeys: the PKEY table */int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys){ struct ipath_portdata *pd; int i; int changed = 0; pd = dd->ipath_pd[0]; for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { u16 key = pkeys[i]; u16 okey = pd->port_pkeys[i]; if (key == okey) continue; /* * The value of this PKEY table entry is changing. * Remove the old entry in the hardware's array of PKEYs. */ if (okey & 0x7FFF) changed |= rm_pkey(dd, okey); if (key & 0x7FFF) { int ret = add_pkey(dd, key); if (ret < 0) key = 0; else changed |= ret; } pd->port_pkeys[i] = key; } if (changed) { u64 pkey; pkey = (u64) dd->ipath_pkeys[0] | ((u64) dd->ipath_pkeys[1] << 16) | ((u64) dd->ipath_pkeys[2] << 32) | ((u64) dd->ipath_pkeys[3] << 48); ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n", (unsigned long long) pkey); ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, pkey); } return 0;}EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);/** * ipath_layer_get_linkdowndefaultstate - get the default linkdown state * @dd: the infinipath device * * Returns zero if the default is POLL, 1 if the default is SLEEP. */int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd){ return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);}EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);/** * ipath_layer_set_linkdowndefaultstate - set the default linkdown state * @dd: the infinipath device * @sleep: the new state * * Note that this will only take effect when the link state changes. 
*/int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep){ if (sleep) dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; else dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, dd->ipath_ibcctrl); return 0;}EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd){ return (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;}EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);/** * ipath_layer_set_phyerrthreshold - set the physical error threshold * @dd: the infinipath device * @n: the new threshold * * Note that this will only take effect when the link state changes. */int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n){ unsigned v; v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; if (v != n) { dd->ipath_ibcctrl &= ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT); dd->ipath_ibcctrl |= (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT; ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, dd->ipath_ibcctrl); } return 0;}EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd){ return (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;}EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);/** * ipath_layer_set_overrunthreshold - set the overrun threshold * @dd: the infinipath device * @n: the new threshold * * Note that this will only take effect when the link state changes. 
*/int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n){ unsigned v; v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; if (v != n) { dd->ipath_ibcctrl &= ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT); dd->ipath_ibcctrl |= (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT; ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, dd->ipath_ibcctrl); } return 0;}EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name, size_t namelen){ return dd->ipath_f_get_boardname(dd, name, namelen);}EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd){ return dd->ipath_rcvhdrentsize;}EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);
/*
 * (code-viewer chrome, not part of the original source)
 * Keyboard shortcuts: copy code Ctrl+C · search code Ctrl+F ·
 * full-screen mode F11 · increase font Ctrl+= · decrease font Ctrl+- ·
 * show shortcuts ?
 */