/*
 * ipath_layer.c — InfiniPath layered-driver interface.
 * Source: Linux 2.6.17.4 kernel tree; 1,522 lines total, this is page 1 of 3
 * (the file begins mid-function below because this is an excerpt).
 */
if (dd->verbs_layer.l_arg && verbs_timer_cb) verbs_timer_cb(dd->verbs_layer.l_arg); mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);}/** * ipath_verbs_register - verbs layer registration * @l_piobufavail: callback for when PIO buffers become available * @l_rcv: callback for receiving a packet * @l_timer_cb: timer callback * @ipath_devdata: device data structure is put here */int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *), void (*l_remove)(void *arg), int (*l_piobufavail) (void *arg), void (*l_rcv) (void *arg, void *rhdr, void *data, u32 tlen), void (*l_timer_cb) (void *arg)){ struct ipath_devdata *dd, *tmp; unsigned long flags; mutex_lock(&ipath_layer_mutex); verbs_add_one = l_add; verbs_remove_one = l_remove; verbs_piobufavail = l_piobufavail; verbs_rcv = l_rcv; verbs_timer_cb = l_timer_cb; spin_lock_irqsave(&ipath_devs_lock, flags); list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) { if (!(dd->ipath_flags & IPATH_INITTED)) continue; if (dd->verbs_layer.l_arg) continue; spin_unlock_irqrestore(&ipath_devs_lock, flags); dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd); spin_lock_irqsave(&ipath_devs_lock, flags); } spin_unlock_irqrestore(&ipath_devs_lock, flags); mutex_unlock(&ipath_layer_mutex); ipath_verbs_registered = 1; return 0;}EXPORT_SYMBOL_GPL(ipath_verbs_register);void ipath_verbs_unregister(void){ struct ipath_devdata *dd, *tmp; unsigned long flags; mutex_lock(&ipath_layer_mutex); spin_lock_irqsave(&ipath_devs_lock, flags); list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) { *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA; if (dd->verbs_layer.l_arg && verbs_remove_one) { spin_unlock_irqrestore(&ipath_devs_lock, flags); verbs_remove_one(dd->verbs_layer.l_arg); spin_lock_irqsave(&ipath_devs_lock, flags); dd->verbs_layer.l_arg = NULL; } } spin_unlock_irqrestore(&ipath_devs_lock, flags); verbs_add_one = NULL; verbs_remove_one = NULL; verbs_piobufavail = NULL; verbs_rcv = NULL; verbs_timer_cb = NULL; 
ipath_verbs_registered = 0; mutex_unlock(&ipath_layer_mutex);}EXPORT_SYMBOL_GPL(ipath_verbs_unregister);int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax){ int ret; u32 intval = 0; mutex_lock(&ipath_layer_mutex); if (!dd->ipath_layer.l_arg) { ret = -EINVAL; goto bail; } ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE); if (ret < 0) goto bail; *pktmax = dd->ipath_ibmaxlen; if (*dd->ipath_statusp & IPATH_STATUS_IB_READY) intval |= IPATH_LAYER_INT_IF_UP; if (ipath_stats.sps_lid[dd->ipath_unit]) intval |= IPATH_LAYER_INT_LID; if (ipath_stats.sps_mlid[dd->ipath_unit]) intval |= IPATH_LAYER_INT_BCAST; /* * do this on open, in case low level is already up and * just layered driver was reloaded, etc. */ if (intval) layer_intr(dd->ipath_layer.l_arg, intval); ret = 0;bail: mutex_unlock(&ipath_layer_mutex); return ret;}EXPORT_SYMBOL_GPL(ipath_layer_open);u16 ipath_layer_get_lid(struct ipath_devdata *dd){ return dd->ipath_lid;}EXPORT_SYMBOL_GPL(ipath_layer_get_lid);/** * ipath_layer_get_mac - get the MAC address * @dd: the infinipath device * @mac: the MAC is put here * * This is the EUID-64 OUI octets (top 3), then * skip the next 2 (which should both be zero or 0xff). * The returned MAC is in network order * mac points to at least 6 bytes of buffer * We assume that by the time the LID is set, that the GUID is as valid * as it's ever going to be, rather than adding yet another status bit. 
*/int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac){ u8 *guid; guid = (u8 *) &dd->ipath_guid; mac[0] = guid[0]; mac[1] = guid[1]; mac[2] = guid[2]; mac[3] = guid[5]; mac[4] = guid[6]; mac[5] = guid[7]; if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff)) ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: " "%x %x\n", guid[3], guid[4]); return 0;}EXPORT_SYMBOL_GPL(ipath_layer_get_mac);u16 ipath_layer_get_bcast(struct ipath_devdata *dd){ return dd->ipath_mlid;}EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd){ return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);}EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);static void update_sge(struct ipath_sge_state *ss, u32 length){ struct ipath_sge *sge = &ss->sge; sge->vaddr += length; sge->length -= length; sge->sge_length -= length; if (sge->sge_length == 0) { if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr != NULL) { if (++sge->n >= IPATH_SEGSZ) { if (++sge->m >= sge->mr->mapsz) return; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; sge->length = sge->mr->map[sge->m]->segs[sge->n].length; }}#ifdef __LITTLE_ENDIANstatic inline u32 get_upper_bits(u32 data, u32 shift){ return data >> shift;}static inline u32 set_upper_bits(u32 data, u32 shift){ return data << shift;}static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off){ data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); return data;}#elsestatic inline u32 get_upper_bits(u32 data, u32 shift){ return data << shift;}static inline u32 set_upper_bits(u32 data, u32 shift){ return data >> shift;}static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off){ data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); return data;}#endifstatic void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss, u32 length){ u32 extra = 0; u32 data = 0; u32 last; 
while (1) { u32 len = ss->sge.length; u32 off; BUG_ON(len == 0); if (len > length) len = length; if (len > ss->sge.sge_length) len = ss->sge.sge_length; /* If the source address is not aligned, try to align it. */ off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); if (off) { u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & ~(sizeof(u32) - 1)); u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE); u32 y; y = sizeof(u32) - off; if (len > y) len = y; if (len + extra >= sizeof(u32)) { data |= set_upper_bits(v, extra * BITS_PER_BYTE); len = sizeof(u32) - extra; if (len == length) { last = data; break; } __raw_writel(data, piobuf); piobuf++; extra = 0; data = 0; } else { /* Clear unused upper bytes */ data |= clear_upper_bytes(v, len, extra); if (len == length) { last = data; break; } extra += len; } } else if (extra) { /* Source address is aligned. */ u32 *addr = (u32 *) ss->sge.vaddr; int shift = extra * BITS_PER_BYTE; int ushift = 32 - shift; u32 l = len; while (l >= sizeof(u32)) { u32 v = *addr; data |= set_upper_bits(v, shift); __raw_writel(data, piobuf); data = get_upper_bits(v, ushift); piobuf++; addr++; l -= sizeof(u32); } /* * We still have 'extra' number of bytes leftover. */ if (l) { u32 v = *addr; if (l + extra >= sizeof(u32)) { data |= set_upper_bits(v, shift); len -= l + extra - sizeof(u32); if (len == length) { last = data; break; } __raw_writel(data, piobuf); piobuf++; extra = 0; data = 0; } else { /* Clear unused upper bytes */ data |= clear_upper_bytes(v, l, extra); if (len == length) { last = data; break; } extra += l; } } else if (len == length) { last = data; break; } } else if (len == length) { u32 w; /* * Need to round up for the last dword in the * packet. 
*/ w = (len + 3) >> 2; __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); piobuf += w - 1; last = ((u32 *) ss->sge.vaddr)[w - 1]; break; } else { u32 w = len >> 2; __iowrite32_copy(piobuf, ss->sge.vaddr, w); piobuf += w; extra = len & (sizeof(u32) - 1); if (extra) { u32 v = ((u32 *) ss->sge.vaddr)[w]; /* Clear unused upper bytes */ data = clear_upper_bytes(v, extra, 0); } } update_sge(ss, len); length -= len; } /* Update address before sending packet. */ update_sge(ss, length); /* must flush early everything before trigger word */ ipath_flush_wc(); __raw_writel(last, piobuf); /* be sure trigger word is written */ ipath_flush_wc();}/** * ipath_verbs_send - send a packet from the verbs layer * @dd: the infinipath device * @hdrwords: the number of works in the header * @hdr: the packet header * @len: the length of the packet in bytes * @ss: the SGE to send * * This is like ipath_sma_send_pkt() in that we need to be able to send * packets after the chip is initialized (MADs) but also like * ipath_layer_send_hdr() since its used by the verbs layer. */int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, u32 *hdr, u32 len, struct ipath_sge_state *ss){ u32 __iomem *piobuf; u32 plen; int ret; /* +1 is for the qword padding of pbc */ plen = hdrwords + ((len + 3) >> 2) + 1; if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) { ipath_dbg("packet len 0x%x too long, failing\n", plen); ret = -EINVAL; goto bail; } /* Get a PIO buffer to use. */ piobuf = ipath_getpiobuf(dd, NULL); if (unlikely(piobuf == NULL)) { ret = -EBUSY; goto bail; } /* * Write len to control qword, no flags. * We have to flush after the PBC for correctness on some cpus * or WC buffer can be written out of order. */ writeq(plen, piobuf); ipath_flush_wc(); piobuf += 2; if (len == 0) { /* * If there is just the header portion, must flush before * writing last word of header for correctness, and after * the last header word (trigger word). 
*/ __iowrite32_copy(piobuf, hdr, hdrwords - 1); ipath_flush_wc(); __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); ipath_flush_wc(); ret = 0; goto bail; } __iowrite32_copy(piobuf, hdr, hdrwords); piobuf += hdrwords; /* The common case is aligned and contained in one segment. */ if (likely(ss->num_sge == 1 && len <= ss->sge.length && !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { u32 w; u32 *addr = (u32 *) ss->sge.vaddr; /* Update address before sending packet. */ update_sge(ss, len); /* Need to round up for the last dword in the packet. */ w = (len + 3) >> 2; __iowrite32_copy(piobuf, addr, w - 1); /* must flush early everything before trigger word */ ipath_flush_wc(); __raw_writel(addr[w - 1], piobuf + w - 1); /* be sure trigger word is written */ ipath_flush_wc(); ret = 0; goto bail; } copy_io(piobuf, ss, len); ret = 0;bail: return ret;}EXPORT_SYMBOL_GPL(ipath_verbs_send);int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords, u64 *rwords, u64 *spkts, u64 *rpkts, u64 *xmit_wait){ int ret; if (!(dd->ipath_flags & IPATH_INITTED)) { /* no hardware, freeze, etc. */ ipath_dbg("unit %u not usable\n", dd->ipath_unit); ret = -EINVAL; goto bail; } *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt); *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt); *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt); ret = 0;bail: return ret;}EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);/** * ipath_layer_get_counters - get various chip counters * @dd: the infinipath device * @cntrs: counters are placed here * * Return the counters needed by recv_pma_get_portcounters(). */int ipath_layer_get_counters(struct ipath_devdata *dd, struct ipath_layer_counters *cntrs){ int ret; if (!(dd->ipath_flags & IPATH_INITTED)) { /* no hardware, freeze, etc. 
*/ ipath_dbg("unit %u not usable\n", dd->ipath_unit); ret = -EINVAL; goto bail; } cntrs->symbol_error_counter = ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
	/*
	 * NOTE(review): excerpt truncated here — the remainder of
	 * ipath_layer_get_counters() continues on the next page of the
	 * original 1,522-line file.
	 */