ipath_driver.c
From the Linux 2.6.17.4 source tree · C code · 1,993 lines total · page 1 of 4
C
1,993 lines
"kregvirt %p\n", addr, dd->ipath_kregbase, dd->ipath_kregvirt);

	/*
	 * clear ipath_flags here instead of in ipath_init_chip as it is set
	 * by ipath_setup_htconfig.
	 */
	dd->ipath_flags = 0;
	if (dd->ipath_f_bus(dd, pdev))
		ipath_dev_err(dd, "Failed to setup config space; "
			      "continuing anyway\n");

	/*
	 * set up our interrupt handler; SA_SHIRQ probably not needed,
	 * since MSI interrupts shouldn't be shared but won't hurt for now.
	 * check 0 irq after we return from chip-specific bus setup, since
	 * that can affect this due to setup
	 */
	if (!pdev->irq)
		ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
			      "work\n");
	else {
		ret = request_irq(pdev->irq, ipath_intr, SA_SHIRQ,
				  IPATH_DRV_NAME, dd);
		if (ret) {
			ipath_dev_err(dd, "Couldn't setup irq handler, "
				      "irq=%u: %d\n", pdev->irq, ret);
			goto bail_iounmap;
		}
	}

	ret = ipath_init_chip(dd, 0);	/* do the chip-specific init */
	if (ret)
		/*
		 * NOTE(review): this failure path never calls free_irq()
		 * on the handler requested just above -- looks like an
		 * irq leak; confirm against the teardown code elsewhere
		 * in this file.
		 */
		goto bail_iounmap;

	/* write combining is only a performance optimization; non-fatal */
	ret = ipath_enable_wc(dd);
	if (ret) {
		ipath_dev_err(dd, "Write combining not enabled "
			      "(err %d): performance may be poor\n",
			      -ret);
		ret = 0;	/* continue without WC */
	}

	/* publish the device: sysfs group, ipathfs, user intf, layered drv */
	ipath_device_create_group(&pdev->dev, dd);
	ipathfs_add_device(dd);
	ipath_user_add(dd);
	ipath_layer_add(dd);

	goto bail;

	/* unwind partial initialization, innermost acquisition first */
bail_iounmap:
	iounmap((volatile void __iomem *) dd->ipath_kregbase);

bail_regions:
	pci_release_regions(pdev);

bail_disable:
	pci_disable_device(pdev);

bail_devdata:
	ipath_free_devdata(pdev, dd);

bail_rcvhdrtail:
	cleanup_port0_rcvhdrtail(pdev);

bail:
	return ret;
}

/*
 * Undo what ipath_init_one() set up: detach from the layered drivers
 * and the user interface, remove sysfs/ipathfs entries, unmap the chip
 * registers and release/disable the PCI resources.
 *
 * NOTE(review): no free_irq() here either; presumably the irq is
 * released elsewhere (e.g. on device shutdown) -- verify.
 */
static void __devexit ipath_remove_one(struct pci_dev *pdev)
{
	struct ipath_devdata *dd;

	ipath_cdbg(VERBOSE, "removing, pdev=%p\n", pdev);
	if (!pdev)
		return;

	dd = pci_get_drvdata(pdev);
	ipath_layer_del(dd);
	ipath_user_del(dd);
	ipathfs_remove_device(dd);
	ipath_device_remove_group(&pdev->dev, dd);
	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
		   "unit %u\n", dd, (u32) dd->ipath_unit);
	if (dd->ipath_kregbase) {
		ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n",
			   dd->ipath_kregbase);
		iounmap((volatile void __iomem *) dd->ipath_kregbase);
		dd->ipath_kregbase = NULL;
	}
	pci_release_regions(pdev);
	ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
	pci_disable_device(pdev);

	ipath_free_devdata(pdev, dd);
	cleanup_port0_rcvhdrtail(pdev);
}

/* general driver use */
DEFINE_MUTEX(ipath_mutex);

/* protects the per-device pioavail shadow copies (used elsewhere) */
static DEFINE_SPINLOCK(ipath_pioavail_lock);

/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * cancel a range of PIO buffers, used when they might be armed, but
 * not triggered.  Used at init to ensure buffer state, and also user
 * process close, in case it died while writing to a PIO buffer
 * Also after errors.
 */
void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
			  unsigned cnt)
{
	unsigned i, last = first + cnt;
	u64 sendctrl, sendorig;

	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
	/* set DISARM once; only the target buffer index varies per write */
	sendorig = dd->ipath_sendctrl | INFINIPATH_S_DISARM;
	for (i = first; i < last; i++) {
		sendctrl = sendorig |
			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 sendctrl);
	}

	/*
	 * Write it again with current value, in case ipath_sendctrl changed
	 * while we were looping; no critical bits that would require
	 * locking.
	 *
	 * Write a 0, and then the original value, reading scratch in
	 * between. This seems to avoid a chip timing race that causes
	 * pioavail updates to memory to stop.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0);
	sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
}

/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * wait up to msecs milliseconds for IB link state change to occur for
 * now, take the easy polling route. Currently used only by
 * ipath_layer_set_linkstate.
Returns 0 if state reached, otherwise
 * -ETIMEDOUT state can have multiple states set, for any of several
 * transitions.
 */
int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
{
	/* tell the interrupt path which flags to wake us for */
	dd->ipath_sma_state_wanted = state;
	wait_event_interruptible_timeout(ipath_sma_state_wait,
					 (dd->ipath_flags & state),
					 msecs_to_jiffies(msecs));
	dd->ipath_sma_state_wanted = 0;

	if (!(dd->ipath_flags & state)) {
		u64 val;
		ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n",
			   /* test INIT ahead of DOWN, both can be set */
			   (state & IPATH_LINKINIT) ? "INIT" :
			   ((state & IPATH_LINKDOWN) ? "DOWN" :
			    ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
			   msecs);
		/* on timeout, dump the IB link-control registers for debug */
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
		ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
			   (unsigned long long) ipath_read_kreg64(
				   dd, dd->ipath_kregs->kr_ibcctrl),
			   (unsigned long long) val,
			   ipath_ibcstatus_str[val & 0xf]);
	}
	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}

/*
 * Decode the error bits in @err into a human-readable, space-separated
 * list of names in @buf (at most @blen bytes; strlcat keeps it
 * NUL-terminated).
 */
void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
{
	*buf = '\0';
	if (err & INFINIPATH_E_RHDRLEN)
		strlcat(buf, "rhdrlen ", blen);
	if (err & INFINIPATH_E_RBADTID)
		strlcat(buf, "rbadtid ", blen);
	if (err & INFINIPATH_E_RBADVERSION)
		strlcat(buf, "rbadversion ", blen);
	if (err & INFINIPATH_E_RHDR)
		strlcat(buf, "rhdr ", blen);
	if (err & INFINIPATH_E_RLONGPKTLEN)
		strlcat(buf, "rlongpktlen ", blen);
	if (err & INFINIPATH_E_RSHORTPKTLEN)
		strlcat(buf, "rshortpktlen ", blen);
	if (err & INFINIPATH_E_RMAXPKTLEN)
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & INFINIPATH_E_RMINPKTLEN)
		strlcat(buf, "rminpktlen ", blen);
	if (err & INFINIPATH_E_RFORMATERR)
		strlcat(buf, "rformaterr ", blen);
	if (err & INFINIPATH_E_RUNSUPVL)
		strlcat(buf, "runsupvl ", blen);
	if (err & INFINIPATH_E_RUNEXPCHAR)
		strlcat(buf, "runexpchar ", blen);
	if (err & INFINIPATH_E_RIBFLOW)
		strlcat(buf, "ribflow ", blen);
	if (err & INFINIPATH_E_REBP)
		strlcat(buf, "EBP ", blen);
	if (err & INFINIPATH_E_SUNDERRUN)
		strlcat(buf, "sunderrun ", blen);
	if (err &
INFINIPATH_E_SPIOARMLAUNCH)
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & INFINIPATH_E_SDROPPEDDATAPKT)
		strlcat(buf, "sdroppeddatapkt ", blen);
	if (err & INFINIPATH_E_SDROPPEDSMPPKT)
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & INFINIPATH_E_SMAXPKTLEN)
		strlcat(buf, "smaxpktlen ", blen);
	if (err & INFINIPATH_E_SMINPKTLEN)
		strlcat(buf, "sminpktlen ", blen);
	if (err & INFINIPATH_E_SUNSUPVL)
		strlcat(buf, "sunsupVL ", blen);
	if (err & INFINIPATH_E_SPKTLEN)
		strlcat(buf, "spktlen ", blen);
	if (err & INFINIPATH_E_INVALIDADDR)
		strlcat(buf, "invalidaddr ", blen);
	if (err & INFINIPATH_E_RICRC)
		strlcat(buf, "CRC ", blen);
	if (err & INFINIPATH_E_RVCRC)
		strlcat(buf, "VCRC ", blen);
	if (err & INFINIPATH_E_RRCVEGRFULL)
		strlcat(buf, "rcvegrfull ", blen);
	if (err & INFINIPATH_E_RRCVHDRFULL)
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & INFINIPATH_E_IBSTATUSCHANGED)
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & INFINIPATH_E_RIBLOSTLINK)
		strlcat(buf, "riblostlink ", blen);
	if (err & INFINIPATH_E_HARDWARE)
		strlcat(buf, "hardware ", blen);
	if (err & INFINIPATH_E_RESET)
		strlcat(buf, "reset ", blen);
}

/**
 * get_rhf_errstring - decode RHF errors
 * @err: the err number
 * @msg: the output buffer
 * @len: the length of the output buffer
 *
 * only used one place now, may want more later
 */
static void get_rhf_errstring(u32 err, char *msg, size_t len)
{
	/* if no errors, and so don't need to check what's first */
	*msg = '\0';

	if (err & INFINIPATH_RHF_H_ICRCERR)
		strlcat(msg, "icrcerr ", len);
	if (err & INFINIPATH_RHF_H_VCRCERR)
		strlcat(msg, "vcrcerr ", len);
	if (err & INFINIPATH_RHF_H_PARITYERR)
		strlcat(msg, "parityerr ", len);
	if (err & INFINIPATH_RHF_H_LENERR)
		strlcat(msg, "lenerr ", len);
	if (err & INFINIPATH_RHF_H_MTUERR)
		strlcat(msg, "mtuerr ", len);
	if (err & INFINIPATH_RHF_H_IHDRERR)
		/* infinipath hdr checksum error */
		strlcat(msg, "ipathhdrerr ", len);
	if (err & INFINIPATH_RHF_H_TIDERR)
		strlcat(msg, "tiderr ", len);
	if (err & INFINIPATH_RHF_H_MKERR)
		/* bad port, offset, etc. */
		strlcat(msg, "invalid ipathhdr ", len);
	if (err & INFINIPATH_RHF_H_IBERR)
		strlcat(msg, "iberr ", len);
	if (err & INFINIPATH_RHF_L_SWA)
		strlcat(msg, "swA ", len);
	if (err & INFINIPATH_RHF_L_SWB)
		strlcat(msg, "swB ", len);
}

/**
 * ipath_get_egrbuf - get an eager buffer
 * @dd: the infinipath device
 * @bufnum: the eager buffer to get
 * @err: unused
 *
 * must only be called if ipath_pd[port] is known to be allocated
 */
static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
				     int err)
{
	/* the port-0 skb data areas double as the eager receive buffers */
	return dd->ipath_port0_skbs ?
		(void *)dd->ipath_port0_skbs[bufnum]->data : NULL;
}

/**
 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
 * @dd: the infinipath device
 * @gfp_mask: the sk_buff SFP mask
 */
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
				gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 len;

	/*
	 * Only fully supported way to handle this is to allocate lots
	 * extra, align as needed, and then do skb_reserve().  That wastes
	 * a lot of memory...  I'll have to hack this into infinipath_copy
	 * also.
	 */

	/*
	 * We need 4 extra bytes for unaligned transfer copying
	 */
	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		/* we need a 4KB multiple alignment, and there is no way
		 * to do it except to allocate extra and then skb_reserve
		 * enough to bring it up to the right alignment.
*/
		/*
		 * NOTE(review): the mask used below is (1 << 11) == 2KB,
		 * while the comment above says 4KB -- confirm the intended
		 * alignment.
		 */
		len = dd->ipath_ibmaxlen + 4 + (1 << 11) - 1;
	} else
		len = dd->ipath_ibmaxlen + 4;

	skb = __dev_alloc_skb(len, gfp_mask);
	if (!skb) {
		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
			      len);
		goto bail;
	}
	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		/* reserve past the 4-byte slack up to the next boundary */
		u32 una = ((1 << 11) - 1) & (unsigned long)(skb->data + 4);
		if (una)
			skb_reserve(skb, 4 + (1 << 11) - una);
		else
			skb_reserve(skb, 4);
	} else
		skb_reserve(skb, 4);

bail:
	return skb;
}

/**
 * ipath_rcv_layer - receive a packet for the layered (ethernet) driver
 * @dd: the infinipath device
 * @etail: the sk_buff number
 * @tlen: the total packet length
 * @hdr: the ethernet header
 *
 * Separate routine for better overall optimization
 */
static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
			    u32 tlen, struct ether_header *hdr)
{
	u32 elen;
	u8 pad, *bthbytes;
	struct sk_buff *skb, *nskb;

	if (dd->ipath_port0_skbs && hdr->sub_opcode == OPCODE_ENCAP) {
		/*
		 * Allocate a new sk_buff to replace the one we give
		 * to the network stack.
		 */
		nskb = ipath_alloc_skb(dd, GFP_ATOMIC);
		if (!nskb) {
			/* count OK packets that we drop */
			ipath_stats.sps_krdrops++;
			return;
		}

		bthbytes = (u8 *) hdr->bth;
		pad = (bthbytes[1] >> 4) & 3;
		/* +CRC32 */
		elen = tlen - (sizeof(*hdr) + pad + sizeof(u32));

		/* hand the filled skb up; nskb takes its slot in the ring */
		skb = dd->ipath_port0_skbs[etail];
		dd->ipath_port0_skbs[etail] = nskb;
		skb_put(skb, elen);

		/* repoint the chip's eager TID entry at the new buffer */
		dd->ipath_f_put_tid(dd, etail + (u64 __iomem *)
				    ((char __iomem *) dd->ipath_kregbase
				     + dd->ipath_rcvegrbase), 0,
				    virt_to_phys(nskb->data));

		__ipath_layer_rcv(dd, hdr, skb);

		/* another ether packet received */
		ipath_stats.sps_ether_rpkts++;
	} else if (hdr->sub_opcode == OPCODE_LID_ARP)
		__ipath_layer_rcv_lid(dd, hdr);
}

/*
 * ipath_kreceive - receive a packet
 * @dd: the infinipath device
 *
 * called from interrupt handler for errors or receive interrupt
 */
void ipath_kreceive(struct ipath_devdata *dd)
{
	u64 *rc;
	void *ebuf;
	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct
ips_message_header *hdr;
	u32 eflags, i, etype, tlen, pkttot = 0;
	static u64 totcalls;	/* stats, may eventually remove */
	char emsg[128];

	if (!dd->ipath_hdrqtailptr) {
		ipath_dev_err(dd,
			      "hdrqtailptr not set, can't do receives\n");
		goto bail;
	}

	/* There is already a thread processing this queue. */
	if (test_and_set_bit(0, &dd->ipath_rcv_pending))
		goto bail;

	/* nothing new in the header queue? */
	if (dd->ipath_port0head ==
	    (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
		goto done;

gotmore:
	/*
	 * read only once at start. If in flood situation, this helps
	 * performance slightly. If more arrive while we are processing,
	 * we'll come back here and do them
	 */
	hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);

	for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) {
		u32 qp;
		u8 *bthbytes;

		/* rc[0] is the RHF; the message header follows it */
		rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
		hdr = (struct ips_message_header *)&rc[1];
		/*
		 * could make a network order version of IPATH_KD_QP, and
		 * do the obvious shift before masking to speed this up.
		 */
		qp = ntohl(hdr->bth[1]) & 0xffffff;
		bthbytes = (u8 *) hdr->bth;

		eflags = ips_get_hdr_err_flags((__le32 *) rc);
		etype = ips_get_rcv_type((__le32 *) rc);
		/* total length */
		tlen = ips_get_length_in_bytes((__le32 *) rc);
		ebuf = NULL;
		if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
			/*
			 * it turns out that the chips uses an eager buffer
			 * for all non-expected packets, whether it "needs"
			 * one or not. So always get the index, but don't
			 * set ebuf (so we try to copy data) unless the
			 * length requires it.
			 */
			etail = ips_get_index((__le32 *) rc);
			if (tlen > sizeof(*hdr) ||
			    etype == RCVHQ_RCV_TYPE_NON_KD)
				ebuf = ipath_get_egrbuf(dd, etail, 0);
		}

		/*
		 * both tiderr and ipathhdrerr are set for all plain IB
		 * packets; only ipathhdrerr should be set.
		 */

		if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
		    RCVHQ_RCV_TYPE_ERROR && ips_get_ipath_ver(
			    hdr->iph.ver_port_tid_offset) !=
		    IPS_PROTO_VERSION) {
			/*
			 * NOTE(review): the message prints etype, not the
			 * version that failed the check -- possibly a
			 * wrong argument; verify upstream.
			 */
			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
				   "%x\n", etype);
		}

		if (eflags & ~(INFINIPATH_RHF_H_TIDERR |
			       INFINIPATH_RHF_H_IHDRERR)) {
			/* unexpected receive-header-flag errors: log them */
			get_rhf_errstring(eflags, emsg, sizeof emsg);
			ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
				   "tlen=%x opcode=%x egridx=%x: %s\n",
				   eflags, l, etype, tlen, bthbytes[0],
				   ips_get_index((__le32 *) rc), emsg);
		} else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search in code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?