/*
 * xbow.c — SGI Crossbow (XBow) crosstalk switch support (partial chunk):
 * error-handler tail, performance-counter and LLP link-status maintenance,
 * link reset, and priority (GBR) bandwidth arbitration.
 *
 * NOTE(review): this text was recovered from a web paste; the original
 * copyright/license header and the head of xbow_error_handler() are not
 * visible in this chunk.
 */
/*
 * NOTE(review): fragment — the signature and earlier cases of
 * xbow_error_handler() lie before this chunk.  Code tokens below are
 * unchanged; only formatting and comments were added.
 */
		iopaddr_t tmp;

		IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
		/* NOTE(review): there is no '\n' between the "connected)" and
		 * "with offset" format segments, so those two lines run
		 * together on the console — confirm whether that is intended. */
		printk(KERN_ALERT "PIO Error on XIO Bus %s\n"
			"\tattempting to access XIO port %d\n"
			"\t(which has no device connected)"
			"\twith offset 0x%lx",
			soft->name, port, tmp);
	}
	/* Dump the raw Crossbow registers: unconditionally in DEBUG kernels,
	 * otherwise only when kdebug is set. */
#if !DEBUG
	if (kdebug) {
#endif
		XEM_ADD_STR("Raw status values for Crossbow:\n");
		XEM_ADD_VAR(wid_stat);
		XEM_ADD_VAR(wid_err_cmdword);
		XEM_ADD_VAR(wid_err_upper);
		XEM_ADD_VAR(wid_err_lower);
		XEM_ADD_VAR(wid_err_addr);
		XEM_ADD_VAR(port);
		XEM_ADD_VAR(link_control);
		XEM_ADD_VAR(link_status);
		XEM_ADD_VAR(link_aux_status);
#if !DEBUG
	}
#endif
	return retval;
	}

	/* Check that the link is alive. */
	if (!(link_status & XB_STAT_LINKALIVE)) {
		iopaddr_t tmp;
		/* nobody connected. */
		if (mode == MODE_DEVPROBE)
			return IOERROR_HANDLED;

		printk(KERN_ALERT "%s%sError on XIO Bus %s port %d",
			(error_code & IOECODE_DMA) ? "DMA " : "",
			(error_code & IOECODE_PIO) ? "PIO " : "",
			soft->name, port);
		IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
		/* Report the faulting offset only for PIO errors that carry a
		 * valid crosstalk address. */
		if ((error_code & IOECODE_PIO) &&
		    (IOERROR_FIELDVALID(ioerror, xtalkaddr))) {
			printk("\tAccess attempted to offset 0x%lx\n", tmp);
		}
		if (link_aux_status & XB_AUX_LINKFAIL_RST_BAD)
			XEM_ADD_STR("\tLink never came out of reset\n");
		else
			XEM_ADD_STR("\tLink failed while transferring data\n");
	}
	/* get the connection point for the widget
	 * involved in this error; if it exists and
	 * is not our connectpoint, cycle back through
	 * xtalk_error_handler to deliver control to
	 * the proper handler (or to report a generic
	 * crosstalk error).
	 *
	 * If the downstream handler won't handle
	 * the problem, we let our upstream caller
	 * deal with it, after (in DEBUG and kdebug
	 * kernels) dumping the xbow state for this
	 * port.
	 */
	conn = xbow_widget_lookup(busv, port);
	if ((conn != GRAPH_VERTEX_NONE) && (conn != soft->conn)) {
		retval = xtalk_error_handler(conn, error_code, mode, ioerror);
		if (retval == IOERROR_HANDLED)
			return IOERROR_HANDLED;
	}
	if (mode == MODE_DEVPROBE)
		return IOERROR_HANDLED;

	if (retval == IOERROR_UNHANDLED) {
		iopaddr_t tmp;
		/* Nobody downstream claimed the error: escalate. */
		retval = IOERROR_PANIC;
		printk(KERN_ALERT "%s%sError on XIO Bus %s port %d",
			(error_code & IOECODE_DMA) ? "DMA " : "",
			(error_code & IOECODE_PIO) ? "PIO " : "",
			soft->name, port);
		IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
		if ((error_code & IOECODE_PIO) &&
		    (IOERROR_FIELDVALID(ioerror, xtalkaddr))) {
			printk("\tAccess attempted to offset 0x%lx\n", tmp);
		}
	}
#if !DEBUG
	if (kdebug) {
#endif
		XEM_ADD_STR("Raw status values for Crossbow:\n");
		XEM_ADD_VAR(wid_stat);
		XEM_ADD_VAR(wid_err_cmdword);
		XEM_ADD_VAR(wid_err_upper);
		XEM_ADD_VAR(wid_err_lower);
		XEM_ADD_VAR(wid_err_addr);
		XEM_ADD_VAR(port);
		XEM_ADD_VAR(link_control);
		XEM_ADD_VAR(link_status);
		XEM_ADD_VAR(link_aux_status);
#if !DEBUG
	}
#endif
	/* caller will dump raw ioerror data
	 * in DEBUG and kdebug kernels.
 */
	return retval;
}

/*
 * xbow_update_perf_counters: fold the current hardware performance-counter
 * values into the per-link cumulative software totals.
 *
 * For each active counter slot, snapshots the raw counter register under
 * xbow_perf_lock, adds the (wrap-masked) delta since the last snapshot to
 * xlp_cumulative[current mode] of the link the counter is watching, and
 * remembers the new raw count.
 */
void
xbow_update_perf_counters(vertex_hdl_t vhdl)
{
	xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
	xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
	xbow_perf_link_t *xbow_plink = xbow_soft->xbow_perflink;
	xbow_perfcount_t perf_reg;
	int link, i;

	for (i = 0; i < XBOW_PERF_COUNTERS; i++, xbow_perf++) {
		/* Skip counter slots that are not monitoring anything. */
		if (xbow_perf->xp_mode == XBOW_MONITOR_NONE)
			continue;
		spin_lock(&xbow_soft->xbow_perf_lock);
		/* Snapshot the raw hardware counter register. */
		perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
		link = perf_reg.xb_perf.link_select;
		/* Accumulate the delta since the last snapshot; masking with
		 * XBOW_COUNTER_MASK makes the subtraction come out right when
		 * the hardware count has wrapped. */
		(xbow_plink + link)->xlp_cumulative[xbow_perf->xp_curmode] +=
			((perf_reg.xb_perf.count - xbow_perf->xp_current) &
			 XBOW_COUNTER_MASK);
		xbow_perf->xp_current = perf_reg.xb_perf.count;
		spin_unlock(&xbow_soft->xbow_perf_lock);
	}
}

/* Return this xbow's per-link cumulative performance-counter array. */
xbow_perf_link_t *
xbow_get_perf_counters(vertex_hdl_t vhdl)
{
	xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
	xbow_perf_link_t *xbow_perf_link = xbow_soft->xbow_perflink;

	return xbow_perf_link;
}

/*
 * xbow_enable_perf_counter: point hardware counter `counter` at xbow port
 * `link` (given as a widget number, rebased by BASE_XBOW_PORT) in monitoring
 * mode `mode`.
 *
 * Returns 0 on success, -1 if any argument is out of range, if the chosen
 * counter is already busy, or if another counter already watches this link.
 */
int
xbow_enable_perf_counter(vertex_hdl_t vhdl, int link, int mode, int counter)
{
	xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
	xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
	xbow_linkctrl_t xbow_link_ctrl;
	xbow_t *xbow = xbow_soft->base;
	xbow_perfcount_t perf_reg;
	int i;

	/* Validate link (rebased to 0..MAX_XBOW_PORTS-1), mode and counter. */
	link -= BASE_XBOW_PORT;
	if ((link < 0) || (link >= MAX_XBOW_PORTS))
		return -1;
	if ((mode < XBOW_MONITOR_NONE) || (mode > XBOW_MONITOR_DEST_LINK))
		return -1;
	if ((counter < 0) || (counter >= XBOW_PERF_COUNTERS))
		return -1;

	spin_lock(&xbow_soft->xbow_perf_lock);

	/* NOTE(review): logical `&&`, not `&` — the request is rejected only
	 * when the slot is active AND a (non-NONE) mode is being requested;
	 * presumably XBOW_MONITOR_NONE is 0, making disables pass — confirm. */
	if ((xbow_perf + counter)->xp_mode && mode) {
		spin_unlock(&xbow_soft->xbow_perf_lock);
		return -1;
	}
	/* Refuse if some other counter already monitors this link. */
	for (i = 0; i < XBOW_PERF_COUNTERS; i++) {
		if (i == counter)
			continue;
		if (((xbow_perf + i)->xp_link == link) &&
		    ((xbow_perf + i)->xp_mode)) {
			spin_unlock(&xbow_soft->xbow_perf_lock);
			return -1;
		}
	}

	xbow_perf += counter;
	xbow_perf->xp_curlink = xbow_perf->xp_link = link;
	xbow_perf->xp_curmode = xbow_perf->xp_mode = mode;

	/* Program the perf mode into the link's control register... */
	xbow_link_ctrl.xbl_ctrlword = xbow->xb_link_raw[link].link_control;
	xbow_link_ctrl.xb_linkcontrol.perf_mode = mode;
	xbow->xb_link_raw[link].link_control = xbow_link_ctrl.xbl_ctrlword;

	/* ...select the link in the counter register, and snapshot the
	 * current count as the baseline for future deltas. */
	perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
	perf_reg.xb_perf.link_select = link;
	*(xbowreg_t *) xbow_perf->xp_perf_reg = perf_reg.xb_counter_val;

	xbow_perf->xp_current = perf_reg.xb_perf.count;

	spin_unlock(&xbow_soft->xbow_perf_lock);
	return 0;
}

/* Return this xbow's per-link LLP status array. */
xbow_link_status_t *
xbow_get_llp_status(vertex_hdl_t vhdl)
{
	xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
	xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;

	return xbow_llp_status;
}

/*
 * xbow_update_llp_status: accumulate LLP receive-error and transmit-retry
 * counts for every live link into the per-link software status array.
 *
 * NOTE(review): reads link_status_clr — the name suggests a clear-on-read
 * status register; confirm against the xbow register manual.
 */
void
xbow_update_llp_status(vertex_hdl_t vhdl)
{
	xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
	xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
	xbow_t *xbow;
	xbwX_stat_t lnk_sts;
	xbow_aux_link_status_t aux_sts;
	int link;
	vertex_hdl_t xwidget_vhdl;
	char *xwidget_name;

	xbow = (xbow_t *) xbow_soft->base;
	for (link = 0; link < MAX_XBOW_PORTS; link++, xbow_llp_status++) {
		/* Get the widget name corresponding the current link.
		 * Note : 0 <= link < MAX_XBOW_PORTS(8).
* BASE_XBOW_PORT(0x8) <= xwidget number < MAX_PORT_NUM (0x10) */ xwidget_vhdl = xbow_widget_lookup(xbow_soft->busv,link+BASE_XBOW_PORT); xwidget_name = xwidget_name_get(xwidget_vhdl); aux_sts.aux_linkstatus = xbow->xb_link_raw[link].link_aux_status; lnk_sts.linkstatus = xbow->xb_link_raw[link].link_status_clr; if (lnk_sts.link_alive == 0) continue; xbow_llp_status->rx_err_count += aux_sts.xb_aux_linkstatus.rx_err_cnt; xbow_llp_status->tx_retry_count += aux_sts.xb_aux_linkstatus.tx_retry_cnt; if (lnk_sts.linkstatus & ~(XB_STAT_RCV_ERR | XB_STAT_XMT_RTRY_ERR | XB_STAT_LINKALIVE)) {#ifdef LATER printk(KERN_WARNING "link %d[%s]: bad status 0x%x\n", link, xwidget_name, lnk_sts.linkstatus);#endif } }}intxbow_disable_llp_monitor(vertex_hdl_t vhdl){ xbow_soft_t xbow_soft = xbow_soft_get(vhdl); int port; for (port = 0; port < MAX_XBOW_PORTS; port++) { xbow_soft->xbow_link_status[port].rx_err_count = 0; xbow_soft->xbow_link_status[port].tx_retry_count = 0; } xbow_soft->link_monitor = 0; return 0;}intxbow_enable_llp_monitor(vertex_hdl_t vhdl){ xbow_soft_t xbow_soft = xbow_soft_get(vhdl); xbow_soft->link_monitor = 1; return 0;}intxbow_reset_link(vertex_hdl_t xconn_vhdl){ xwidget_info_t widget_info; xwidgetnum_t port; xbow_t *xbow; xbowreg_t ctrl; xbwX_stat_t stat; unsigned long itick; unsigned dtick; static long ticks_to_wait = HZ / 1000; widget_info = xwidget_info_get(xconn_vhdl); port = xwidget_info_id_get(widget_info);#ifdef XBOW_K1PTR /* defined if we only have one xbow ... */ xbow = XBOW_K1PTR;#else { vertex_hdl_t xbow_vhdl; xbow_soft_t xbow_soft; hwgraph_traverse(xconn_vhdl, ".master/xtalk/0/xbow", &xbow_vhdl); xbow_soft = xbow_soft_get(xbow_vhdl); xbow = xbow_soft->base; }#endif /* * This requires three PIOs (reset the link, check for the * reset, restore the control register for the link) plus * 10us to wait for the reset. We allow up to 1ms for the * widget to come out of reset before giving up and * returning a failure. 
*/ ctrl = xbow->xb_link(port).link_control; xbow->xb_link(port).link_reset = 0; itick = jiffies; while (1) { stat.linkstatus = xbow->xb_link(port).link_status; if (stat.link_alive) break; dtick = jiffies - itick; if (dtick > ticks_to_wait) { return -1; /* never came out of reset */ } udelay(2); /* don't beat on link_status */ } xbow->xb_link(port).link_control = ctrl; return 0;}#define XBOW_ARB_RELOAD_TICKS 25 /* granularity: 4 MB/s, max: 124 MB/s */#define GRANULARITY ((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)#define XBOW_BYTES_TO_GBR(BYTES_per_s) (int) (BYTES_per_s / GRANULARITY)#define XBOW_GBR_TO_BYTES(cnt) (bandwidth_t) ((cnt) * GRANULARITY)#define CEILING_BYTES_TO_GBR(gbr, bytes_per_sec) \ ((XBOW_GBR_TO_BYTES(gbr) < bytes_per_sec) ? gbr+1 : gbr)#define XBOW_ARB_GBR_MAX 31#define ABS(x) ((x > 0) ? (x) : (-1 * x)) /* absolute value */intxbow_bytes_to_gbr(bandwidth_t old_bytes_per_sec, bandwidth_t bytes_per_sec){ int gbr_granted; int new_total_gbr; int change_gbr; bandwidth_t new_total_bw;#ifdef GRIO_DEBUG printk("xbow_bytes_to_gbr: old_bytes_per_sec %lld bytes_per_sec %lld\n", old_bytes_per_sec, bytes_per_sec);#endif /* GRIO_DEBUG */ gbr_granted = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(old_bytes_per_sec)), old_bytes_per_sec); new_total_bw = old_bytes_per_sec + bytes_per_sec; new_total_gbr = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(new_total_bw)), new_total_bw); change_gbr = new_total_gbr - gbr_granted;#ifdef GRIO_DEBUG printk("xbow_bytes_to_gbr: gbr_granted %d new_total_gbr %d change_gbr %d\n", gbr_granted, new_total_gbr, change_gbr);#endif /* GRIO_DEBUG */ return (change_gbr);}/* Conversion from GBR to bytes */bandwidth_txbow_gbr_to_bytes(int gbr){ return (XBOW_GBR_TO_BYTES(gbr));}/* Given the vhdl for the desired xbow, the src and dest. widget ids * and the req_bw value, this xbow driver entry point accesses the * xbow registers and allocates the desired bandwidth if available. * * If bandwidth allocation is successful, return success else return failure. 
 */
int
xbow_prio_bw_alloc(vertex_hdl_t vhdl,
		   xwidgetnum_t src_wid,
		   xwidgetnum_t dest_wid,
		   unsigned long long old_alloc_bw,
		   unsigned long long req_bw)
{
	xbow_soft_t soft = xbow_soft_get(vhdl);
	volatile xbowreg_t *xreg;
	xbowreg_t mask;
	/* 0 = success, 1 = request refused. */
	int error = 0;
	bandwidth_t old_bw_BYTES, req_bw_BYTES;
	xbowreg_t old_xreg;
	int old_bw_GBR, req_bw_GBR, new_bw_GBR;

#ifdef GRIO_DEBUG
	printk("xbow_prio_bw_alloc: vhdl %d src_wid %d dest_wid %d req_bw %lld\n",
	       (int) vhdl, (int) src_wid, (int) dest_wid, req_bw);
#endif

	ASSERT(XBOW_WIDGET_IS_VALID(src_wid));
	ASSERT(XBOW_WIDGET_IS_VALID(dest_wid));

	/* All register and bookkeeping updates happen under this lock. */
	spin_lock(&soft->xbow_bw_alloc_lock);

	/* Get pointer to the correct register */
	xreg = XBOW_PRIO_ARBREG_PTR(soft->base, dest_wid, src_wid);

	/* Get mask for GBR count value */
	mask = XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(src_wid);

	/* Signed change in GBR ticks; negative means bandwidth is being
	 * returned, and req_bw_BYTES is made negative to match. */
	req_bw_GBR = xbow_bytes_to_gbr(old_alloc_bw, req_bw);
	req_bw_BYTES = (req_bw_GBR < 0) ?
		(-1 * xbow_gbr_to_bytes(ABS(req_bw_GBR))) :
		xbow_gbr_to_bytes(req_bw_GBR);

#ifdef GRIO_DEBUG
	printk("req_bw %lld req_bw_BYTES %lld req_bw_GBR %d\n",
	       req_bw, req_bw_BYTES, req_bw_GBR);
#endif	/* GRIO_DEBUG */

	/* Per-destination bookkeeping is indexed by dest_wid rebased by
	 * MAX_XBOW_PORTS — presumably widget ids 0x8..0xf map to 0..7;
	 * confirm against the xbow widget numbering. */
	old_bw_BYTES = soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS];
	old_xreg = *xreg;
	old_bw_GBR = (((*xreg) & mask) >> XB_ARB_GBR_SHFT(src_wid));

#ifdef GRIO_DEBUG
	ASSERT(XBOW_BYTES_TO_GBR(old_bw_BYTES) == old_bw_GBR);
	printk("old_bw_BYTES %lld old_bw_GBR %d\n", old_bw_BYTES, old_bw_GBR);
	printk("req_bw_BYTES %lld old_bw_BYTES %lld soft->bw_hiwm %lld\n",
	       req_bw_BYTES, old_bw_BYTES,
	       soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]);
#endif	/* GRIO_DEBUG */

	/* Accept the request only if we don't exceed the destination
	 * port HIWATER_MARK *AND* the max. link GBR arbitration count */
	if (((old_bw_BYTES + req_bw_BYTES) <=
	     soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]) &&
	    (req_bw_GBR + old_bw_GBR <= XBOW_ARB_GBR_MAX)) {

		new_bw_GBR = (old_bw_GBR + req_bw_GBR);

		/* Set this in the xbow link register */
		*xreg = (old_xreg & ~mask) |
			(new_bw_GBR << XB_ARB_GBR_SHFT(src_wid) & mask);

		soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS] =
			xbow_gbr_to_bytes(new_bw_GBR);
	} else {
		error = 1;
	}

	spin_unlock(&soft->xbow_bw_alloc_lock);

	return (error);
}