/* pcibr_dvr.c */
    }

    if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_C) {
        int_enable &= ~BRIDGE_ISR_BAD_XRESP_PKT;
    }
#endif  /* BRIDGE_ERROR_INTR_WAR */

#ifdef QL_SCSI_CTRL_WAR  /* for IP30 only */
    /* Really a QL rev A issue, but all newer hearts have newer QLs.
     * Forces all IO6/MSCSI to be new.
     */
    if (heart_rev() == HEART_REV_A)
        int_enable &= ~BRIDGE_IMR_PCI_MST_TIMEOUT;
#endif

#ifdef BRIDGE1_TIMEOUT_WAR
    if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
        /*
         * Turn off these interrupts; they can't be trusted in bridge 1.
         */
        int_enable &= ~(BRIDGE_IMR_XREAD_REQ_TIMEOUT | BRIDGE_IMR_UNEXP_RESP);
    }
#endif

    /* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
     * locked out to be freed up sooner (by timing out) so that the
     * read tnums are never completely used up.
     */
    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
        int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
        int_enable &= ~BRIDGE_ISR_XREAD_REQ_TIMEOUT;
        bridge->b_wid_req_timeout = 0x750;
    }

    /*
     * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
     * RRB0, RRB8, RRB1, and RRB9.  Assign them to DEVICE[2|3]--VCHAN3
     * so they are not used.
     */
    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
        bridge->b_even_resp |= 0x000f000f;
        bridge->b_odd_resp |= 0x000f000f;
    }

    if (IS_PIC_SOFT(pcibr_soft)) {
        bridge->p_int_enable_64 = (picreg_t)int_enable;
    }
    bridge->b_int_mode = 0;     /* do not send "clear interrupt" packets */

    bridge->b_wid_tflush;       /* wait until Bridge PIO complete */

    /*
     * Depending on the rev of bridge, disable certain features.
     * The easiest way seems to be to force the PCIBR_NOwhatever
     * flag on for all DMA calls, which overrides any PCIBR_whatever
     * flag, or even the setting of whatever from the PCIIO_DMA_class
     * flags (or even from the other PCIBR flags, since NO overrides YES).
     */
    pcibr_soft->bs_dma_flags = 0;

    /* PREFETCH:
     * Always completely disabled for REV.A;
     * at "pcibr_prefetch_enable_rev", anyone
     * asking for PCIIO_PREFETCH gets it.
     * Between these two points, you have to ask
     * for PCIBR_PREFETCH, which promises that
     * your driver knows about known Bridge WARs.
     */
    if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
        pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
    else if (pcibr_soft->bs_rev_num < (BRIDGE_WIDGET_PART_NUM << 4))
        pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;

    /* WRITE_GATHER: Disabled */
    if (pcibr_soft->bs_rev_num < (BRIDGE_WIDGET_PART_NUM << 4))
        pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;

    /* PIC only supports 64-bit direct mapping in PCI-X mode.  Since
     * all PCI-X devices that initiate memory transactions must be
     * capable of generating 64-bit addresses, we force 64-bit DMAs.
     */
    if (IS_PCIX(pcibr_soft)) {
        pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
    }
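#if 0
/*
 * Illustrative sketch only (not part of the driver): one way the
 * NO-overrides-YES precedence described above can be applied when a
 * DMA mapping is created.  The helper name is hypothetical;
 * PCIBR_NOPREFETCH and PCIIO_PREFETCH are the flags used above.
 */
static unsigned
example_resolve_dma_flags(pcibr_soft_t pcibr_soft, unsigned req_flags)
{
    /* bus-wide NO flags are OR'ed in, so they survive any YES request */
    unsigned flags = req_flags | pcibr_soft->bs_dma_flags;

    if (flags & PCIBR_NOPREFETCH)       /* NO overrides YES */
        flags &= ~PCIIO_PREFETCH;

    return flags;
}
#endif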
    {
        iopaddr_t        prom_base_addr = pcibr_soft->bs_xid << 24;
        int              prom_base_size = 0x1000000;
        int              status;
        struct resource *res;

        /* Allocate resource maps based on bus page size; for I/O and memory
         * space, free all pages except those in the base area and in the
         * range set by the PROM.
         *
         * PROM creates BAR addresses in this format: 0x0ws00000 where w is
         * the widget number and s is the device register offset for the slot.
         */

        /* Setup the Bus's PCI IO Root Resource. */
        pcibr_soft->bs_io_win_root_resource.start = PCIBR_BUS_IO_BASE;
        pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
        res = (struct resource *) kmalloc(sizeof(struct resource), GFP_KERNEL);
        if (!res)
            panic("PCIBR:Unable to allocate resource structure\n");

        /* Block off the range used by PROM. */
        res->start = prom_base_addr;
        res->end = prom_base_addr + (prom_base_size - 1);
        status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
        if (status)
            panic("PCIBR:Unable to request_resource()\n");

        /* Setup the Small Window Root Resource */
        pcibr_soft->bs_swin_root_resource.start = PAGE_SIZE;
        pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;

        /* Setup the Bus's PCI Memory Root Resource */
        pcibr_soft->bs_mem_win_root_resource.start = 0x200000;
        pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
        res = (struct resource *) kmalloc(sizeof(struct resource), GFP_KERNEL);
        if (!res)
            panic("PCIBR:Unable to allocate resource structure\n");

        /* Block off the range used by PROM. */
        res->start = prom_base_addr;
        res->end = prom_base_addr + (prom_base_size - 1);
        status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
        if (status)
            panic("PCIBR:Unable to request_resource()\n");
    }
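#if 0
/*
 * Illustrative sketch only (not part of the driver): decoding the PROM
 * BAR layout 0x0ws00000 described above.  prom_base_addr = bs_xid << 24
 * reserves 0x0w000000..0x0wffffff, which covers every slot's
 * PROM-assigned BAR for that widget; e.g. widget 0xe with slot offset 2
 * decodes to 0x0e200000.  The helper name is hypothetical.
 */
static iopaddr_t
example_prom_bar_addr(int widget, int slot_offset)
{
    return ((iopaddr_t)widget << 24) | ((iopaddr_t)slot_offset << 20);
}
#endif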
    /* Build the "no-slot" connection point. */
    pcibr_info = pcibr_device_info_new(pcibr_soft, PCIIO_SLOT_NONE,
                                       PCIIO_FUNC_NONE, PCIIO_VENDOR_ID_NONE,
                                       PCIIO_DEVICE_ID_NONE);
    noslot_conn = pciio_device_info_register(pcibr_vhdl, &pcibr_info->f_c);

    /* Remember the no-slot connection point info for tearing it
     * down during detach.
     */
    pcibr_soft->bs_noslot_conn = noslot_conn;
    pcibr_soft->bs_noslot_info = pcibr_info;

#if PCI_FBBE
    fast_back_to_back_enable = 1;
#endif

#if PCI_FBBE
    if (fast_back_to_back_enable) {
        /*
         * All devices on the bus are capable of fast back to back, so
         * we need to set the fast back to back bit in all devices on
         * the bus that are capable of doing such accesses.
         */
    }
#endif

    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
        /* Find out what is out there */
        (void)pcibr_slot_info_init(pcibr_vhdl, slot);
    }

    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
        /* Set up the address space for this slot in the PCI land */
        (void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);

    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
        /* Setup the device register */
        (void)pcibr_slot_device_init(pcibr_vhdl, slot);

    if (IS_PCIX(pcibr_soft)) {
        pcibr_soft->bs_pcix_rbar_inuse = 0;
        pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
        pcibr_soft->bs_pcix_rbar_percent_allowed =
            pcibr_pcix_rbars_calc(pcibr_soft);

        for (slot = pcibr_soft->bs_min_slot;
             slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
            /* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
            (void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
    }

    /* Set up convenience links */
    if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
        pcibr_bus_cnvlink(pcibr_soft->bs_vhdl);

    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
        /* Setup host/guest relations */
        (void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);

    /* Handle initial RRB management for Bridge and Xbridge */
    pcibr_initial_rrb(pcibr_vhdl,
                      pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);

    {
        /* Before any drivers get called that may want to re-allocate
         * RRBs, let's get some special cases pre-allocated.  Drivers
         * may override these pre-allocations, but by doing the
         * pre-allocations now we're assured not to step all over what
         * the driver intended.
         *
         * Note: Someday this should probably be moved over to pcibr_rrb.c
         */

        /*
         * Each Pbrick PCI bus only has slots 1 and 2.  Similarly for
         * widget 0xe on Ibricks.  Allocate RRBs accordingly.
         */
        if (pcibr_soft->bs_bricktype > 0) {
            switch (pcibr_soft->bs_bricktype) {
            case MODULE_PBRICK:
                do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
                do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
                break;
            case MODULE_IBRICK:
                /* port 0xe on the Ibrick only has slots 1 and 2 */
                if (pcibr_soft->bs_xid == 0xe) {
                    do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
                    do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
                } else {
                    /* allocate one RRB for the serial port */
                    do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
                }
                break;
            case MODULE_PXBRICK:
            case MODULE_IXBRICK:
            case MODULE_OPUSBRICK:
                /*
                 * If the IO9 is in the PXBrick (bus1, slot1), allocate
                 * RRBs to all the devices.
                 */
                if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
                    (pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
                    (pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
                    do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 4);
                    do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 4);
                    do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 4);
                    do_pcibr_rrb_autoalloc(pcibr_soft, 3, VCHAN0, 4);
                } else {
                    do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 4);
                    do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 4);
                }
                break;
            case MODULE_CGBRICK:
                do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 8);
                break;
            }   /* switch */
        }
    }   /* OK, the special RRB allocations are done. */

    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
        /* Call the device attach */
        (void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);

    pciio_device_attach(noslot_conn, (int)0);

    return 0;
}

/*
 * pcibr_detach:
 *      Detach the bridge device from the hwgraph after cleaning out all
 *      the underlying vertices.
 */
int
pcibr_detach(vertex_hdl_t xconn)
{
    pciio_slot_t  slot;
    vertex_hdl_t  pcibr_vhdl;
    pcibr_soft_t  pcibr_soft;
    bridge_t     *bridge;
    unsigned      s;

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, xconn, "pcibr_detach\n"));

    /* Get the bridge vertex from its xtalk connection point */
    if (hwgraph_traverse(xconn, EDGE_LBL_PCI, &pcibr_vhdl) != GRAPH_SUCCESS)
        return(1);

    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
    bridge = pcibr_soft->bs_base;

    s = pcibr_lock(pcibr_soft);
    /* Disable the interrupts from the bridge */
    if (IS_PIC_SOFT(pcibr_soft)) {
        bridge->p_int_enable_64 = 0;
    }
    pcibr_unlock(pcibr_soft, s);

    /* Detach all the PCI devices talking to this bridge */
    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
        pcibr_slot_detach(pcibr_vhdl, slot, 0, (char *)NULL, (int *)NULL);
    }

    /* Unregister the no-slot connection point */
    pciio_device_info_unregister(pcibr_vhdl,
                                 &(pcibr_soft->bs_noslot_info->f_c));

    kfree(pcibr_soft->bs_name);

    /* Disconnect the error interrupt and free the xtalk resources
     * associated with it.
     */
    xtalk_intr_disconnect(pcibr_soft->bsi_err_intr);
    xtalk_intr_free(pcibr_soft->bsi_err_intr);

    /* Clear the software state maintained by the bridge driver for this
     * bridge.
     */
    DEL(pcibr_soft);

    /* Remove the Bridge revision labelled info */
    (void)hwgraph_info_remove_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, NULL);

    /* Remove the character device associated with this bridge */
    (void)hwgraph_edge_remove(pcibr_vhdl, EDGE_LBL_CONTROLLER, NULL);

    /* Remove the PCI bridge vertex */
    (void)hwgraph_edge_remove(xconn, EDGE_LBL_PCI, NULL);

    return(0);
}

int
pcibr_asic_rev(vertex_hdl_t pconn_vhdl)
{
    vertex_hdl_t     pcibr_vhdl;
    int              tmp_vhdl;
    arbitrary_info_t ainfo;

    if (GRAPH_SUCCESS !=
        hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &pcibr_vhdl))
        return -1;

    tmp_vhdl = hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo);

    /*
     * Any hwgraph function that returns a vertex handle will implicitly
     * increment that vertex's reference count.  The caller must explicitly
     * decrement the vertex's reference count after the last reference to
     * that vertex.
     *
     * Decrement the reference count incremented by the call to
     * hwgraph_traverse().
     */
    hwgraph_vertex_unref(pcibr_vhdl);

    if (tmp_vhdl != GRAPH_SUCCESS)
        return -1;
    return (int) ainfo;
}
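#if 0
/*
 * Illustrative sketch only (not part of the driver): the hwgraph
 * reference-counting pattern described in pcibr_asic_rev() above.  Any
 * hwgraph call that hands back a vertex takes a reference the caller
 * must drop when done; the function name here is hypothetical.
 */
static int
example_lookup_and_unref(vertex_hdl_t conn)
{
    vertex_hdl_t     master;
    arbitrary_info_t info;
    int              rc;

    if (hwgraph_traverse(conn, EDGE_LBL_MASTER, &master) != GRAPH_SUCCESS)
        return -1;                      /* nothing to unref on failure */

    rc = hwgraph_info_get_LBL(master, INFO_LBL_PCIBR_ASIC_REV, &info);
    hwgraph_vertex_unref(master);       /* drop the traverse() reference */

    return (rc == GRAPH_SUCCESS) ? (int)info : -1;
}
#endif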
/* =====================================================================
 *    PIO MANAGEMENT
 */

static iopaddr_t
pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
                      pciio_slot_t slot,
                      pciio_space_t space,
                      iopaddr_t pci_addr,
                      size_t req_size,
                      unsigned flags)
{
    pcibr_info_t  pcibr_info = pcibr_info_get(pconn_vhdl);
    pciio_info_t  pciio_info = &pcibr_info->f_c;
    pcibr_soft_t  pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
    bridge_t     *bridge = pcibr_soft->bs_base;

    unsigned      bar;          /* which BASE reg on device is decoding */
    iopaddr_t     xio_addr = XIO_NOWHERE;
    iopaddr_t     base;         /* base of devio(x) mapped area on PCI */
    iopaddr_t     limit;        /* limit of devio(x) mapped area on PCI */

    pciio_space_t wspace;       /* which space device is decoding */
    iopaddr_t     wbase;        /* base of device decode on PCI */
    size_t        wsize;        /* size of device decode on PCI */

    int           try;          /* DevIO(x) window scanning order control */
    int           maxtry, halftry;
    int           win;          /* which DevIO(x) window is being used */
    pciio_space_t mspace;       /* target space for devio(x) register */
    iopaddr_t     mbase;        /* base of devio(x) mapped area on PCI */