📄 sbutils.c
字号:
/*
 * (tail of sb_find_pci_capability -- head is above this chunk)
 * Walk the PCI capability linked list looking for the requested
 * capability ID; optionally copy the capability body into *buf.
 */
	cap_id = read_pci_cfg_byte(cap_ptr);
	while (cap_id != req_cap_id) {
		/* next-capability pointer is the byte after the cap ID */
		cap_ptr = read_pci_cfg_byte((cap_ptr+1));
		if (cap_ptr == 0x00) break;	/* 0x00 terminates the list */
		cap_id = read_pci_cfg_byte(cap_ptr);
	}
	if (cap_id != req_cap_id) {
		return FALSE;	/* requested capability not present */
	}
	/* found the caller requested capability */
	if ((buf != NULL) && (buflen != NULL)) {
		bufsize = *buflen;
		if (!bufsize) goto end;
		*buflen = 0;
		/* copy the capability data, excluding cap ID and next-ptr bytes */
		cap_ptr += 2;
		/* clamp the copy so it stays within config space (SZPCR bytes) */
		if ((bufsize + cap_ptr) > SZPCR)
			bufsize = SZPCR - cap_ptr;
		*buflen = bufsize;
		while (bufsize--) {
			*buf = read_pci_cfg_byte(cap_ptr);
			cap_ptr++;
			buf++;
		}
	}
end:
	return TRUE;
}

/* Return TRUE if a PCIE capability exists in the pci config space. */
static bool
sb_ispcie(sb_info_t *si)
{
	return (sb_find_pci_capability(si, PCI_CAP_PCIECAP_ID, NULL, NULL));
}

/*
 * Scan the sb enumerated space to identify all cores.
 * Records each core's id, determines which core (PCI or PCIE) is the
 * host-bus core, and picks the gpio "controlling core".
 */
static void
BCMINITFN(sb_scan)(sb_info_t *si)
{
	uint origidx;
	uint i;
	bool pci;
	bool pcie;
	uint pciidx;
	uint pcieidx;
	uint pcirev;
	uint pcierev;

	/* numcores should already be set */
	ASSERT((si->numcores > 0) && (si->numcores <= SB_MAXCORES));

	/* save current core index so it can be restored before returning */
	origidx = sb_coreidx(&si->sb);

	si->sb.buscorerev = NOREV;
	si->sb.buscoreidx = BADIDX;
	si->gpioidx = BADIDX;

	pci = pcie = FALSE;
	pcirev = pcierev = NOREV;
	pciidx = pcieidx = BADIDX;

	/* visit every core once, remembering any PCI/PCIE cores seen */
	for (i = 0; i < si->numcores; i++) {
		sb_setcoreidx(&si->sb, i);
		si->coreid[i] = sb_coreid(&si->sb);
		if (si->coreid[i] == SB_PCI) {
			pciidx = i;
			pcirev = sb_corerev(&si->sb);
			pci = TRUE;
		} else if (si->coreid[i] == SB_PCIE) {
			pcieidx = i;
			pcierev = sb_corerev(&si->sb);
			pcie = TRUE;
		}
	}
	/*
	 * If both core types are present, the PCIE capability in pci config
	 * space decides which one is actually wired to the host bus.
	 */
	if (pci && pcie) {
		if (sb_ispcie(si))
			pci = FALSE;
		else
			pcie = FALSE;
	}
	if (pci) {
		si->sb.buscoretype = SB_PCI;
		si->sb.buscorerev = pcirev;
		si->sb.buscoreidx = pciidx;
	} else if (pcie) {
		si->sb.buscoretype = SB_PCIE;
		si->sb.buscorerev = pcierev;
		si->sb.buscoreidx = pcieidx;
	}

	/*
	 * Find the gpio "controlling core" type and index.
	 * Precedence:
	 * - if there's a chip common core - use that
	 * - else if there's a pci core (rev >= 2) - use that
	 */
	if (GOODIDX(sb_findcoreidx(si, SB_CC, 0))) {
		si->gpioidx = sb_findcoreidx(si, SB_CC, 0);
		si->gpioid = SB_CC;
	} else if (PCI(si) && (si->sb.buscorerev >= 2)) {
		si->gpioidx = si->sb.buscoreidx;
		si->gpioid = SB_PCI;
	} else
		/* NOTE(review): gpioidx is expected to be set elsewhere in this
		 * path -- assert rather than pick a core here */
		ASSERT(si->gpioidx != BADIDX);

	/* return to original core index */
	sb_setcoreidx(&si->sb, origidx);
}

/*
 * Free the sb state.  May be called with core in reset.
 * Unmaps per-core register windows (SB_BUS only) and frees si itself.
 */
void
sb_detach(sb_t *sbh)
{
	sb_info_t *si;
	uint idx;

	si = SB_INFO(sbh);

	if (si == NULL)
		return;

	if (BUSTYPE(si->sb.bustype) == SB_BUS)
		for (idx = 0; idx < SB_MAXCORES; idx++)
			if (si->regs[idx]) {
				REG_UNMAP(si->regs[idx]);
				si->regs[idx] = NULL;
			}
	MFREE(si->osh, si, sizeof(sb_info_t));
}

/*
 * Return the index of the coreunit'th instance of coreid,
 * or BADIDX if not found.
 */
static uint
sb_findcoreidx(sb_info_t *si, uint coreid, uint coreunit)
{
	uint found;
	uint i;

	found = 0;
	for (i = 0; i < si->numcores; i++)
		if (si->coreid[i] == coreid) {
			if (found == coreunit)
				return (i);
			found++;
		}
	return (BADIDX);
}

/*
 * This function changes logical "focus" to the indicated core;
 * must be called with interrupts off.
 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
 */
void *
sb_setcoreidx(sb_t *sbh, uint coreidx)
{
	sb_info_t *si;
	uint32 sbaddr;

	si = SB_INFO(sbh);

	if (coreidx >= si->numcores)
		return (NULL);

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((si->intrsenabled_fn == NULL) || !(*(si)->intrsenabled_fn)((si)->intr_arg));

	/* each core's registers occupy one SB_CORE_SIZE slot in enum space */
	sbaddr = SB_ENUM_BASE + (coreidx * SB_CORE_SIZE);
	switch (BUSTYPE(si->sb.bustype)) {
	case PCI_BUS:
		/* point bar0 window at the selected core's registers */
		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, 4, sbaddr);
		break;
	}
	si->curidx = coreidx;
	return (si->curmap);
}

/*
 * This function changes logical "focus" to the indicated core;
 * must be called with interrupts off.
 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
 */
void *
sb_setcore(sb_t *sbh, uint coreid, uint coreunit)
{
	sb_info_t *si;
	uint idx;

	si = SB_INFO(sbh);
	idx = sb_findcoreidx(si, coreid, coreunit);
	if (!GOODIDX(idx))
		return (NULL);
	return (sb_setcoreidx(sbh, idx));
}

/* Return chip number. */
uint
BCMINITFN(sb_chip)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	return (si->sb.chip);
}

/* Return chip revision number. */
uint
BCMINITFN(sb_chiprev)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	return (si->sb.chiprev);
}

/* Return chip common revision number. */
uint
BCMINITFN(sb_chipcrev)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	return (si->sb.ccrev);
}

/* Return chip package option. */
uint
BCMINITFN(sb_chippkg)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	return (si->sb.chippkg);
}

/* Return PCI core rev. (actually the host-bus core rev, pci or pcie) */
uint
BCMINITFN(sb_pcirev)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	return (si->sb.buscorerev);
}

/*
 * TRUE when the hardware workaround for erratum 16165 applies:
 * PCI host-bus core with core rev <= 10.
 * (Erratum details not visible in this file -- see WAR database.)
 */
bool
BCMINITFN(sb_war16165)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);

	return (PCI(si) && (si->sb.buscorerev <= 10));
}

/*
 * Apply the workaround for erratum 30841: program three PCIE serdes
 * receiver registers over MDIO.
 * (Magic values come from the WAR description -- not documented here.)
 */
static void
BCMINITFN(sb_war30841)(sb_info_t *si)
{
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
}

/* Return board vendor id. */
uint
BCMINITFN(sb_boardvendor)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	return (si->sb.boardvendor);
}

/* Return boardtype. */
uint
BCMINITFN(sb_boardtype)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	return (si->sb.boardtype);
}

/* Return bus type of sbh device. */
uint
sb_bus(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	return (si->sb.bustype);
}

/* Return bus core type. */
uint
sb_buscoretype(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	return (si->sb.buscoretype);
}

/* Return bus core revision. */
uint
sb_buscorerev(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	return (si->sb.buscorerev);
}

/*
 * Copy the list of found core ids into coreid[] and return the count.
 * Caller must provide room for at least si->numcores entries.
 */
uint
sb_corelist(sb_t *sbh, uint coreid[])
{
	sb_info_t *si;

	si = SB_INFO(sbh);

	bcopy((uchar*)si->coreid, (uchar*)coreid, (si->numcores * sizeof(uint)));
	return (si->numcores);
}

/* Return current register mapping. */
void *
sb_coreregs(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	ASSERT(GOODREGS(si->curmap));

	return (si->curmap);
}

/*
 * Do buffered registers update: broadcast a commit through chipcommon
 * (preferred) or the pci core.  Switches cores, so it disables
 * interrupts and restores the original core index before returning.
 */
void
sb_commit(sb_t *sbh)
{
	sb_info_t *si;
	uint origidx;
	uint intr_val = 0;

	si = SB_INFO(sbh);

	origidx = si->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(si, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (si->sb.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);

		/* do the buffer registers update */
		W_REG(&ccregs->broadcastaddress, SB_COMMIT);
		W_REG(&ccregs->broadcastdata, 0x0);
	} else if (PCI(si)) {
		sbpciregs_t *pciregs = (sbpciregs_t *)sb_setcore(sbh, SB_PCI, 0);

		/* do the buffer registers update */
		W_REG(&pciregs->bcastaddr, SB_COMMIT);
		W_REG(&pciregs->bcastdata, 0x0);
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
}

/*
 * Reset and re-enable a core.  The write/read-back/delay ordering below
 * is deliberate (the read-back flushes the posted write) -- do not
 * reorder these statements.
 */
void
sb_core_reset(sb_t *sbh, uint32 bits)
{
	sb_info_t *si;
	sbconfig_t *sb;
	volatile uint32 dummy;

	si = SB_INFO(sbh);
	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sbh, bits);

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | SBTML_RESET | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* clear any serror left over from before the reset */
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(si, &sb->sbtmstatehigh, 0);
	}
	/* clear any inband-error / timeout state */
	if ((dummy = R_SBREG(si, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);
}

/*
 * Fix up the current core's initiator timeout values.
 * Only applies on PCI bus with a pre-rev-5 PCI host core; the timeout
 * written depends on bus type and on whether the core is the pci core.
 */
void
sb_core_tofixup(sb_t *sbh)
{
	sb_info_t *si;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	/* nothing to do for PCIE, non-PCI buses, or PCI core rev >= 5 */
	if ((BUSTYPE(si->sb.bustype) != PCI_BUS) || PCIE(si) ||
	    (PCI(si) && (si->sb.buscorerev >= 5)))
		return;

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	if (BUSTYPE(si->sb.bustype) == SB_BUS) {
		SET_SBREG(si, &sb->sbimconfiglow,
		          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
		          (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
	} else {
		if (sb_coreid(sbh) == SB_PCI) {
			SET_SBREG(si, &sb->sbimconfiglow,
			          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
			          (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
		} else {
			SET_SBREG(si, &sb->sbimconfiglow,
			          (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
		}
	}

	sb_commit(sbh);
}

/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (Because of dma *any* core can access memory).
 *
 * The routine uses the bus to decide who is the master:
 *	PCI_BUS => pci or pcie
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 * on error.
 */

/* combined request/service timeout field mask in sbimconfiglow */
#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

uint32
sb_set_initiator_to(sb_t *sbh, uint32 to)
{
	sb_info_t *si;
	uint origidx, idx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;	/* -1: error return */
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	/* reject values outside the timeout fields */
	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	idx = BADIDX;
	switch (BUSTYPE(si->sb.bustype)) {
	case PCI_BUS:
		idx = si->sb.buscoreidx;
		break;
	default:
		ASSERT(0);
	}
	if (idx == BADIDX)
		return ret;

	INTR_OFF(si, intr_val);

	/* switch to the master core, swap the timeout fields, switch back */
	origidx = sb_coreidx(sbh);
	sb = REGS2SB(sb_setcoreidx(sbh, idx));

	tmp = R_SBREG(si, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;	/* previous timeout state */
	W_SBREG(si, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sbh);
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
	return ret;
}

/*
 * Put a core into reset, first rejecting and draining any in-flight
 * backplane transactions.  'bits' are core-specific sbtmstatelow bits
 * to preserve.  The write/read-back/delay ordering is deliberate.
 */
void
sb_core_disable(sb_t *sbh, uint32 bits)
{
	sb_info_t *si;
	volatile uint32 dummy;
	uint32 rej;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(si, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* reject value changed between sonics 2.2 and 2.3 */
	if (si->sb.sonicsrev == SONICS_2_2)
		rej = (1 << SBTML_REJ_SHIFT);
	else
		rej = (2 << SBTML_REJ_SHIFT);

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(si, &sb->sbtmstatelow) & SBTML_CLK) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(si, &sb->sbtmstatelow, rej);
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);

	/* if the core is an initiator, also reject and drain its requests */
	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(si, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(si, &sb->sbimstate);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(si, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(si, &sb->sbtmstatelow, (bits | SBTML_FGC | SBTML_CLK | rej | SBTML_RESET));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(si, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(si, &sb->sbtmstatelow, (bits | rej | SBTML_RESET));
	OSL_DELAY(1);
}

/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles. */
void
sb_watchdog(sb_t *sbh, uint ticks)
{
	sb_info_t *si = SB_INFO(sbh);

	/* make sure we come up in fast clock mode */
	sb_clkctl_clk(sbh, CLK_FAST);

	/* instant NMI */
	/* NOTE(review): only a chipcommon watchdog is handled; other gpioid
	 * values silently do nothing -- confirm that is intended */
	switch (si->gpioid) {
	case SB_CC:
		sb_corereg(si, 0, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
		break;
	}
}

/*
 * Configure the pci core for pci client (NIC) action
 * coremask is the bitvec of cores by index to be enabled.
 * (function continues beyond this chunk)
 */
void
BCMINITFN(sb_pci_setup)(sb_t *sbh, uint coremask)
{
	sb_info_t *si;
	sbconfig_t *sb;
	sbpciregs_t *pciregs;
	uint32 sbflag;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -