ml_sn_init.c
	router_map_init(npda);

	/* Allocate memory for the per-node router traversal queue */
	router_queue_init(npda, node);
	npda->sbe_info = kmem_zalloc_node_hint(sizeof (sbe_info_t), 0, node);
	ASSERT(npda->sbe_info);

#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
	/*
	 * Initialize bte info pointers to NULL
	 */
	for (i = 0; i < BTES_PER_NODE; i++) {
		npda->node_bte_info[i] = (bteinfo_t *)NULL;
	}
#endif
#endif /* LATER */
}

/* XXX - Move the interrupt stuff to intr.c ? */

/*
 * Set up the platform-dependent fields in the processor pda.
 * Must be done _after_ init_platform_nodepda().
 * If we need a lock here, something else is wrong!
 */
// void init_platform_pda(pda_t *ppda, cpuid_t cpu)
void init_platform_pda(cpuid_t cpu)
{
	hub_intmasks_t *intmasks;
#ifdef LATER
	cpuinfo_t cpuinfo;
#endif
	int i;
	cnodeid_t cnode;
	synergy_da_t *sda;
	int which_synergy;

#ifdef LATER
	/* Allocate per-cpu platform-dependent data */
	cpuinfo = (cpuinfo_t)kmem_alloc_node(sizeof(struct cpuinfo_s),
					     GFP_ATOMIC, cputocnode(cpu));
	ASSERT_ALWAYS(cpuinfo);
	ppda->pdinfo = (void *)cpuinfo;
	cpuinfo->ci_cpupda = ppda;
	cpuinfo->ci_cpuid = cpu;
#endif

	cnode = cpuid_to_cnodeid(cpu);
	which_synergy = cpuid_to_synergy(cpu);
	sda = Synergy_da_indr[(cnode * 2) + which_synergy];
	// intmasks = &ppda->p_intmasks;
	intmasks = &sda->s_intmasks;

#ifdef LATER
	ASSERT_ALWAYS(&ppda->p_nodepda);
#endif

	/* Clear INT_PEND0 masks. */
	for (i = 0; i < N_INTPEND0_MASKS; i++)
		intmasks->intpend0_masks[i] = 0;

	/* Set up pointer to the vector block in the nodepda. */
	/* (Can't use SUBNODEPDA - not working yet) */
	intmasks->dispatch0 =
	    &Nodepdaindr[cnode]->snpda[cpuid_to_subnode(cpu)].intr_dispatch0;
	intmasks->dispatch1 =
	    &Nodepdaindr[cnode]->snpda[cpuid_to_subnode(cpu)].intr_dispatch1;

	/* Clear INT_PEND1 masks. */
	for (i = 0; i < N_INTPEND1_MASKS; i++)
		intmasks->intpend1_masks[i] = 0;

#ifdef LATER
	/* Don't read the routers unless we're the master. */
	ppda->p_routertick = 0;
#endif
}

#if (defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)) && !defined(BRINGUP) /* protect low mem for IP35/7 */
#error "need protect_hub_calias, protect_nmi_handler_data"
#endif

#ifdef LATER
/*
 * For now, just protect the first page (exception handlers). We
 * may want to protect more stuff later.
 */
void
protect_hub_calias(nasid_t nasid)
{
	paddr_t pa = NODE_OFFSET(nasid) + 0; /* page 0 on node nasid */
	int i;

	for (i = 0; i < MAX_REGIONS; i++) {
		if (i == nasid_to_region(nasid))
			continue;
	}
}

/*
 * Protect the page of low memory used to communicate with the NMI handler.
 */
void
protect_nmi_handler_data(nasid_t nasid, int slice)
{
	paddr_t pa = NODE_OFFSET(nasid) + NMI_OFFSET(nasid, slice);
	int i;

	for (i = 0; i < MAX_REGIONS; i++) {
		if (i == nasid_to_region(nasid))
			continue;
	}
}
#endif /* LATER */
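/*
 * Editor's sketch (not part of the original source): the two loops
 * above are skeletons -- they skip the owning node's own region, but
 * their bodies are empty in this version of the file.  A minimal
 * illustration of the intended pattern, assuming a hypothetical
 * region_deny_access() helper that revokes one region's access to a
 * physical page:
 */
#if 0	/* illustrative only */
static void
protect_page_from_remote_regions(nasid_t nasid, paddr_t pa)
{
	int i;

	for (i = 0; i < MAX_REGIONS; i++) {
		if (i == nasid_to_region(nasid))
			continue;		/* owning region keeps access */
		region_deny_access(pa, i);	/* hypothetical helper */
	}
}
#endif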
#ifdef LATER
/*
 * Protect areas of memory that we access uncached by marking them as
 * poisoned so the T5 can't read them speculatively and erroneously
 * mark them dirty in its cache, only to write them back with old data
 * later.
 */
static void
protect_low_memory(nasid_t nasid)
{
	/* Protect low memory directory */
	poison_state_alter_range(KLDIR_ADDR(nasid), KLDIR_SIZE, 1);

	/* Protect klconfig area */
	poison_state_alter_range(KLCONFIG_ADDR(nasid), KLCONFIG_SIZE(nasid), 1);

	/* Protect the PI error spool area. */
	poison_state_alter_range(PI_ERROR_ADDR(nasid), PI_ERROR_SIZE(nasid), 1);

	/* Protect CPU A's cache error eframe area. */
	poison_state_alter_range(TO_NODE_UNCAC(nasid, CACHE_ERR_EFRAME),
				 CACHE_ERR_AREA_SIZE, 1);

	/* Protect CPU B's area */
	poison_state_alter_range(TO_NODE_UNCAC(nasid, CACHE_ERR_EFRAME)
				 ^ UALIAS_FLIP_BIT,
				 CACHE_ERR_AREA_SIZE, 1);
#error "SN1 not handled correctly"
}
#endif /* LATER */

/*
 * per_hub_init
 *
 * This code is executed once for each Hub chip.
 */
void
per_hub_init(cnodeid_t cnode)
{
	uint64_t done;
	nasid_t nasid;
	nodepda_t *npdap;
#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
	/* SN1 specific */
	ii_icmr_u_t ii_icmr;
	ii_ibcr_u_t ii_ibcr;
#endif
#ifdef LATER
	int i;
#endif

	nasid = COMPACT_TO_NASID_NODEID(cnode);

	ASSERT(nasid != INVALID_NASID);
	ASSERT(NASID_TO_COMPACT_NODEID(nasid) == cnode);

	/* Grab the hub_mask lock. */
	spin_lock(&hub_mask_lock);

	/* Test our bit. */
	if (!(done = CNODEMASK_TSTB(hub_init_mask, cnode))) {
		/* Turn our bit on in the mask. */
		CNODEMASK_SETB(hub_init_mask, cnode);
	}

#if defined(SN0_HWDEBUG)
	hub_config_setup();
#endif

	/* Release the hub_mask lock. */
	spin_unlock(&hub_mask_lock);

	/*
	 * Do the actual initialization if it hasn't been done yet.
	 * We don't need to hold a lock for this work.
	 */
	if (!done) {
		npdap = NODEPDA(cnode);

#if defined(CONFIG_IA64_SGI_SYNERGY_PERF)
		/* initialize per-node synergy perf instrumentation */
		npdap->synergy_perf_enabled = 0;	/* off by default */
		npdap->synergy_perf_lock = SPIN_LOCK_UNLOCKED;
		npdap->synergy_perf_freq = SYNERGY_PERF_FREQ_DEFAULT;
		npdap->synergy_inactive_intervals = 0;
		npdap->synergy_active_intervals = 0;
		npdap->synergy_perf_data = NULL;
		npdap->synergy_perf_first = NULL;
#endif /* CONFIG_IA64_SGI_SYNERGY_PERF */

		npdap->hub_chip_rev = get_hub_chiprev(nasid);

#ifdef LATER
		for (i = 0; i < CPUS_PER_NODE; i++) {
			cpu = cnode_slice_to_cpuid(cnode, i);
			if (!cpu_enabled(cpu))
				SET_CPU_LEDS(nasid, i, 0xf);
		}
#endif /* LATER */

#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
		/* SN1 specific */

		/*
		 * Set the total number of CRBs that can be used.
		 */
		ii_icmr.ii_icmr_regval = 0x0;
		ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xF;
		REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);

		/*
		 * Set the number of CRBs that both of the BTEs combined
		 * can use minus 1.
		 */
		ii_ibcr.ii_ibcr_regval = 0x0;
		ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
		REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);

		/*
		 * Set CRB timeout to be 10ms.
		 */
		REMOTE_HUB_S(nasid, IIO_ICTP, 0x1000);
		REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
#endif /* SN1 specific */

		/* Reserve all of the hardwired interrupt levels. */
		intr_reserve_hardwired(cnode);

		/* Initialize error interrupts for this hub. */
		hub_error_init(cnode);

#ifdef LATER
		/* Set up correctable memory/directory ECC error interrupt. */
		install_eccintr(cnode);

		/* Protect our exception vectors from accidental corruption. */
		protect_hub_calias(nasid);

		/* Enable RT clock interrupts */
		hub_rtc_init(cnode);

		hub_migrintr_init(cnode); /* Enable migration interrupt */
#endif /* LATER */

		spin_lock(&hub_mask_lock);
		CNODEMASK_SETB(hub_init_done_mask, cnode);
		spin_unlock(&hub_mask_lock);
	} else {
		/*
		 * Wait for the other CPU to complete the initialization.
		 */
		while (CNODEMASK_TSTB(hub_init_done_mask, cnode) == 0) {
			/*
			 * On SNIA64 we should never get here ...
			 */
			printk("WARNING: per_hub_init: Should NEVER get here!\n");
			/* LOOP */ ;
		}
	}
}
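/*
 * Editor's sketch (not part of the original source): per_hub_init()
 * above uses a first-CPU-wins idiom -- test-and-set a per-node bit
 * under a spinlock, let the winner do the one-time setup, and make
 * any latecomer spin on a separate "done" mask.  A generic rendering
 * of the same idiom, assuming Linux 2.4-era bitop and spinlock
 * primitives:
 */
#if 0	/* illustrative only */
static spinlock_t once_lock = SPIN_LOCK_UNLOCKED;
static volatile unsigned long once_started, once_done;	/* one bit per node */

static void
once_per_node(int node)
{
	int winner;

	spin_lock(&once_lock);
	winner = !test_bit(node, &once_started);
	if (winner)
		set_bit(node, &once_started);
	spin_unlock(&once_lock);

	if (winner) {
		/* ... one-time per-node hardware setup runs exactly once ... */
		set_bit(node, &once_done);
	} else {
		while (!test_bit(node, &once_done))
			;	/* wait for the winner to finish */
	}
}
#endif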
*/ printk("WARNING: per_hub_init: Should NEVER get here!\n"); /* LOOP */ ; } }}extern voidupdate_node_information(cnodeid_t cnodeid){ nodepda_t *npda = NODEPDA(cnodeid); nodepda_router_info_t *npda_rip; /* Go through the list of router info * structures and copy some frequently * accessed info from the info hanging * off the corresponding router vertices */ npda_rip = npda->npda_rip_first; while(npda_rip) { if (npda_rip->router_infop) { npda_rip->router_portmask = npda_rip->router_infop->ri_portmask; npda_rip->router_slot = npda_rip->router_infop->ri_slotnum; } else { /* No router, no ports. */ npda_rip->router_portmask = 0; } npda_rip = npda_rip->router_next; }}hubreg_tget_region(cnodeid_t cnode){ if (fine_mode) return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT; else return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;}hubreg_tnasid_to_region(nasid_t nasid){ if (fine_mode) return nasid >> NASID_TO_FINEREG_SHFT; else return nasid >> NASID_TO_COARSEREG_SHFT;}