📄 ml_sn_intr.c
字号:
}/* * Actually block or unblock an interrupt */voiddo_intr_block_bit(cpuid_t cpu, int bit, int block){ intr_vecblk_t *vecblk; int ip; unsigned long s; hubreg_t *intpend_masks; volatile hubreg_t mask_value; volatile hubreg_t *mask_reg; intr_get_ptrs(cpu, bit, &bit, &intpend_masks, &vecblk, &ip); INTR_LOCK(vecblk); if (block) /* Block */ intpend_masks[0] &= ~(1ULL << (uint64_t)bit); else /* Unblock */ intpend_masks[0] |= (1ULL << (uint64_t)bit); if (ip == 0) { mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cputocnode(cpu)), cpuid_to_subnode(cpu), PI_INT_MASK0_A); } else { mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cputocnode(cpu)), cpuid_to_subnode(cpu), PI_INT_MASK1_A); } HUB_S(mask_reg, intpend_masks[0]); /* * Wait for it to take effect. (One read should suffice.) * This is only necessary when blocking an interrupt */ if (block) while ((mask_value = HUB_L(mask_reg)) != intpend_masks[0]) ; INTR_UNLOCK(vecblk);}/* * Block a particular interrupt (cpu/bit pair). *//* ARGSUSED */voidintr_block_bit(cpuid_t cpu, int bit){ do_intr_block_bit(cpu, bit, 1);}/* * Unblock a particular interrupt (cpu/bit pair). *//* ARGSUSED */voidintr_unblock_bit(cpuid_t cpu, int bit){ do_intr_block_bit(cpu, bit, 0);}/* verifies that the specified CPUID is on the specified SUBNODE (if any) */#define cpu_on_subnode(cpuid, which_subnode) \ (((which_subnode) == SUBNODE_ANY) || (cpuid_to_subnode(cpuid) == (which_subnode)))/* * Choose one of the CPUs on a specified node or subnode to receive * interrupts. Don't pick a cpu which has been specified as a NOINTR cpu. * * Among all acceptable CPUs, the CPU that has the fewest total number * of interrupts targetted towards it is chosen. Note that we never * consider how frequent each of these interrupts might occur, so a rare * hardware error interrupt is weighted equally with a disk interrupt. 
*/static cpuid_tdo_intr_cpu_choose(cnodeid_t cnode, int which_subnode){ cpuid_t cpu, best_cpu = CPU_NONE; int slice, min_count=1000; min_count = 1000; for (slice=0; slice < CPUS_PER_NODE; slice++) { intr_vecblk_t *vecblk0, *vecblk1; int total_intrs_to_slice; subnode_pda_t *snpda; int local_cpu_num; cpu = cnode_slice_to_cpuid(cnode, slice); if (cpu == CPU_NONE) continue; /* If this cpu isn't enabled for interrupts, skip it */ if (!cpu_enabled(cpu) || !cpu_allows_intr(cpu)) continue; /* If this isn't the right subnode, skip it */ if (!cpu_on_subnode(cpu, which_subnode)) continue; /* OK, this one's a potential CPU for interrupts */ snpda = SUBNODEPDA(cnode,SUBNODE(slice)); vecblk0 = &snpda->intr_dispatch0; vecblk1 = &snpda->intr_dispatch1; local_cpu_num = LOCALCPU(slice); total_intrs_to_slice = vecblk0->cpu_count[local_cpu_num] + vecblk1->cpu_count[local_cpu_num]; if (min_count > total_intrs_to_slice) { min_count = total_intrs_to_slice; best_cpu = cpu; } } return best_cpu;}/* * Choose an appropriate interrupt target CPU on a specified node. * If which_subnode is SUBNODE_ANY, then subnode is not considered. * Otherwise, the chosen CPU must be on the specified subnode. */static cpuid_tintr_cpu_choose_from_node(cnodeid_t cnode, int which_subnode){ return(do_intr_cpu_choose(cnode, which_subnode));}#ifdef LATER/* * Convert a subnode vertex into a (cnodeid, which_subnode) pair. * Return 0 on success, non-zero on failure. */static intsubnodevertex_to_subnode(devfs_handle_t vhdl, cnodeid_t *cnodeidp, int *which_subnodep){ arbitrary_info_t which_subnode; cnodeid_t cnodeid; /* Try to grab subnode information */ if (hwgraph_info_get_LBL(vhdl, INFO_LBL_CPUBUS, &which_subnode) != GRAPH_SUCCESS) return(-1); /* On which node? 
 */
	cnodeid = master_node_get(vhdl);
	if (cnodeid == CNODEID_NONE)
		return(-1);

	*which_subnodep = (int)which_subnode;
	*cnodeidp = cnodeid;
	return(0); /* success */
}
#endif /* LATER */

/* Make it easy to identify subnode vertices in the hwgraph */
void
mark_subnodevertex_as_subnode(devfs_handle_t vhdl, int which_subnode)
{
	graph_error_t rv;

	/* which_subnode must name a real subnode */
	ASSERT(0 <= which_subnode);
	ASSERT(which_subnode < NUM_SUBNODES);

	/* Attach the subnode number to the vertex under INFO_LBL_CPUBUS ... */
	rv = hwgraph_info_add_LBL(vhdl, INFO_LBL_CPUBUS, (arbitrary_info_t)which_subnode);
	ASSERT_ALWAYS(rv == GRAPH_SUCCESS);

	/* ... and export the label so it is externally visible. */
	rv = hwgraph_info_export_LBL(vhdl, INFO_LBL_CPUBUS, sizeof(arbitrary_info_t));
	ASSERT_ALWAYS(rv == GRAPH_SUCCESS);
}

/*
 * Given a device descriptor, extract interrupt target information and
 * choose an appropriate CPU.  Return CPU_NONE if we can't make sense
 * out of the target information.
 * TBD: Should this be considered platform-independent code?
 */
#ifdef LATER
static cpuid_t
intr_target_from_desc(device_desc_t dev_desc, int favor_subnode)
{
	cpuid_t cpuid = CPU_NONE;
	cnodeid_t cnodeid;
	int which_subnode;
	devfs_handle_t intr_target_dev;

	if ((intr_target_dev = device_desc_intr_target_get(dev_desc)) != GRAPH_VERTEX_NONE) {
		/*
		 * A valid device was specified.  If it's a particular
		 * CPU, then use that CPU as target.
		 */
		cpuid = cpuvertex_to_cpuid(intr_target_dev);
		if (cpuid != CPU_NONE)
			goto cpuchosen;

		/* If a subnode vertex was specified, pick a CPU on that subnode. */
		if (subnodevertex_to_subnode(intr_target_dev, &cnodeid, &which_subnode) == 0) {
			cpuid = intr_cpu_choose_from_node(cnodeid, which_subnode);
			goto cpuchosen;
		}

		/*
		 * Otherwise, pick a CPU on the node that owns the
		 * specified target.  Favor "favor_subnode", if specified.
		 */
		cnodeid = master_node_get(intr_target_dev);
		if (cnodeid != CNODEID_NONE) {
			cpuid = intr_cpu_choose_from_node(cnodeid, favor_subnode);
			goto cpuchosen;
		}
	}

cpuchosen:
	return(cpuid);
}
#endif /* LATER */

#ifdef LATER
/*
 * Check if we had already visited this candidate cnode.
 * Returns NULL when "candidate" is already in the visited list,
 * non-NULL (the list pointer) when it is new.
 */
static void *
intr_cnode_seen(cnodeid_t candidate, void *arg1, void *arg2)
{
	int i;
	cnodeid_t *visited_cnodes = (cnodeid_t *)arg1;
	int *num_visited_cnodes = (int *)arg2;

	ASSERT(visited_cnodes);
	ASSERT(*num_visited_cnodes <= numnodes);

	for (i = 0; i < *num_visited_cnodes; i++) {
		if (candidate == visited_cnodes[i])
			return(NULL);
	}

	return(visited_cnodes);
}
#endif /* LATER */

/*
 * intr_bit_reserve_test(cpuid,which_subnode,cnode,req_bit,intr_resflags,
 *			 owner_dev,intr_name,*resp_bit)
 *	Either cpuid is not CPU_NONE or cnodeid not CNODE_NONE but
 *	not both.
 * 1.	If cpuid is specified, this routine tests if this cpu can be a valid
 *	interrupt target candidate.
 * 2.	If cnodeid is specified, this routine tests if there is a cpu on
 *	this node which can be a valid interrupt target candidate.
 * 3.	If a valid interrupt target cpu candidate is found then an attempt at
 *	reserving an interrupt bit on the corresponding cnode is made.
 *
 * If steps 1 & 2 both fail or step 3 fails then we are not able to get a valid
 * interrupt target cpu then routine returns CPU_NONE (failure)
 * Otherwise routine returns cpuid of interrupt target (success)
 */
static cpuid_t
intr_bit_reserve_test(cpuid_t cpuid,
		      int favor_subnode,
		      cnodeid_t cnodeid,
		      int req_bit,
		      int intr_resflags,
		      devfs_handle_t owner_dev,
		      char *intr_name,
		      int *resp_bit)
{
	ASSERT((cpuid==CPU_NONE) || (cnodeid==CNODEID_NONE));

	if (cnodeid != CNODEID_NONE) {
		/* Try to choose a interrupt cpu candidate */
		cpuid = intr_cpu_choose_from_node(cnodeid, favor_subnode);
	}

	if (cpuid != CPU_NONE) {
		/* Try to reserve an interrupt bit on the hub
		 * corresponding to the canidate cnode. If we
		 * are successful then we got a cpu which can
		 * act as an interrupt target for the io device.
		 * Otherwise we need to continue the search
		 * further.
		 */
		*resp_bit = do_intr_reserve_level(cpuid, req_bit,
						  intr_resflags, II_RESERVE,
						  owner_dev, intr_name);

		if (*resp_bit >= 0)
			/* The interrupt target specified was fine */
			return(cpuid);
	}
	return(CPU_NONE);
}

/*
 * intr_heuristic(dev_t dev,device_desc_t dev_desc,
 *		  int req_bit,int intr_resflags,dev_t owner_dev,
 *		  char *intr_name,int *resp_bit)
 *
 * Choose an interrupt destination for an interrupt.
 *	dev is the device for which the interrupt is being set up
 *	dev_desc is a description of hardware and policy that could
 *		help determine where this interrupt should go
 *	req_bit is the interrupt bit requested
 *		(can be INTRCONNECT_ANY_BIT in which the first available
 *		interrupt bit is used)
 *	intr_resflags indicates whether we want to (un)reserve bit
 *	owner_dev is the owner device
 *	intr_name is the readable interrupt name
 *	resp_bit indicates whether we succeeded in getting the required
 *		action { (un)reservation} done
 *		negative value indicates failure
 *
 */
/* ARGSUSED */
cpuid_t
intr_heuristic(devfs_handle_t dev, device_desc_t dev_desc,
	       int req_bit, int intr_resflags, devfs_handle_t owner_dev,
	       char *intr_name, int *resp_bit)
{
	cpuid_t cpuid;				/* possible intr targ*/
	cnodeid_t candidate;			/* possible canidate */
#ifdef LATER
	/* NOTE(review): "candidate" is declared a second time below; if
	 * LATER were ever defined this would be a duplicate declaration --
	 * verify before enabling this section. */
	cnodeid_t visited_cnodes[MAX_NASIDS],	/* nodes seen so far */
		  center,			/* node we are on */
		  candidate;			/* possible canidate */
	int num_visited_cnodes = 0;		/* # nodes seen */
	int radius = 1,				/* start looking at the
						 * current node */
	    maxradius = physmem_maxradius();
	void *rv;
#endif /* LATER */
	int which_subnode = SUBNODE_ANY;

/* SN1 + pcibr Addressing Limitation */
	{
		devfs_handle_t pconn_vhdl;
		pcibr_soft_t pcibr_soft;

		/*
		 * This combination of SN1 and Bridge hardware has an odd
		 * "limitation".  Due to the choice of addresses for PI0 and
		 * PI1 registers on SN1 and historical limitations in Bridge,
		 * Bridge is unable to send interrupts to both PI0 CPUs and
		 * PI1 CPUs -- we have to choose one set or the other.
		 * That choice is implicitly made when Bridge first attaches
		 * its error interrupt.  After that point, all subsequent
		 * interrupts are restricted to the same PI number (though
		 * it's possible to send interrupts to the same PI number
		 * on a different node).
		 *
		 * Since neither SN1 nor Bridge designers are willing to admit a
		 * bug, we can't really call this a "workaround".  It's a permanent
		 * solution for an SN1-specific and Bridge-specific hardware
		 * limitation that won't ever be lifted.
		 */
		if ((hwgraph_edge_get(dev, EDGE_LBL_PCI, &pconn_vhdl) == GRAPH_SUCCESS) &&
		    ((pcibr_soft = pcibr_soft_get(pconn_vhdl)) != NULL)) {
			/*
			 * We "know" that the error interrupt is the first
			 * interrupt set up by pcibr_attach. Send all interrupts
			 * on this bridge to the same subnode number.
			 */
			if (pcibr_soft->bsi_err_intr) {
				which_subnode = cpuid_to_subnode(((hub_intr_t) pcibr_soft->bsi_err_intr)->i_cpuid);
			}
		}
	}

#ifdef LATER
	/*
	 * If an interrupt target was specified for this
	 * interrupt allocation, try to use it.
	 */
	if (dev_desc) {
		/* Try to see if the interrupt target specified in the
		 * device descriptor is a legal candidate.
		 */
		cpuid = intr_bit_reserve_test(intr_target_from_desc(dev_desc, which_subnode),
					      which_subnode,
					      CNODEID_NONE,
					      req_bit,
					      intr_resflags,
					      owner_dev,
					      intr_name,
					      resp_bit);

		if (cpuid != CPU_NONE) {
			if (cpu_on_subnode(cpuid, which_subnode))
				return(cpuid);	/* got a valid interrupt target */

			printk("Override explicit interrupt targetting: %v (0x%x)\n",
				owner_dev, owner_dev);

			intr_unreserve_level(cpuid, *resp_bit);
		}

		/* Fall through on to the next step in the search for
		 * the interrupt candidate.
		 */
	}
#endif /* LATER */

	/* Check if we can find a valid interrupt target candidate on
	 * the master node for the device.
	 */
	cpuid = intr_bit_reserve_test(CPU_NONE, which_subnode,
				      master_node_get(dev), req_bit,
				      intr_resflags, owner_dev,
				      intr_name, resp_bit);

	if (cpuid != CPU_NONE) {
		if (cpu_on_subnode(cpuid, which_subnode))
			return(cpuid);	/* got a valid interrupt target */
		else
			intr_unreserve_level(cpuid, *resp_bit);
	}

	PRINT_WARNING("Cannot target interrupts to closest node(%d): %ld (0x%lx)\n",
		master_node_get(dev),(long) owner_dev, (unsigned long)owner_dev);

	/* Fall through into the default algorithm
	 * (exhaustive-search-for-the-nearest-possible-interrupt-target)
	 * for finding the interrupt target
	 */
#ifndef BRINGUP
	// Use of this algorithm is deferred until the supporting
	// code has been implemented.
	/*
	 * No valid interrupt specification exists.
	 * Try to find a node which is closest to the current node
	 * which can process interrupts from a device
	 */
	/* NOTE(review): this disabled branch uses center/radius/maxradius/
	 * visited_cnodes/rv, which are declared only under #ifdef LATER
	 * above -- the #ifndef BRINGUP vs #ifdef LATER guards look
	 * inconsistent; verify before enabling. */
	center = cpuid_to_cnodeid(smp_processor_id());
	while (radius <= maxradius) {

		/* Try to find a node at the given radius and which
		 * we haven't seen already.
		 */
		rv = physmem_select_neighbor_node(center,radius,&candidate,
						  intr_cnode_seen,
						  (void *)visited_cnodes,
						  (void *)&num_visited_cnodes);

		if (!rv) {
			/* We have seen all the nodes at this particular radius
			 * Go on to the next radius level.
			 */
			radius++;
			continue;
		}

		/* We are seeing this candidate cnode for the first time */
		visited_cnodes[num_visited_cnodes++] = candidate;

		cpuid = intr_bit_reserve_test(CPU_NONE, which_subnode,
					      candidate, req_bit,
					      intr_resflags, owner_dev,
					      intr_name, resp_bit);

		if (cpuid != CPU_NONE) {
			if (cpu_on_subnode(cpuid, which_subnode))
				return(cpuid);	/* got a valid interrupt target */
			else
				intr_unreserve_level(cpuid, *resp_bit);
		}
	}
#else /* BRINGUP */
	{
		// Do a stupid round-robin assignment of the node.
static cnodeid_t last_node = -1; if (last_node >= numnodes) last_node = 0; for (candidate = last_node + 1; candidate != last_node; candidate++) { if (candidate == numnodes) candidate = 0; cpuid = intr_bit_reserve_test(CPU_NONE, which_subnode, candidate, req_bit, intr_resflags, owner_dev, intr_name, resp_bit); if (cpuid != CPU_NONE) { if (cpu_on_subnode(cpuid, which_subnode)) { last_node = candidate; return(cpuid); /* got a valid interrupt target */ } else intr_unreserve_level(cpuid, *resp_bit); } } last_node = candidate; }#endif PRINT_WARNING("Cannot target interrupts to any close node: %ld (0x%lx)\n", (long)owner_dev, (unsigned long)owner_dev); /* In the worst case try to allocate interrupt bits on the * master processor's node. We may get here during error interrupt * allocation phase when the topology matrix is not yet setup * and hence cannot do an exhaustive search. */ ASSERT(cpu_allows_intr(master_procid)); cpuid = intr_bit_reserve_test(master_procid, which_subnode, CNODEID_NONE, req_bit, intr_resflags, owner_dev, intr_name, resp_bit); if (cpuid != CPU_NONE) { if (cpu_on_subnode(cpuid, which_subnode)) return(cpuid); else intr_unreserve_level(cpuid, *resp_bit); } PRINT_WARNING("Cannot target interrupts: %ld (0x%lx)\n", (long)owner_dev, (unsigned long)owner_dev); return(CPU_NONE); /* Should never get here */}#ifndef BRINGUP/* * Should never receive an exception while running on the idle * stack. It IS possible to handle *interrupts* while on the * idle stack, but a non-interrupt *exception* is a problem. */voididle_err(inst_t *epc, uint cause, void *fep, void *sp){ eframe_t *ep = (eframe_t *)fep; if ((cause & CAUSE_EXCMASK) == EXC_IBE || (cause & CAUSE_EXCMASK) == EXC_DBE) { (void)dobuserre((eframe_t *)ep, epc, 0);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -