📄 xics.c
	} else {
		irq = real_irq_to_virt(vec);
		if (irq == NO_IRQ)
			irq = real_irq_to_virt_slowpath(vec);
		if (irq == NO_IRQ) {
			printk(KERN_ERR "Interrupt %u (real) is invalid,"
			       " disabling it.\n", vec);
			xics_disable_real_irq(vec);
		} else
			irq = irq_offset_up(irq);
	}
	return irq;
}

#ifdef CONFIG_SMP

irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	ops->qirr_info(cpu, 0xff);
	WARN_ON(cpu_is_offline(cpu));

	while (xics_ipi_message[cpu].value) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_RESCHEDULE, regs);
		}
#if 0
		if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
		}
#endif
#ifdef CONFIG_DEBUGGER
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
		}
#endif
	}
	return IRQ_HANDLED;
}

void xics_cause_IPI(int cpu)
{
	ops->qirr_info(cpu, IPI_PRIORITY);
}
#endif /* CONFIG_SMP */

void xics_setup_cpu(void)
{
	int cpu = smp_processor_id();

	ops->cppr_info(cpu, 0xff);
	iosync();

	/*
	 * Put the calling processor into the GIQ.  This is really only
	 * necessary from a secondary thread as the OF start-cpu interface
	 * performs this function for us on primary threads.
	 *
	 * XXX: undo of teardown on kexec needs this too, as may hotplug
	 */
	rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}

void xics_init_IRQ(void)
{
	int i;
	unsigned long intr_size = 0;
	struct device_node *np;
	uint *ireg, ilen, indx = 0;
	unsigned long intr_base = 0;
	struct xics_interrupt_node {
		unsigned long addr;
		unsigned long size;
	} intnodes[NR_CPUS];

	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
	if (!np)
		panic("xics_init_IRQ: can't find interrupt presentation");

nextnode:
	ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
	if (ireg) {
		/*
		 * set node starting index for this node
		 */
		indx = *ireg;
	}

	ireg = (uint *)get_property(np, "reg", &ilen);
	if (!ireg)
		panic("xics_init_IRQ: can't find interrupt reg property");

	while (ilen) {
		intnodes[indx].addr = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(uint);
		intnodes[indx].addr |= *ireg++;
		ilen -= sizeof(uint);
		intnodes[indx].size = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(uint);
		intnodes[indx].size |= *ireg++;
		ilen -= sizeof(uint);
		indx++;
		if (indx >= NR_CPUS)
			break;
	}

	np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
	if ((indx < NR_CPUS) && np)
		goto nextnode;

	/* Find the server numbers for the boot cpu. */
	for (np = of_find_node_by_type(NULL, "cpu");
	     np;
	     np = of_find_node_by_type(np, "cpu")) {
		ireg = (uint *)get_property(np, "reg", &ilen);
		if (ireg && ireg[0] == boot_cpuid_phys) {
			ireg = (uint *)get_property(np,
					"ibm,ppc-interrupt-gserver#s", &ilen);
			i = ilen / sizeof(int);
			if (ireg && i > 0) {
				default_server = ireg[0];
				/* take last element */
				default_distrib_server = ireg[i - 1];
			}
			ireg = (uint *)get_property(np,
					"ibm,interrupt-server#-size", NULL);
			if (ireg)
				interrupt_server_size = *ireg;
			break;
		}
	}
	of_node_put(np);

	intr_base = intnodes[0].addr;
	intr_size = intnodes[0].size;

	np = of_find_node_by_type(NULL, "interrupt-controller");
	if (!np) {
		printk(KERN_WARNING "xics: no ISA interrupt controller\n");
		xics_irq_8259_cascade_real = -1;
		xics_irq_8259_cascade = -1;
	} else {
		ireg = (uint *)get_property(np, "interrupts", NULL);
		if (!ireg)
			panic("xics_init_IRQ: can't find ISA interrupts property");

		xics_irq_8259_cascade_real = *ireg;
		xics_irq_8259_cascade =
			virt_irq_create_mapping(xics_irq_8259_cascade_real);
		i8259_init(0, 0);
		of_node_put(np);
	}

	if (platform_is_lpar())
		ops = &pSeriesLP_ops;
	else {
#ifdef CONFIG_SMP
		for_each_cpu(i) {
			int hard_id;

			/* FIXME: Do this dynamically! --RR */
			if (!cpu_present(i))
				continue;

			hard_id = get_hard_smp_processor_id(i);
			xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
						  intnodes[hard_id].size);
		}
#else
		xics_per_cpu[0] = ioremap(intr_base, intr_size);
#endif /* CONFIG_SMP */
	}

	for (i = irq_offset_value(); i < NR_IRQS; ++i)
		get_irq_desc(i)->handler = &xics_pic;

	xics_setup_cpu();

	ppc64_boot_msg(0x21, "XICS Done");
}

/*
 * We can't do this in init_IRQ because we need the memory subsystem up for
 * request_irq()
 */
static int __init xics_setup_i8259(void)
{
	if (ppc64_interrupt_controller == IC_PPC_XIC &&
	    xics_irq_8259_cascade != -1) {
		if (request_irq(irq_offset_up(xics_irq_8259_cascade),
				no_action, 0, "8259 cascade", NULL))
			printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
					"cascade\n");
	}
	return 0;
}
arch_initcall(xics_setup_i8259);

#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
	virt_irq_to_real_map[XICS_IPI] = XICS_IPI;

	/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
	request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
		    "IPI", NULL);
	get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
}
#endif

static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
	unsigned int irq;
	int status;
	int xics_status[2];
	unsigned long newmask;
	cpumask_t tmp = CPU_MASK_NONE;

	irq = virt_irq_to_real(irq_offset_down(virq));
	if (irq == XICS_IPI || irq == NO_IRQ)
		return;

	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
		       "returns %d\n", irq, status);
		return;
	}

	/* For the moment only implement delivery to all cpus or one cpu */
	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		newmask = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);
		if (cpus_empty(tmp))
			return;
		newmask = get_hard_smp_processor_id(first_cpu(tmp));
	}

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
			   irq, newmask, xics_status[1]);
	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
		       "returns %d\n", irq, status);
		return;
	}
}

void xics_teardown_cpu(int secondary)
{
	int cpu = smp_processor_id();

	ops->cppr_info(cpu, 0x00);
	iosync();

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary) {
		/*
		 * we need to EOI the IPI if we got here from kexec down IPI
		 *
		 * probably need to check all the other interrupts too
		 * should we be flagging idle loop instead?
		 * or creating some task to be scheduled?
		 */
		ops->xirr_info_set(cpu, XICS_IPI);
		rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
			(1UL << interrupt_server_size) - 1 -
			default_distrib_server, 0);
	}
}

#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
	int status;
	unsigned int irq, virq, cpu = smp_processor_id();

	/* Reject any interrupt that was queued to us... */
	ops->cppr_info(cpu, 0);
	iosync();

	/* remove ourselves from the global interrupt queue */
	status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
	WARN_ON(status < 0);

	/* Allow IPIs again... */
	ops->cppr_info(cpu, DEFAULT_PRIORITY);
	iosync();

	for_each_irq(virq) {
		irq_desc_t *desc;
		int xics_status[2];
		unsigned long flags;

		/* We can't set affinity on ISA interrupts */
		if (virq < irq_offset_value())
			continue;

		desc = get_irq_desc(virq);
		irq = virt_irq_to_real(irq_offset_down(virq));

		/* We need to get IPIs still. */
		if (irq == XICS_IPI || irq == NO_IRQ)
			continue;

		/* We only need to migrate enabled IRQS */
		if (desc == NULL || desc->handler == NULL
		    || desc->action == NULL
		    || desc->handler->set_affinity == NULL)
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
			printk(KERN_ERR "migrate_irqs_away: irq=%u "
					"ibm,get-xive returns %d\n",
					virq, status);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (xics_status[0] != get_hard_smp_processor_id(cpu))
			goto unlock;

		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
		       virq, cpu);

		/* Reset affinity to all cpus */
		desc->handler->set_affinity(virq, CPU_MASK_ALL);
		irq_affinity[virq] = CPU_MASK_ALL;
unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif
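Side note (not part of the kernel file above): xics_setup_cpu(), xics_teardown_cpu() and xics_migrate_irqs_away() all join or leave the global interrupt queue by passing the same indicator index, (1UL << interrupt_server_size) - 1 - default_distrib_server, to rtas_set_indicator(). The standalone sketch below only reproduces that arithmetic; interrupt_server_size = 8 and default_distrib_server = 1 are assumed illustrative values, not values taken from this file (the kernel reads the real ones from the device tree in xics_init_IRQ()).

#include <stdio.h>

/* Illustrative values only; the kernel obtains them from the
 * "ibm,interrupt-server#-size" property and the last entry of
 * "ibm,ppc-interrupt-gserver#s" in xics_init_IRQ() above. */
static unsigned int interrupt_server_size = 8;
static unsigned int default_distrib_server = 1;

int main(void)
{
	/* Same expression the code above passes to rtas_set_indicator()
	 * for GLOBAL_INTERRUPT_QUEUE: subtract the distribution server
	 * number from the all-ones value of the server-number field. */
	unsigned long giq_indicator =
		(1UL << interrupt_server_size) - 1 - default_distrib_server;

	printf("GIQ indicator index: %lu (0x%lx)\n",
	       giq_indicator, giq_indicator);
	/* With the assumed values this prints 254 (0xfe). */
	return 0;
}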