📄 io_apic_64.c
		setup_ExtINT_IRQ0_pin(apic2, pin2, cfg->vector);
		if (timer_irq_works()) {
			apic_printk(APIC_VERBOSE, " works.\n");
			nmi_watchdog_default();
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		clear_IO_APIC_pin(apic2, pin2);
	}
	apic_printk(APIC_VERBOSE, " failed.\n");

	if (nmi_watchdog == NMI_IO_APIC) {
		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = 0;
	}

	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");

	disable_8259A_irq(0);
	irq_desc[0].chip = &lapic_irq_type;
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_VERBOSE, " works.\n");
		goto out;
	}
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_VERBOSE, " failed.\n");

	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");

	init_8259A(0);
	make_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_VERBOSE, " works.\n");
		goto out;
	}
	apic_printk(APIC_VERBOSE, " failed :(.\n");
	panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
out:
	local_irq_restore(flags);
}

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * IRQs that are handled by the PIC in the MPS IOAPIC case.
 * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
 *   Linux doesn't really care, as it's not actually used
 *   for any interrupt handling anyway.
 */
#define PIC_IRQS	(1<<2)

void __init setup_IO_APIC(void)
{
	enable_IO_APIC();

	if (acpi_ioapic)
		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
	else
		io_apic_irqs = ~PIC_IRQS;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");

	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	check_timer();
	if (!acpi_ioapic)
		print_IO_APIC();
}

struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];

static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}

static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
	set_kset_name("ioapic"),
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
	struct sys_device *dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);
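/*
 * The sysdev class above snapshots every redirection-table entry at
 * suspend time and replays it at resume (re-asserting the IO-APIC's
 * physical ID first), since the IO-APIC loses its routing state
 * across a suspend-to-RAM cycle.
 */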
/*
 * Dynamic irq allocation and deallocation
 */
int create_irq(void)
{
	/* Allocate an unused irq */
	int irq;
	int new;
	unsigned long flags;

	irq = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = (NR_IRQS - 1); new >= 0; new--) {
		if (platform_legacy_irq(new))
			continue;
		if (irq_cfg[new].vector != 0)
			continue;
		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq >= 0) {
		dynamic_irq_init(irq);
	}
	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	int err;
	unsigned dest;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (!err) {
		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((INT_DEST_MODE == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL :
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU :
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED :
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}
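/*
 * The message composed above follows the architectural x86 MSI layout
 * (the mask macros come from <asm/msidef.h>): address_lo selects the
 * 0xFEE00000 interrupt-delivery window and carries the destination
 * APIC ID in bits 19:12 together with the destination-mode and
 * redirection-hint bits, while data carries the vector in bits 7:0 and
 * the delivery mode in bits 10:8. This is why set_msi_irq_affinity()
 * below only rewrites the vector and destination-ID fields through
 * MSI_DATA_VECTOR_MASK and MSI_ADDR_DEST_ID_MASK instead of
 * recomposing the whole message.
 */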
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	read_msi_msg(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg(irq, &msg);
	irq_desc[irq].affinity = mask;
}
#endif /* CONFIG_SMP */

/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_msg msg;
	int irq, ret;

	irq = create_irq();
	if (irq < 0)
		return irq;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0) {
		destroy_irq(irq);
		return ret;
	}

	set_irq_msi(irq, desc);
	write_msi_msg(irq, &msg);

	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	return 0;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
	irq_desc[irq].affinity = mask;
}
#endif /* CONFIG_SMP */

struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= dmar_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
		"edge");
	return 0;
}
#endif
#endif /* CONFIG_PCI_MSI */

/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP
static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;

	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}

static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, cfg->vector);
	irq_desc[irq].affinity = mask;
}
#endif

static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	int err;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */
/* --------------------------------------------------------------------------
			ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#define IO_APIC_MAX_ID		0xFE

int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}

int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
{
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= 16)
		add_pin_to_irq(irq, ioapic, pin);

	setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);

	return 0;
}

int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mpc_irqtype == mp_INT &&
		    mp_irqs[i].mpc_srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}
#endif /* CONFIG_ACPI */

/*
 * This function is currently only a helper for the i386 SMP boot process,
 * where we need to reprogram the ioredtbls to cater for the CPUs which
 * have come online, so the mask in all cases should simply be TARGET_CPUS.
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs() could fail to get a vector for
			 * some device when you have too many devices, because
			 * at that time only the boot CPU is online.
			 */
			if (!irq_cfg[irq].vector)
				setup_IO_APIC_irq(ioapic, pin, irq,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));
			else
				set_ioapic_affinity_irq(irq, TARGET_CPUS);
		}
	}
}
#endif
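For readers tracing msi_compose_msg() above, the sketch below is a minimal, stand-alone user-space program (not kernel code) that assembles the same fixed-delivery, physical-destination, edge-triggered message. The constants are written out literally from the architectural x86 MSI format rather than taken from <asm/msidef.h>, and struct msi_msg_sketch is a hypothetical stand-in for the kernel's struct msi_msg.

/* Stand-alone sketch of the x86 MSI message layout; compiles with any
 * hosted C compiler. Only the non-lowest-priority (fixed delivery,
 * physical destination) branch of msi_compose_msg() is modeled. */
#include <stdint.h>
#include <stdio.h>

#define MSI_ADDR_BASE_LO	0xfee00000u	/* MSI address window on x86 */
#define MSI_ADDR_DEST_ID(id)	(((uint32_t)(id) & 0xff) << 12)	/* bits 19:12 */
#define MSI_DATA_LEVEL_ASSERT	(1u << 14)	/* ignored for edge trigger */
#define MSI_DATA_VECTOR(v)	((uint32_t)(v) & 0xff)	/* bits 7:0 */

struct msi_msg_sketch {		/* stand-in for the kernel's struct msi_msg */
	uint32_t address_hi;	/* MSI_ADDR_BASE_HI: 0 on x86 */
	uint32_t address_lo;
	uint32_t data;
};

/* Fixed delivery (bits 10:8 = 000) and edge trigger (bit 15 = 0) are
 * all-zero fields, so only the destination ID and vector are merged in. */
static void compose_msg(struct msi_msg_sketch *msg, unsigned int dest,
			unsigned int vector)
{
	msg->address_hi = 0;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(dest);
	msg->data = MSI_DATA_LEVEL_ASSERT | MSI_DATA_VECTOR(vector);
}

int main(void)
{
	struct msi_msg_sketch msg;

	compose_msg(&msg, 1, 0x31);	/* example: APIC ID 1, vector 0x31 */
	printf("address 0x%08x:%08x data 0x%08x\n",
	       msg.address_hi, msg.address_lo, msg.data);
	return 0;
}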