adeos-ipipe-2.4.32-i386-1.1-03.patch

-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	spin_unlock_irqrestore_hw(&ioapic_lock, flags);
 }
 
 void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
@@ -171,10 +173,10 @@ void clear_IO_APIC_pin(unsigned int apic
 	unsigned long flags;
 
 	/* Check delivery_mode to be sure we're not clearing an SMI pin */
-	spin_lock_irqsave(&ioapic_lock, flags);
+	spin_lock_irqsave_hw(&ioapic_lock, flags);
 	*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
 	*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	spin_unlock_irqrestore_hw(&ioapic_lock, flags);
 	if (entry.delivery_mode == dest_SMI)
 		return;
 
@@ -183,10 +185,10 @@ void clear_IO_APIC_pin(unsigned int apic
 	 */
 	memset(&entry, 0, sizeof(entry));
 	entry.mask = 1;
-	spin_lock_irqsave(&ioapic_lock, flags);
+	spin_lock_irqsave_hw(&ioapic_lock, flags);
 	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
 	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	spin_unlock_irqrestore_hw(&ioapic_lock, flags);
 }
 
 static void clear_IO_APIC (void)
@@ -691,10 +693,10 @@ void __init setup_IO_APIC_irqs(void)
 			if (!apic && (irq < 16))
 				disable_8259A_irq(irq);
 		}
-		spin_lock_irqsave(&ioapic_lock, flags);
+		spin_lock_irqsave_hw(&ioapic_lock, flags);
 		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
 		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		spin_unlock_irqrestore_hw(&ioapic_lock, flags);
 	}
 }
 
@@ -739,10 +741,10 @@ void __init setup_ExtINT_IRQ0_pin(unsign
 	/*
 	 * Add it to the IO-APIC irq-routing table:
 	 */
-	spin_lock_irqsave(&ioapic_lock, flags);
+	spin_lock_irqsave_hw(&ioapic_lock, flags);
 	io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
 	io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	spin_unlock_irqrestore_hw(&ioapic_lock, flags);
 
 	enable_8259A_irq(0);
 }
@@ -1007,7 +1009,7 @@ void /*__init*/ print_PIC(void)
 
 	printk(KERN_DEBUG "\nprinting PIC contents\n");
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	spin_lock_irqsave_hw(&i8259A_lock, flags);
 
 	v = inb(0xa1) << 8 | inb(0x21);
 	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
@@ -1021,7 +1023,7 @@ void /*__init*/ print_PIC(void)
 	outb(0x0a,0xa0);
 	outb(0x0a,0x20);
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	spin_unlock_irqrestore_hw(&i8259A_lock, flags);
 
 	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
 
@@ -1231,14 +1233,15 @@ static unsigned int startup_edge_ioapic_
 	int was_pending = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	spin_lock_irqsave_hw(&ioapic_lock, flags);
 	if (irq < 16) {
 		disable_8259A_irq(irq);
 		if (i8259A_irq_pending(irq))
 			was_pending = 1;
 	}
 	__unmask_IO_APIC_irq(irq);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	ipipe_irq_unlock(irq);
+	spin_unlock_irqrestore_hw(&ioapic_lock, flags);
 
 	return was_pending;
 }
@@ -1250,6 +1253,24 @@ static unsigned int startup_edge_ioapic_
  * interrupt for real. This prevents IRQ storms from unhandled
  * devices.
  */
+#if CONFIG_IPIPE
+
+static void ack_edge_ioapic_irq (unsigned int irq)
+
+{
+	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
+	    == (IRQ_PENDING | IRQ_DISABLED)) {
+		unsigned long flags;
+		spin_lock_irqsave_hw(&ioapic_lock,flags);
+		__mask_IO_APIC_irq(irq);
+		spin_unlock_irqrestore_hw(&ioapic_lock,flags);
+	}
+
+	__ack_APIC_irq();
+}
+
+#else /* !CONFIG_IPIPE */
+
 static void ack_edge_ioapic_irq(unsigned int irq)
 {
 	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
@@ -1258,6 +1279,8 @@ static void ack_edge_ioapic_irq(unsigned
 	ack_APIC_irq();
 }
 
+#endif /* CONFIG_IPIPE */
+
 static void end_edge_ioapic_irq (unsigned int i) { /* nothing */ }
 
@@ -1286,6 +1309,58 @@ static unsigned int startup_level_ioapic
 #define enable_level_ioapic_irq	unmask_IO_APIC_irq
 #define disable_level_ioapic_irq	mask_IO_APIC_irq
 
+#ifdef CONFIG_IPIPE
+
+/* Prevent low priority IRQs grabbed by high priority domains from
+   being delayed, waiting for a high priority interrupt handler
+   running in a low priority domain to complete. */
+
+static unsigned long bugous_edge_triggers;
+
+static void end_level_ioapic_irq (unsigned irq)
+
+{
+	unsigned long flags;
+
+	spin_lock_irqsave_hw(&ioapic_lock, flags);
+
+	if (test_and_clear_bit(irq,&bugous_edge_triggers)) {
+		atomic_inc(&irq_mis_count);
+		__unmask_and_level_IO_APIC_irq(irq);
+	}
+	else
+		__unmask_IO_APIC_irq(irq);
+
+	ipipe_irq_unlock(irq);
+
+	spin_unlock_irqrestore_hw(&ioapic_lock, flags);
+}
+
+static void mask_and_ack_level_ioapic_irq (unsigned irq)
+
+{
+	unsigned long flags, v;
+	int i;
+
+	i = IO_APIC_VECTOR(irq);
+	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
+
+	spin_lock_irqsave_hw(&ioapic_lock, flags);
+
+	if (!(v & (1 << (i & 0x1f)))) {
+		set_bit(irq,&bugous_edge_triggers);
+		__mask_and_edge_IO_APIC_irq(irq);
+	}
+	else
+		__mask_IO_APIC_irq(irq);
+
+	spin_unlock_irqrestore_hw(&ioapic_lock, flags);
+
+	__ack_APIC_irq();
+}
+
+#else /* !CONFIG_IPIPE */
+
 static void end_level_ioapic_irq (unsigned int irq)
 {
 	unsigned long v;
@@ -1347,6 +1422,8 @@ static void end_level_ioapic_irq (unsign
 static void mask_and_ack_level_ioapic_irq (unsigned int irq) { /* nothing */ }
 
+#endif /* CONFIG_IPIPE */
+
 #ifndef CONFIG_SMP
 
 void fastcall send_IPI_self(int vector)
@@ -1381,9 +1458,9 @@ static void set_ioapic_affinity (unsigne
 	 */
 	mask = mask << 24;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	spin_lock_irqsave_hw(&ioapic_lock, flags);
 	__DO_ACTION(1, = mask, )
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	spin_unlock_irqrestore_hw(&ioapic_lock, flags);
 }
 
 /*
@@ -1466,7 +1543,7 @@ static void disable_lapic_irq (unsigned
 
 static void ack_lapic_irq (unsigned int irq)
 {
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 
 static void end_lapic_irq (unsigned int i) { /* nothing */ }
@@ -1883,10 +1960,10 @@ int io_apic_set_pci_routing (int ioapic,
 	if (!ioapic && (irq < 16))
 		disable_8259A_irq(irq);
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	spin_lock_irqsave_hw(&ioapic_lock, flags);
 	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
 	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	spin_unlock_irqrestore_hw(&ioapic_lock, flags);
 
 	return 0;
 }
diff -uNrp 2.4.32/arch/i386/kernel/ipipe-core.c 2.4.32-i386-ipipe/arch/i386/kernel/ipipe-core.c
--- 2.4.32/arch/i386/kernel/ipipe-core.c	1970-01-01 01:00:00.000000000 +0100
+++ 2.4.32-i386-ipipe/arch/i386/kernel/ipipe-core.c	2006-01-04 17:41:56.000000000 +0100
@@ -0,0 +1,416 @@
+/* -*- linux-c -*-
+ * linux/arch/i386/kernel/ipipe-core.c
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-PIPE core support for x86.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include <asm/desc.h>
+#include <asm/io.h>
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/fixmap.h>
+#include <asm/bitops.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
+#include <asm/io_apic.h>
+#endif /* CONFIG_X86_IO_APIC */
+#include <asm/apic.h>
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+struct pt_regs __ipipe_tick_regs[IPIPE_NR_CPUS];
+
+int __ipipe_tick_irq;
+
+#ifdef CONFIG_SMP
+
+static cpumask_t __ipipe_cpu_sync_map;
+
+static cpumask_t __ipipe_cpu_lock_map;
+
+static ipipe_spinlock_t __ipipe_cpu_barrier = IPIPE_SPIN_LOCK_UNLOCKED;
+
+static atomic_t __ipipe_critical_count = ATOMIC_INIT(0);
+
+static void (*__ipipe_cpu_sync) (void);
+
+void __ipipe_send_IPI_allbutself(int vector);
+
+#endif /* CONFIG_SMP */
+
+#define __ipipe_call_root_xirq_handler(ipd,irq) \
+	__asm__ __volatile__ ("pushfl\n\t" \
+			      "pushl %%cs\n\t" \
+			      "pushl $1f\n\t" \
+			      "pushl %%eax\n\t" \
+			      "pushl %%es\n\t" \
+			      "pushl %%ds\n\t" \
+			      "pushl %%eax\n\t" \
+			      "pushl %%ebp\n\t" \
+			      "pushl %%edi\n\t" \
+			      "pushl %%esi\n\t" \
+			      "pushl %%edx\n\t" \
+			      "pushl %%ecx\n\t" \
+			      "pushl %%ebx\n\t" \
+			      "movl %%esp,%%eax\n\t" \
+			      "call *%1\n\t" \
+			      "jmp ret_from_intr\n\t" \
+			      "1:\n" \
+			      : /* no output */ \
+			      : "a" (irq-256), "m" ((ipd)->irqs[irq].handler))
+
+#define __ipipe_call_root_virq_handler(ipd,irq) \
+	__asm__ __volatile__ ("pushfl\n\t" \
+			      "pushl %%cs\n\t" \
+			      "pushl $1f\n\t" \
+			      "pushl $-1\n\t" \
+			      "pushl %%es\n\t" \
+			      "pushl %%ds\n\t" \
+			      "pushl %%eax\n\t" \
+			      "pushl %%ebp\n\t" \
+			      "pushl %%edi\n\t" \
+			      "pushl %%esi\n\t" \
+			      "pushl %%edx\n\t" \
+			      "pushl %%ecx\n\t" \
+			      "pushl %%ebx\n\t" \
+			      "pushl %2\n\t" \
+			      "pushl %%eax\n\t" \
+			      "call *%1\n\t" \
+			      "addl $8,%%esp\n\t" \
+			      "jmp ret_from_intr\n\t" \
+			      "1:\n" \
+			      : /* no output */ \
+			      : "a" (irq), "m" ((ipd)->irqs[irq].handler), "r" ((ipd)->irqs[irq].cookie))
+
+static __inline__ unsigned long flnz(unsigned long word)
+{
+	__asm__("bsrl %1, %0":"=r"(word)
+		: "r"(word));
+	return word;
+}
+
+int __ipipe_ack_system_irq(unsigned irq)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+	__ack_APIC_irq();
+#endif /* CONFIG_X86_LOCAL_APIC */
+	return 1;
+}
+
+#ifdef CONFIG_SMP
+
+/* Always called with hw interrupts off. */
+
+void __ipipe_do_critical_sync(unsigned irq, void *cookie)
+{
+	ipipe_declare_cpuid;
+
+	ipipe_load_cpuid();
+
+	cpu_set(cpuid, __ipipe_cpu_sync_map);
+
+	/* Now we are in sync with the lock requestor running on another
+	   CPU. Enter a spinning wait until he releases the global
+	   lock. */
+	spin_lock_hw(&__ipipe_cpu_barrier);
+
+	/* Got it. Now get out. */
+
+	if (__ipipe_cpu_sync)
+		/* Call the sync routine if any. */
+		__ipipe_cpu_sync();
+
+	spin_unlock_hw(&__ipipe_cpu_barrier);
+
+	cpu_clear(cpuid, __ipipe_cpu_sync_map);
+}
+
+#endif /* CONFIG_SMP */
+
+/* ipipe_critical_enter() -- Grab the superlock excluding all CPUs
+   but the current one from a critical section. This lock is used when
+   we must enforce a global critical section for a single CPU in a
+   possibly SMP system whichever context the CPUs are running. */
+
+unsigned long ipipe_critical_enter(void (*syncfn) (void))
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+
+#ifdef CONFIG_SMP
+	if (num_online_cpus() > 1) {	/* We might be running a SMP-kernel on a UP box... */
+		ipipe_declare_cpuid;
+		cpumask_t lock_map;
+
+		ipipe_load_cpuid();
+
+		if (!cpu_test_and_set(cpuid, __ipipe_cpu_lock_map)) {
+			while (cpu_test_and_set
+			       (BITS_PER_LONG - 1, __ipipe_cpu_lock_map)) {
+				int n = 0;
+				do {
+					cpu_relax();
+				} while (++n < cpuid);
+			}
+
+			spin_lock_hw(&__ipipe_cpu_barrier);
+
+			__ipipe_cpu_sync = syncfn;
+
+			/* Send the sync IPI to all processors but the current one. */
+			__ipipe_send_IPI_allbutself(IPIPE_CRITICAL_VECTOR);
+
+			cpus_andnot(lock_map, cpu_online_map,
+				    __ipipe_cpu_lock_map);
+
+			while (!cpus_equal(__ipipe_cpu_sync_map, lock_map))
+				cpu_relax();
+		}
+
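
Every io_apic.c hunk above applies the same transformation: spinlocked sections that touch raw IO-APIC or i8259A registers switch from the stock irqsave primitives to their _hw counterparts, and acknowledgment paths call __ack_APIC_irq() directly. The point is that under I-pipe the stock local_irq_save()/spin_lock_irqsave() only "stall" the Linux (root) domain in software so that real-time domains keep receiving interrupts, whereas hardware register sequences need EFLAGS.IF genuinely cleared. The fragment below is a minimal illustrative sketch of that distinction, not code from this patch; the real definitions live in the I-pipe headers, which this excerpt does not show, so treat the macro bodies as assumptions that merely mirror the names used in the hunks.

/*
 * Illustrative sketch only -- NOT part of the patch above.
 * Really disable interrupts in the CPU, saving the previous EFLAGS;
 * the stock local_irq_save() of an I-pipe kernel only flips a
 * per-domain stall bit in software.
 */
#define local_irq_save_hw(flags) \
	__asm__ __volatile__("pushfl ; popl %0 ; cli" \
			     : "=g" (flags) : /* no input */ : "memory")

#define local_irq_restore_hw(flags) \
	__asm__ __volatile__("pushl %0 ; popfl" \
			     : /* no output */ : "g" (flags) : "memory", "cc")

/*
 * The _hw spinlock variants pair a regular spinlock with real, not
 * virtual, interrupt masking -- what the IO-APIC/i8259A register
 * accesses patched above require.
 */
#define spin_lock_irqsave_hw(lock, flags) \
	do { \
		local_irq_save_hw(flags); \
		spin_lock(lock); \
	} while (0)

#define spin_unlock_irqrestore_hw(lock, flags) \
	do { \
		spin_unlock(lock); \
		local_irq_restore_hw(flags); \
	} while (0)

On the ipipe-core.c side, ipipe_critical_enter() builds on the same primitive: it combines local_irq_save_hw() with an IPI-driven rendezvous (__ipipe_do_critical_sync() spinning on __ipipe_cpu_barrier) so one CPU can briefly exclude all others from a global critical section. The flags value it returns is presumably meant for the matching ipipe_critical_exit(), which falls outside this excerpt.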