📄 hal5-2.6.5.patch
+	    err = -EBUSY;
+	    goto unlock_and_exit;
+	    }
+
+	if ((modemask & (IPIPE_SHARED_MASK|IPIPE_PASS_MASK)) == IPIPE_SHARED_MASK)
+	    {
+	    err = -EINVAL;
+	    goto unlock_and_exit;
+	    }
+
+	if ((modemask & IPIPE_STICKY_MASK) != 0)
+	    modemask |= IPIPE_HANDLE_MASK;
+	}
+    else
+	modemask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SHARED_MASK);
+
+    if (acknowledge == NULL)
+	{
+	if ((modemask & IPIPE_SHARED_MASK) == 0)
+	    /* Acknowledge handler unspecified -- this is ok in
+	       non-shared management mode, but we will force the use
+	       of the Linux-defined handler instead. */
+	    acknowledge = adp_root->irqs[irq].acknowledge;
+	else
+	    {
+	    /* A valid acknowledge handler to be called in shared mode
+	       is required when declaring a shared IRQ. */
+	    err = -EINVAL;
+	    goto unlock_and_exit;
+	    }
+	}
+
+    adp->irqs[irq].handler = handler;
+    adp->irqs[irq].acknowledge = acknowledge;
+    adp->irqs[irq].control = modemask;
+
+    if (irq < NR_IRQS &&
+	handler != NULL &&
+	!adeos_virtual_irq_p(irq) &&
+	(modemask & IPIPE_ENABLE_MASK) != 0)
+	{
+	if (adp != adp_current)
+	    {
+	    /* IRQ enable/disable state is domain-sensitive, so we may
+	       not change it for another domain. What is allowed
+	       however is forcing some domain to handle an interrupt
+	       source, by passing the proper 'adp' descriptor which
+	       thus may be different from adp_current. */
+	    err = -EPERM;
+	    goto unlock_and_exit;
+	    }
+
+	irq_desc[irq].handler->enable(irq);
+	}
+
+    err = 0;
+
+unlock_and_exit:
+
+    adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+    return err;
+}
+
+/* adeos_control_irq() -- Change an interrupt mode. This affects the
+   way a given interrupt is handled by ADEOS for the current
+   domain. setmask is a bitmask telling whether:
+   - the interrupt should be passed to the domain (IPIPE_HANDLE_MASK),
+   and/or
+   - the interrupt should be passed down to the lower priority domain(s)
+   in the pipeline (IPIPE_PASS_MASK).
+   This leads to four possibilities:
+   - PASS only => Ignore the interrupt
+   - HANDLE only => Terminate the interrupt (process but don't pass down)
+   - PASS + HANDLE => Accept the interrupt (process and pass down)
+   - <none> => Discard the interrupt
+   - DYNAMIC is currently an alias of HANDLE since it marks an interrupt
+   which is processed by the current domain but not implicitly passed
+   down to the pipeline, letting the domain's handler choose on a case-
+   by-case basis whether the interrupt propagation should be forced
+   using adeos_propagate_irq().
+   clrmask clears the corresponding bits from the control field before
+   setmask is applied.
+*/
+
+int adeos_control_irq (unsigned irq,
+		       unsigned clrmask,
+		       unsigned setmask)
+{
+    unsigned long flags;
+    irq_desc_t *desc;
+
+    if (irq >= IPIPE_NR_IRQS)
+	return -EINVAL;
+
+    if (adp_current->irqs[irq].control & IPIPE_SYSTEM_MASK)
+	return -EPERM;
+
+    if (((setmask|clrmask) & IPIPE_SHARED_MASK) != 0)
+	return -EINVAL;
+
+    desc = irq_desc + irq;
+
+    if (adp_current->irqs[irq].handler == NULL)
+	setmask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
+
+    if ((setmask & IPIPE_STICKY_MASK) != 0)
+	setmask |= IPIPE_HANDLE_MASK;
+
+    if ((clrmask & (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK)) != 0) /* If one goes, both go. */
+	clrmask |= (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
+
+    adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
+
+    adp_current->irqs[irq].control &= ~clrmask;
+    adp_current->irqs[irq].control |= setmask;
+
+    if ((setmask & IPIPE_ENABLE_MASK) != 0)
+	desc->handler->enable(irq);
+    else if ((clrmask & IPIPE_ENABLE_MASK) != 0)
+	desc->handler->disable(irq);
+
+    adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+    return 0;
+}
+
+static void __adeos_domain_trampoline (void (*entry)(int), int iflag)
+
+{
+    unsigned long flags;
+    adeos_declare_cpuid;
+
+    adeos_get_cpu(flags);
+    clear_bit(IPIPE_SLEEP_FLAG,&adp_current->cpudata[cpuid].status);
+    adeos_put_cpu(flags);
+    entry(iflag);
+}
+
+void __adeos_init_domain (adomain_t *adp, adattr_t *attr)
+
+{
+    int estacksz = attr->estacksz > 0 ? attr->estacksz : 8192, _cpuid;
+    int nr_cpus = num_online_cpus();
+    adeos_declare_cpuid;
+
+    /* Here we don't care if a CPU migration occurs since we do not
+       use the cpuid for accessing per-CPU data, but we don't want
+       more than one CPU to be passed iflag == 1. */
+
+    adeos_load_cpuid();
+
+    for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
+	{
+	int **psp = &adp->esp[_cpuid];
+
+	adp->estackbase[_cpuid] = (int *)kmalloc(estacksz,GFP_KERNEL);
+
+	if (adp->estackbase[_cpuid] == NULL)
+	    panic("Adeos: No memory for domain stack on CPU #%d",_cpuid);
+
+	adp->esp[_cpuid] = adp->estackbase[_cpuid];
+	**psp = 0;
+	*psp = (int *)(((unsigned long)*psp + estacksz - 0x10) & ~0xf);
+	*--(*psp) = (_cpuid == cpuid);	/* iflag */
+	*--(*psp) = (int)attr->entry;
+	*--(*psp) = 0;
+	*--(*psp) = (int)&__adeos_domain_trampoline;
+	}
+}
+
+void __adeos_cleanup_domain (adomain_t *adp)
+
+{
+    int nr_cpus = num_online_cpus();
+    int _cpuid;
+
+    adeos_unstall_pipeline_from(adp);
+
+    for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
+	{
+#ifdef CONFIG_SMP
+	while (adp->cpudata[_cpuid].irq_pending_hi != 0)
+	    cpu_relax();
+
+	while (test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[_cpuid].status))
+	    cpu_relax();
+#endif /* CONFIG_SMP */
+
+	if (adp->estackbase[_cpuid] != 0)
+	    kfree(adp->estackbase[_cpuid]);
+	}
+}
+
+int adeos_get_sysinfo (adsysinfo_t *info)
+
+{
+    info->ncpus = num_online_cpus();
+    info->cpufreq = adeos_cpu_freq();
+    info->archdep.tmirq = __adeos_tick_irq;
+
+    return 0;
+}
+
+int adeos_tune_timer (unsigned long ns, int flags)
+
+{
+    unsigned ghz, latch;
+    unsigned long x;
+
+    if (flags & ADEOS_RESET_TIMER)
+	latch = LATCH;
+    else
+	{
+	if (ns < 122071 || ns > (1000 / HZ) * 1000000) /* HZ max, 8khz min */
+	    return -EINVAL;
+
+	ghz = 1000000000 / ns;
+	latch = (CLOCK_TICK_RATE + ghz/2) / ghz;
+	}
+
+    x = adeos_critical_enter(NULL);	/* Sync with all CPUs */
+
+    /* Shamelessly lifted from init_IRQ() in i8259.c */
+    outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
+    outb_p(latch & 0xff,0x40);	/* LSB */
+    outb(latch >> 8,0x40);	/* MSB */
+
+    adeos_critical_exit(x);
+
+    return 0;
+}
+
+/* adeos_trigger_ipi() -- Send the ADEOS service IPI to other
+   processors. */
+
+int fastcall adeos_trigger_ipi (int _cpuid)
+
+{
+#ifdef CONFIG_SMP
+    int nr_cpus = num_online_cpus();
+    unsigned long flags;
+    adeos_declare_cpuid;
+
+    adeos_lock_cpu(flags);
+
+    if (unlikely(_cpuid == cpuid)) /* Self-posting the service interrupt? */
+	adeos_trigger_irq(ADEOS_SERVICE_IPI);
+    else
+	{
+	if (_cpuid == ADEOS_OTHER_CPUS)
+	    {
+	    if (nr_cpus > 1)
+		/* Send the service IPI to all processors but the current one. */
+		__adeos_send_IPI_allbutself(ADEOS_SERVICE_VECTOR);
+	    }
+	else if (_cpuid >= 0 && _cpuid < nr_cpus)
+	    __adeos_send_IPI_other(_cpuid,ADEOS_SERVICE_VECTOR);
+	}
+
+    adeos_unlock_cpu(flags);
+
+    return _cpuid != ADEOS_OTHER_CPUS ? 1 : nr_cpus - 1;
+#else /* !CONFIG_SMP */
+    return 0;
+#endif /* CONFIG_SMP */
+}
diff -uNrp linux-2.6.5/arch/i386/Kconfig linux-2.6.5-adeos/arch/i386/Kconfig
--- linux-2.6.5/arch/i386/Kconfig	2004-04-21 15:24:05.000000000 +0200
+++ linux-2.6.5-adeos/arch/i386/Kconfig	2004-05-27 18:59:34.000000000 +0200
@@ -1186,6 +1186,7 @@ source "fs/Kconfig"
 
 source "arch/i386/oprofile/Kconfig"
 
+source "adeos/Kconfig"
 
 menu "Kernel hacking"
 
diff -uNrp linux-2.6.5/arch/i386/kernel/Makefile linux-2.6.5-adeos/arch/i386/kernel/Makefile
--- linux-2.6.5/arch/i386/kernel/Makefile	2004-04-21 15:24:06.000000000 +0200
+++ linux-2.6.5-adeos/arch/i386/kernel/Makefile	2004-05-27 18:59:34.000000000 +0200
@@ -11,6 +11,7 @@ obj-y := process.o semaphore.o signal.o
 
 obj-y += cpu/
 obj-y += timers/
+obj-$(CONFIG_ADEOS_CORE) += adeos.o
 obj-$(CONFIG_ACPI_BOOT) += acpi/
 obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
 obj-$(CONFIG_MCA) += mca.o
diff -uNrp linux-2.6.5/arch/i386/kernel/adeos.c linux-2.6.5-adeos/arch/i386/kernel/adeos.c
--- linux-2.6.5/arch/i386/kernel/adeos.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.5-adeos/arch/i386/kernel/adeos.c	2004-06-11 15:13:57.000000000 +0200
@@ -0,0 +1,582 @@
+/*
+ * linux/arch/i386/kernel/adeos.c
+ *
+ * Copyright (C) 2002 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent ADEOS core support for x86.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include <asm/desc.h>
+#include <asm/io.h>
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/fixmap.h>
+#include <asm/bitops.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
+#include <asm/io_apic.h>
+#endif /* CONFIG_X86_IO_APIC */
+#include <asm/apic.h>
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+extern spinlock_t __adeos_pipelock;
+
+struct pt_regs __adeos_tick_regs;
+
+int __adeos_tick_irq;
+
+#if defined(CONFIG_ADEOS_MODULE) || defined(CONFIG_X86_IO_APIC)
+/* A global flag telling whether Adeos pipelining is engaged. */
+int adp_pipelined = 0;
+#endif /* CONFIG_ADEOS_MODULE || CONFIG_X86_IO_APIC */
+
+#ifdef CONFIG_X86_LOCAL_APIC
+/* Ugly hack allowing 1) to run portions of early kernel init code
+   which make use of smp_processor_id(), 2) to run SMP kernels on UP
+   boxen without APIC. See asm-i386/adeos.h */
+int __adeos_apic_mapped = 0;
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_SMP
+
+volatile u8 __adeos_apicid_2_cpuid[MAX_APICID];
+
+static volatile unsigned long __adeos_cpu_sync_map;
+
+static volatile unsigned long __adeos_cpu_lock_map;
+
+static spinlock_t __adeos_cpu_barrier = SPIN_LOCK_UNLOCKED;
+
+static atomic_t __adeos_critical_count = ATOMIC_INIT(0);
+
+static void (*__adeos_cpu_sync)(void);
+
+#endif /* CONFIG_SMP */
+
+#define __adeos_call_asm_irq_handler(adp,irq) \
+    __asm__ __volatile__ ("pushfl\n\t" \
+			  "push %%cs\n\t" \
+			  "call *%1\n" \
+			  : /* no output */ \
+			  : "a" (irq), "m" ((adp)->irqs[irq].handler))
+
+#define __adeos_call_c_irq_handler(adp,irq) \
+    __asm__ __volatile__ ("pushl %%ebp\n\t" \
+			  "pushl %%edi\n\t" \
+			  "pushl %%esi\n\t" \
+			  "pushl %%edx\n\t" \
+			  "pushl %%ecx\n\t" \
+			  "pushl %%ebx\n\t" \
+			  "pushl %%eax\n\t" \
+			  "call *%1\n\t" \
+			  "addl $4,%%esp\n\t" \
+			  "popl %%ebx\n\t" \
+			  "popl %%ecx\n\t" \
+			  "popl %%edx\n\t" \
+			  "popl %%esi\n\t" \
+			  "popl %%edi\n\t" \
+			  "popl %%ebp\n" \
+			  : /* no output */ \
+			  : "a" (irq), "m" ((adp)->irqs[irq].handler))
+
+/* Since 2.6, ret_from_intr might identify a need for rescheduling
+   (raised by the C handler) even from kernel space if preemption
+   is enabled, so we should branch to this routine on our return
+   path. Native (i.e. ASM) handlers do the same. */
+
+#define __adeos_call_c_root_irq_handler(adp,irq) \
+    __asm__ __volatile__ ("pushfl\n\t" \
+			  "pushl %%cs\n\t" \
+			  "pushl $1f\n\t" \
+			  "pushl $-1\n\t" /* Negative (fake) orig_eax. */ \
+			  "pushl %%es\n\t" \
+			  "pushl %%ds\n\t" \
+			  "pushl %%eax\n\t" \
+			  "pushl %%ebp\n\t" \
+			  "pushl %%edi\n\t" \
+			  "pushl %%esi\
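
A few usage sketches against the API shown above follow. First, the four interrupt-control modes enumerated in the adeos_control_irq() comment map onto setmask/clrmask pairs as below. This is a minimal sketch, assuming an Adeos-patched kernel: the <linux/adeos.h> include path and the MY_IRQ number are assumptions for illustration only, and the calling domain is presumed to have already virtualized the IRQ with a non-NULL handler (otherwise adeos_control_irq() silently strips IPIPE_HANDLE_MASK).

#include <linux/adeos.h>	/* assumed header location */

#define MY_IRQ 7		/* hypothetical IRQ number */

static void my_irq_mode_demo(void)
{
    /* Accept: process the interrupt here, then pass it down. */
    adeos_control_irq(MY_IRQ, 0, IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);

    /* Terminate: process it here, stop the propagation. */
    adeos_control_irq(MY_IRQ, IPIPE_PASS_MASK, IPIPE_HANDLE_MASK);

    /* Ignore: don't process it, just pass it down. */
    adeos_control_irq(MY_IRQ, IPIPE_HANDLE_MASK, IPIPE_PASS_MASK);

    /* Discard: neither process nor pass down. */
    adeos_control_irq(MY_IRQ, IPIPE_HANDLE_MASK|IPIPE_PASS_MASK, 0);
}

Note that clearing either of IPIPE_HANDLE_MASK/IPIPE_STICKY_MASK clears both ("if one goes, both go"), and that IPIPE_SHARED_MASK cannot be changed through this call.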
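The divisor computation in adeos_tune_timer() can be checked in isolation: despite its name, ghz holds a frequency in plain Hz, and the latch expression rounds CLOCK_TICK_RATE / ghz to the nearest integer. The following self-contained user-space program mirrors the patch's arithmetic, with CLOCK_TICK_RATE hard-wired to the 1193182 Hz i8254 input clock the x86 tree uses:

#include <stdio.h>

#define CLOCK_TICK_RATE 1193182UL	/* i8254 input clock (Hz) on x86 */

int main(void)
{
    unsigned long ns = 500000;	/* requested tick period: 500 us -> 2 kHz */
    unsigned ghz, latch;

    /* Same steps as adeos_tune_timer(): derive the target frequency,
       then round the PIT divisor to the nearest integer. */
    ghz = 1000000000 / ns;			/* 2000 (Hz, despite the name) */
    latch = (CLOCK_TICK_RATE + ghz / 2) / ghz;	/* (1193182 + 1000) / 2000 = 597 */

    printf("latch = %u, effective rate = %lu.%03lu Hz\n",
	   latch,
	   CLOCK_TICK_RATE / latch,
	   (CLOCK_TICK_RATE % latch) * 1000 / latch);
    return 0;
}

For ns = 500000 this yields latch = 597, i.e. an effective tick rate of about 1998.6 Hz; the rounding picks the divisor whose rate is closest to the one requested, within the i8254's integer-divisor granularity.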
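Finally, adeos_trigger_ipi() either self-posts the service interrupt, signals one remote CPU, or signals all but the current one; it returns 1 for a directed request and nr_cpus - 1 for ADEOS_OTHER_CPUS. A sketch of both call styles, assuming a handler has already been attached to ADEOS_SERVICE_IPI (the registration call is outside this excerpt):

#include <linux/adeos.h>	/* assumed header location */

static void kick_peers(void)
{
    int n;

    /* Post the service IPI to every other online processor. */
    n = adeos_trigger_ipi(ADEOS_OTHER_CPUS);
    printk(KERN_INFO "service IPI posted to %d CPU(s)\n", n);

    /* Directed variant: signal CPU #1 only (hypothetical target; per
       the code above, out-of-range ids send nothing yet still return 1). */
    adeos_trigger_ipi(1);
}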