hal13c1-2.4.25.patch
+#ifdef CONFIG_SMP
+
+    for (vector = CALL_FUNCTION_VECTOR; vector < SPURIOUS_APIC_VECTOR; vector++)
+        __adeos_set_irq_gate(vector,__adeos_std_vector_table[vector]);
+
+    __adeos_set_irq_gate(ADEOS_SERVICE_VECTOR,__adeos_std_vector_table[ADEOS_SERVICE_VECTOR]);
+    __adeos_set_irq_gate(ADEOS_CRITICAL_VECTOR,__adeos_std_vector_table[ADEOS_CRITICAL_VECTOR]);
+
+#endif /* CONFIG_SMP */
+
+    __adeos_set_trap_gate(0,__adeos_std_vector_table[0]);
+    __adeos_set_trap_gate(1,__adeos_std_vector_table[1]);
+    __adeos_set_sys_gate(3,__adeos_std_vector_table[3]);
+    __adeos_set_sys_gate(4,__adeos_std_vector_table[4]);
+    __adeos_set_sys_gate(5,__adeos_std_vector_table[5]);
+    __adeos_set_trap_gate(6,__adeos_std_vector_table[6]);
+    __adeos_set_trap_gate(7,__adeos_std_vector_table[7]);
+    __adeos_set_trap_gate(8,__adeos_std_vector_table[8]);
+    __adeos_set_trap_gate(9,__adeos_std_vector_table[9]);
+    __adeos_set_trap_gate(10,__adeos_std_vector_table[10]);
+    __adeos_set_trap_gate(11,__adeos_std_vector_table[11]);
+    __adeos_set_trap_gate(12,__adeos_std_vector_table[12]);
+    __adeos_set_trap_gate(13,__adeos_std_vector_table[13]);
+    __adeos_set_irq_gate(14,__adeos_std_vector_table[14]);
+    __adeos_set_trap_gate(15,__adeos_std_vector_table[15]);
+    __adeos_set_trap_gate(16,__adeos_std_vector_table[16]);
+    __adeos_set_trap_gate(17,__adeos_std_vector_table[17]);
+    __adeos_set_trap_gate(18,__adeos_std_vector_table[18]);
+    __adeos_set_trap_gate(19,__adeos_std_vector_table[19]);
+
+#if defined(CONFIG_ADEOS_MODULE) || defined(CONFIG_X86_IO_APIC)
+    adp_pipelined = 0;
+#endif /* CONFIG_ADEOS_MODULE || CONFIG_X86_IO_APIC */
+
+    adeos_critical_exit(flags);
+}
+
+/* adeos_virtualize_irq_from() -- Attach a handler (and optionally a
+   hw acknowledge routine) to an interrupt for the given domain. */
+
+int adeos_virtualize_irq_from (adomain_t *adp,
+                               unsigned irq,
+                               void (*handler)(unsigned irq),
+                               int (*acknowledge)(unsigned irq),
+                               unsigned modemask)
+{
+    unsigned long flags;
+    int err;
+
+    if (irq >= IPIPE_NR_IRQS)
+        return -EINVAL;
+
+    if (adp->irqs[irq].control & IPIPE_SYSTEM_MASK)
+        return -EPERM;
+
+    adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
+
+    if (handler != NULL)
+        {
+        /* A bit of hack here: if we are re-virtualizing an IRQ just
+           to change the acknowledge routine by passing the special
+           ADEOS_SAME_HANDLER value, then allow to recycle the current
+           handler for the IRQ. This allows Linux device drivers
+           managing shared IRQ lines to call adeos_virtualize_irq() in
+           addition to request_irq() just for the purpose of
+           interposing their own shared acknowledge routine. */
+
+        if (handler == ADEOS_SAME_HANDLER)
+            {
+            handler = adp->irqs[irq].handler;
+
+            if (handler == NULL)
+                {
+                err = -EINVAL;
+                goto unlock_and_exit;
+                }
+            }
+        else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 &&
+                 adp->irqs[irq].handler != NULL)
+            {
+            err = -EBUSY;
+            goto unlock_and_exit;
+            }
+
+        if ((modemask & (IPIPE_SHARED_MASK|IPIPE_PASS_MASK)) == IPIPE_SHARED_MASK)
+            {
+            err = -EINVAL;
+            goto unlock_and_exit;
+            }
+
+        if ((modemask & IPIPE_STICKY_MASK) != 0)
+            modemask |= IPIPE_HANDLE_MASK;
+        }
+    else
+        modemask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SHARED_MASK);
+
+    if (acknowledge == NULL)
+        {
+        if ((modemask & IPIPE_SHARED_MASK) == 0)
+            /* Acknowledge handler unspecified -- this is ok in
+               non-shared management mode, but we will force the use
+               of the Linux-defined handler instead. */
+            acknowledge = adp_root->irqs[irq].acknowledge;
+        else
+            {
+            /* A valid acknowledge handler to be called in shared mode
+               is required when declaring a shared IRQ. */
+            err = -EINVAL;
+            goto unlock_and_exit;
+            }
+        }
+
+    adp->irqs[irq].handler = handler;
+    adp->irqs[irq].acknowledge = acknowledge;
+    adp->irqs[irq].control = modemask;
+
+    if (irq < NR_IRQS &&
+        handler != NULL &&
+        !adeos_virtual_irq_p(irq) &&
+        (modemask & IPIPE_ENABLE_MASK) != 0)
+        {
+        if (adp != adp_current)
+            {
+            /* IRQ enable/disable state is domain-sensitive, so we may
+               not change it for another domain. What is allowed
+               however is forcing some domain to handle an interrupt
+               source, by passing the proper 'adp' descriptor which
+               thus may be different from adp_current. */
+            err = -EPERM;
+            goto unlock_and_exit;
+            }
+
+        irq_desc[irq].handler->enable(irq);
+        }
+
+    err = 0;
+
+unlock_and_exit:
+
+    adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+    return err;
+}
+
+/* adeos_control_irq() -- Change an interrupt mode. This affects the
+   way a given interrupt is handled by ADEOS for the current
+   domain. setmask is a bitmask telling whether:
+   - the interrupt should be passed to the domain (IPIPE_HANDLE_MASK),
+     and/or
+   - the interrupt should be passed down to the lower priority domain(s)
+     in the pipeline (IPIPE_PASS_MASK).
+   This leads to four possibilities:
+   - PASS only => Ignore the interrupt
+   - HANDLE only => Terminate the interrupt (process but don't pass down)
+   - PASS + HANDLE => Accept the interrupt (process and pass down)
+   - <none> => Discard the interrupt
+   - DYNAMIC is currently an alias of HANDLE since it marks an interrupt
+     which is processed by the current domain but not implicitely passed
+     down to the pipeline, letting the domain's handler choose on a case-
+     by-case basis whether the interrupt propagation should be forced
+     using adeos_propagate_irq().
+   clrmask clears the corresponding bits from the control field before
+   setmask is applied.
+*/
+
+int adeos_control_irq (unsigned irq,
+                       unsigned clrmask,
+                       unsigned setmask)
+{
+    unsigned long flags;
+    irq_desc_t *desc;
+
+    if (irq >= IPIPE_NR_IRQS)
+        return -EINVAL;
+
+    if (adp_current->irqs[irq].control & IPIPE_SYSTEM_MASK)
+        return -EPERM;
+
+    if (((setmask|clrmask) & IPIPE_SHARED_MASK) != 0)
+        return -EINVAL;
+
+    desc = irq_desc + irq;
+
+    if (adp_current->irqs[irq].handler == NULL)
+        setmask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
+
+    if ((setmask & IPIPE_STICKY_MASK) != 0)
+        setmask |= IPIPE_HANDLE_MASK;
+
+    if ((clrmask & (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK)) != 0) /* If one goes, both go. */
+        clrmask |= (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
+
+    adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
+
+    adp_current->irqs[irq].control &= ~clrmask;
+    adp_current->irqs[irq].control |= setmask;
+
+    if ((setmask & IPIPE_ENABLE_MASK) != 0)
+        desc->handler->enable(irq);
+    else if ((clrmask & IPIPE_ENABLE_MASK) != 0)
+        desc->handler->disable(irq);
+
+    adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+    return 0;
+}
+
+static void __adeos_domain_trampoline (void (*entry)(int), int iflag)
+
+{
+    unsigned long flags;
+    adeos_declare_cpuid;
+
+    adeos_get_cpu(flags);
+    clear_bit(IPIPE_SLEEP_FLAG,&adp_current->cpudata[cpuid].status);
+    adeos_put_cpu(flags);
+    entry(iflag);
+}
+
+void __adeos_init_domain (adomain_t *adp, adattr_t *attr)
+
+{
+    int estacksz = attr->estacksz > 0 ? attr->estacksz : 8192, _cpuid;
+    adeos_declare_cpuid;
+
+    /* Here we don't care if a CPU migration occurs since we do not
+       use the cpuid for accessing per-CPU data, but we don't want
+       more than one CPU to be passed iflag == 1. */
+
+    adeos_load_cpuid();
+
+    for (_cpuid = 0; _cpuid < smp_num_cpus; _cpuid++)
+        {
+        int **psp = &adp->esp[_cpuid];
+
+        adp->estackbase[_cpuid] = (int *)kmalloc(estacksz,GFP_KERNEL);
+
+        if (adp->estackbase[_cpuid] == NULL)
+            panic("Adeos: No memory for domain stack on CPU #%d",_cpuid);
+
+        adp->esp[_cpuid] = adp->estackbase[_cpuid];
+        **psp = 0;
+        *psp = (int *)(((unsigned long)*psp + estacksz - 0x10) & ~0xf);
+        *--(*psp) = (_cpuid == cpuid); /* iflag */
+        *--(*psp) = (int)attr->entry;
+        *--(*psp) = 0;
+        *--(*psp) = (int)&__adeos_domain_trampoline;
+        }
+}
+
+void __adeos_cleanup_domain (adomain_t *adp)
+
+{
+    int _cpuid;
+
+    adeos_unstall_pipeline_from(adp);
+
+    for (_cpuid = 0; _cpuid < smp_num_cpus; _cpuid++)
+        {
+#ifdef CONFIG_SMP
+        while (adp->cpudata[_cpuid].irq_pending_hi != 0)
+            cpu_relax();
+
+        while (test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[_cpuid].status))
+            cpu_relax();
+#endif /* CONFIG_SMP */
+
+        if (adp->estackbase[_cpuid] != NULL)
+            kfree(adp->estackbase[_cpuid]);
+        }
+}
+
+int adeos_get_sysinfo (adsysinfo_t *info)
+
+{
+    info->ncpus = smp_num_cpus;
+    info->cpufreq = adeos_cpu_freq();
+    info->archdep.tmirq = __adeos_tick_irq;
+
+    return 0;
+}
+
+int adeos_tune_timer (unsigned long ns, int flags)
+
+{
+    unsigned ghz, latch;
+    unsigned long x;
+
+    if (flags & ADEOS_RESET_TIMER)
+        latch = LATCH;
+    else
+        {
+        if (ns < 122071 || ns > (1000 / HZ) * 1000000) /* HZ max, 8khz min */
+            return -EINVAL;
+
+        ghz = 1000000000 / ns;
+        latch = (CLOCK_TICK_RATE + ghz/2) / ghz;
+        }
+
+    x = adeos_critical_enter(NULL); /* Sync with all CPUs */
+
+    /* Shamelessly lifted from init_IRQ() in i8259.c */
+    outb_p(0x34,0x43);         /* binary, mode 2, LSB/MSB, ch 0 */
+    outb_p(latch & 0xff,0x40); /* LSB */
+    outb(latch >> 8,0x40);     /* MSB */
+
+    adeos_critical_exit(x);
+
+    return 0;
+}
+
+/* adeos_trigger_ipi() -- Send the ADEOS service IPI to other
+   processors. */
+
+int adeos_trigger_ipi (int _cpuid)
+
+{
+#ifdef CONFIG_SMP
+    int nr_cpus = smp_num_cpus;
+    unsigned long flags;
+    adeos_declare_cpuid;
+
+    adeos_lock_cpu(flags);
+
+    if (unlikely(_cpuid == cpuid)) /* Self-posting the service interrupt? */
+        adeos_trigger_irq(ADEOS_SERVICE_IPI);
+    else
+        {
+        if (_cpuid == ADEOS_OTHER_CPUS)
+            {
+            if (nr_cpus > 1)
+                /* Send the service IPI to all processors but the current one. */
+                __adeos_send_IPI_allbutself(ADEOS_SERVICE_VECTOR);
+            }
+        else if (_cpuid >= 0 && _cpuid < nr_cpus)
+            __adeos_send_IPI_other(_cpuid,ADEOS_SERVICE_VECTOR);
+        }
+
+    adeos_unlock_cpu(flags);
+
+    return _cpuid != ADEOS_OTHER_CPUS ? 1 : nr_cpus - 1;
+#else /* !CONFIG_SMP */
+    return 0;
+#endif /* CONFIG_SMP */
+}
diff -uNrp linux-2.4.25/arch/i386/config.in linux-2.4.25-adeos/arch/i386/config.in
--- linux-2.4.25/arch/i386/config.in	2004-02-18 14:36:30.000000000 +0100
+++ linux-2.4.25-adeos/arch/i386/config.in	2004-03-20 11:50:17.000000000 +0100
@@ -267,6 +267,10 @@ mainmenu_option next_comment
 comment 'General setup'
 
 bool 'Networking support' CONFIG_NET
+tristate 'Adeos support' CONFIG_ADEOS
+if [ "$CONFIG_ADEOS" != "n" ]; then
+   define_bool CONFIG_ADEOS_CORE y
+fi
 
 # Visual Workstation support is utterly broken.
 # If you want to see it working mail an VW540 to hch@infradead.org 8)
diff -uNrp linux-2.4.25/arch/i386/kernel/Makefile linux-2.4.25-adeos/arch/i386/kernel/Makefile
--- linux-2.4.25/arch/i386/kernel/Makefile	2003-11-28 19:26:19.000000000 +0100
+++ linux-2.4.25-adeos/arch/i386/kernel/Makefile	2004-02-29 19:30:20.000000000 +0100
@@ -14,7 +14,7 @@ all: kernel.o head.o init_task.o
 
 O_TARGET := kernel.o
 
-export-objs := mca.o mtrr.o msr.o cpuid.o microcode.o i386_ksyms.o time.o setup.o
+export-objs := adeos.o mca.o mtrr.o msr.o cpuid.o microcode.o i386_ksyms.o time.o setup.o
 
 obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o vm86.o \
 		ptrace.o i8259.o ioport.o ldt.o setup.o time.o sys_i386.o \
@@ -30,6 +30,7 @@ obj-y += pci-pc.o pci-irq.o
 endif
 endif
 
+obj-$(CONFIG_ADEOS_CORE)	+= adeos.o
 obj-$(CONFIG_MCA)		+= mca.o
 obj-$(CONFIG_MTRR)		+= mtrr.o
 obj-$(CONFIG_X86_MSR)		+= msr.o
diff -uNrp linux-2.4.25/arch/i386/kernel/adeos.c linux-2.4.25-adeos/arch/i386/kernel/adeos.c
--- linux-2.4.25/arch/i386/kernel/adeos.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.25-adeos/arch/i386/kernel/adeos.c	2004-03-31 12:35:54.000000000 +0200
@@ -0,0 +1,575 @@
+/*
+ * linux/arch/i386/kernel/adeos.c
+ *
+ * Copyright (C) 2002 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent ADEOS core support for x86.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+
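
To make the IRQ pipelining API above easier to follow, here is a minimal usage sketch for adeos_virtualize_irq_from() and adeos_control_irq(). It is illustrative only and not part of the patch: the module, IRQ number and handler names are hypothetical, while adp_current, adeos_propagate_irq() and the IPIPE_* mode masks are symbols this patch provides; the <linux/adeos.h> header location is assumed from the rest of the Adeos distribution.

/* Illustrative sketch -- not part of the patch. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/adeos.h>	/* assumed location of the Adeos declarations */

#define SAMPLE_IRQ 0		/* hypothetical: intercept the PIT tick */

static void sample_irq_handler (unsigned irq)
{
    /* Do this domain's own processing here, then push the IRQ down the
       pipeline explicitly; HANDLE-only ("DYNAMIC") mode leaves the
       propagation decision to the handler. */
    adeos_propagate_irq(irq);
}

static int __init sample_init (void)
{
    /* Handle SAMPLE_IRQ in the current domain.  A NULL acknowledge
       routine makes adeos_virtualize_irq_from() fall back to the
       Linux-defined one, since the IRQ is not declared shared.
       To switch to "accept" mode later (process and pass down), the
       domain could call adeos_control_irq(SAMPLE_IRQ, 0, IPIPE_PASS_MASK). */
    return adeos_virtualize_irq_from(adp_current, SAMPLE_IRQ,
                                     &sample_irq_handler, NULL,
                                     IPIPE_HANDLE_MASK);
}

static void __exit sample_exit (void)
{
    /* A NULL handler detaches the IRQ from this domain
       (HANDLE/STICKY/SHARED are cleared by the call itself). */
    adeos_virtualize_irq_from(adp_current, SAMPLE_IRQ, NULL, NULL, 0);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");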
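
The latch computation in adeos_tune_timer() above can be checked by hand. The figures below are a worked example, not part of the patch; they assume the stock i386 CLOCK_TICK_RATE of 1193180 from the 2.4 headers, and note that the variable named 'ghz' actually holds the requested tick frequency in plain Hz.

    Requesting a 1 ms period (ns = 1000000):
        ghz   = 1000000000 / 1000000       = 1000
        latch = (1193180 + 1000/2) / 1000  = 1193   (divisor programmed into PIT channel 0)

    Requesting a 125 us period (ns = 125000, just above the 122071 ns floor):
        ghz   = 1000000000 / 125000        = 8000
        latch = (1193180 + 8000/2) / 8000  = 149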