
📄 adeos-ipipe-2.4.32-i386-1.1-03.patch

📁 xenomai: an excellent real-time patch for Linux
💻 PATCH
📖 Page 1 of 5
+		atomic_inc(&__ipipe_critical_count);
+	}
+#endif	/* CONFIG_SMP */
+
+	return flags;
+}
+
+/* ipipe_critical_exit() -- Release the superlock. */
+
+void ipipe_critical_exit(unsigned long flags)
+{
+#ifdef CONFIG_SMP
+	if (num_online_cpus() > 1) {	/* We might be running a SMP-kernel on a UP box... */
+		ipipe_declare_cpuid;
+
+		ipipe_load_cpuid();
+
+		if (atomic_dec_and_test(&__ipipe_critical_count)) {
+			spin_unlock_hw(&__ipipe_cpu_barrier);
+
+			while (!cpus_empty(__ipipe_cpu_sync_map))
+				cpu_relax();
+
+			cpu_clear(cpuid, __ipipe_cpu_lock_map);
+			cpu_clear(BITS_PER_LONG - 1, __ipipe_cpu_lock_map);
+		}
+	}
+#endif	/* CONFIG_SMP */
+
+	local_irq_restore_hw(flags);
+}
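ipipe_tune_timer() further down in this hunk shows the intended usage of this pair: quiesce every CPU, touch globally visible hardware, release. A minimal sketch of that pattern, with the function name and body illustrative rather than taken from the patch:

static void example_update_shared_hw(void)
{
	unsigned long flags;

	flags = ipipe_critical_enter(NULL);	/* sync with all CPUs */

	/* ... reprogram hardware that every CPU observes; the other
	   CPUs spin on __ipipe_cpu_sync_map until released ... */

	ipipe_critical_exit(flags);
}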
+
+/* __ipipe_sync_stage() -- Flush the pending IRQs for the current
+   domain (and processor).  This routine flushes the interrupt log
+   (see "Optimistic interrupt protection" from D. Stodolsky et al. for
+   more on the deferred interrupt scheme). Every interrupt that
+   occurred while the pipeline was stalled gets played.  WARNING:
+   callers on SMP boxen should always check for CPU migration on
+   return of this routine. One can control the kind of interrupts
+   which are going to be sync'ed using the syncmask
+   parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
+   plays virtual interrupts only. This routine must be called with hw
+   interrupts off. */
+
+void __ipipe_sync_stage(unsigned long syncmask)
+{
+	unsigned long mask, submask;
+	struct ipcpudata *cpudata;
+	struct ipipe_domain *ipd;
+	ipipe_declare_cpuid;
+	int level, rank;
+	unsigned irq;
+
+	ipipe_load_cpuid();
+	ipd = ipipe_percpu_domain[cpuid];
+	cpudata = &ipd->cpudata[cpuid];
+
+	if (__test_and_set_bit(IPIPE_SYNC_FLAG, &cpudata->status))
+		return;
+
+	/* The policy here is to keep the dispatching code interrupt-free
+	   by stalling the current stage. If the upper domain handler
+	   (which we call) wants to re-enable interrupts while in a safe
+	   portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
+	   sigaction()), it will have to unstall (then stall again before
+	   returning to us!) the stage when it sees fit. */
+
+	while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0) {
+		/* Give a slight priority advantage to high-numbered IRQs
+		   like the virtual ones. */
+		level = flnz(mask);
+		__clear_bit(level, &cpudata->irq_pending_hi);
+
+		while ((submask = cpudata->irq_pending_lo[level]) != 0) {
+			rank = flnz(submask);
+			irq = (level << IPIPE_IRQ_ISHIFT) + rank;
+
+			if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) {
+				__clear_bit(rank,
+					    &cpudata->irq_pending_lo[level]);
+				continue;
+			}
+
+			if (--cpudata->irq_counters[irq].pending_hits == 0) {
+				__clear_bit(rank,
+					    &cpudata->irq_pending_lo[level]);
+				ipipe_mark_irq_delivery(ipd,irq,cpuid);
+			}
+
+			__set_bit(IPIPE_STALL_FLAG, &cpudata->status);
+			ipipe_mark_domain_stall(ipd, cpuid);
+
+			if (ipd == ipipe_root_domain) {
+				/* Linux handlers are called with hw interrupts
+				   on so that they cannot defer interrupts for
+				   higher priority domains. */
+				local_irq_enable_hw();
+
+				if (likely(!ipipe_virtual_irq_p(irq))) {
+					__ipipe_call_root_xirq_handler(ipd,irq);
+				} else {
+					irq_enter(cpuid,irq);
+					__ipipe_call_root_virq_handler(ipd,irq);
+					irq_exit(cpuid,irq);
+				}
+
+				local_irq_disable_hw();
+			} else {
+				__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+				ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
+				__set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+			}
+
+#ifdef CONFIG_SMP
+			{
+				int _cpuid = ipipe_processor_id();
+
+				if (_cpuid != cpuid) {	/* Handle CPU migration. */
+					/* We expect any domain to clear the SYNC bit each
+					   time it switches in a new task, so that preemptions
+					   and/or CPU migrations (in the SMP case) over the
+					   ISR do not lock out the log syncer for some
+					   indefinite amount of time. In the Linux case,
+					   schedule() handles this (see kernel/sched.c). For
+					   this reason, we don't bother clearing it here for
+					   the source CPU in the migration handling case,
+					   since it must have scheduled another task in by
+					   now. */
+					cpuid = _cpuid;
+					cpudata = &ipd->cpudata[cpuid];
+					__set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
+				}
+			}
+#endif	/* CONFIG_SMP */
+
+			__clear_bit(IPIPE_STALL_FLAG, &cpudata->status);
+			ipipe_mark_domain_unstall(ipd, cpuid);
+		}
+	}
+
+	__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+}
+
+/* ipipe_trigger_irq() -- Push the interrupt at the front of the
+   pipeline, just as if it had actually been received from a hw
+   source. Also works for virtual interrupts. */
+
+int ipipe_trigger_irq(unsigned irq)
+{
+	struct pt_regs regs;
+	unsigned long flags;
+
+	if (irq >= IPIPE_NR_IRQS ||
+	    (ipipe_virtual_irq_p(irq) &&
+	     !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
+		return -EINVAL;
+
+	local_irq_save_hw(flags);
+
+	regs.orig_eax = irq;	/* Won't be acked */
+	regs.xcs = __KERNEL_CS;
+	regs.eflags = flags;
+
+	__ipipe_handle_irq(regs);
+
+	local_irq_restore_hw(flags);
+
+	return 1;
+}
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+	info->ncpus = num_online_cpus();
+	info->cpufreq = ipipe_cpu_freq();
+	info->archdep.tmirq = __ipipe_tick_irq;
+#ifdef CONFIG_X86_TSC
+	info->archdep.tmfreq = ipipe_cpu_freq();
+#else	/* !CONFIG_X86_TSC */
+	info->archdep.tmfreq = CLOCK_TICK_RATE;
+#endif	/* CONFIG_X86_TSC */
+
+	return 0;
+}
+
+int ipipe_tune_timer(unsigned long ns, int flags)
+{
+	unsigned hz, latch;
+	unsigned long x;
+
+	if (flags & IPIPE_RESET_TIMER)
+		latch = LATCH;
+	else {
+		hz = 1000000000 / ns;
+
+		if (hz < HZ)
+			return -EINVAL;
+
+		latch = (CLOCK_TICK_RATE + hz/2) / hz;
+	}
+
+	x = ipipe_critical_enter(NULL); /* Sync with all CPUs */
+
+	/* Shamelessly lifted from init_IRQ() in i8259.c */
+	outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
+	outb_p(latch & 0xff,0x40);	/* LSB */
+	outb(latch >> 8,0x40);	/* MSB */
+
+	ipipe_critical_exit(x);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(__ipipe_sync_stage);
+EXPORT_SYMBOL(__ipipe_tick_irq);
+EXPORT_SYMBOL(ipipe_critical_enter);
+EXPORT_SYMBOL(ipipe_critical_exit);
+EXPORT_SYMBOL(ipipe_trigger_irq);
+EXPORT_SYMBOL(ipipe_get_sysinfo);
+EXPORT_SYMBOL(ipipe_tune_timer);
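The latch computation in ipipe_tune_timer() rounds the PIT divisor to the nearest integer. A standalone worked example of that arithmetic, assuming the i386 value CLOCK_TICK_RATE = 1193180 Hz used by 2.4 kernels:

#include <stdio.h>

/* Sketch of the divisor math in ipipe_tune_timer(), outside the
 * kernel: a requested period of 1 ms maps to an 8254 latch of 1193. */
int main(void)
{
	unsigned long ns = 1000000;			/* requested period: 1 ms */
	unsigned hz = 1000000000 / ns;			/* = 1000 */
	unsigned latch = (1193180 + hz / 2) / hz;	/* = 1193 */

	/* Effective rate: 1193180 / 1193 ~= 1000.15 Hz, the closest
	   the PIT can get to the requested 1 kHz. */
	printf("hz=%u latch=%u effective=%.2f Hz\n",
	       hz, latch, 1193180.0 / latch);
	return 0;
}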
diff -uNrp 2.4.32/arch/i386/kernel/ipipe-root.c 2.4.32-i386-ipipe/arch/i386/kernel/ipipe-root.c
--- 2.4.32/arch/i386/kernel/ipipe-root.c	1970-01-01 01:00:00.000000000 +0100
+++ 2.4.32-i386-ipipe/arch/i386/kernel/ipipe-root.c	2006-02-04 18:22:06.000000000 +0100
@@ -0,0 +1,578 @@
+/*   -*- linux-c -*-
+ *   linux/arch/i386/kernel/ipipe-root.c
+ *
+ *   Copyright (C) 2002-2005 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *   Architecture-dependent I-PIPE support for x86.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include <asm/desc.h>
+#include <asm/io.h>
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/fixmap.h>
+#include <asm/bitops.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
+#include <asm/io_apic.h>
+#endif	/* CONFIG_X86_IO_APIC */
+#include <asm/apic.h>
+
+static int __ipipe_noack_irq(unsigned irq)
+{
+	return 1;
+}
+#endif	/* CONFIG_X86_LOCAL_APIC */
+
+asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
+void smp_apic_timer_interrupt(struct pt_regs *regs);
+asmlinkage void smp_spurious_interrupt(struct pt_regs *regs);
+asmlinkage void smp_error_interrupt(struct pt_regs *regs);
+asmlinkage void smp_reschedule_interrupt(struct pt_regs *regs);
+asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs);
+asmlinkage void smp_call_function_interrupt(struct pt_regs *regs);
+
+static int __ipipe_ack_common_irq(unsigned irq)
+{
+	irq_desc_t *desc = irq_desc + irq;
+	unsigned long flags;
+	ipipe_declare_cpuid;
+
+	ipipe_load_cpuid();	/* hw interrupts are off. */
+	flags = ipipe_test_and_stall_pipeline();
+	desc->handler->ack(irq);
+	ipipe_restore_pipeline_nosync(ipipe_percpu_domain[cpuid], flags, cpuid);
+
+	return 1;
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+static void __ipipe_null_handler(unsigned irq, void *cookie)
+{
+	/* Nop. */
+}
+
+#ifdef CONFIG_SMP
+
+static int __ipipe_boot_cpuid(void)
+{
+	return 0;
+}
+
+u8 __ipipe_apicid_2_cpu[IPIPE_NR_CPUS];
+
+static int __ipipe_hard_cpuid(void)
+{
+	unsigned long flags;
+	int cpu;
+
+	local_irq_save_hw(flags);
+	cpu = __ipipe_apicid_2_cpu[GET_APIC_ID(apic_read(APIC_ID))];
+	local_irq_restore_hw(flags);
+	return cpu;
+}
+
+int (*__ipipe_logical_cpuid)(void) = &__ipipe_boot_cpuid;
+
+EXPORT_SYMBOL(__ipipe_logical_cpuid);
+
+#endif /* CONFIG_SMP */
+
+#endif	/* CONFIG_X86_LOCAL_APIC */
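__ipipe_hard_cpuid() above resolves the logical CPU id by reading the local APIC's physical id and translating it through __ipipe_apicid_2_cpu[]. A minimal sketch of how each CPU would populate that table as it comes online; the hook name is hypothetical, not part of this excerpt:

/* Hypothetical per-CPU boot hook: record this CPU's physical APIC id
   so __ipipe_hard_cpuid() can translate it back later, even when the
   current stack gives no clue about the running CPU. */
static void __init example_map_this_cpu(int logical_cpu)
{
	__ipipe_apicid_2_cpu[GET_APIC_ID(apic_read(APIC_ID))] = logical_cpu;
}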
+
+/* __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+   interrupts are off, and secondary CPUs are still lost in space. */
+
+void __init __ipipe_enable_pipeline(void)
+{
+	unsigned irq;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+	/* Map the APIC system vectors. */
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     LOCAL_TIMER_VECTOR - FIRST_EXTERNAL_VECTOR,
+			     (ipipe_irq_handler_t)&smp_apic_timer_interrupt,
+			     NULL,
+			     &__ipipe_ack_system_irq,
+			     IPIPE_STDROOT_MASK);
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     SPURIOUS_APIC_VECTOR - FIRST_EXTERNAL_VECTOR,
+			     (ipipe_irq_handler_t)&smp_spurious_interrupt,
+			     NULL,
+			     &__ipipe_noack_irq,
+			     IPIPE_STDROOT_MASK);
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     ERROR_APIC_VECTOR - FIRST_EXTERNAL_VECTOR,
+			     (ipipe_irq_handler_t)&smp_error_interrupt,
+			     NULL,
+			     &__ipipe_ack_system_irq,
+			     IPIPE_STDROOT_MASK);
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     IPIPE_SERVICE_VECTOR0 - FIRST_EXTERNAL_VECTOR,
+			     &__ipipe_null_handler,
+			     NULL,
+			     &__ipipe_ack_system_irq,
+			     IPIPE_STDROOT_MASK);
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     IPIPE_SERVICE_VECTOR1 - FIRST_EXTERNAL_VECTOR,
+			     &__ipipe_null_handler,
+			     NULL,
+			     &__ipipe_ack_system_irq,
+			     IPIPE_STDROOT_MASK);
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     IPIPE_SERVICE_VECTOR2 - FIRST_EXTERNAL_VECTOR,
+			     &__ipipe_null_handler,
+			     NULL,
+			     &__ipipe_ack_system_irq,
+			     IPIPE_STDROOT_MASK);
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     IPIPE_SERVICE_VECTOR3 - FIRST_EXTERNAL_VECTOR,
+			     &__ipipe_null_handler,
+			     NULL,
+			     &__ipipe_ack_system_irq,
+			     IPIPE_STDROOT_MASK);
+
+	__ipipe_tick_irq =
+	    using_apic_timer ? LOCAL_TIMER_VECTOR - FIRST_EXTERNAL_VECTOR : 0;
+
+#else	/* !CONFIG_X86_LOCAL_APIC */
+
+	__ipipe_tick_irq = 0;
+
+#endif	/* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_SMP
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     RESCHEDULE_VECTOR - FIRST_EXTERNAL_VECTOR,
+			     (ipipe_irq_handler_t)&smp_reschedule_interrupt,
+			     NULL,
+			     &__ipipe_ack_system_irq,
+			     IPIPE_STDROOT_MASK);
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     INVALIDATE_TLB_VECTOR - FIRST_EXTERNAL_VECTOR,
+			     (ipipe_irq_handler_t)&smp_invalidate_interrupt,
+			     NULL,
+			     &__ipipe_ack_system_irq,
+			     IPIPE_STDROOT_MASK);
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     CALL_FUNCTION_VECTOR - FIRST_EXTERNAL_VECTOR,
+			     (ipipe_irq_handler_t)&smp_call_function_interrupt,
+			     NULL,
+			     &__ipipe_ack_system_irq,
+			     IPIPE_STDROOT_MASK);
+
+	/* Some guest O/S may run tasks over non-Linux stacks, so we
+	 * cannot rely on the regular definition of smp_processor_id()
+	 * on x86 to fetch the logical cpu id. We fix this by using
+	 * our own private physical apicid -> logical cpuid mapping
+	 * as soon as the pipeline is enabled, so that
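The ipipe_virtualize_irq() calls above show the root domain installing handlers for its system vectors; a client domain claims an IRQ through the same entry point. A minimal sketch, in which my_domain, MY_IRQ, my_handler() and the mode-mask choice are illustrative assumptions rather than code from this patch:

/* Hypothetical client-domain registration, mirroring the root-domain
   calls above. The handler runs from __ipipe_sync_stage() with its
   stage stalled and receives the cookie registered here. */
static void my_handler(unsigned irq, void *cookie)
{
	/* Service the device; hw interrupts are off on entry. */
}

static void my_domain_setup(struct ipipe_domain *my_domain)
{
	ipipe_virtualize_irq(my_domain,
			     MY_IRQ,	/* must be < IPIPE_NR_IRQS */
			     &my_handler,
			     NULL,	/* cookie handed back to my_handler */
			     &__ipipe_ack_common_irq,	/* ack at the PIC, as the root domain does */
			     IPIPE_STDROOT_MASK);	/* illustrative; a client would pick its own mask */
}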
