📄 adeos-ipipe-2.4.25-ppc-denx-1.0-03.patch

📁 xenomai: a very good real-time patch for Linux
💻 PATCH
📖 Page 1 of 5
+			while (!cpus_empty(__ipipe_cpu_sync_map))
+				cpu_relax();
+
+			cpu_clear(cpuid, __ipipe_cpu_lock_map);
+			cpu_clear(BITS_PER_LONG - 1, __ipipe_cpu_lock_map);
+		}
+	}
+#endif	/* CONFIG_SMP */
+
+	local_irq_restore_hw(flags);
+}
+
+void __ipipe_init_platform(void)
+{
+	unsigned timer_virq;
+
+	/*
+	 * Allocate a virtual IRQ for the decrementer trap early to
+	 * get it mapped to IPIPE_VIRQ_BASE
+	 */
+
+	timer_virq = ipipe_alloc_virq();
+
+	if (timer_virq != IPIPE_TIMER_VIRQ)
+		panic("I-pipe: cannot reserve timer virq #%d (got #%d)",
+		      IPIPE_TIMER_VIRQ, timer_virq);
+
+	__ipipe_decr_ticks = tb_ticks_per_jiffy;
+}
+
+/*
+ * __ipipe_sync_stage() -- Flush the pending IRQs for the current
+ * domain (and processor). This routine flushes the interrupt log
+ * (see "Optimistic interrupt protection" from D. Stodolsky et al. for
+ * more on the deferred interrupt scheme). Every interrupt that
+ * occurred while the pipeline was stalled gets played. WARNING:
+ * callers on SMP boxen should always check for CPU migration on
+ * return of this routine. One can control the kind of interrupts
+ * which are going to be sync'ed using the syncmask
+ * parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
+ * plays virtual interrupts only. This routine must be called with hw
+ * interrupts off.
+ */
+void __ipipe_sync_stage(unsigned long syncmask)
+{
+	unsigned long mask, submask;
+	struct ipcpudata *cpudata;
+	struct ipipe_domain *ipd;
+	ipipe_declare_cpuid;
+	int level, rank;
+	unsigned irq;
+
+	ipipe_load_cpuid();
+	ipd = ipipe_percpu_domain[cpuid];
+	cpudata = &ipd->cpudata[cpuid];
+
+	if (__test_and_set_bit(IPIPE_SYNC_FLAG, &cpudata->status))
+		return;
+
+	/*
+	 * The policy here is to keep the dispatching code interrupt-free
+	 * by stalling the current stage. If the upper domain handler
+	 * (which we call) wants to re-enable interrupts while in a safe
+	 * portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
+	 * sigaction()), it will have to unstall (then stall again before
+	 * returning to us!) the stage when it sees fit.
+	 */
+	while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0) {
+		level = ffnz(mask);
+		__clear_bit(level, &cpudata->irq_pending_hi);
+
+		while ((submask = cpudata->irq_pending_lo[level]) != 0) {
+			rank = ffnz(submask);
+			irq = (level << IPIPE_IRQ_ISHIFT) + rank;
+
+			if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) {
+				__clear_bit(rank,
+					    &cpudata->irq_pending_lo[level]);
+				continue;
+			}
+
+			if (--cpudata->irq_counters[irq].pending_hits == 0) {
+				__clear_bit(rank,
+					    &cpudata->irq_pending_lo[level]);
+				ipipe_mark_irq_delivery(ipd,irq,cpuid);
+			}
+
+			__set_bit(IPIPE_STALL_FLAG, &cpudata->status);
+			ipipe_mark_domain_stall(ipd, cpuid);
+
+			if (ipd == ipipe_root_domain) {
+				/*
+				 * Linux handlers are called w/ hw
+				 * interrupts on so that they could
+				 * not defer interrupts for higher
+				 * priority domains.
+				 */
+				local_irq_enable_hw();
+				((void (*)(unsigned, struct pt_regs *))
+				 ipd->irqs[irq].handler) (irq, __ipipe_tick_regs + cpuid);
+				local_irq_disable_hw();
+			} else {
+				__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+				ipd->irqs[irq].handler(irq,ipd->irqs[irq].cookie);
+				__set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+			}
+#ifdef CONFIG_SMP
+			{
+				int _cpuid = ipipe_processor_id();
+
+				if (_cpuid != cpuid) {	/* Handle CPU migration. */
+					/*
+					 * We expect any domain to clear the SYNC bit each
+					 * time it switches in a new task, so that preemptions
+					 * and/or CPU migrations (in the SMP case) over the
+					 * ISR do not lock out the log syncer for some
+					 * indefinite amount of time. In the Linux case,
+					 * schedule() handles this (see kernel/sched.c). For
+					 * this reason, we don't bother clearing it here for
+					 * the source CPU in the migration handling case,
+					 * since it must have scheduled another task in by
+					 * now.
+					 */
+					cpuid = _cpuid;
+					cpudata = &ipd->cpudata[cpuid];
+					__set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+				}
+			}
+#endif	/* CONFIG_SMP */
+
+			__clear_bit(IPIPE_STALL_FLAG, &cpudata->status);
+			ipipe_mark_domain_unstall(ipd, cpuid);
+		}
+	}
+
+	__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+}
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+	info->ncpus = num_online_cpus();
+	info->cpufreq = ipipe_cpu_freq();
+	info->archdep.tmirq = IPIPE_TIMER_VIRQ;
+	info->archdep.tmfreq = info->cpufreq;
+
+	return 0;
+}
+
+/*
+ * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
+ * just like if it has been actually received from a hw source. Also
+ * works for virtual interrupts.
+ */
+int ipipe_trigger_irq(unsigned irq)
+{
+	unsigned long flags;
+
+	if (irq >= IPIPE_NR_IRQS ||
+	    (ipipe_virtual_irq_p(irq)
+	     && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
+		return -EINVAL;
+
+	local_irq_save_hw(flags);
+
+	__ipipe_handle_irq(irq, NULL);
+
+	local_irq_restore_hw(flags);
+
+	return 1;
+}
+
+static void __ipipe_set_decr(void)
+{
+	ipipe_declare_cpuid;
+
+	ipipe_load_cpuid();
+
+	disarm_decr[cpuid] = (__ipipe_decr_ticks != tb_ticks_per_jiffy);
+#ifdef CONFIG_40x
+	/* Enable and set auto-reload. */
+	mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);
+	mtspr(SPRN_PIT, __ipipe_decr_ticks);
+#else	/* !CONFIG_40x */
+	__ipipe_decr_next[cpuid] = __ipipe_read_timebase() + __ipipe_decr_ticks;
+	set_dec(__ipipe_decr_ticks);
+#endif	/* CONFIG_40x */
+}
+
+int ipipe_tune_timer(unsigned long ns, int flags)
+{
+	unsigned long x, ticks;
+
+	if (flags & IPIPE_RESET_TIMER)
+		ticks = tb_ticks_per_jiffy;
+	else {
+		ticks = ns * tb_ticks_per_jiffy / (1000000000 / HZ);
+
+		if (ticks > tb_ticks_per_jiffy)
+			return -EINVAL;
+	}
+
+	x = ipipe_critical_enter(&__ipipe_set_decr);	/* Sync with all CPUs */
+	__ipipe_decr_ticks = ticks;
+	__ipipe_set_decr();
+	ipipe_critical_exit(x);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(__ipipe_sync_stage);
+EXPORT_SYMBOL(__ipipe_decr_ticks);
+EXPORT_SYMBOL(__ipipe_decr_next);
+EXPORT_SYMBOL(ipipe_critical_enter);
+EXPORT_SYMBOL(ipipe_critical_exit);
+EXPORT_SYMBOL(ipipe_trigger_irq);
+EXPORT_SYMBOL(ipipe_get_sysinfo);
+EXPORT_SYMBOL(ipipe_tune_timer);
diff -uNrp 2.4.25-ppc/arch/ppc/kernel/ipipe-root.c 2.4.25-ppc-ipipe/arch/ppc/kernel/ipipe-root.c
--- 2.4.25-ppc/arch/ppc/kernel/ipipe-root.c	1970-01-01 01:00:00.000000000 +0100
+++ 2.4.25-ppc-ipipe/arch/ppc/kernel/ipipe-root.c	2006-02-04 20:09:01.000000000 +0100
@@ -0,0 +1,480 @@
+/* -*- linux-c -*-
+ * linux/arch/ppc/kernel/ipipe-root.c
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum (Adeos/ppc port over 2.6).
+ * Copyright (C) 2004 Wolfgang Grandegger (Adeos/ppc port over 2.4).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-pipe support for PowerPC.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/time.h>
+#include <asm/mmu_context.h>
+
+extern irq_desc_t irq_desc[];
+
+static struct hw_interrupt_type __ipipe_std_irq_dtype[NR_IRQS];
+
+static void __ipipe_override_irq_enable(unsigned irq)
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+	ipipe_irq_unlock(irq);
+	__ipipe_std_irq_dtype[irq].enable(irq);
+	local_irq_restore_hw(flags);
+}
+
+static void __ipipe_override_irq_disable(unsigned irq)
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+	ipipe_irq_lock(irq);
+	__ipipe_std_irq_dtype[irq].disable(irq);
+	local_irq_restore_hw(flags);
+}
+
+static void __ipipe_override_irq_end(unsigned irq)
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+
+	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+		ipipe_irq_unlock(irq);
+
+	__ipipe_std_irq_dtype[irq].end(irq);
+
+	local_irq_restore_hw(flags);
+}
+
+static void __ipipe_override_irq_affinity(unsigned irq, cpumask_t mask)
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+	__ipipe_std_irq_dtype[irq].set_affinity(irq, mask);
+	local_irq_restore_hw(flags);
+}
+
+static void __ipipe_enable_sync(void)
+{
+	__ipipe_decr_next[ipipe_processor_id()] =
+		__ipipe_read_timebase() + get_dec();
+}
+
+/*
+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+ * interrupts are off, and secondary CPUs are still lost in space.
+ */
+void __ipipe_enable_pipeline(void)
+{
+	unsigned long flags;
+	unsigned irq;
+
+	flags = ipipe_critical_enter(&__ipipe_enable_sync);
+
+	/* First, virtualize all interrupts from the root domain. */
+
+	for (irq = 0; irq < NR_IRQS; irq++)
+		ipipe_virtualize_irq(ipipe_root_domain,
+				     irq,
+				     (ipipe_irq_handler_t)&__ipipe_do_IRQ,
+				     NULL,
+				     &__ipipe_ack_irq,
+				     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+
+	/*
+	 * We use a virtual IRQ to handle the timer irq (decrementer trap)
+	 * which has been allocated early in __ipipe_init_platform().
+	 */
+
+	ipipe_virtualize_irq(ipipe_root_domain,
+			     IPIPE_TIMER_VIRQ,
+			     (ipipe_irq_handler_t)&__ipipe_do_timer,
+			     NULL,
+			     NULL, IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+
+	/*
+	 * Interpose on the IRQ control routines so we can make them
+	 * atomic using hw masking and prevent the interrupt log from
+	 * being untimely flushed.
+	 */
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		if (irq_desc[irq].handler != NULL)
+			__ipipe_std_irq_dtype[irq] = *irq_desc[irq].handler;
+	}
+
+	/*
+	 * The original controller structs are often shared, so we first
+	 * save them all before changing any of them. Notice that we don't
+	 * override the ack() handler since we will enforce the necessary
+	 * setup in __ipipe_ack_irq().
+	 */
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		struct hw_interrupt_type *handler = irq_desc[irq].handler;
+
+		if (handler == NULL)
+			continue;
+
+		if (handler->enable != NULL)
+			handler->enable = &__ipipe_override_irq_enable;
+
+		if (handler->disable != NULL)
+			handler->disable = &__ipipe_override_irq_disable;
+
+		if (handler->end != NULL)
+			handler->end = &__ipipe_override_irq_end;
+
+		if (handler->set_affinity != NULL)
+			handler->set_affinity = &__ipipe_override_irq_affinity;
+	}
+
+	__ipipe_decr_next[ipipe_processor_id()] =
+		__ipipe_read_timebase() + get_dec();
+
+	ipipe_critical_exit(flags);
+}
+
+int __ipipe_ack_irq(unsigned irq)
+{
+	irq_desc_t *desc = irq_desc + irq;
+	unsigned long flags;
+	ipipe_declare_cpuid;
+
+	if (desc->handler == NULL ||
+	    desc->handler->ack == NULL)
+		return 1;
+
+	/*
+	 * No need to mask IRQs at hw level: we are always called from
+	 * __ipipe_handle_irq(), so interrupts are already off. We
+	 * stall the pipeline so that spin_lock_irq*() ops won't
+	 * unintentionally flush it, since this could cause infinite
+	 * recursion.
+	 */
+
+	ipipe_load_cpuid();
+	flags = ipipe_test_and_stall_pipeline();
+	desc->handler->ack(irq);
+	ipipe_restore_pipeline_nosync(ipipe_percpu_domain[cpuid], flags, cpuid);
+
+	return 1;
+}
+
+/*
+ * __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
+ * be called with local hw interrupts disabled.
+ */
+static inline void __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
+{
+	struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];
+
+	while (pos != &__ipipe_pipeline) {
+		struct ipipe_domain *next_domain =
+			list_entry(pos, struct ipipe_domain, p_link);
+
+		if (test_bit(IPIPE_STALL_FLAG,
+			     &next_domain->cpudata[cpuid].status))
+			break;	/* Stalled stage -- do not go further. */
+
+		if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {
+
+			if (next_domain == this_domain)
+				__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
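The excerpt above stops mid-function at the page break; the remaining hunks continue on the following pages. For orientation only, the sketch below (not part of the patch) shows how a kernel module might drive the virtual-IRQ API this page defines, using the calls visible in the excerpt: ipipe_alloc_virq(), ipipe_virtualize_irq() and ipipe_trigger_irq(). The module name, the handler name and the <linux/ipipe.h> include path are assumptions made for illustration, not something taken from the patch.

/* Illustrative sketch only -- not part of adeos-ipipe-2.4.25-ppc-denx-1.0-03.patch. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ipipe.h>	/* assumed header installed by the I-pipe patch */

static unsigned my_virq;	/* hypothetical virtual IRQ number */

/*
 * Handler run when the virq is played for the root domain by
 * __ipipe_sync_stage(); the (irq, cookie) signature follows the
 * ipd->irqs[irq].handler(irq, cookie) call made in that routine.
 */
static void my_virq_handler(unsigned irq, void *cookie)
{
	printk(KERN_INFO "virq %u delivered\n", irq);
}

static int __init my_virq_init(void)
{
	my_virq = ipipe_alloc_virq();	/* reserve a free virtual IRQ slot */

	/*
	 * Attach the handler to the root (Linux) domain, the same way
	 * __ipipe_enable_pipeline() hooks IPIPE_TIMER_VIRQ above.
	 */
	ipipe_virtualize_irq(ipipe_root_domain, my_virq,
			     (ipipe_irq_handler_t)&my_virq_handler, NULL, NULL,
			     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);

	/* Post the interrupt as if it came from hardware; returns 1 on success. */
	return ipipe_trigger_irq(my_virq) == 1 ? 0 : -EINVAL;
}

module_init(my_virq_init);
MODULE_LICENSE("GPL");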
