📄 adeos-ipipe-2.6.12-bf533-1.2-00.patch
Font size:
diff -uNrp linux-2.6.12/arch/blackfin/Kconfig 2.6.12-ipipe/arch/blackfin/Kconfig
--- linux-2.6.12/arch/blackfin/Kconfig	2005-12-21 08:15:32.000000000 +0100
+++ 2.6.12-ipipe/arch/blackfin/Kconfig	2006-02-11 22:41:02.000000000 +0100
@@ -569,6 +569,8 @@
 source "drivers/pci/hotplug/Kconfig"
 endmenu
 
+source "kernel/ipipe/Kconfig"
+
 menu "Executable File Formats"
 source "fs/Kconfig.binfmt"
 endmenu
diff -uNrp linux-2.6.12/arch/blackfin/kernel/Makefile 2.6.12-ipipe/arch/blackfin/kernel/Makefile
--- linux-2.6.12/arch/blackfin/kernel/Makefile	2005-11-22 02:13:57.000000000 +0100
+++ 2.6.12-ipipe/arch/blackfin/kernel/Makefile	2006-02-11 22:41:02.000000000 +0100
@@ -7,3 +7,4 @@ obj-y:= entry.o process.o bfin_ksyms.o p
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_BFIN_DMA_5XX) += bfin_dma_5xx.o
+obj-$(CONFIG_IPIPE) += ipipe-core.o ipipe-root.o
diff -uNrp linux-2.6.12/arch/blackfin/kernel/ipipe-core.c 2.6.12-ipipe/arch/blackfin/kernel/ipipe-core.c
--- linux-2.6.12/arch/blackfin/kernel/ipipe-core.c	1970-01-01 01:00:00.000000000 +0100
+++ 2.6.12-ipipe/arch/blackfin/kernel/ipipe-core.c	2006-02-16 16:20:20.000000000 +0100
@@ -0,0 +1,360 @@
+/* -*- linux-c -*-
+ * linux/arch/blackfin/kernel/ipipe-core.c
+ *
+ * Copyright (C) 2005 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-PIPE core support for the Blackfin.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/irqchip.h>
+#include <asm/io.h>
+
+extern struct irqdesc irq_desc[];
+
+struct pt_regs __ipipe_tick_regs[IPIPE_NR_CPUS];
+
+static void __ipipe_no_irqtail(void);
+
+unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
+
+#ifdef CONFIG_SMP
+
+static cpumask_t __ipipe_cpu_sync_map;
+
+static cpumask_t __ipipe_cpu_lock_map;
+
+static ipipe_spinlock_t __ipipe_cpu_barrier = IPIPE_SPIN_LOCK_UNLOCKED;
+
+static atomic_t __ipipe_critical_count = ATOMIC_INIT(0);
+
+static void (*__ipipe_cpu_sync) (void);
+
+/* Always called with hw interrupts off. */
+
+void __ipipe_do_critical_sync(unsigned irq)
+{
+	ipipe_declare_cpuid;
+
+	ipipe_load_cpuid();
+
+	cpu_set(cpuid, __ipipe_cpu_sync_map);
+
+	/* Now we are in sync with the lock requestor running on another
+	   CPU. Enter a spinning wait until he releases the global
+	   lock. */
+	spin_lock_hw(&__ipipe_cpu_barrier);
+
+	/* Got it. Now get out. */
+
+	if (__ipipe_cpu_sync)
+		/* Call the sync routine if any. */
+		__ipipe_cpu_sync();
+
+	spin_unlock_hw(&__ipipe_cpu_barrier);
+
+	cpu_clear(cpuid, __ipipe_cpu_sync_map);
+}
+
+#endif	/* CONFIG_SMP */
+
+unsigned long ipipe_critical_enter(void (*syncfn) (void))
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+
+#ifdef CONFIG_SMP
+	if (num_online_cpus() > 1) {	/* We might be running a SMP-kernel on a UP box... */
+		ipipe_declare_cpuid;
+		cpumask_t lock_map;
+
+		ipipe_load_cpuid();
+
+		if (!cpu_test_and_set(cpuid, __ipipe_cpu_lock_map)) {
+			while (cpu_test_and_set
+			       (BITS_PER_LONG - 1, __ipipe_cpu_lock_map)) {
+				int n = 0;
+				do {
+					cpu_relax();
+				} while (++n < cpuid);
+			}
+
+			spin_lock_hw(&__ipipe_cpu_barrier);
+
+			__ipipe_cpu_sync = syncfn;
+
+			/* Send the sync IPI to all processors but the current one. */
+			send_IPI_allbutself(IPIPE_CRITICAL_VECTOR);
+
+			cpus_andnot(lock_map, cpu_online_map,
+				    __ipipe_cpu_lock_map);
+
+			while (!cpus_equal(__ipipe_cpu_sync_map, lock_map))
+				cpu_relax();
+		}
+
+		atomic_inc(&__ipipe_critical_count);
+	}
+#endif	/* CONFIG_SMP */
+
+	return flags;
+}
+
+void ipipe_critical_exit(unsigned long flags)
+{
+#ifdef CONFIG_SMP
+	if (num_online_cpus() > 1) {	/* We might be running a SMP-kernel on a UP box... */
+		ipipe_declare_cpuid;
+
+		ipipe_load_cpuid();
+
+		if (atomic_dec_and_test(&__ipipe_critical_count)) {
+			spin_unlock_hw(&__ipipe_cpu_barrier);
+
+			while (!cpus_empty(__ipipe_cpu_sync_map))
+				cpu_relax();
+
+			cpu_clear(cpuid, __ipipe_cpu_lock_map);
+			cpu_clear(BITS_PER_LONG - 1, __ipipe_cpu_lock_map);
+		}
+	}
+#endif	/* CONFIG_SMP */
+
+	local_irq_restore_hw(flags);
+}
+
+static void __ipipe_no_irqtail(void)
+{
+}
+
+static inline void __ipipe_run_irqtail(void)
+{
+	asmlinkage void __ipipe_call_irqtail(void);
+	unsigned long pending;
+
+	__builtin_bfin_csync();
+
+	pending = *pIPEND;
+	if (pending & 0x8000) {
+		pending &= ~0x8010;
+		if (pending && (pending & (pending - 1)) == 0)
+			__ipipe_call_irqtail();
+	}
+}
+
+/*
+ * __ipipe_sync_stage() -- Flush the pending IRQs for the current
+ * domain (and processor). This routine flushes the interrupt log
+ * (see "Optimistic interrupt protection" from D. Stodolsky et al. for
+ * more on the deferred interrupt scheme). Every interrupt that
+ * occurred while the pipeline was stalled gets played. WARNING:
+ * callers on SMP boxen should always check for CPU migration on
+ * return of this routine.
+ * One can control the kind of interrupts
+ * which are going to be sync'ed using the syncmask
+ * parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
+ * plays virtual interrupts only. This routine must be called with hw
+ * interrupts off.
+ */
+void __ipipe_sync_stage(unsigned long syncmask)
+{
+	unsigned long mask, submask;
+	struct ipcpudata *cpudata;
+	struct ipipe_domain *ipd;
+	ipipe_declare_cpuid;
+	int level, rank;
+	unsigned irq;
+
+	ipipe_load_cpuid();
+	ipd = ipipe_percpu_domain[cpuid];
+	cpudata = &ipd->cpudata[cpuid];
+
+	if (__test_and_set_bit(IPIPE_SYNC_FLAG, &cpudata->status))
+		return;
+
+	/*
+	 * The policy here is to keep the dispatching code interrupt-free
+	 * by stalling the current stage. If the upper domain handler
+	 * (which we call) wants to re-enable interrupts while in a safe
+	 * portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
+	 * sigaction()), it will have to unstall (then stall again before
+	 * returning to us!) the stage when it sees fit.
+	 */
+	while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0) {
+		level = ffs(mask) - 1;
+		__clear_bit(level, &cpudata->irq_pending_hi);
+
+		while ((submask = cpudata->irq_pending_lo[level]) != 0) {
+
+			if (ipd == ipipe_root_domain &&
+			    test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags)) {
+				__set_bit(level, &cpudata->irq_pending_hi);
+				goto done;
+			}
+
+			rank = ffs(submask) - 1;
+			irq = (level << IPIPE_IRQ_ISHIFT) + rank;
+
+			if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) {
+				__clear_bit(rank,
+					    &cpudata->irq_pending_lo[level]);
+				continue;
+			}
+
+			if (--cpudata->irq_counters[irq].pending_hits == 0) {
+				__clear_bit(rank,
+					    &cpudata->irq_pending_lo[level]);
+				ipipe_mark_irq_delivery(ipd,irq,cpuid);
+			}
+
+			__set_bit(IPIPE_STALL_FLAG, &cpudata->status);
+			ipipe_mark_domain_stall(ipd, cpuid);
+
+			if (ipd == ipipe_root_domain) {
+				/*
+				 * Note: the I-pipe implements a
+				 * threaded interrupt model on this
+				 * arch for Linux external IRQs.
+				 * The interrupt handler we call here only
+				 * wakes up the associated IRQ thread.
+				 */
+				if (ipipe_virtual_irq_p(irq)) {
+					/* No irqtail here; virtual interrupts have
+					   no effect on IPEND so there is no need for
+					   processing deferral. */
+					local_irq_enable_hw();
+					ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
+					local_irq_disable_hw();
+				} else
+					/* No need to run the irqtail here either; we are not
+					   preemptable by hw IRQs, so non-Linux IRQs cannot
+					   stack over the short thread wakeup code. Which in turn
+					   means that no irqtail condition could be pending
+					   for domains above Linux in the pipeline. */
+					((void (*)(unsigned, struct pt_regs *))
+					 ipd->irqs[irq].handler) (irq, __ipipe_tick_regs + cpuid);
+			} else {
+				__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+				ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
+				/* Attempt to exit the outer interrupt
+				 * level before starting the deferred
+				 * IRQ processing. */
+				__ipipe_run_irqtail();
+				__set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+			}
+#ifdef CONFIG_SMP
+			{
+				int _cpuid = ipipe_processor_id();
+
+				if (_cpuid != cpuid) {	/* Handle CPU migration. */
+					/* We expect any domain to clear the SYNC bit each
+					   time it switches in a new task, so that preemptions
+					   and/or CPU migrations (in the SMP case) over the
+					   ISR do not lock out the log syncer for some
+					   indefinite amount of time. In the Linux case,
+					   schedule() handles this (see kernel/sched.c). For
+					   this reason, we don't bother clearing it here for
+					   the source CPU in the migration handling case,
+					   since it must have scheduled another task in by
+					   now. */
+					cpuid = _cpuid;
+					cpudata = &ipd->cpudata[cpuid];
+					__set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
+				}
+			}
+#endif	/* CONFIG_SMP */
+			__clear_bit(IPIPE_STALL_FLAG, &cpudata->status);
+			ipipe_mark_domain_unstall(ipd, cpuid);
+		}
+	}
+
+done:
+	__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+}
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+	info->ncpus = num_online_cpus();
+	info->cpufreq = ipipe_cpu_freq();
+	info->archdep.tmirq = IPIPE_TIMER_IRQ;
+	info->archdep.tmfreq = info->cpufreq;
+
+	return 0;
+}
+
+/*
+ * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
+ * just like if it has been actually received from a hw source. Also
+ * works for virtual interrupts.
+ */
+int ipipe_trigger_irq(unsigned irq)
+{
+	unsigned long flags;
+
+	if (irq >= IPIPE_NR_IRQS ||
+	    (ipipe_virtual_irq_p(irq)
+	     && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
+		return -EINVAL;
+
+	local_irq_save_hw(flags);
+
+	__ipipe_handle_irq(irq, NULL);
+
+	local_irq_restore_hw(flags);
+
+	return 1;
+}
+
+int ipipe_tune_timer(unsigned long ns, int flags)
+{
+	unsigned long x, hz;
+
+	x = ipipe_critical_enter(NULL);
+
+	*pTIMER_DISABLE = 1;
+	__builtin_bfin_ssync();
+	*pTIMER0_CONFIG = 0x19;	/* IRQ enable, periodic, PWM_OUT, SCLKed */
+	__builtin_bfin_ssync();
+	hz = (flags & IPIPE_RESET_TIMER) ? HZ : 1000000000L / ns;
+	*pTIMER0_PERIOD = get_sclk() / hz;
+	__builtin_bfin_ssync();
+	*pTIMER0_WIDTH = 1;
+	__builtin_bfin_ssync();
+	*pTIMER_ENABLE = 1;
+	__builtin_bfin_ssync();
+
+	ipipe_critical_exit(x);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(__ipipe_irq_tail_hook);
+EXPORT_SYMBOL(__ipipe_sync_stage);
+EXPORT_SYMBOL(ipipe_critical_enter);
+EXPORT_SYMBOL(ipipe_critical_exit);
+EXPORT_SYMBOL(ipipe_trigger_irq);
+EXPORT_SYMBOL(ipipe_get_sysinfo);
+EXPORT_SYMBOL(ipipe_tune_timer);
diff -uNrp linux-2.6.12/arch/blackfin/kernel/ipipe-root.c 2.6.12-ipipe/arch/blackfin/kernel/ipipe-root.c
--- linux-2.6.12/arch/blackfin/kernel/ipipe-root.c	1970-01-01 01:00:00.000000000 +0100
+++ 2.6.12-ipipe/arch/blackfin/kernel/ipipe-root.c	2006-02-16 15:43:33.000000000 +0100
@@ -0,0 +1,312 @@
+/* -*- linux-c -*-
+ * linux/arch/blackfin/kernel/ipipe-root.c
+ *
+ * Copyright (C) 2005 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-pipe support for the Blackfin.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+
+/* __ipipe_sync_stage -> asm_do_IRQ -> do_simple/edge/level_IRQ -> __do_irq -> action */
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
+
+extern struct irqdesc irq_desc[];
+
+unsigned long __ipipe_core_clock;
+
+unsigned long __ipipe_freq_scale;
+
+/*
+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+ * interrupts are off, and secondary CPUs are still lost in space.
+ */
+void __ipipe_enable_pipeline(void)
+{
+	unsigned irq;
+
+	__ipipe_core_clock = get_cclk();	/* Fetch this once. */
+	__ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		if (irq != IRQ_SW_INT1 && irq != IRQ_SW_INT2)
+			ipipe_virtualize_irq(ipipe_root_domain,
+					     irq,
+					     (ipipe_irq_handler_t)&asm_do_IRQ,
+					     NULL,
+					     &__ipipe_ack_irq,
+					     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+	}
+}
+
+int __ipipe_ack_irq(unsigned irq)
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Full-screen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -