adeos-ipipe-2.6.12-bf533-1.2-00.patch
+{
+	struct irqdesc *desc = irq_desc + irq;
+	unsigned long flags;
+	ipipe_declare_cpuid;
+
+	if (irq == IRQ_SYSTMR) {
+		/* Clear interrupt latch for TIMER0, don't mask. */
+		*pTIMER_STATUS = 1;
+		__builtin_bfin_ssync();
+		return 1;
+	}
+
+	/*
+	 * No need to mask IRQs at hw level: we are always called from
+	 * __ipipe_handle_irq(), so interrupts are already off. We
+	 * stall the pipeline so that spin_lock_irq*() ops won't
+	 * unintentionally flush it, since this could cause infinite
+	 * recursion.
+	 */
+
+	ipipe_load_cpuid();
+	flags = ipipe_test_and_stall_pipeline();
+	preempt_disable();
+	desc->chip->ack(irq);
+	preempt_enable_no_resched();
+	ipipe_restore_pipeline_nosync(ipipe_percpu_domain[cpuid], flags, cpuid);
+
+	return 1;
+}
+
+/*
+ * __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
+ * be called with local hw interrupts disabled.
+ */
+static inline void __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
+{
+	struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];
+	int s = -1;
+
+	if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
+		s = __test_and_set_bit(IPIPE_STALL_FLAG,
+				       &ipipe_root_domain->cpudata[cpuid].status);
+
+	while (pos != &__ipipe_pipeline) {
+		struct ipipe_domain *next_domain =
+			list_entry(pos, struct ipipe_domain, p_link);
+
+		if (test_bit(IPIPE_STALL_FLAG,
+			     &next_domain->cpudata[cpuid].status))
+			break;	/* Stalled stage -- do not go further. */
+
+		if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {
+
+			if (next_domain == this_domain)
+				__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+			else {
+				__ipipe_switch_to(this_domain, next_domain, cpuid);
+
+				ipipe_load_cpuid();	/* Processor might have changed. */
+
+				if (this_domain->cpudata[cpuid].irq_pending_hi != 0
+				    && !test_bit(IPIPE_STALL_FLAG,
+						 &this_domain->cpudata[cpuid].status))
+					__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+			}
+
+			break;
+		} else if (next_domain == this_domain)
+			break;
+
+		pos = next_domain->p_link.next;
+	}
+
+	if (!s)
+		__clear_bit(IPIPE_STALL_FLAG,
+			    &ipipe_root_domain->cpudata[cpuid].status);
+}
+
+/*
+ * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
+ * interrupt protection log is maintained here for each domain. Hw
+ * interrupts are masked on entry.
+ */
+void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
+{
+	struct ipipe_domain *this_domain;
+	struct list_head *head, *pos;
+	ipipe_declare_cpuid;
+	int m_ack, s_ack;
+
+	m_ack = (regs == NULL);	/* Software-triggered IRQs do not need
+				 * any ack. */
+	ipipe_load_cpuid();
+
+	this_domain = ipipe_percpu_domain[cpuid];
+
+	s_ack = m_ack;
+
+	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
+		head = &this_domain->p_link;
+	else
+		head = __ipipe_pipeline.next;
+
+	/* Ack the interrupt. */
+
+	pos = head;
+
+	while (pos != &__ipipe_pipeline) {
+		struct ipipe_domain *next_domain =
+			list_entry(pos, struct ipipe_domain, p_link);
+
+		/*
+		 * For each domain handling the incoming IRQ, mark it as
+		 * pending in its log.
+		 */
+		if (test_bit(IPIPE_HANDLE_FLAG,
+			     &next_domain->irqs[irq].control)) {
+			/*
+			 * Domains that handle this IRQ are polled for
+			 * acknowledging it by decreasing priority order. The
+			 * interrupt must be made pending _first_ in the
+			 * domain's status flags before the PIC is unlocked.
+			 */
+
+			next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
+			next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
+			__ipipe_set_irq_bit(next_domain, cpuid, irq);
+			ipipe_mark_irq_receipt(next_domain, irq, cpuid);
+
+			/*
+			 * Always get the first master acknowledge available.
+			 * Once we've got it, allow slave acknowledge
+			 * handlers to run (until one of them stops us).
+			 */
+			if (next_domain->irqs[irq].acknowledge != NULL) {
+				if (!m_ack)
+					m_ack = next_domain->irqs[irq].acknowledge(irq);
+				else if (test_bit
+					 (IPIPE_SHARED_FLAG,
+					  &next_domain->irqs[irq].control) && !s_ack)
+					s_ack = next_domain->irqs[irq].acknowledge(irq);
+			}
+		}
+
+		/*
+		 * If the domain does not want the IRQ to be passed down the
+		 * interrupt pipe, exit the loop now.
+		 */
+
+		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+			break;
+
+		pos = next_domain->p_link.next;
+	}
+
+	/*
+	 * Now walk the pipeline, yielding control to the highest
+	 * priority domain that has pending interrupt(s) or
+	 * immediately to the current domain if the interrupt has been
+	 * marked as 'sticky'. This search does not go beyond the
+	 * current domain in the pipeline.
+	 */
+	__ipipe_walk_pipeline(head, cpuid);
+}
+
+asmlinkage int __ipipe_check_root(void)
+{
+	ipipe_declare_cpuid;
+	/*
+	 * SMP: This routine is called with hw interrupts off, so no
+	 * migration can occur while checking the identity of the
+	 * current domain.
+	 */
+	ipipe_load_cpuid();
+	return ipipe_percpu_domain[cpuid] == ipipe_root_domain;
+}
+
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
+{
+	ipipe_declare_cpuid;
+	unsigned long flags;
+
+	/*
+	 * This routine either returns:
+	 * 0 -- if the syscall is to be passed to Linux;
+	 * 1 -- if the syscall should not be passed to Linux, and no
+	 * tail work should be performed;
+	 * -1 -- if the syscall should not be passed to Linux but the
+	 * tail work has to be performed (for handling signals etc).
+	 */
+
+	if (__ipipe_event_pipelined_p(IPIPE_EVENT_SYSCALL) &&
+	    __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
+		/*
+		 * We might enter here over a non-root domain and exit
+		 * over the root one as a result of the syscall
+		 * (i.e. by recycling the register set of the current
+		 * context across the migration), so we need to fix up
+		 * the interrupt flag upon return too, so that
+		 * __ipipe_unstall_iret_root() resets the correct
+		 * stall bit on exit.
+		 */
+		if (ipipe_current_domain == ipipe_root_domain && !in_atomic()) {
+			/*
+			 * Sync pending VIRQs before _TIF_NEED_RESCHED
+			 * is tested.
+			 */
+			ipipe_lock_cpu(flags);
+			if ((ipipe_root_domain->cpudata[cpuid].irq_pending_hi & IPIPE_IRQMASK_VIRT) != 0)
+				__ipipe_sync_stage(IPIPE_IRQMASK_VIRT);
+			ipipe_unlock_cpu(flags);
+			return -1;
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+void ipipe_stall_root_raw(void)
+{
+	ipipe_declare_cpuid;
+
+	ipipe_load_cpuid();
+
+	__set_bit(IPIPE_STALL_FLAG,
+		  &ipipe_root_domain->cpudata[cpuid].status);
+
+	ipipe_mark_domain_stall(ipipe_root_domain, cpuid);
+}
+
+void ipipe_unstall_root_raw(void)
+{
+	ipipe_declare_cpuid;
+
+	ipipe_load_cpuid();
+
+	__clear_bit(IPIPE_STALL_FLAG,
+		    &ipipe_root_domain->cpudata[cpuid].status);
+
+	ipipe_mark_domain_unstall(ipipe_root_domain, cpuid);
+}
+
+EXPORT_SYMBOL(__ipipe_core_clock);
+EXPORT_SYMBOL(__ipipe_freq_scale);
+EXPORT_SYMBOL(show_stack);
diff -uNrp linux-2.6.12/arch/blackfin/kernel/irqchip.c 2.6.12-ipipe/arch/blackfin/kernel/irqchip.c
--- linux-2.6.12/arch/blackfin/kernel/irqchip.c	2005-11-30 13:25:04.000000000 +0100
+++ 2.6.12-ipipe/arch/blackfin/kernel/irqchip.c	2006-02-14 18:01:24.000000000 +0100
@@ -6,7 +6,7 @@
  * Created:
  * Description: This file contains the simple DMA Implementation for Blackfin
  *
- * Rev: $Id: irqchip.c,v 1.12 2005/11/30 12:25:04 hennerich Exp $
+ * Rev: $Id: irqchip.c,v 1.2 2006/02/14 17:01:24 rpm Exp $
  *
  * Modified:
  * Copyright 2004-2005 Analog Devices Inc.
@@ -62,6 +62,94 @@ static LIST_HEAD(irq_pending);
 struct irqdesc irq_desc[NR_IRQS];
 extern int init_arch_irq(void);
+
+#ifdef CONFIG_IPIPE
+
+/* Implement a threaded interrupt model a la PREEMPT_RT on top of the
+   I-pipe, so that Linux device IRQ handlers cannot defer the
+   interrupt tail code for too long. */
+
+#include <linux/kthread.h>
+
+static int create_irq_threads;
+
+static int do_irqd(void *__desc)
+{
+	struct irqdesc *desc = __desc;
+	unsigned irq = desc - irq_desc;
+	int cpu = smp_processor_id();
+	cpumask_t cpumask;
+
+	sigfillset(&current->blocked);
+	current->flags |= PF_NOFREEZE;
+	cpumask = cpumask_of_cpu(cpu);
+	set_cpus_allowed(current, cpumask);
+
+	ipipe_setscheduler_root(current, SCHED_FIFO,
+				50 + IVG7 - __ipipe_get_irq_priority(irq));
+
+	while (!kthread_should_stop()) {
+		if (!down_interruptible(&desc->thrsem)) {
+			local_irq_disable();
+			desc->thrhandler(irq, &__ipipe_tick_regs[cpu]);
+			local_irq_enable();
+		}
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+static void kick_irqd(unsigned irq, void *cookie)
+{
+	struct irqdesc *desc = irq_desc + irq;
+	up(&desc->thrsem);
+}
+
+static int start_irq_thread(unsigned irq, struct irqdesc *desc)
+{
+	if (desc->thread || !create_irq_threads)
+		return 0;
+
+	sema_init(&desc->thrsem, 0);
+	desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
+
+	if (!desc->thread) {
+		printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
+		return -ENOMEM;
+	}
+
+	wake_up_process(desc->thread);
+
+	desc->thrhandler = (void (*)(unsigned, struct pt_regs *))ipipe_root_domain->irqs[irq].handler;
+	ipipe_root_domain->irqs[irq].handler = &kick_irqd;
+
+	return 0;
+}
+
+int __init init_irqthreads(void)
+{
+	unsigned irq;
+
+	create_irq_threads = 1;
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		struct irqdesc *desc = irq_desc + irq;
+		if (desc->action)
+			start_irq_thread(irq, desc);
+	}
+
+	return 0;
+}
+
+#else /* CONFIG_IPIPE */
+
+int __init init_irqthreads(void)
+{
+	return 0;
+}
+
+#endif /* CONFIG_IPIPE */
+
 /*
  * Dummy mask/unmask handler
  */
@@ -221,6 +309,7 @@ int show_interrupts(struct seq_file *p,
  */
 static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
 {
+#ifndef CONFIG_IPIPE	/* This check raises false positives over the I-pipe */
 	unsigned long instr_ptr = instruction_pointer(regs);
 
 	if (desc->lck_jif == jiffies &&
@@ -238,6 +327,7 @@ static int check_irq_lock(struct irqdesc
 		desc->lck_pc = instruction_pointer(regs);
 		desc->lck_jif = jiffies;
 	}
+#endif /* CONFIG_IPIPE */
 
 	return 0;
 }
@@ -317,6 +407,11 @@ void do_simple_IRQ(unsigned int irq, str
 		int ret = __do_irq(irq, action, regs);
 		if (ret != IRQ_HANDLED)
 			report_bad_irq(irq, regs, desc, ret);
+#ifdef CONFIG_IPIPE
+		if (likely(!desc->disable_depth &&
+			   !check_irq_lock(desc, irq, regs)))
+			desc->chip->unmask(irq);
+#endif /* CONFIG_IPIPE */
 	}
 }
@@ -341,7 +436,9 @@ void do_edge_IRQ(unsigned int irq, struc
 	/*
 	 * Acknowledge and clear the IRQ, but don't mask it.
 	 */
+#ifndef CONFIG_IPIPE
 	desc->chip->ack(irq);
+#endif /* CONFIG_IPIPE */
 
 	/*
 	 * Mark the IRQ currently in progress.
@@ -384,7 +481,9 @@ void do_edge_IRQ(unsigned int irq, struc
 		 */
 		desc->pending = 1;
 		desc->chip->mask(irq);
+#ifndef CONFIG_IPIPE
 		desc->chip->ack(irq);
+#endif /* CONFIG_IPIPE */
 	}
 
 	/*
@@ -400,7 +499,9 @@ void do_level_IRQ(unsigned int irq, stru
 	/*
 	 * Acknowledge, clear _AND_ disable the interrupt.
 	 */
+#ifndef CONFIG_IPIPE
 	desc->chip->ack(irq);
+#endif /* CONFIG_IPIPE */
 
 	if (likely(!desc->disable_depth)) {
 		kstat_cpu(cpu).irqs[irq]++;
@@ -487,6 +588,7 @@ asmlinkage void asm_do_IRQ(unsigned int
 
 	spin_unlock(&irq_controller_lock);
 
+#ifndef CONFIG_IPIPE	/* Useless and bogus over the I-pipe: IRQs are threaded. */
 	/* If we're the only interrupt running (ignoring IRQ15 which is
 	   for syscalls), lower our priority to IRQ14 so that softirqs
 	   run at that level.  If there's another, lower-level interrupt, irq_exit
@@ -496,6 +598,7 @@ asmlinkage void asm_do_IRQ(unsigned int
 	other_ints = pending & (pending - 1);
 	if (other_ints == 0)
 		lower_to_irq14();
+#endif /* !CONFIG_IPIPE */
 
 	irq_exit();
 }
@@ -622,6 +725,10 @@ int setup_irq(unsigned int irq, struct i
 	 * The following block of code has to be executed atomically
 	 */
 	desc = irq_desc + irq;
+#ifdef CONFIG_IPIPE
+	if (start_irq_thread(irq, desc))
+		return -ENOMEM;
+#endif /* CONFIG_IPIPE */
 	spin_lock_irqsave(&irq_controller_lock, flags);
 	p = &desc->action;
 	if ((old = *p) != NULL) {
diff -uNrp linux-2.6.12/arch/blackfin/kernel/process.c 2.6.12-ipipe/arch/blackfin/kernel/process.c
--- linux-2.6.12/arch/blackfin/kernel/process.c	2005-10-28 06:02:10.000000000 +0200
+++ 2.6.12-ipipe/arch/blackfin/kernel/process.c	2006-02-11 23:01:00.000000000 +0100
@@ -7,7 +7,7 @@
  * Description: This file handles the architecture-dependent parts
  * of process handling.
  *
- * Rev: $Id: process.c,v 1.30 2005/10/28 04:02:10 magicyang Exp $
+ * Rev: $Id: process.c,v 1.1 2006/02/11 22:01:00 rpm Exp $
  *
  * Modified:
  * Copyright 2004-2005 Analog Devices Inc.
@@ -52,11 +52,13 @@ inline static void default_idle(void)
 {
 	while (1) {
 		leds_switch(LED_OFF);
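
Editor's sketch 1: the ipipe.c hunks above maintain a per-domain interrupt log (__ipipe_handle_irq() marks the IRQ pending in every interested domain) and then walk the domain pipeline in strict priority order, stopping at the first stalled stage (__ipipe_walk_pipeline()). The following minimal, self-contained user-space C model illustrates only that walk-and-stall idea; the two-domain layout and every name in it (toy_domain, walk_pipeline, log_irq) are illustrative assumptions, not the kernel's actual data structures.

/*
 * Toy user-space model of the I-pipe interrupt log and pipeline walk.
 * Build with: gcc -std=c99 -o toywalk toywalk.c
 */
#include <stdio.h>

#define NDOMAINS 2		/* e.g. a real-time domain over the root domain */
#define NIRQS    32

struct toy_domain {
	const char *name;
	int stalled;			/* models IPIPE_STALL_FLAG */
	unsigned int irq_pending;	/* models irq_pending_hi: one bit per IRQ */
};

/* Highest priority first, like the __ipipe_pipeline list. */
static struct toy_domain pipeline[NDOMAINS] = {
	{ "rt-domain", 0, 0 },
	{ "root",      0, 0 },
};

/* Mark an IRQ pending in every domain's log (all domains accept it here). */
static void log_irq(unsigned int irq)
{
	for (int d = 0; d < NDOMAINS; d++)
		pipeline[d].irq_pending |= 1u << irq;
}

/*
 * Walk from the pipeline head: deliver pending IRQs to each unstalled
 * domain in priority order, and stop at a stalled stage so that
 * lower-priority domains cannot run ahead of it -- the core invariant
 * of __ipipe_walk_pipeline().
 */
static void walk_pipeline(void)
{
	for (int d = 0; d < NDOMAINS; d++) {
		struct toy_domain *dom = &pipeline[d];

		if (dom->stalled) {
			printf("%s: stalled, stopping walk\n", dom->name);
			return;	/* pending bits stay logged until unstall */
		}
		for (unsigned int irq = 0; irq < NIRQS; irq++)
			if (dom->irq_pending & (1u << irq)) {
				dom->irq_pending &= ~(1u << irq);
				printf("%s: handling IRQ %u\n", dom->name, irq);
			}
	}
}

int main(void)
{
	log_irq(6);
	walk_pipeline();	/* rt-domain handles IRQ 6 first, then root */

	pipeline[0].stalled = 1;	/* stall the RT stage */
	log_irq(7);
	walk_pipeline();	/* walk stops: IRQ 7 stays logged in both domains */
	return 0;
}

Running it shows IRQ 6 delivered to both stages in priority order, while IRQ 7 remains logged once the RT stage is stalled, which is exactly what the stall bit buys the real pipeline: interrupts are never lost, only deferred.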
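Editor's sketch 2: the irqchip.c hunks implement the threaded interrupt model the patch comment names. setup_irq() installs kick_irqd() as the pipeline-level handler, which only posts a per-IRQ semaphore from hard-IRQ context; the original Linux handler then runs later in a SCHED_FIFO kernel thread (do_irqd). This user-space POSIX sketch models that kick/thread split; thrsem, irq_thread and kick_irq_thread are hypothetical names chosen for the illustration, not kernel symbols.

/*
 * Toy model of the kick/thread split used by kick_irqd()/do_irqd().
 * Build with: gcc -o toyirqd toyirqd.c -lpthread
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t thrsem;		/* models desc->thrsem */

/* Models do_irqd(): block until kicked, then run the deferred tail handler. */
static void *irq_thread(void *arg)
{
	(void)arg;
	for (;;) {
		sem_wait(&thrsem);
		printf("irq thread: running deferred handler\n");
	}
	return NULL;
}

/* Models kick_irqd(): called from "hard IRQ" context, it only posts. */
static void kick_irq_thread(void)
{
	sem_post(&thrsem);
}

int main(void)
{
	pthread_t tid;

	sem_init(&thrsem, 0, 0);
	pthread_create(&tid, NULL, irq_thread, NULL);

	/* Simulate three hardware interrupts arriving. */
	for (int i = 0; i < 3; i++) {
		kick_irq_thread();
		usleep(100000);	/* give the handler thread time to run */
	}
	return 0;	/* the detached-style worker dies with the process */
}

The design point mirrored here is that the code running with interrupts off does the bare minimum (one semaphore post), so the I-pipe can bound interrupt latency regardless of how long the Linux device handler takes.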