
📄 adeos-ipipe-2.4.31-i386-1.1-03.patch

📁 xenomai: a very good real-time patch for Linux
💻 PATCH
📖 Page 1 of 5
+	 * ipipe_processor_id() always do the right thing, regardless
+	 * of the current stack setup. Also note that the pipeline is
+	 * enabled after the APIC space has been mapped in
+	 * trap_init(), so it's safe to use it. */
+
+	__ipipe_logical_cpuid = &__ipipe_hard_cpuid;
+
+#endif	/* CONFIG_SMP */
+
+	/* Finally, virtualize the remaining ISA and IO-APIC
+	 * interrupts. Interrupts which have already been virtualized
+	 * will just beget a silent -EPERM error since
+	 * IPIPE_SYSTEM_MASK has been passed for them, that's ok. */
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		/* Fails for IPIPE_CRITICAL_IPI but that's ok. */
+		ipipe_virtualize_irq(ipipe_root_domain,
+				     irq,
+				     (ipipe_irq_handler_t)&do_IRQ,
+				     NULL,
+				     &__ipipe_ack_common_irq,
+				     IPIPE_STDROOT_MASK);
+	}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	/* Eventually allow these vectors to be reprogrammed. */
+	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI0].control &= ~IPIPE_SYSTEM_MASK;
+	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI1].control &= ~IPIPE_SYSTEM_MASK;
+	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI2].control &= ~IPIPE_SYSTEM_MASK;
+	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI3].control &= ~IPIPE_SYSTEM_MASK;
+#endif	/* CONFIG_X86_LOCAL_APIC */
+}
+
+static inline void __fixup_if(struct pt_regs *regs)
+{
+	ipipe_declare_cpuid;
+	unsigned long flags;
+
+	ipipe_get_cpu(flags);
+
+	if (ipipe_percpu_domain[cpuid] == ipipe_root_domain) {
+		/* Have the saved hw state look like the domain stall bit, so
+		   that __ipipe_unstall_iret_root() restores the proper
+		   pipeline state for the root stage upon exit. */
+
+		if (test_bit
+		    (IPIPE_STALL_FLAG,
+		     &ipipe_root_domain->cpudata[cpuid].status))
+			regs->eflags &= ~X86_EFLAGS_IF;
+		else
+			regs->eflags |= X86_EFLAGS_IF;
+	}
+
+	ipipe_put_cpu(flags);
+}
+
+asmlinkage void __ipipe_unstall_iret_root(struct pt_regs regs)
+{
+	ipipe_declare_cpuid;
+
+	/* Emulate IRET's handling of the interrupt flag. */
+
+	local_irq_disable_hw();
+
+	ipipe_load_cpuid();
+
+	/* Restore the software state as it used to be on kernel
+	   entry. CAUTION: NMIs must *not* return through this
+	   emulation. */
+
+	if (!(regs.eflags & X86_EFLAGS_IF)) {
+		__set_bit(IPIPE_STALL_FLAG,
+			  &ipipe_root_domain->cpudata[cpuid].status);
+		ipipe_mark_domain_stall(ipipe_root_domain, cpuid);
+		regs.eflags |= X86_EFLAGS_IF;
+	} else {
+		__clear_bit(IPIPE_STALL_FLAG,
+			    &ipipe_root_domain->cpudata[cpuid].status);
+
+		ipipe_mark_domain_unstall(ipipe_root_domain, cpuid);
+
+		/* Only sync virtual IRQs here, so that we don't recurse
+		   indefinitely in case of an external interrupt flood. */
+
+		if ((ipipe_root_domain->cpudata[cpuid].
+		     irq_pending_hi & IPIPE_IRQMASK_VIRT) != 0)
+			__ipipe_sync_stage(IPIPE_IRQMASK_VIRT);
+	}
+}
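The two routines above implement the root domain's virtual interrupt flag: instead of letting Linux really disable hardware interrupts, the pipeline records a per-domain stall bit and makes the saved EFLAGS.IF mirror it on every kernel exit. The toy user-space model below sketches just that bookkeeping; it is an editorial illustration, not patch code, every toy_* name is made up, and it deliberately ignores SMP and the pending-IRQ synchronization.

#include <stdio.h>

#define X86_EFLAGS_IF	0x200
#define STALL_BIT	0x1UL

struct toy_domain {
	unsigned long status;		/* bit 0 = the stall flag */
};

/* Mirrors __fixup_if(): make the saved flags word reflect the
 * domain's stall bit before the frame is restored. */
static void toy_fixup_if(unsigned long *eflags, const struct toy_domain *d)
{
	if (d->status & STALL_BIT)
		*eflags &= ~(unsigned long)X86_EFLAGS_IF;
	else
		*eflags |= X86_EFLAGS_IF;
}

/* Mirrors __ipipe_unstall_iret_root(): translate the saved IF back
 * into the stall bit on the way out, while hardware interrupts are
 * left enabled for the pipeline's benefit. */
static void toy_unstall_iret(unsigned long *eflags, struct toy_domain *d)
{
	if (!(*eflags & X86_EFLAGS_IF)) {
		d->status |= STALL_BIT;		/* virtually off */
		*eflags |= X86_EFLAGS_IF;	/* hw stays on */
	} else {
		d->status &= ~STALL_BIT;
		/* the real code also syncs pending virtual IRQs here */
	}
}

int main(void)
{
	struct toy_domain root = { .status = STALL_BIT };	/* stalled */
	unsigned long eflags = X86_EFLAGS_IF;

	toy_fixup_if(&eflags, &root);
	printf("saved IF after fixup: %d\n", (eflags & X86_EFLAGS_IF) ? 1 : 0);

	toy_unstall_iret(&eflags, &root);
	printf("stall bit after iret: %d\n", (root.status & STALL_BIT) ? 1 : 0);
	return 0;
}

Starting from the stalled state, this prints a cleared IF and then a set stall bit, which is exactly the round trip __fixup_if() and __ipipe_unstall_iret_root() perform on the real exit path.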
+
+asmlinkage int __ipipe_syscall_root(struct pt_regs regs)
+{
+	ipipe_declare_cpuid;
+	unsigned long flags;
+
+	__fixup_if(&regs);
+
+	/* This routine either returns:
+	    0 -- if the syscall is to be passed to Linux;
+	   >0 -- if the syscall should not be passed to Linux, and no
+	   tail work should be performed;
+	   <0 -- if the syscall should not be passed to Linux but the
+	   tail work has to be performed (for handling signals etc). */
+
+	if (__ipipe_event_pipelined_p(IPIPE_EVENT_SYSCALL) &&
+	    __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL,&regs) > 0) {
+		/* We might enter here over a non-root domain and exit
+		 * over the root one as a result of the syscall
+		 * (i.e. by recycling the register set of the current
+		 * context across the migration), so we need to fixup
+		 * the interrupt flag upon return too, so that
+		 * __ipipe_unstall_iret_root() resets the correct
+		 * stall bit on exit. */
+		__fixup_if(&regs);
+
+		if (ipipe_current_domain == ipipe_root_domain) {
+			/* Sync pending VIRQs before _TIF_NEED_RESCHED
+			 * is tested. */
+			ipipe_lock_cpu(flags);
+			if ((ipipe_root_domain->cpudata[cpuid].irq_pending_hi & IPIPE_IRQMASK_VIRT) != 0)
+				__ipipe_sync_stage(IPIPE_IRQMASK_VIRT);
+			ipipe_unlock_cpu(flags);
+			return -1;
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+asmlinkage void do_divide_error(struct pt_regs *regs, long error_code);
+asmlinkage void do_overflow(struct pt_regs *regs, long error_code);
+asmlinkage void do_bounds(struct pt_regs *regs, long error_code);
+asmlinkage void do_invalid_op(struct pt_regs *regs, long error_code);
+asmlinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code);
+asmlinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
+asmlinkage void do_segment_not_present(struct pt_regs *regs, long error_code);
+asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code);
+asmlinkage void do_general_protection(struct pt_regs *regs, long error_code);
+asmlinkage void do_page_fault(struct pt_regs *regs, long error_code);
+asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code);
+asmlinkage void do_coprocessor_error(struct pt_regs *regs, long error_code);
+asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code);
+asmlinkage void do_machine_check(struct pt_regs *regs, long error_code);
+asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code);
+
+/* Work around genksyms's issue with over-qualification in decls. */
+
+typedef asmlinkage void __ipipe_exhandler(struct pt_regs *, long);
+
+typedef __ipipe_exhandler *__ipipe_exptr;
+
+static __ipipe_exptr __ipipe_std_extable[] = {
+
+	[ex_do_divide_error] = &do_divide_error,
+	[ex_do_overflow] = &do_overflow,
+	[ex_do_bounds] = &do_bounds,
+	[ex_do_invalid_op] = &do_invalid_op,
+	[ex_do_coprocessor_segment_overrun] = &do_coprocessor_segment_overrun,
+	[ex_do_invalid_TSS] = &do_invalid_TSS,
+	[ex_do_segment_not_present] = &do_segment_not_present,
+	[ex_do_stack_segment] = &do_stack_segment,
+	[ex_do_general_protection] = &do_general_protection,
+	[ex_do_page_fault] = &do_page_fault,
+	[ex_do_spurious_interrupt_bug] = &do_spurious_interrupt_bug,
+	[ex_do_coprocessor_error] = &do_coprocessor_error,
+	[ex_do_alignment_check] = &do_alignment_check,
+	[ex_do_machine_check] = &do_machine_check,
+	[ex_do_simd_coprocessor_error] = &do_simd_coprocessor_error
+};
+
+asmlinkage int __ipipe_handle_exception(int vector, struct pt_regs *regs, long error_code)
+{
+	if (!__ipipe_event_pipelined_p(vector) ||
+	    __ipipe_dispatch_event(vector,regs) == 0) {
+		__ipipe_exptr handler = __ipipe_std_extable[vector];
+		handler(regs,error_code);
+		__fixup_if(regs);
+		return 0;
+	}
+
+	return 1;
+}
+
+int FASTCALL(__ipipe_divert_exception(struct pt_regs *regs, int vector));
+
+int __ipipe_divert_exception(struct pt_regs *regs, int vector)
+{
+	if (__ipipe_event_pipelined_p(vector) &&
+	    __ipipe_dispatch_event(vector,regs) != 0)
+		return 1;
+
+	__fixup_if(regs);
+
+	return 0;
+}
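Both the syscall path above and the exception paths funnel through __ipipe_dispatch_event(), so a client domain such as Xenomai intercepts them with a single registration call. Below is a hedged sketch of such a hook, assuming the ipipe_catch_event()/ipipe_event_handler_t interface and the <linux/ipipe.h> header that this patch series introduces elsewhere; the domain, the 0x42 tag and every my_* name are hypothetical. Per the return convention documented in __ipipe_syscall_root(), a positive dispatch result keeps the syscall away from Linux.

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/ipipe.h>	/* assumed: added by this patch series */

static struct ipipe_domain my_domain;	/* hypothetical, registered elsewhere */

/* Runs on every syscall before Linux does, courtesy of
 * __ipipe_syscall_root() dispatching IPIPE_EVENT_SYSCALL. */
static int my_syscall_hook(unsigned event, struct ipipe_domain *from, void *data)
{
	struct pt_regs *regs = data;

	/* Claim syscalls carrying a (hypothetical) magic tag in EAX;
	 * returning nonzero tells the dispatcher not to pass them on. */
	if ((regs->orig_eax & 0xff000000) == 0x42000000) {
		regs->eax = -ENOSYS;	/* would be handled here instead */
		return 1;
	}

	return 0;	/* hand the syscall down to Linux as usual */
}

static int __init my_hook_init(void)
{
	ipipe_catch_event(&my_domain, IPIPE_EVENT_SYSCALL, &my_syscall_hook);
	return 0;
}
module_init(my_hook_init);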
+
+/* __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
+   be called with local hw interrupts disabled. */
+
+static inline void __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
+{
+	struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];
+
+	while (pos != &__ipipe_pipeline) {
+		struct ipipe_domain *next_domain =
+		    list_entry(pos, struct ipipe_domain, p_link);
+
+		if (test_bit
+		    (IPIPE_STALL_FLAG, &next_domain->cpudata[cpuid].status))
+			break;	/* Stalled stage -- do not go further. */
+
+		if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {
+
+			if (next_domain == this_domain)
+				__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+			else {
+				__ipipe_switch_to(this_domain, next_domain,
+						  cpuid);
+
+				ipipe_load_cpuid();	/* Processor might have changed. */
+
+				if (this_domain->cpudata[cpuid].
+				    irq_pending_hi != 0
+				    && !test_bit(IPIPE_STALL_FLAG,
+						 &this_domain->cpudata[cpuid].status))
+					__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+			}
+
+			break;
+		} else if (next_domain == this_domain)
+			break;
+
+		pos = next_domain->p_link.next;
+	}
+}
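The walk above only ever yields to a domain that has claimed an interrupt, which is what ipipe_virtualize_irq() does; the loop in __ipipe_enable_pipeline() earlier uses it to wire every IRQ to Linux's do_IRQ(). A real-time domain claims an IRQ ahead of Linux the same way. The sketch below mirrors that call shape; the rt_* names are hypothetical, and the handler prototype plus the IPIPE_HANDLE_MASK/IPIPE_PASS_MASK mode bits (matching the IPIPE_HANDLE_FLAG/IPIPE_PASS_FLAG bits tested by the generic IRQ handler that follows) are assumptions about this revision of the API.

#include <linux/ipipe.h>	/* assumed: added by this patch series */

static struct ipipe_domain rt_domain;	/* hypothetical, registered elsewhere */

/* Runs from the rt_domain stage of the pipeline, before Linux runs. */
static void rt_timer_handler(unsigned irq, void *cookie)
{
	/* ...hard real-time work first..., then optionally let the
	 * root domain see the tick too: */
	ipipe_propagate_irq(irq);
}

static int claim_timer_irq(void)
{
	/* Same call shape as the __ipipe_enable_pipeline() loop above. */
	return ipipe_virtualize_irq(&rt_domain,
				    0,	/* PIT timer IRQ on the i8259 */
				    (ipipe_irq_handler_t)&rt_timer_handler,
				    NULL,	/* no cookie */
				    NULL,	/* keep the default acknowledge */
				    IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
}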
+
+/* __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
+   interrupt protection log is maintained here for each domain.  Hw
+   interrupts are off on entry. */
+
+int __ipipe_handle_irq(struct pt_regs regs)
+{
+	struct ipipe_domain *this_domain;
+	unsigned irq = regs.orig_eax;
+	struct list_head *head, *pos;
+	ipipe_declare_cpuid;
+	int m_ack, s_ack;
+
+	if (regs.orig_eax < 0) {
+		irq &= 0xff;
+		m_ack = 0;
+	} else {
+		m_ack = 1;
+	}
+
+	ipipe_load_cpuid();
+
+	this_domain = ipipe_percpu_domain[cpuid];
+
+	s_ack = m_ack;
+
+	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
+		head = &this_domain->p_link;
+	else
+		head = __ipipe_pipeline.next;
+
+	/* Ack the interrupt. */
+
+	pos = head;
+
+	while (pos != &__ipipe_pipeline) {
+		struct ipipe_domain *next_domain =
+		    list_entry(pos, struct ipipe_domain, p_link);
+
+		/* For each domain handling the incoming IRQ, mark it as
+		   pending in its log. */
+
+		if (test_bit
+		    (IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
+			/* Domains that handle this IRQ are polled for
+			   acknowledging it by decreasing priority order. The
+			   interrupt must be made pending _first_ in the domain's
+			   status flags before the PIC is unlocked. */
+
+			next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
+			next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
+			__ipipe_set_irq_bit(next_domain, cpuid, irq);
+			ipipe_mark_irq_receipt(next_domain, irq, cpuid);
+
+			/* Always get the first master acknowledge available. Once
+			   we've got it, allow slave acknowledge handlers to run
+			   (until one of them stops us). */
+
+			if (!m_ack)
+				m_ack = next_domain->irqs[irq].acknowledge(irq);
+			else if (test_bit
+				 (IPIPE_SHARED_FLAG,
+				  &next_domain->irqs[irq].control) && !s_ack)
+				s_ack = next_domain->irqs[irq].acknowledge(irq);
+		}
+
+		/* If the domain does not want the IRQ to be passed down the
+		   interrupt pipe, exit the loop now. */
+
+		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+			break;
+
+		pos = next_domain->p_link.next;
+	}
+
+	if (irq == __ipipe_tick_irq) {
+		__ipipe_tick_regs[cpuid].eflags = regs.eflags;
+		__ipipe_tick_regs[cpuid].eip = regs.eip;
+		__ipipe_tick_regs[cpuid].xcs = regs.xcs;
+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+		/* Linux profiling code needs this. */
+		__ipipe_tick_regs[cpuid].ebp = regs.ebp;
+#endif	/* CONFIG_SMP && CONFIG_FRAME_POINTER */
+		if (__ipipe_pipeline_head_p(ipipe_root_domain) &&
+		    ipipe_root_domain->cpudata[cpuid].irq_counters[irq].pending_hits > 1)
+			/* Emulate a loss of clock ticks if Linux is
+			 * owning the time source. The drift will be
+			 * compensated by the timer support code.*/
+			ipipe_root_domain->cpudata[cpuid].irq_counters[irq].pending_hits = 1;
+	}
+
+	/* Now walk the pipeline, yielding control to the highest
+	   priority domain that has pending interrupt(s) or
+	   immediately to the current domain if the interrupt has been
+	   marked as 'sticky'. This search does not go beyond the
+	   current domain in the pipeline. */
+
+	__ipipe_walk_pipeline(head, cpuid);
+
+	ipipe_load_cpuid();
+
+	if (ipipe_percpu_domain[cpuid] != ipipe_root_domain ||
+	    test_bit(IPIPE_STALL_FLAG,
+		     &ipipe_root_domain->cpudata[cpuid].status))
+		return 0;
+
+#ifdef CONFIG_SMP
+	/* Prevent a spurious rescheduling from being triggered along
+	   the way out through ret_from_intr. */
+	if (regs.orig_eax < 0) {
+		__set_bit(IPIPE_STALL_FLAG, &ipipe_root_domain->cpudata[cpuid].status);
+		ipipe_mark_domain_stall(ipipe_root_domain, cpuid);
+	}
+#endif	/* CONFIG_SMP */
+
+	return 1;
+}
+
+extern unsigned long cpu_khz;
+EXPORT_SYMBOL(cpu_khz);
+#ifdef CONFIG_SMP
+extern struct tlb_state cpu_tlbstate[];
+EXPORT_SYMBOL_NOVERS(cpu_tlbstate);
+extern spinlock_t nmi_print_lock;
+EXPORT_SYMBOL(nmi_print_lock);
+#endif /* CONFIG_SMP */
+extern irq_desc_t irq_desc[];
+EXPORT_SYMBOL_NOVERS(irq_desc);
+EXPORT_SYMBOL_NOVERS(default_ldt);
+EXPORT_SYMBOL_NOVERS(__switch_to);
+extern void show_stack(unsigned long *);
+EXPORT_SYMBOL_NOVERS(show_stack);
+EXPORT_SYMBOL(init_tss);
+EXPORT_SYMBOL(set_ldt_desc);
+EXPORT_SYMBOL(do_exit);
+void (*nmi_watchdog_tick) (struct pt_regs * regs);
+EXPORT_SYMBOL(nmi_watchdog_tick);
diff -uNrp 2.4.31/arch/i386/kernel/irq.c 2.4.31-i386-ipipe/arch/i386/kernel/irq.c
--- 2.4.31/arch/i386/kernel/irq.c	2003-11-28 19:26:19.000000000 +0100
+++ 2.4.31-i386-ipipe/arch/i386/kernel/irq.c	2005-11-25 15:56:39.000000000 +0100
@@ -102,7 +102,7 @@ static void ack_none(unsigned int irq)
 	 * unexpected vectors occur) that might lock up the APIC
 	 * completely.
 	 */
-	ack_APIC_irq();
+	__ack_APIC_irq();
 #endif
 #endif
 }
@@ -444,6 +444,15 @@ int handle_IRQ_event(unsigned int irq, s
 
 	status = 1;	/* Force the "do bottom halves" bit */
 
+#ifdef CONFIG_IPIPE
+	/* If processing a timer tick, pass the original regs as
+	   collected during preemption and not our phony - always
+	   kernel-originated - frame, so that we don't wreck the
+	   profiling code. */
+	if (__ipipe_tick_irq == irq)
+		regs = __ipipe_tick_regs + smp_processor_id();
+#endif /* CONFIG_IPIPE */
+
 	if (!(action->flags & SA_INTERRUPT))
 		__sti();
 
@@ -595,7 +604,10 @@ asmlinkage unsigned int do_IRQ(struct pt
 
 	kstat.irqs[cpu][irq]++;
 	spin_lock(&desc->lock);
+#ifndef CONFIG_IPIPE
 	desc->handler->ack(irq);
+#endif /* CONFIG_IPIPE */
+
 	/*
 	   REPLAY is when Linux resends an IRQ that was dropped earlier
 	   WAITING is used by probe to mark irqs that are being tested
@@ -1213,3 +1225,27 @@ void init_irq_proc (void)
 		register_irq_proc(i);
 }
 
+#if defined(CONFIG_IPIPE) && defined(CONFIG_SMP)
+
+cpumask_t __ipipe_set_irq_affinity (unsigned irq, cpumask_t cpumask)
+{
+	cpumask_t oldmask = irq_affinity[irq];
+
+	if (irq_desc[irq].handler->set_affinity == NULL)
+		return CPU_MASK_NONE;
+
+	if (cpus_empty(cpumask))
+		return oldmask; /* Return mask value -- no change. */
+
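What this first page of the patch shows, end to end, is the machinery a secondary domain plugs into: interrupts are logged per domain, acknowledged once, then played back by pipeline priority. The missing piece from a user's perspective is creating that domain. In the classic Adeos interface this patch descends from, that looks roughly like the sketch below; ipipe_register_domain(), ipipe_init_attr() and the attribute fields follow that interface from memory, the entry prototype has changed across revisions, and all rt_* values are hypothetical.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ipipe.h>	/* assumed: added by this patch series */

static struct ipipe_domain rt_domain;

/* Entry hook, invoked once the domain is stacked into the pipeline;
 * the exact prototype is an assumption for this revision. */
static void rt_domain_entry(void)
{
	printk(KERN_INFO "rt-sketch: running ahead of Linux in the pipeline\n");
	/* Typically, ipipe_virtualize_irq() and ipipe_catch_event()
	 * registrations would go here. */
}

static int __init rt_domain_init(void)
{
	struct ipipe_domain_attr attr;

	ipipe_init_attr(&attr);
	attr.name = "rt-sketch";		/* hypothetical */
	attr.domid = 0x52545348;		/* hypothetical id */
	attr.priority = IPIPE_ROOT_PRIO + 100;	/* ahead of the root (Linux) domain */
	attr.entry = &rt_domain_entry;

	return ipipe_register_domain(&rt_domain, &attr);
}

static void __exit rt_domain_exit(void)
{
	ipipe_unregister_domain(&rt_domain);
}

module_init(rt_domain_init);
module_exit(rt_domain_exit);

Once registered this way, the domain sits in the pipeline that __ipipe_handle_irq() and __ipipe_walk_pipeline() traverse above, receiving its claimed IRQs and events before the root Linux domain does.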
