
📄 adeos-ipipe-2.4.32-i386-1.1-03.patch

📁 xenomai: a very good Linux real-time patch
💻 PATCH
📖 Page 1 of 5
+	cpus_and(cpumask,cpumask,cpu_online_map);
+
+	if (cpus_empty(cpumask))
+		return CPU_MASK_NONE;	/* Error -- bad mask value or non-routable IRQ. */
+
+	irq_affinity[irq] = cpumask;
+	irq_desc[irq].handler->set_affinity(irq,cpumask);
+	return oldmask;
+}
+
+#endif /* CONFIG_IPIPE && CONFIG_SMP */
diff -uNrp 2.4.32/arch/i386/kernel/nmi.c 2.4.32-i386-ipipe/arch/i386/kernel/nmi.c
--- 2.4.32/arch/i386/kernel/nmi.c	2003-06-13 16:51:29.000000000 +0200
+++ 2.4.32-i386-ipipe/arch/i386/kernel/nmi.c	2006-01-07 09:39:44.000000000 +0100
@@ -29,6 +29,7 @@ unsigned int nmi_watchdog = NMI_NONE;
 static unsigned int nmi_hz = HZ;
 unsigned int nmi_perfctr_msr;	/* the MSR to reset in NMI handler */
 extern void show_registers(struct pt_regs *regs);
+static void default_nmi_watchdog_tick (struct pt_regs * regs);
 
 #define K7_EVNTSEL_ENABLE	(1 << 22)
 #define K7_EVNTSEL_INT		(1 << 20)
@@ -109,6 +110,7 @@ static int __init setup_nmi_watchdog(cha
 
 	if (nmi >= NMI_INVALID)
 		return 0;
+        nmi_watchdog_tick = default_nmi_watchdog_tick;
 	if (nmi == NMI_NONE)
 		nmi_watchdog = nmi;
 	/*
@@ -304,7 +306,7 @@ void __pminit setup_apic_nmi_watchdog (v
 	nmi_pm_init();
 }
 
-static spinlock_t nmi_print_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t nmi_print_lock = SPIN_LOCK_UNLOCKED;
 
 /*
  * the best way to detect whether a CPU has a 'hard lockup' problem
@@ -337,7 +339,7 @@ void touch_nmi_watchdog (void)
 		alert_counter[i] = 0;
 }
 
-void nmi_watchdog_tick (struct pt_regs * regs)
+static void default_nmi_watchdog_tick (struct pt_regs * regs)
 {
 
 	/*
diff -uNrp 2.4.32/arch/i386/kernel/process.c 2.4.32-i386-ipipe/arch/i386/kernel/process.c
--- 2.4.32/arch/i386/kernel/process.c	2005-11-16 20:12:54.000000000 +0100
+++ 2.4.32-i386-ipipe/arch/i386/kernel/process.c	2006-01-03 19:05:17.000000000 +0100
@@ -134,6 +134,7 @@ void cpu_idle (void)
 		void (*idle)(void) = pm_idle;
 		if (!idle)
 			idle = default_idle;
+		ipipe_suspend_domain();
 		while (!current->need_resched)
 			idle();
 		schedule();
diff -uNrp 2.4.32/arch/i386/kernel/smp.c 2.4.32-i386-ipipe/arch/i386/kernel/smp.c
--- 2.4.32/arch/i386/kernel/smp.c	2004-11-17 12:54:21.000000000 +0100
+++ 2.4.32-i386-ipipe/arch/i386/kernel/smp.c	2006-01-03 19:05:17.000000000 +0100
@@ -133,6 +133,9 @@ static inline void __send_IPI_shortcut(u
 	 * to the APIC.
 	 */
 	unsigned int cfg;
+	unsigned long flags;
+
+	local_irq_save_hw_cond(flags);
 
 	/*
 	 * Wait for idle.
@@ -148,6 +151,8 @@ static inline void __send_IPI_shortcut(u
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	apic_write_around(APIC_ICR, cfg);
+
+	local_irq_restore_hw_cond(flags);
 }
 
 void fastcall send_IPI_self(int vector)
@@ -160,9 +165,8 @@ static inline void send_IPI_mask_bitmask
 	unsigned long cfg;
 	unsigned long flags;
 
-	__save_flags(flags);
-	__cli();
 
+	local_irq_save_hw(flags);
 
 	/*
 	 * Wait for idle.
@@ -185,7 +189,7 @@ static inline void send_IPI_mask_bitmask
 	 */
 	apic_write_around(APIC_ICR, cfg);
 
-	__restore_flags(flags);
+	local_irq_restore_hw(flags);
 }
 
 static inline void send_IPI_mask_sequence(int mask, int vector)
@@ -199,8 +203,7 @@ static inline void send_IPI_mask_sequenc
 	 * should be modified to do 1 message per cluster ID - mbligh
 	 */
 
-	__save_flags(flags);
-	__cli();
+	local_irq_save_hw(flags);
 
 	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
 		query_mask = 1 << query_cpu;
@@ -231,7 +234,7 @@ static inline void send_IPI_mask_sequenc
 			apic_write_around(APIC_ICR, cfg);
 		}
 	}
-	__restore_flags(flags);
+	local_irq_restore_hw(flags);
 }
 
 static inline void send_IPI_mask(int mask, int vector)
@@ -360,8 +363,12 @@ static void inline leave_mm (unsigned lo
 
 asmlinkage void smp_invalidate_interrupt (void)
 {
-	unsigned long cpu = smp_processor_id();
+        unsigned long cpu, flags;
 
+	local_irq_save_hw_cond(flags);
+
+	cpu = smp_processor_id();
+
 	if (!test_bit(cpu, &flush_cpumask))
 		return;
 		/*
@@ -384,6 +391,8 @@ asmlinkage void smp_invalidate_interrupt
 	}
 	ack_APIC_irq();
 	clear_bit(cpu, &flush_cpumask);
+
+	local_irq_restore_hw_cond(flags);
 }
 
 static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
@@ -434,10 +443,15 @@ void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	unsigned long flags;
 
+	local_irq_save_hw_cond(flags);
+
 	local_flush_tlb();
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+	local_irq_restore_hw_cond(flags);
}
 
 void flush_tlb_mm (struct mm_struct * mm)
@@ -458,7 +472,10 @@ void flush_tlb_page(struct vm_area_struc
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	unsigned long flags;
 
+	local_irq_save_hw_cond(flags);
+
 	if (current->active_mm == mm) {
 		if(current->mm)
 			__flush_tlb_one(va);
@@ -466,6 +483,8 @@ void flush_tlb_page(struct vm_area_struc
 		
 	leave_mm(smp_processor_id());
 	}
 
+	local_irq_restore_hw_cond(flags);
+
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, va);
 }
@@ -629,3 +648,34 @@ asmlinkage void smp_call_function_interr
 	}
 }
 
+#if defined(CONFIG_IPIPE) && defined(CONFIG_SMP)
+
+int __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
+
+{
+	unsigned long flags;
+	ipipe_declare_cpuid;
+	int self;
+
+	ipipe_lock_cpu(flags);
+
+	self = cpu_isset(cpuid,cpumask);
+	cpu_clear(cpuid,cpumask);
+
+	if (!cpus_empty(cpumask))
+		send_IPI_mask(cpumask,ipi + FIRST_EXTERNAL_VECTOR);
+
+	if (self)
+		ipipe_trigger_irq(ipi);
+
+	ipipe_unlock_cpu(flags);
+
+	return 0;
+}
+
+void __ipipe_send_IPI_allbutself (int vector)
+{
+    send_IPI_allbutself(vector);
+}
+
+#endif /* CONFIG_IPIPE && CONFIG_SMP */
diff -uNrp 2.4.32/arch/i386/kernel/smpboot.c 2.4.32-i386-ipipe/arch/i386/kernel/smpboot.c
--- 2.4.32/arch/i386/kernel/smpboot.c	2004-04-14 15:05:25.000000000 +0200
+++ 2.4.32-i386-ipipe/arch/i386/kernel/smpboot.c	2006-01-03 19:05:17.000000000 +0100
@@ -776,6 +776,7 @@ static void __init do_boot_cpu (int apic
 	unsigned short nmi_high = 0, nmi_low = 0;
 
 	cpu = ++cpucount;
+	ipipe_note_apicid(apicid,cpu);
 	/*
 	 * We can't use kernel_thread since we must avoid to
 	 * reschedule the child.
@@ -1012,6 +1013,7 @@ void __init smp_boot_cpus(void)
 	else
 		boot_cpu_logical_apicid = logical_smp_processor_id();
 	map_cpu_to_boot_apicid(0, boot_cpu_apicid);
+	ipipe_note_apicid(boot_cpu_physical_apicid,0);
 
 	global_irq_holder = 0;
 	current->processor = 0;
diff -uNrp 2.4.32/arch/i386/kernel/time.c 2.4.32-i386-ipipe/arch/i386/kernel/time.c
--- 2.4.32/arch/i386/kernel/time.c	2004-02-18 14:36:30.000000000 +0100
+++ 2.4.32-i386-ipipe/arch/i386/kernel/time.c	2006-01-03 19:05:17.000000000 +0100
@@ -210,14 +210,15 @@ static unsigned long do_slow_gettimeoffs
 			/* the nutcase */
 
 			int i;
+			unsigned long flags;
+			spin_lock_irqsave_hw_cond(&i8259A_lock,flags);
 
-			spin_lock(&i8259A_lock);
 			/*
 			 * This is tricky when I/O APICs are used;
 			 * see do_timer_interrupt().
 			 */
 			i = inb(0x20);
-			spin_unlock(&i8259A_lock);
+			spin_unlock_irqrestore_hw_cond(&i8259A_lock,flags);
 
 			/* assumption about timer being IRQ0 */
 			if (i & 0x01) {
@@ -577,14 +578,15 @@ static inline void do_timer_interrupt(in
 		 * This will also deassert NMI lines for the watchdog if run
 		 * on an 82489DX-based system.
 		 */
-		spin_lock(&i8259A_lock);
+		unsigned long flags;
+		spin_lock_irqsave_hw_cond(&i8259A_lock,flags);
 		outb(0x0c, 0x20);
 		/* Ack the IRQ; AEOI will end it automatically. */
 		inb(0x20);
-		spin_unlock(&i8259A_lock);
+		spin_unlock_irqrestore_hw_cond(&i8259A_lock,flags);
 	}
 #endif
-
+	
 #ifdef CONFIG_VISWS
 	/* Clear the interrupt */
 	co_cpu_write(CO_CPU_STAT,co_cpu_read(CO_CPU_STAT) & ~CO_STAT_TIMEINTR);
@@ -644,8 +646,10 @@ static int use_tsc;
  */
 static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
+#ifndef CONFIG_IPIPE
 	int count;
-
+#endif /* CONFIG_IPIPE */
+	
 	/*
 	 * Here we are in the timer irq handler. We just have irqs locally
 	 * disabled but we don't know if the timer_bh is running on the other
@@ -674,6 +678,7 @@ static void timer_interrupt(int irq, voi
 
 		rdtscl(last_tsc_low);
 
+#ifndef CONFIG_IPIPE
 		spin_lock(&i8253_lock);
 		outb_p(0x00, 0x43);     /* latch the count ASAP */
 
@@ -705,6 +710,8 @@ static void timer_interrupt(int irq, voi
 
 		count = ((LATCH-1) - count) * TICK_SIZE;
 		delay_at_last_interrupt = (count + LATCH/2) / LATCH;
+
+#endif /* CONFIG_IPIPE */
 	}
 
 	do_timer_interrupt(irq, NULL, regs);
diff -uNrp 2.4.32/arch/i386/kernel/traps.c 2.4.32-i386-ipipe/arch/i386/kernel/traps.c
--- 2.4.32/arch/i386/kernel/traps.c	2005-11-16 20:12:54.000000000 +0100
+++ 2.4.32-i386-ipipe/arch/i386/kernel/traps.c	2006-01-03 19:05:17.000000000 +0100
@@ -216,6 +216,11 @@ void show_registers(struct pt_regs *regs
 		regs->esi, regs->edi, regs->ebp, esp);
 	printk("ds: %04x   es: %04x   ss: %04x\n",
 		regs->xds & 0xffff, regs->xes & 0xffff, ss);
+#ifdef CONFIG_IPIPE
+	if (ipipe_current_domain != ipipe_root_domain)
+	    printk("I-pipe domain %s",ipipe_current_domain->name);
+	else
+#endif /* CONFIG_IPIPE */
 	printk("Process %s (pid: %d, stackpage=%08lx)",
 		current->comm, current->pid, 4096+(unsigned long)current);
 	/*
@@ -753,6 +758,10 @@ asmlinkage void do_spurious_interrupt_bu
  */
 asmlinkage void math_state_restore(struct pt_regs regs)
 {
+	unsigned long flags;
+
+	local_irq_save_hw_cond(flags);
+
 	__asm__ __volatile__("clts");		/* Allow maths ops (or we recurse) */
 
 	if (current->used_math) {
@@ -761,6 +770,8 @@ asmlinkage void math_state_restore(struc
 		init_fpu();
 	}
 	current->flags |= PF_USEDFPU;	/* So we fnsave on switch_to() */
+
+	local_irq_restore_hw_cond(flags);
 }
 
 #ifndef CONFIG_MATH_EMULATION
diff -uNrp 2.4.32/arch/i386/mm/fault.c 2.4.32-i386-ipipe/arch/i386/mm/fault.c
--- 2.4.32/arch/i386/mm/fault.c	2004-08-08 01:26:04.000000000 +0200
+++ 2.4.32-i386-ipipe/arch/i386/mm/fault.c	2006-01-03 19:05:17.000000000 +0100
@@ -151,6 +151,8 @@ asmlinkage void do_page_fault(struct pt_
 	/* get the address */
 	__asm__("movl %%cr2,%0":"=r" (address));
 
+	local_irq_enable_hw_cond();
+
 	/* It's safe to allow irq's after cr2 has been saved */
 	if (regs->eflags & X86_EFLAGS_IF)
 		local_irq_enable();
diff -uNrp 2.4.32/arch/i386/mm/ioremap.c 2.4.32-i386-ipipe/arch/i386/mm/ioremap.c
--- 2.4.32/arch/i386/mm/ioremap.c	2003-11-28 19:26:19.000000000 +0100
+++ 2.4.32-i386-ipipe/arch/i386/mm/ioremap.c	2006-01-03 19:05:17.000000000 +0100
@@ -81,6 +81,7 @@ static int remap_area_pages(unsigned lon
 		if (remap_area_pmd(pmd, address, end - address,
 					 phys_addr + address, flags))
 			break;
+		set_pgdir(address, *dir);
 		error = 0;
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
diff -uNrp 2.4.32/include/asm-i386/apic.h 2.4.32-i386-ipipe/include/asm-i386/apic.h
--- 2.4.32/include/asm-i386/apic.h	2004-11-17 12:54:22.000000000 +0100
+++ 2.4.32-i386-ipipe/include/asm-i386/apic.h	2006-01-07 09:34:32.000000000 +0100
@@ -50,7 +50,13 @@ static __inline__ void apic_wait_icr_idl
 # define apic_write_around(x,y) apic_write_atomic((x),(y))
 #endif
 
+#ifdef CONFIG_IPIPE
+#define ack_APIC_irq() do { } while(0)
+static inline void __ack_APIC_irq(void)
+#else /* !CONFIG_IPIPE */
+#define __ack_APIC_irq() ack_APIC_irq()
 static inline void ack_APIC_irq(void)
+#endif /* CONFIG_IPIPE */
 {
 	/*
 	 * ack_APIC_irq() actually gets compiled as a single instruction:
@@ -77,7 +83,7 @@ extern void init_apic_mappings (void);
 extern void smp_local_timer_interrupt (struct pt_regs * regs);
 extern void setup_APIC_clocks (void);
 extern void setup_apic_nmi_watchdog (void);
-extern void nmi_watchdog_tick (struct pt_regs * regs);
+extern void (*nmi_watchdog_tick) (struct pt_regs * regs);
 extern int APIC_init_uniprocessor (void);
 extern void disable_APIC_timer(void);
 extern void enable_APIC_timer(void);
diff -uNrp 2.4.32/include/asm-i386/hw_irq.h 2.4.32-i386-ipipe/include/asm-i386/hw_irq.h
--- 2.4.32/include/asm-i386/hw_irq.h	2003-08-25 13:44:43.000000000 +0200
+++ 2.4.32-i386-ipipe/include/asm-i386/hw_irq.h	2006-01-07 09:34:32.000000000 +0100
@@ -123,6 +123,37 @@ extern char _stext, _etext;
 
 	/* there is a second layer of macro just to get the symbolic
 	   name for the vector evaluated. This change is for RTLinux */
+
+#ifdef CONFIG_IPIPE
+
+#define BUILD_SMP_INTERRUPT(x,v)	XBUILD_SMP_INTERRUPT(x,v)
+#define BUILD_SMP_TIMER_INTERRUPT(x,v)	XBUILD_SMP_INTERRUPT(x,v)
+#define XBUILD_SMP_INTERRUPT(x,v)\
+asmlinkage void x(void); \
+asmlinkage void call_##x(void); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(x) ":\n\t" \
+	"pushl $"#v"-288\n\t" /* nr - (256 + FIRST_EXTERNAL_VECTOR) */ \
+	SAVE_ALL \
+        "call "SYMBOL_NAME_STR(__ipipe_handle_irq)"\n\t"   \
+	"testl %eax,%eax\n\t"	\
+	"jnz ret_from_intr\n\t" \
+        "jmp restore_raw\n");
+
+#define BUILD_COMMON_IRQ() \
+asmlinkage void call_do_IRQ(void); \
+__asm__( \
+	"\n" __ALIGN_STR"\n" \
+	"common_interrupt:\n\t" \
+	SAVE_ALL \
+        "call "SYMBOL_NAME_STR(__ipipe_handle_irq)"\n\t"   \
+	"testl %eax,%eax\n\t"	\
+	"jnz ret_from_intr\n\t" \
+        "jmp restore_raw\n");
+
+#else /* !CONFIG_IPIPE */
+
 #define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x
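
The recurring change in the hunks above is to bracket low-level paths (IPI emission, TLB shootdowns, NMI/timer handling, FPU state restore) with local_irq_save_hw()/local_irq_restore_hw() and their _cond variants instead of __save_flags()/__cli(): under the I-pipe, the ordinary local_irq_*() operations act on Linux's virtualized interrupt mask, so code that must really keep the CPU from taking hardware interrupts needs a "hard" variant. The macros below are only a minimal illustrative sketch of that idea, assuming i386 and kernel-2.4 conventions; the actual definitions belong to hunks of this patch that are not shown on this page.

/*
 * Illustrative sketch only -- not the macro bodies shipped in this patch.
 * With CONFIG_IPIPE, code that must genuinely mask hardware interrupts
 * uses "hard" helpers that act on EFLAGS.IF, because the ordinary
 * local_irq_*() operations are virtualized per domain.  Without
 * CONFIG_IPIPE, the _cond variants are assumed to collapse to no-ops,
 * since the caller is then already protected by the usual masking.
 */
#ifdef CONFIG_IPIPE
#define local_irq_save_hw(flags) \
	__asm__ __volatile__("pushfl ; popl %0 ; cli" : "=g" (flags) : : "memory")
#define local_irq_restore_hw(flags) \
	__asm__ __volatile__("pushl %0 ; popfl" : : "g" (flags) : "memory", "cc")
#define local_irq_save_hw_cond(flags)		local_irq_save_hw(flags)
#define local_irq_restore_hw_cond(flags)	local_irq_restore_hw(flags)
#else /* !CONFIG_IPIPE */
#define local_irq_save_hw(flags)		local_irq_save(flags)
#define local_irq_restore_hw(flags)		local_irq_restore(flags)
#define local_irq_save_hw_cond(flags)		do { (void)(flags); } while (0)
#define local_irq_restore_hw_cond(flags)	do { (void)(flags); } while (0)
#endif /* CONFIG_IPIPE */

Read this way, a call such as local_irq_save_hw_cond(flags) in flush_tlb_current_task() costs nothing on a vanilla build but pins the hardware interrupt state on an I-pipe build, which is why the patch can leave the surrounding logic untouched.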
