📄 hal.c
字号:
* deliver affinity as they call it. So be warned that such a name is kept
* mainly for compatibility reasons, as for such a kernel the reset operation
* does not necessarily implies a symmetric external interrupt delivery. */

/* Undo a previous rt_assign_irq_to_cpu(): restore the affinity mask saved in
   rtai_old_irq_affinity[irq], but only if nobody else changed the affinity in
   the meantime.  Returns -EINVAL when no saved affinity exists for irq. */
int rt_reset_irq_to_sym_mode (int irq)
{
    unsigned long oldmask, flags;

    rtai_local_irq_save(flags);
    spin_lock(&rtai_iset_lock);

    if (rtai_old_irq_affinity[irq] == 0) {
        /* Nothing was saved for this irq: it was never re-assigned. */
        spin_unlock(&rtai_iset_lock);
        rtai_local_irq_restore(flags);
        return -EINVAL;
    }

    oldmask = adeos_set_irq_affinity(irq,0); /* Query -- no change. */

    if (oldmask == rtai_set_irq_affinity[irq]) {
        /* Ok, proceed since nobody changed it in the meantime. */
        adeos_set_irq_affinity(irq,rtai_old_irq_affinity[irq]);
        rtai_old_irq_affinity[irq] = 0;
    }

    spin_unlock(&rtai_iset_lock);
    rtai_local_irq_restore(flags);

    return 0;
}

/* Install handler as the timer ISR on the local APIC timer of CPU cpuid.
   tick > 0 selects periodic mode (period = tick); tick == 0 selects oneshot
   mode.  When cpuid is not the calling CPU, the target mode/count is parked
   in rtai_timer_mode[cpuid] for that CPU to pick up (presumably from the
   rtai_critical_sync handler -- defined elsewhere; confirm against caller). */
void rt_request_timer_cpuid (void (*handler)(void), unsigned tick, int cpuid)
{
    unsigned long flags;
    int count;

    set_bit(RTAI_USE_APIC,&rtai_status);
    rtai_timers_sync_time = 0;

    /* Clear any stale per-CPU timer programming requests. */
    for (count = 0; count < RTAI_NR_CPUS; count++)
        rtai_timer_mode[count].mode = rtai_timer_mode[count].count = 0;

    flags = rtai_critical_enter(rtai_critical_sync);

    rtai_sync_level = 1;

    rt_times.tick_time = rtai_rdtsc();

    if (tick > 0) {
        /* Periodic mode. */
        rt_times.linux_tick = RTAI_APIC_ICOUNT;
        rt_times.tick_time = ((RTIME)rt_times.linux_tick)*(jiffies + 1);
        rt_times.intr_time = rt_times.tick_time + tick;
        rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
        rt_times.periodic_tick = tick;

        if (cpuid == adeos_processor_id())
            rtai_setup_periodic_apic(tick,RTAI_APIC_TIMER_VECTOR);
        else {
            /* Remote CPU: record the request and arm a dummy oneshot. */
            rtai_timer_mode[cpuid].mode = 1;
            rtai_timer_mode[cpuid].count = tick;
            rtai_setup_oneshot_apic(0,RTAI_APIC_TIMER_VECTOR);
        }
    } else {
        /* Oneshot mode. */
        rt_times.linux_tick = rtai_imuldiv(LATCH, rtai_tunables.cpu_freq,RTAI_FREQ_8254);
        rt_times.intr_time = rt_times.tick_time + rt_times.linux_tick;
        rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
        rt_times.periodic_tick = rt_times.linux_tick;

        if (cpuid == adeos_processor_id())
            rtai_setup_oneshot_apic(RTAI_APIC_ICOUNT,RTAI_APIC_TIMER_VECTOR);
        else {
            rtai_timer_mode[cpuid].mode = 0;
            rtai_timer_mode[cpuid].count = RTAI_APIC_ICOUNT;
            rtai_setup_oneshot_apic(0,RTAI_APIC_TIMER_VECTOR);
        }
    }

    rt_release_irq(RTAI_APIC_TIMER_IPI);
    rt_request_irq(RTAI_APIC_TIMER_IPI,(rt_irq_handler_t)handler,NULL);

    /* Relay the 8254 Linux timer to the local APIC timers. */
    rt_request_linux_irq(RTAI_TIMER_8254_IRQ,
                         &rtai_broadcast_to_local_timers,
                         "broadcast",
                         &rtai_broadcast_to_local_timers);

    rtai_critical_exit(flags);
}

#else /* !CONFIG_SMP */

/* Uniprocessor build: affinity management and per-CPU timers are no-ops. */

int rt_assign_irq_to_cpu (int irq, unsigned long cpus_mask) {

    return 0;
}

int rt_reset_irq_to_sym_mode (int irq) {

    return 0;
}

void rt_request_timer_cpuid (void (*handler)(void), unsigned tick, int cpuid) {
}

#endif /* CONFIG_SMP */

/**
 * Install a timer interrupt handler.
 *
 * rt_request_timer requests a timer of period tick ticks, and installs the
 * routine @a handler as a real time interrupt service routine for the timer.
 *
 * Set @a tick to 0 for oneshot mode (in oneshot mode it is not used).
 * If @a apic has a nonzero value the local APIC timer is used. Otherwise
 * timing is based on the 8254.
 *
 */
int rt_request_timer (void (*handler)(void), unsigned tick, int use_apic)
{
    unsigned long flags;

    TRACE_RTAI_TIMER(TRACE_RTAI_EV_TIMER_REQUEST,handler,tick);

    if (use_apic)
        set_bit(RTAI_USE_APIC,&rtai_status);
    else
        clear_bit(RTAI_USE_APIC,&rtai_status);

    flags = rtai_critical_enter(rtai_critical_sync);

    rt_times.tick_time = rtai_rdtsc();

    if (tick > 0) {
        /* Periodic mode. */
        rt_times.linux_tick = use_apic ? RTAI_APIC_ICOUNT : LATCH;
        rt_times.tick_time = ((RTIME)rt_times.linux_tick)*(jiffies + 1);
        rt_times.intr_time = rt_times.tick_time + tick;
        rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
        rt_times.periodic_tick = tick;

        if (use_apic) {
            rtai_sync_level = 2;
            rt_release_irq(RTAI_APIC_TIMER_IPI);
            rt_request_irq(RTAI_APIC_TIMER_IPI,(rt_irq_handler_t)handler,NULL);
            rtai_setup_periodic_apic(tick,RTAI_APIC_TIMER_VECTOR);
        } else {
            /* Program the 8254: mode 2 (rate generator), lobyte/hibyte. */
            outb(0x34,0x43);
            outb(tick & 0xff,0x40);
            outb(tick >> 8,0x40);

            rt_release_irq(RTAI_TIMER_8254_IRQ);

            if (rt_request_irq(RTAI_TIMER_8254_IRQ,(rt_irq_handler_t)handler,NULL) < 0) {
                rtai_critical_exit(flags);
                return -EINVAL;
            }
        }
    } else {
        /* Oneshot mode. */
        rt_times.linux_tick = rtai_imuldiv(LATCH,rtai_tunables.cpu_freq,RTAI_FREQ_8254);
        rt_times.intr_time = rt_times.tick_time + rt_times.linux_tick;
        rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
        rt_times.periodic_tick = rt_times.linux_tick;

        if (use_apic) {
            rtai_sync_level = 2;
            rt_release_irq(RTAI_APIC_TIMER_IPI);
            rt_request_irq(RTAI_APIC_TIMER_IPI,(rt_irq_handler_t)handler,NULL);
            rtai_setup_oneshot_apic(RTAI_APIC_ICOUNT,RTAI_APIC_TIMER_VECTOR);
        } else {
            /* Program the 8254: mode 0 (interrupt on terminal count). */
            outb(0x30,0x43);
            outb(LATCH & 0xff,0x40);
            outb(LATCH >> 8,0x40);

            rt_release_irq(RTAI_TIMER_8254_IRQ);

            if (rt_request_irq(RTAI_TIMER_8254_IRQ,(rt_irq_handler_t)handler,NULL) < 0) {
                rtai_critical_exit(flags);
                return -EINVAL;
            }
        }
    }

    rtai_critical_exit(flags);

    /* With the APIC timers in use, the 8254 irq is still needed by Linux:
       relay it to the local timers. */
    return use_apic ? rt_request_linux_irq(RTAI_TIMER_8254_IRQ,
                                           &rtai_broadcast_to_local_timers,
                                           "rtai_broadcast",
                                           &rtai_broadcast_to_local_timers) : 0;
}

/**
 * Uninstall a timer interrupt handler.
 *
 * rt_free_timer uninstalls a timer previously set by rt_request_timer().
 */
void rt_free_timer (void)
{
    unsigned long flags;

    TRACE_RTAI_TIMER(TRACE_RTAI_EV_TIMER_FREE,0,0);

    if (test_bit(RTAI_USE_APIC,&rtai_status))
        rt_free_linux_irq(RTAI_TIMER_8254_IRQ,
                          &rtai_broadcast_to_local_timers);

    flags = rtai_critical_enter(rtai_critical_sync);

    if (test_bit(RTAI_USE_APIC,&rtai_status)) {
        /* Hand the local APIC timer back to Linux's local timer tick. */
        rtai_sync_level = 3;
        rtai_setup_periodic_apic(RTAI_APIC_ICOUNT,LOCAL_TIMER_VECTOR);
        clear_bit(RTAI_USE_APIC,&rtai_status);
    } else {
        /* Restore the 8254 to mode 2 with the standard Linux LATCH. */
        outb(0x34,0x43);
        outb(LATCH & 0xff,0x40);
        outb(LATCH >> 8,0x40);
        rt_release_irq(RTAI_TIMER_8254_IRQ);
    }

    rtai_critical_exit(flags);
}

/* Atomically install a new global RTAI trap handler; returns the old one. */
RT_TRAP_HANDLER rt_set_trap_handler (RT_TRAP_HANDLER handler) {

    return (RT_TRAP_HANDLER)xchg(&rtai_trap_handler,handler);
}

/* Return a monotonically increasing timestamp maintained from 8254 channel 2:
   latch counter 2, accumulate the (wrapping) decrement into rtai_ts_8254. */
RTIME rd_8254_ts (void)
{
    unsigned long flags;
    int inc, c2;
    RTIME t;

    adeos_hw_local_irq_save(flags); /* local hw masking is required here. */

    outb(0xD8,0x43);            /* Latch counter 2 (read-back command). */
    c2 = inb(0x42);
    inc = rtai_last_8254_counter2 - (c2 |= (inb(0x42) << 8));
    rtai_last_8254_counter2 = c2;
    /* Counter counts down; a non-positive delta means it wrapped. */
    t = (rtai_ts_8254 += (inc > 0 ? inc : inc + RTAI_COUNTER_2_LATCH));

    adeos_hw_local_irq_restore(flags);

    return t;
}

/* Initialize the 8254-based TSC emulation: seed rtai_ts_8254 from counter 0
   and jiffies, then start counter 2 in mode 2 and gate it via port 0x61. */
void rt_setup_8254_tsc (void)
{
    unsigned long flags;
    int c;

    flags = rtai_critical_enter(NULL);

    outb_p(0x00,0x43);          /* Latch counter 0. */
    c = inb_p(0x40);
    c |= inb_p(0x40) << 8;
    outb_p(0xB4, 0x43);         /* Counter 2, mode 2, lobyte/hibyte. */
    outb_p(RTAI_COUNTER_2_LATCH & 0xff, 0x42);
    outb_p(RTAI_COUNTER_2_LATCH >> 8, 0x42);
    rtai_ts_8254 = c + ((RTIME)LATCH)*jiffies;
    rtai_last_8254_counter2 = 0;
    /* Enable the counter 2 gate (speaker off). */
    outb_p((inb_p(0x61) & 0xFD) | 1, 0x61);

    rtai_critical_exit(flags);
}

void rt_mount (void) {

    TRACE_RTAI_MOUNT();
}

void rt_umount (void) {

    TRACE_RTAI_UMOUNT();
}

/* Dispatch a hardware irq: run the registered real-time handler if any,
   otherwise propagate the irq down the Adeos pipeline to Linux. */
static void rtai_irq_trampoline (unsigned irq)
{
    TRACE_RTAI_GLOBAL_IRQ_ENTRY(irq,0);

    if (rtai_realtime_irq[irq].handler) {
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
        adeos_declare_cpuid;
#ifdef adeos_load_cpuid
        adeos_load_cpuid();
#endif /* adeos_load_cpuid */
        /* Notify the scheduler hook on entering the outermost ISR level. */
        if (rtai_isr_nesting[cpuid]++ == 0 && rtai_isr_hook)
            rtai_isr_hook(rtai_isr_nesting[cpuid]);
#endif /* CONFIG_RTAI_SCHED_ISR_LOCK */
        rtai_realtime_irq[irq].handler(irq,rtai_realtime_irq[irq].cookie);
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
        if (--rtai_isr_nesting[cpuid] == 0 && rtai_isr_hook)
            rtai_isr_hook(rtai_isr_nesting[cpuid]);
#endif /* CONFIG_RTAI_SCHED_ISR_LOCK */
    } else
        adeos_propagate_irq(irq);

    TRACE_RTAI_GLOBAL_IRQ_EXIT();
}

/* CPU fault/trap entry for the RTAI domain: handle the FPU "device not
   available" trap inline, give the installed trap handler a chance, and
   otherwise propagate the event down the pipeline to Linux. */
static void rtai_trap_fault (adevinfo_t *evinfo)
{
    adeos_declare_cpuid;

    /* Map x86 trap numbers to the POSIX signal reported to the handler. */
    static const int trap2sig[] = {
        SIGFPE,         //  0 - Divide error
        SIGTRAP,        //  1 - Debug
        SIGSEGV,        //  2 - NMI (but we ignore these)
        SIGTRAP,        //  3 - Software breakpoint
        SIGSEGV,        //  4 - Overflow
        SIGSEGV,        //  5 - Bounds
        SIGILL,         //  6 - Invalid opcode
        SIGSEGV,        //  7 - Device not available
        SIGSEGV,        //  8 - Double fault
        SIGFPE,         //  9 - Coprocessor segment overrun
        SIGSEGV,        // 10 - Invalid TSS
        SIGBUS,         // 11 - Segment not present
        SIGBUS,         // 12 - Stack segment
        SIGSEGV,        // 13 - General protection fault
        SIGSEGV,        // 14 - Page fault
        0,              // 15 - Spurious interrupt
        SIGFPE,         // 16 - Coprocessor error
        SIGBUS,         // 17 - Alignment check
        SIGSEGV,        // 18 - Reserved
        SIGFPE,         // 19 - XMM fault
        0,0,0,0,0,0,0,0,0,0,0,0
    };

    TRACE_RTAI_TRAP_ENTRY(evinfo->event,0);

    /* Notes:

    1) GPF needs to be propagated downstream whichever domain caused it.
    This is required so that we don't spuriously raise a fatal error when
    some fixup code is available to solve the error condition. For instance,
    Linux always attempts to reload the %gs segment register when switching
    a process in (__switch_to()), regardless of its value. It is then up to
    Linux's GPF handling code to search for a possible fixup whenever some
    exception occurs. In the particular case of the %gs register, such an
    exception could be raised for an exiting process if a preemption occurs
    inside a short time window, after the process's LDT has been dropped,
    but before the kernel lock is taken. The same goes for LXRT switching
    back a Linux thread in non-RT mode which happens to have been preempted
    inside do_exit() after the MM context has been dropped (thus the LDT
    too). In such a case, %gs could be reloaded with what used to be the
    TLS descriptor of the exiting thread, but unfortunately after the LDT
    itself has been dropped. Since the default LDT is only 5 entries long,
    any attempt to refer to an LDT-indexed descriptor above this value
    would cause a GPF.

    2) NMI is not pipelined by Adeos. */

#ifdef adeos_load_cpuid
    adeos_load_cpuid();
#endif /* adeos_load_cpuid */

    if (evinfo->domid == RTAI_DOMAIN_ID) {
        if (evinfo->event == 7) /* (FPU) Device not available. */
            {
            /* Ok, this one is a bit insane: some RTAI examples use the FPU
               in real-time mode while the TS bit is on from a previous
               Linux switch, so this trap is raised. We just simulate a
               math_state_restore() using the proper "current" value from
               the Linux domain here to please everyone without impacting
               the existing code. */

            struct task_struct *linux_task = rtai_get_current(cpuid);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#ifdef CONFIG_PREEMPT
            /* See comment in math_state_restore() in
               arch/i386/traps.c from a kpreempt-enabled kernel for more
               on this. */
            linux_task->preempt_count++;
#endif /* CONFIG_PREEMPT */

            if (linux_task->used_math)
                restore_task_fpenv(linux_task); /* Does clts(). */
            else {
                init_xfpu();    /* Does clts(). */
                linux_task->used_math = 1;
            }

            linux_task->flags |= PF_USEDFPU;

#ifdef CONFIG_PREEMPT
            linux_task->preempt_count--;
#endif /* CONFIG_PREEMPT */

            goto endtrap;
            }
#else /* >= 2.6.0 */
#ifdef CONFIG_PREEMPT
            linux_task->thread_info->preempt_count++;
#endif /* CONFIG_PREEMPT */

            if (linux_task->used_math)
                restore_task_fpenv(linux_task); /* Does clts(). */
            else {
                init_xfpu();    /* Does clts(). */
                linux_task->used_math = 1;
            }

            linux_task->thread_info->status |= TS_USEDFPU;

#ifdef CONFIG_PREEMPT
            linux_task->thread_info->preempt_count--;
#endif /* CONFIG_PREEMPT */

            goto endtrap;
            }
#endif /* < 2.6.0 */

        /* Let the installed trap handler (if any) swallow the fault for
           CPUs currently running in real-time or LXRT mode. */
        if (rtai_trap_handler != NULL &&
            (test_bit(cpuid,&rtai_cpu_realtime) || test_bit(cpuid,&rtai_cpu_lxrt)) &&
            rtai_trap_handler(evinfo->event,
                              trap2sig[evinfo->event],
                              (struct pt_regs *)evinfo->evdata,
                              NULL) != 0)
            goto endtrap;
    }

    adeos_propagate_event(evinfo);

endtrap:

    TRACE_RTAI_TRAP_EXIT();
}

/* Trampoline for the service-request virtual irq. */
static void rtai_ssrq_trampoline (unsigned virq)
{
    unsigned long pending;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -