📄 rtai.c
        return(-EINVAL);
    }
    if(global_irq_handler[irq]) {
        return(-EBUSY);
    }
    flags = hard_lock_all();
    IRQ_DESC[irq].handler = &real_time_irq_type;
    global_irq_handler[irq] = handler;
    linux_end_irq[irq] = do_nothing_picfun;
    hard_unlock_all(flags);
    return(0);
} /* End function - rt_request_global_irq */

int rt_free_global_irq(unsigned int irq)
{
    unsigned long flags;

    if(irq >= NR_GLOBAL_IRQS || !global_irq_handler[irq]) {
        return(-EINVAL);
    }
    flags = hard_lock_all();
    IRQ_DESC[irq].handler = &trapped_linux_irq_type;
    global_irq_handler[irq] = 0;
    linux_end_irq[irq] = ic_end_irq[irq];
    hard_unlock_all(flags);
    return(0);
} /* End function - rt_free_global_irq */

int rt_request_linux_irq(unsigned int irq,
                         void (*linux_handler)(int irq, void *dev_id, struct pt_regs *regs),
                         char *linux_handler_id, void *dev_id)
{
    unsigned long flags, lflags;

    if(irq >= NR_GLOBAL_IRQS || !linux_handler) {
        return(-EINVAL);
    }
    lflags = linux_save_flags_and_cli();
    spin_lock_irqsave(&(IRQ_DESC[irq].lock), flags);
    if(!chained_to_linux[irq]++) {
        if(IRQ_DESC[irq].action) {
            irq_action_flags[irq] = IRQ_DESC[irq].action->flags;
            IRQ_DESC[irq].action->flags |= SA_SHIRQ;
        }
    }
    spin_unlock_irqrestore(&(IRQ_DESC[irq].lock), flags);
    request_irq(irq, linux_handler, SA_SHIRQ, linux_handler_id, dev_id);
    rtai_just_copy_back(lflags, hard_cpu_id());
    return(0);
} /* End function - rt_request_linux_irq */

int rt_free_linux_irq(unsigned int irq, void *dev_id)
{
    unsigned long flags, lflags;

    if(irq >= NR_GLOBAL_IRQS || !chained_to_linux[irq]) {
        return -EINVAL;
    }
    lflags = linux_save_flags_and_cli();
    free_irq(irq, dev_id);
    spin_lock_irqsave(&(IRQ_DESC[irq].lock), flags);
    if(!(--chained_to_linux[irq]) && IRQ_DESC[irq].action) {
        IRQ_DESC[irq].action->flags = irq_action_flags[irq];
    }
    spin_unlock_irqrestore(&(IRQ_DESC[irq].lock), flags);
    rtai_just_copy_back(lflags, hard_cpu_id());
    return(0);
}

void rt_pend_linux_irq(unsigned int irq)
{
    unsigned long flags;

    flags = hard_lock_all();
    processor[hard_cpu_id()].irqs[irq]++;
    set_bit(irq < 32 ? irq : irq - 32,
            irq < 32 ? &global.pending_irqs_l : &global.pending_irqs_h);
    hard_unlock_all(flags);
} /* End function - rt_pend_linux_irq */

int rt_request_srq(unsigned int label, void (*rtai_handler)(void),
                   long long (*user_handler)(unsigned int whatever))
{
    unsigned long flags;
    int srq;

    flags = rt_spin_lock_irqsave(&global.data_lock);
    if(!rtai_handler) {
        rt_spin_unlock_irqrestore(flags, &global.data_lock);
        return(-EINVAL);
    }
    for(srq = 2; srq < NR_GLOBAL_IRQS; srq++) {
        if(!(sysrq[srq].rtai_handler)) {
            sysrq[srq].rtai_handler = rtai_handler;
            sysrq[srq].label = label;
            if(user_handler) {
                sysrq[srq].user_handler = user_handler;
            }
            rt_spin_unlock_irqrestore(flags, &global.data_lock);
            return(srq);
        } /* End if - this srq slot is free. */
    } /* End for loop - locate a free srq slot. */
    rt_spin_unlock_irqrestore(flags, &global.data_lock);
    return(-EBUSY);
} /* End function - rt_request_srq */

int rt_free_srq(unsigned int srq)
{
    unsigned long flags;

    flags = rt_spin_lock_irqsave(&global.data_lock);
    if(srq < 2 || srq >= NR_GLOBAL_IRQS || !sysrq[srq].rtai_handler) {
        rt_spin_unlock_irqrestore(flags, &global.data_lock);
        return(-EINVAL);
    }
    sysrq[srq].rtai_handler = 0;
    sysrq[srq].user_handler = 0;
    sysrq[srq].label = 0;
    rt_spin_unlock_irqrestore(flags, &global.data_lock);
    return(0);
} /* End function - rt_free_srq */

void rt_pend_linux_srq(unsigned int srq)
{
    set_bit(srq, &global.pending_srqs);
} /* End function - rt_pend_linux_srq */
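/*
 * Usage sketch (not part of the original rtai.c). The functions above are the
 * external IRQ/SRQ interface of this file: rt_request_global_irq() steals an
 * interrupt line for a hard real-time handler, rt_request_srq() reserves one
 * of the srq slots (slots 0 and 1 are kept for internal use, hence the search
 * starting at 2), and rt_pend_linux_srq() marks that slot pending so that
 * linux_sti() runs its handler later in Linux context. EXAMPLE_IRQ,
 * example_rt_handler() and example_srq_handler() are illustrative names only;
 * since the top of rt_request_global_irq() is cut off in this listing, the
 * void (*)(void) handler type assumed here is exactly that, an assumption
 * (the dispatchers below invoke the stored pointer through a cast to
 * void (*)(int)). The sketch assumes the declarations above are in scope.
 */
#define EXAMPLE_IRQ 3       /* hypothetical board interrupt line */

static int example_srq;

/* Runs later, in Linux context, when linux_sti() drains global.pending_srqs. */
static void example_srq_handler(void)
{
    printk("rtai example: deferred work requested by the real-time handler\n");
}

/* Runs with interrupts hard-disabled, dispatched from dispatch_mips_interrupt(). */
static void example_rt_handler(void)
{
    /* ... time-critical work ... */
    rt_pend_linux_srq(example_srq);   /* hand the slow part back to Linux */
}

static int example_setup(void)
{
    int err;

    example_srq = rt_request_srq(0xbeef, example_srq_handler, 0);
    if(example_srq < 0) {
        return example_srq;           /* -EINVAL or -EBUSY, as above */
    }
    err = rt_request_global_irq(EXAMPLE_IRQ, example_rt_handler);
    if(err) {
        rt_free_srq(example_srq);
    }
    return err;
}

static void example_cleanup(void)
{
    rt_free_global_irq(EXAMPLE_IRQ);
    rt_free_srq(example_srq);
}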
/*
 * Linux cli/sti emulation routines.
 */
static void linux_cli(void)
{
    processor[hard_cpu_id()].intr_flag = 0;
}

static unsigned long linux_save_flags(void)
{
    unsigned long flags;

    flags = processor[hard_cpu_id()].intr_flag;
    return(flags);
}

static void linux_sti(void)
{
    unsigned long irq, cpuid;
    struct cpu_own_status *cpu;

    /*
     * The cpu_in_sti bit makes sure that this part of the code is not
     * reentered from a Linux interrupt handler which calls __sti().
     */
    if(!test_and_set_bit(cpuid = hard_cpu_id(), &global.cpu_in_sti)) {
        hard_sti();
        cpu = processor + cpuid;
        do {
            /*
             * First dispatch pending Linux interrupts.
             */
            if((irq = global.pending_irqs_l)) {
                irq = ffnz(irq);
                hard_cli();
                if(!(--processor[cpuid].irqs[irq])) {
                    clear_bit(irq, &global.pending_irqs_l);
                }
                cpu->intr_flag = 0; //possible race condition if hard_sti() is called before this
                hard_sti();
                linux_isr[irq](irq, rtai_regs[irq]);
            } /* End if - there are pending Linux interrupts (low word). */
            if((irq = global.pending_irqs_h)) {
                irq = ffnz(irq);
                hard_cli();
                if(!(--processor[cpuid].irqs[irq + 32])) {
                    clear_bit(irq, &global.pending_irqs_h);
                }
                cpu->intr_flag = 0; //possible race condition if hard_sti() is called before this
                hard_sti();
                irq += 32;
                linux_isr[irq](irq, rtai_regs[irq]);
            } /* End if - there are pending Linux interrupts (high word). */
            /*
             * Now dispatch pending srqs.
             */
            rt_spin_lock(&(global.data_lock));
            if((irq = global.pending_srqs & ~global.activ_srqs)) {
                irq = ffnz(irq);
                set_bit(irq, &global.activ_srqs);
                clear_bit(irq, &global.pending_srqs);
                rt_spin_unlock(&(global.data_lock));
                if(sysrq[irq].rtai_handler) {
                    sysrq[irq].rtai_handler();
                }
                clear_bit(irq, &global.activ_srqs);
            } else {
                rt_spin_unlock(&(global.data_lock));
            }
        } while(global.pending_irqs_l | global.pending_irqs_h | global.pending_srqs);
        /* End do loop - clear all pending Linux interrupts and srqs. */
        cpu->intr_flag = (1 << IFLAG) | (1 << cpuid);
        //clear_bit(cpuid, &global.cpu_in_sti);
        //The above line crashes on Steve's board for some reason after
        //a few trips through linux_sti, so we fix it with the
        //SMP-incompatible:
        global.cpu_in_sti = 0; //not SMP safe, but we aren't SMP anyway.
        return;
    } /* End if - cannot enter if already called by someone else. */
    processor[cpuid].intr_flag = (1 << IFLAG) | (1 << cpuid);
    return;
} /* End function - linux_sti */

static void linux_restore_flags(unsigned long flags)
{
    if(flags) {
        linux_sti();
    } else {
        processor[hard_cpu_id()].intr_flag = 0;
    }
} /* End function - linux_restore_flags */

unsigned long linux_save_flags_and_cli(void)
{
    return(rtai_xchg_u32((void *)(&(processor[hard_cpu_id()].intr_flag)), 0));
} /* End function - linux_save_flags_and_cli */

void rtai_just_copy_back(unsigned long flags, int cpuid)
{
    processor[cpuid].intr_flag = flags;
} /* End function - rtai_just_copy_back */
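/*
 * Usage sketch (not part of the original rtai.c). linux_save_flags_and_cli()
 * and rtai_just_copy_back() are the pair that rt_request_linux_irq() and
 * rt_free_linux_irq() above use to hold off the emulated interrupt state
 * while they touch Linux-side data. The function name below is illustrative
 * only, and the sketch assumes the declarations above are in scope.
 */
static void example_touch_linux_state(void)
{
    unsigned long lflags;

    /* Atomically clear this CPU's soft flag word and remember its old value;
     * with it cleared, the dispatchers below pend incoming Linux interrupts
     * instead of re-entering linux_sti() on this CPU. */
    lflags = linux_save_flags_and_cli();

    /* ... manipulate Linux-side state that must not be interrupted by the
     *     emulated (soft) interrupt dispatch ... */

    /* Put the soft flag word back exactly as it was, without running the
     * pending-interrupt loop in linux_sti(). */
    rtai_just_copy_back(lflags, hard_cpu_id());
}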
asmlinkage unsigned int dispatch_mips_timer_interrupt(int irq, struct pt_regs *regs)
{
    unsigned long lflags;
    volatile unsigned int *lflagsp;
    struct cpu_own_status *cpu;

    cpu = processor + hard_cpu_id();
    lflags = rtai_xchg_u32(lflagsp = &cpu->intr_flag, 0);
    rt_spin_lock(&(global.data_lock));
    if(global_irq_handler[irq]) {
        internal_ic_ack_irq[irq](irq);
        rt_spin_unlock(&global.ic_lock); //note: releases ic_lock, not data_lock, around the RT handler
        rtai_regs[irq] = regs;
        ((void (*)(int))global_irq_handler[irq])(irq);
        rt_spin_lock(&global.ic_lock);
    } else {
        ic_ack_irq[irq](irq);
        rtai_regs[irq] = regs;
        cpu->irqs[irq]++;
        set_bit(irq, &(global.pending_irqs_l)); //assumes the timer irq (7) lives in the low word
    }
    *lflagsp = lflags;
    if(global.used_by_linux & processor[hard_cpu_id()].intr_flag) {
        rt_spin_unlock(&(global.data_lock));
        linux_sti();
        return(1);
    } else {
        rt_spin_unlock(&(global.data_lock));
        return(0);
    }
} /* End function - dispatch_mips_timer_interrupt */

//static int lame=0;
static asmlinkage unsigned int dispatch_mips_interrupt(int irq, struct pt_regs *regs)
{
    unsigned long lflags;
    volatile unsigned int *lflagsp;
    struct cpu_own_status *cpu;

    cpu = processor + hard_cpu_id();
    lflags = rtai_xchg_u32(lflagsp = &cpu->intr_flag, 0);
    rt_spin_lock(&(global.data_lock));
    if(global_irq_handler[irq]) {
        rt_spin_unlock(&global.ic_lock); //note: releases ic_lock, not data_lock, around the RT handler
        rtai_regs[irq] = regs;
        ((void (*)(int))global_irq_handler[irq])(irq);
        rt_spin_lock(&global.ic_lock);
    } else {
        ic_ack_irq[irq](irq);
        rtai_regs[irq] = regs;
        cpu->irqs[irq]++;
        set_bit(irq < 32 ? irq : irq - 32,
                irq < 32 ? &(global.pending_irqs_l) : &(global.pending_irqs_h));
        *lflagsp = lflags;
        if(global.used_by_linux & processor[hard_cpu_id()].intr_flag) {
            rt_spin_unlock(&(global.data_lock));
            linux_sti();
            return(1);
        } else {
            rt_spin_unlock(&(global.data_lock));
            return(0);
        }
    }
    rt_spin_unlock(&(global.data_lock));
    *lflagsp = lflags;
    return 0;
} /* End function - dispatch_mips_interrupt */

/*
 * RTAI mount-unmount functions, to be called from the application to
 * initialise the real time application interface (i.e. this module) only
 * when it is required, so that it can stay asleep when it is not needed.
 */
#ifdef CONFIG_RTAI_MOUNT_ON_LOAD
#define rtai_mounted 1
#else
static int rtai_mounted;
#ifdef CONFIG_SMP
static spinlock_t rtai_mount_lock = SPIN_LOCK_UNLOCKED;
#endif
#endif
/*
 * Trivial, but we do things carefully; the blocking part is relatively short
 * and should cause no trouble in the transition phase.
 * The zeroings are strictly not required, since they mostly touch static data;
 * they are done explicitly for emphasis. Simple: just lock and grab everything
 * from Linux.
 */
void __rt_mount_rtai(void)
{
    static void rt_printk_sysreq_handler(void);
    int i;
    unsigned long flags;

    flags = hard_lock_all();
    rthal.disint = linux_cli;
    rthal.enint = linux_sti;
    rthal.rtai_active = 0xffffffff;
    rthal.getflags = linux_save_flags;
    rthal.setflags = linux_restore_flags;
    rthal.getflags_and_cli = linux_save_flags_and_cli;
    rthal.mips_timer_interrupt = dispatch_mips_timer_interrupt;
    rthal.mips_interrupt = dispatch_mips_interrupt;
    rthal.tsc.tsc = 0;
    hard_unlock_all(flags);
    for(i = 0; i < NR_IRQS; i++) {
        IRQ_DESC[i].handler = &trapped_linux_irq_type;
    }
    sysrq[1].rtai_handler = rt_printk_sysreq_handler;
    printk("\n***** RTAI NEWLY MOUNTED (MOUNT COUNT %d) ******\n\n", rtai_mounted);
} /* End function - __rt_mount_rtai */

/*
 * Simple: now we can just block the other processors and copy the original
 * data back to Linux.
 */
void __rt_umount_rtai(void)
{
    int i;
    unsigned long flags;

    flags = hard_lock_all();
    rthal = linux_rthal;
    for(i = 0; i < NR_IRQS; i++) {
        IRQ_DESC[i].handler = linux_irq_desc_handler[i];
    }
    hard_unlock_all(flags);
    printk("\n***** RTAI UNMOUNTED (MOUNT COUNT %d) ******\n\n", rtai_mounted);
} /* End function - __rt_umount_rtai */
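/*
 * Usage sketch (not part of the original rtai.c): how a kernel module might
 * pair the mount and unmount calls above. In the complete source these are
 * normally reached through counting wrappers that maintain rtai_mounted and,
 * on SMP, take rtai_mount_lock; those wrappers are not part of this listing,
 * so the direct calls and the module/function names here are illustrative
 * only.
 */
#include <linux/module.h>
#include <linux/init.h>

static int __init example_rtai_app_init(void)
{
    __rt_mount_rtai();      /* take over rthal: flag helpers, IRQ descriptors, dispatchers */
    /* ... install real-time handlers with rt_request_global_irq() etc. ... */
    return 0;
}

static void __exit example_rtai_app_exit(void)
{
    /* ... free any handlers installed at init time ... */
    __rt_umount_rtai();     /* restore linux_rthal and the original IRQ descriptors */
}

module_init(example_rtai_app_init);
module_exit(example_rtai_app_exit);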