📄 entry.s
字号:
/*
 * Blackfin (bfin) kernel entry code: system-call dispatch, context switch
 * (_resume), exception/interrupt return paths, and the hardware trace
 * buffer expansion handler.
 *
 * NOTE(review): this chunk begins INSIDE _system_call -- its ENTRY and the
 * initial register/frame save are above this view.  Only formatting and
 * comments have been changed below; every instruction is unchanged.
 */

	/* are we tracing syscalls? */
	/* thread_info lives at the base of the page-aligned kernel stack,
	 * so masking SP with ALIGN_PAGE_MASK yields its address. */
	r7 = sp;
	r6.l = lo(ALIGN_PAGE_MASK);
	r6.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r6;
	p2 = r7;
	r7 = [p2+TI_FLAGS];
	CC = BITTST(r7,TIF_SYSCALL_TRACE);
	if CC JUMP _sys_trace;

	/* Execute the appropriate system call */
	/* p0 holds the syscall number; index the table of 32-bit pointers. */
	p4 = p0;
	p5.l = _sys_call_table;
	p5.h = _sys_call_table;
	p5 = p5 + (p4 << 2);
	r0 = [sp + PT_R0];
	r1 = [sp + PT_R1];
	r2 = [sp + PT_R2];
	p5 = [p5];

	/* Args 4-6 go on the stack (r5/r4/r3 -- presumably still live from
	 * the syscall entry above this view; TODO confirm).  The extra
	 * SP += -12 keeps the C outgoing-argument area conventions. */
	[--sp] = r5;
	[--sp] = r4;
	[--sp] = r3;
	SP += -12;
	call (p5);
	SP += 24;			/* pop the 12-byte area + 3 pushed args */
	[sp + PT_R0] = r0;		/* store syscall return value for userspace */

.Lresume_userspace:
	/* Recompute thread_info from SP (see mask trick above). */
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;		/* thread_info->flags */
	p5 = r7;
.Lresume_userspace_1:
	/* Disable interrupts: pushing RETI to memory disables interrupt
	 * nesting on Blackfin; popping it back leaves them disabled until
	 * it is pushed again. */
	[--sp] = reti;
	reti = [sp++];

	/* Check for pending work (reschedule/signals) before returning. */
	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 = r7 & r4;

.Lsyscall_resched:
	cc = BITTST(r7, TIF_NEED_RESCHED);
	if !cc jump .Lsyscall_sigpending;

	/* Reenable interrupts (push/pop RETI re-enables nesting). */
	[--sp] = reti;
	r0 = [sp++];

	SP += -12;
	call _schedule;
	SP += 12;

	/* Re-check flags from the top -- more work may have arrived. */
	jump .Lresume_userspace_1;

.Lsyscall_sigpending:
	cc = BITTST(r7, TIF_RESTORE_SIGMASK);
	if cc jump .Lsyscall_do_signals;
	cc = BITTST(r7, TIF_SIGPENDING);
	if !cc jump .Lsyscall_really_exit;
.Lsyscall_do_signals:
	/* Reenable interrupts. */
	[--sp] = reti;
	r0 = [sp++];

	r0 = sp;			/* pt_regs pointer argument for do_signal */
	SP += -12;
	call _do_signal;
	SP += 12;

.Lsyscall_really_exit:
	/* Return address was stashed in pt_regs->reserved by the entry path
	 * -- TODO confirm against the (not visible) _system_call prologue. */
	r5 = [sp + PT_RESERVED];
	rets = r5;
	rts;
ENDPROC(_system_call)

/* Traced-syscall path: notify the tracer before and after the call.
 * Unlike the fast path, ALL six argument registers are reloaded from
 * pt_regs, since the tracer may have modified them. */
_sys_trace:
	call _syscall_trace;

	/* Execute the appropriate system call */
	p4 = [SP + PT_P0];		/* syscall number (possibly changed by tracer) */
	p5.l = _sys_call_table;
	p5.h = _sys_call_table;
	p5 = p5 + (p4 << 2);
	r0 = [sp + PT_R0];
	r1 = [sp + PT_R1];
	r2 = [sp + PT_R2];
	r3 = [sp + PT_R3];
	r4 = [sp + PT_R4];
	r5 = [sp + PT_R5];
	p5 = [p5];

	[--sp] = r5;
	[--sp] = r4;
	[--sp] = r3;
	SP += -12;
	call (p5);
	SP += 24;
	[sp + PT_R0] = r0;

	call _syscall_trace;		/* post-syscall trace stop */
	jump .Lresume_userspace;
ENDPROC(_sys_trace)

ENTRY(_resume)
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in r0, next (the new task) is in r1.
	 */
	p0 = r0;
	p1 = r1;
	[--sp] = rets;
	[--sp] = fp;
	[--sp] = (r7:4, p5:3);		/* callee-saved register block */

	/* save usp */
	p2 = usp;
	[p0+(TASK_THREAD+THREAD_USP)] = p2;

	/* save current kernel stack pointer */
	[p0+(TASK_THREAD+THREAD_KSP)] = sp;

	/* save program counter: prev will resume at _new_old_task below */
	r1.l = _new_old_task;
	r1.h = _new_old_task;
	[p0+(TASK_THREAD+THREAD_PC)] = r1;

	/* restore the kernel stack pointer */
	sp = [p1+(TASK_THREAD+THREAD_KSP)];

	/* restore user stack pointer */
	p0 = [p1+(TASK_THREAD+THREAD_USP)];
	usp = p0;

	/* restore pc */
	p0 = [p1+(TASK_THREAD+THREAD_PC)];
	jump (p0);

	/*
	 * Following code actually lands up in a new (old) task.
	 */
_new_old_task:
	(r7:4, p5:3) = [sp++];
	fp = [sp++];
	rets = [sp++];

	/*
	 * When we come out of resume, r0 carries "old" task, because we are
	 * in "new" task.
	 */
	rts;
ENDPROC(_resume)

ENTRY(_ret_from_exception)
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);

	csync;				/* required before reading a system MMR */
	r0 = [p2];
	[sp + PT_IPEND] = r0;

1:
	/* Mask off IPEND bits 0-2,4,5 (~0x37, high half cleared); if nothing
	 * else is pending we are returning to user mode -- presumably these
	 * are the non-interrupt status bits, TODO confirm vs. the CEC spec. */
	r1 = 0x37(Z);
	r2 = ~r1;
	r2.h = 0;
	r0 = r2 & r0;
	cc = r0 == 0;
	if !cc jump 4f;	/* if not return to user mode, get out */

	/* Make sure any pending system call or deferred exception
	 * return in ILAT for this process to get executed, otherwise
	 * in case context switch happens, system call of
	 * first process (i.e in ILAT) will be carried
	 * forward to the switched process
	 */
	p2.l = lo(ILAT);
	p2.h = hi(ILAT);
	r0 = [p2];
	r1 = (EVT_IVG14 | EVT_IVG15) (z);
	r0 = r0 & r1;
	cc = r0 == 0;
	if !cc jump 5f;

	/* Set the stack for the current process */
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;		/* thread_info->flags */
	p5 = r7;
	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 = r7 & r4;
	cc = r7 == 0;
	if cc jump 4f;		/* no pending work -- plain return */

	/* Redirect the IVG15 vector to the schedule/signal handler, then
	 * trigger it so the work runs at the lowest interrupt priority. */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _schedule_and_signal;
	p1.h = _schedule_and_signal;
	[p0] = p1;
	csync;
	raise 15;		/* raise evt15 to do signal or reschedule */
4:
	/* Clear SYSCFG bit 0 (SSSTEP single-step -- TODO confirm) before
	 * returning to user mode. */
	r0 = syscfg;
	bitclr(r0, 0);
	syscfg = r0;
5:
	rts;
ENDPROC(_ret_from_exception)

ENTRY(_return_from_int)
	/* If someone else already raised IRQ 15, do nothing.  */
	csync;
	p2.l = lo(ILAT);
	p2.h = hi(ILAT);
	r0 = [p2];
	cc = bittst (r0, EVT_IVG15_P);
	if cc jump 2f;

	/* if not return to user mode, get out */
	/* Mask IPEND with ~0x17, then use the classic power-of-two test
	 * (x & (x-1)) == 0 to check that exactly ONE interrupt level is
	 * pending, i.e. only the one we are currently servicing. */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	r1 = 0x17(Z);
	r2 = ~r1;
	r2.h = 0;
	r0 = r2 & r0;
	r1 = 1;
	r1 = r0 - r1;
	r2 = r0 & r1;
	cc = r2 == 0;
	if !cc jump 2f;

	/* Lower the interrupt level to 15.  */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _schedule_and_signal_from_int;
	p1.h = _schedule_and_signal_from_int;
	[p0] = p1;
	csync;
#if ANOMALY_05000281
	/* Workaround for anomaly 05000281: park RETI at a safe address so a
	 * speculative fetch through it cannot fault. */
	r0.l = _safe_speculative_execution;
	r0.h = _safe_speculative_execution;
	reti = r0;
#endif
	/* Unmask IVG15 (bit 15) plus the low reserved bits in IMASK --
	 * presumably 0x1f must stay set; TODO confirm vs. the CEC spec. */
	r0 = 0x801f (z);
	STI r0;
	raise 15;	/* raise evt15 to do signal or reschedule */
	rti;
2:
	rts;
ENDPROC(_return_from_int)

ENTRY(_lower_to_irq14)
#if ANOMALY_05000281
	/* Same anomaly 05000281 workaround as above. */
	r0.l = _safe_speculative_execution;
	r0.h = _safe_speculative_execution;
	reti = r0;
#endif
	r0 = 0x401f;		/* unmask IVG14 (bit 14) + low bits in IMASK */
	sti r0;
	raise 14;
	rti;
ENTRY(_evt14_softirq)
#ifdef CONFIG_DEBUG_HWERR
	r0 = 0x3f;		/* keep hardware-error interrupts enabled */
	sti r0;
#else
	cli r0;
#endif
	/* Pushing RETI re-enables interrupt nesting; the saved value itself
	 * is not needed, so just discard it from the stack. */
	[--sp] = RETI;
	SP += 4;
	rts;

_schedule_and_signal_from_int:
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;

	/* Set orig_p0 to -1 to indicate this isn't the end of a syscall.  */
	r0 = -1 (x);
	[sp + PT_ORIG_P0] = r0;

	p1 = rets;
	[sp + PT_RESERVED] = p1;

	/* Restore the saved interrupt mask. */
	p0.l = _irq_flags;
	p0.h = _irq_flags;
	r0 = [p0];
	sti r0;

	r0 = sp;			/* pt_regs pointer argument */
	sp += -12;
	call _finish_atomic_sections;
	sp += 12;
	jump.s .Lresume_userspace;

_schedule_and_signal:
	SAVE_CONTEXT_SYSCALL
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;
	/* Stash label 1 as the "return address" consumed by
	 * .Lsyscall_really_exit, so the resume path comes back here. */
	p0.l = 1f;
	p0.h = 1f;
	[sp + PT_RESERVED] = P0;
	call .Lresume_userspace;
1:
	RESTORE_CONTEXT
	rti;
ENDPROC(_lower_to_irq14)
/* NOTE(review): the ENDPROC above names _lower_to_irq14 although several
 * other entry points follow it -- kept unchanged from the original. */

/* Make sure when we start, that the circular buffer is initialized properly
 * R0 and P0 are call clobbered, so we can use them here.
 */
ENTRY(_init_exception_buff)
	r0 = 0;
	p0.h = _in_ptr_excause;
	p0.l = _in_ptr_excause;
	[p0] = r0;
	p0.h = _out_ptr_excause;
	p0.l = _out_ptr_excause;
	[p0] = r0;
	rts;
ENDPROC(_init_exception_buff)

/* We handle this 100% in exception space - to reduce overhead
 * Only potential problem is if the software buffer gets swapped out of the
 * CPLB table - then double fault. - so we don't let this happen in other places
 */
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
ENTRY(_ex_trace_buff_full)
	/* Drain the hardware trace buffer (TBUF) into the software ring
	 * buffer, preserving the loop registers we clobber. */
	[--sp] = P3;
	[--sp] = P2;
	[--sp] = LC0;
	[--sp] = LT0;
	[--sp] = LB0;
	P5.L = _trace_buff_offset;
	P5.H = _trace_buff_offset;
	P3 = [P5];		/* trace_buff_offset */
	P5.L = lo(TBUFSTAT);
	P5.H = hi(TBUFSTAT);
	R7 = [P5];
	R7 <<= 1;		/* double, since we need to read twice */
	LC0 = R7;		/* hardware-loop trip count */
	R7 <<= 2;		/* need to shift over again,
				 * to get the number of bytes */
	P5.L = lo(TBUF);
	P5.H = hi(TBUF);
	R6 = ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*1024) - 1;	/* ring-size mask */

	/* Advance and wrap the stored offset (R6 is a power-of-two mask). */
	P2 = R7;
	P3 = P3 + P2;
	R7 = P3;
	R7 = R7 & R6;
	P3 = R7;
	P2.L = _trace_buff_offset;
	P2.H = _trace_buff_offset;
	[P2] = P3;
	P2.L = _software_trace_buff;
	P2.H = _software_trace_buff;

	/* Zero-overhead hardware loop copying TBUF words into the ring,
	 * walking the offset backwards and wrapping with the mask. */
	LSETUP (.Lstart, .Lend) LC0;
.Lstart:
	R7 = [P5];	/* read TBUF */
	P4 = P3 + P2;
	[P4] = R7;
	P3 += -4;
	R7 = P3;
	R7 = R7 & R6;
.Lend:
	P3 = R7;

	LB0 = [sp++];
	LT0 = [sp++];
	LC0 = [sp++];
	P2 = [sp++];
	P3 = [sp++];
	jump _bfin_return_from_exception;
ENDPROC(_ex_trace_buff_full)

#if CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN == 4
.data
#else
.section .l1.data.B
#endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN */
ENTRY(_trace_buff_offset)
	.long 0;
ALIGN
ENTRY(_software_trace_buff)
	.rept ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*256);
	.long 0
	.endr
#endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND */

#if CONFIG_EARLY_PRINTK
__INIT
ENTRY(_early_trap)
	SAVE_ALL_SYS
	trace_buffer_stop(p0,r0);

	/* Turn caches off, to ensure we don't get double exceptions */
	P4.L = LO(IMEM_CONTROL);
	P4.H = HI(IMEM_CONTROL);

	R5 = [P4];		/* Control Register */
	BITCLR(R5,ENICPLB_P);
	CLI R1;
	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
	.align 8;	/* alignment presumably required by the MMR-write
			 * errata sequence -- TODO confirm anomaly number */
	[P4] = R5;
	SSYNC;

	P4.L = LO(DMEM_CONTROL);
	P4.H = HI(DMEM_CONTROL);
	R5 = [P4];
	BITCLR(R5,ENDCPLB_P);
	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
	.align 8;
	[P4] = R5;
	SSYNC;
	STI R1;

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	r1 = RETX;	/* faulting PC as second argument */

	SP += -12;
	call _early_trap_c;
	SP += 12;
ENDPROC(_early_trap)
__FINIT
#endif /* CONFIG_EARLY_PRINTK */

/*
 * Put these in the kernel data section - that should always be covered by
 * a CPLB. This is needed to ensure we don't get double fault conditions
 */
#ifdef CONFIG_SYSCALL_TAB_L1
.section .l1.data
#else
.data
#endif
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -