📄 entry.s
字号:
/*
 * Machine check handler routines.
 *
 * s390 31-bit kernel entry code.  __LC_* symbols are offsets into the
 * per-cpu lowcore; BASED(x) forms a base-register-relative operand for
 * the literal pool set up by SAVE_ALL_BASE.  %r12 holds the address of
 * the old PSW of the interruption being handled.
 */
	.globl	mcck_int_handler
mcck_int_handler:
	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
	SAVE_ALL_BASE __LC_SAVE_AREA+32
	la	%r12,__LC_MCK_OLD_PSW
	tm	__LC_MCCK_CODE,0x80		# system damage?
	bo	BASED(mcck_int_main)		# yes -> rest of mcck code invalid
	tm	__LC_MCCK_CODE+5,0x02		# stored cpu timer value valid?
	bo	BASED(0f)
	spt	__LC_LAST_UPDATE_TIMER		# revalidate cpu timer
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	# cpu timer was invalid: reseed all accounting timestamps from the
	# last known-good value so subsequent vtime deltas stay consistent
	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
	mvc	__LC_EXIT_TIMER(8),__LC_LAST_UPDATE_TIMER
#endif
0:	tm	__LC_MCCK_CODE+2,0x09		# mwp + ia of old psw valid?
	bno	BASED(mcck_int_main)		# no -> skip cleanup critical
	tm	__LC_MCK_OLD_PSW+1,0x01		# test problem state bit
	bnz	BASED(mcck_int_main)		# from user -> load async stack
	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
	bhe	BASED(mcck_int_main)
	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
	bl	BASED(mcck_int_main)
	# interrupted inside the kernel critical section: call the cleanup
	# handler first so the saved register state is made consistent
	l	%r14,BASED(.Lcleanup_critical)
	basr	%r14,%r14
mcck_int_main:
	l	%r14,__LC_PANIC_STACK		# are we already on the panic stack?
	slr	%r14,%r15
	sra	%r14,PAGE_SHIFT
	be	BASED(0f)
	l	%r15,__LC_PANIC_STACK		# load panic stack
0:	CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	__LC_MCCK_CODE+2,0x08		# mwp of old psw valid?
	bno	BASED(mcck_no_vtime)		# no -> skip vtime accounting
	tm	__LC_MCK_OLD_PSW+1,0x01		# interrupting from user ?
	bz	BASED(mcck_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO		# load pointer to thread_info struct
	la	%r2,SP_PTREGS(%r15)		# load pt_regs
	l	%r1,BASED(.Ls390_mcck)
	basr	%r14,%r1			# call machine check handler
	tm	SP_PSW+1(%r15),0x01		# returning to user ?
	bno	BASED(mcck_return)
	l	%r1,__LC_KERNEL_STACK		# switch to kernel stack
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
	stosm	__SF_EMPTY(%r15),0x04		# turn dat on
	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
	bno	BASED(mcck_return)
	l	%r1,BASED(.Ls390_handle_mcck)
	basr	%r14,%r1			# call machine check handler
mcck_return:
	RESTORE_ALL __LC_RETURN_MCCK_PSW,0

#ifdef CONFIG_SMP
/*
 * Restart interruption handler, kick starter for additional CPUs
 */
	.globl	restart_int_handler
restart_int_handler:
	l	%r15,__LC_SAVE_AREA+60		# load ksp
	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA	# get new ctl regs
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
	lm	%r6,%r15,__SF_GPRS(%r15)	# load registers from clone
	stosm	__SF_EMPTY(%r15),0x04		# now we can turn dat on
	basr	%r14,0				# get current address ...
	l	%r14,restart_addr-.(%r14)	# ... for pc-relative literal load
	br	%r14				# branch to start_secondary
restart_addr:
	.long	start_secondary
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
	.globl	restart_int_handler
restart_int_handler:
	basr	%r1,0
restart_base:
	lpsw	restart_crash-restart_base(%r1)	# load crash PSW below
	.align	8
restart_crash:
	.long	0x000a0000,0x00000000
restart_go:
#endif

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	l	%r15,__LC_PANIC_STACK		# change to panic stack
	sl	%r15,BASED(.Lc_spsize)
	mvc	SP_PSW(8,%r15),0(%r12)		# move user PSW to stack
	stm	%r0,%r11,SP_R0(%r15)		# store gprs %r0-%r11 to kernel stack
	la	%r1,__LC_SAVE_AREA
	ch	%r12,BASED(.L0x020)		# old psw addr == __LC_SVC_OLD_PSW ?
	be	BASED(0f)
	ch	%r12,BASED(.L0x028)		# old psw addr == __LC_PGM_OLD_PSW ?
	be	BASED(0f)
	la	%r1,__LC_SAVE_AREA+16		# async entry used the second save area
0:	mvc	SP_R12(16,%r15),0(%r1)		# move %r12-%r15 to stack
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
	l	%r1,BASED(1f)			# branch to kernel_stack_overflow
	la	%r2,SP_PTREGS(%r15)		# load pt_regs
	br	%r1
1:	.long	kernel_stack_overflow
#endif

/*
 * Cleanup tables: [start, end) PSW-address pairs for the interruptible
 * critical sections.  cleanup_critical compares the interrupted PSW
 * address at 4(%r12) against each range and dispatches to the matching
 * fixup routine.  The 0x80000000 added to each symbol sets the 31-bit
 * addressing-mode bit carried in the PSW address word.
 */
cleanup_table_system_call:
	.long	system_call + 0x80000000, sysc_do_svc + 0x80000000
cleanup_table_sysc_return:
	.long	sysc_return + 0x80000000, sysc_leave + 0x80000000
cleanup_table_sysc_leave:
	.long	sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
cleanup_table_sysc_work_loop:
	.long	sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
cleanup_table_io_leave:
	.long	io_leave + 0x80000000, io_done + 0x80000000
cleanup_table_io_work_loop:
	.long	io_work_loop + 0x80000000, io_mcck_pending + 0x80000000

cleanup_critical:
	clc	4(4,%r12),BASED(cleanup_table_system_call)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_system_call+4)
	bl	BASED(cleanup_system_call)
0:	clc	4(4,%r12),BASED(cleanup_table_sysc_return)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_return+4)
	bl	BASED(cleanup_sysc_return)
0:	clc	4(4,%r12),BASED(cleanup_table_sysc_leave)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_leave+4)
	bl	BASED(cleanup_sysc_leave)
0:	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
	bl	BASED(cleanup_sysc_return)
0:	clc	4(4,%r12),BASED(cleanup_table_io_leave)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_io_leave+4)
	bl	BASED(cleanup_io_leave)
0:	clc	4(4,%r12),BASED(cleanup_table_io_work_loop)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_io_work_loop+4)
	bl	BASED(cleanup_io_return)
0:	br	%r14

/*
 * Redo the interrupted system-call entry so its effect is as if the
 * interruption had hit before system_call started.  Returns with %r12
 * pointing at a PSW (__LC_RETURN_PSW) to resume from.
 */
cleanup_system_call:
	mvc	__LC_RETURN_PSW(8),0(%r12)
	c	%r12,BASED(.Lmck_old_psw)	# entered via machine check?
	be	BASED(0f)
	la	%r12,__LC_SAVE_AREA+16
	b	BASED(1f)
0:	la	%r12,__LC_SAVE_AREA+32
1:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
	bh	BASED(0f)
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
	bhe	BASED(cleanup_vtime)
#endif
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
	bh	BASED(0f)
	mvc	__LC_SAVE_AREA(16),0(%r12)
0:	st	%r13,4(%r12)
	st	%r12,__LC_SAVE_AREA+48		# argh
	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	l	%r12,__LC_SAVE_AREA+48		# argh
	st	%r15,12(%r12)
	lh	%r7,0x8a			# NOTE(review): magic lowcore offset,
						# presumably the SVC interruption code — verify
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime:
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
	bhe	BASED(cleanup_stime)
	tm	SP_PSW+1(%r15),0x01		# interrupting from user ?
	bz	BASED(cleanup_novtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
	bh	BASED(cleanup_update)
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
cleanup_novtime:
#endif
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_system_call_insn:
	.long	sysc_saveall + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	system_call + 0x80000000
	.long	sysc_vtime + 0x80000000
	.long	sysc_stime + 0x80000000
	.long	sysc_update + 0x80000000
#endif

cleanup_sysc_return:
	mvc	__LC_RETURN_PSW(4),0(%r12)
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
	la	%r12,__LC_RETURN_PSW
	br	%r14

cleanup_sysc_leave:
	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn)
	be	BASED(2f)			# restore already done -> nothing to fix
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
	be	BASED(2f)
#endif
	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
	c	%r12,BASED(.Lmck_old_psw)	# entered via machine check?
	bne	BASED(0f)
	mvc	__LC_SAVE_AREA+32(16),SP_R12(%r15)
	b	BASED(1f)
0:	mvc	__LC_SAVE_AREA+16(16),SP_R12(%r15)
1:	lm	%r0,%r11,SP_R0(%r15)
	l	%r15,SP_R15(%r15)
2:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_sysc_leave_insn:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	sysc_leave + 14 + 0x80000000
#endif
	.long	sysc_leave + 10 + 0x80000000

cleanup_io_return:
	mvc	__LC_RETURN_PSW(4),0(%r12)
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
	la	%r12,__LC_RETURN_PSW
	br	%r14

cleanup_io_leave:
	clc	4(4,%r12),BASED(cleanup_io_leave_insn)
	be	BASED(2f)			# restore already done -> nothing to fix
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	clc	4(4,%r12),BASED(cleanup_io_leave_insn+4)
	be	BASED(2f)
#endif
	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
	c	%r12,BASED(.Lmck_old_psw)	# entered via machine check?
	bne	BASED(0f)
	mvc	__LC_SAVE_AREA+32(16),SP_R12(%r15)
	b	BASED(1f)
0:	mvc	__LC_SAVE_AREA+16(16),SP_R12(%r15)
1:	lm	%r0,%r11,SP_R0(%r15)
	l	%r15,SP_R15(%r15)
2:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_io_leave_insn:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	io_leave + 18 + 0x80000000
#endif
	.long	io_leave + 14 + 0x80000000

/*
 * Integer constants
 */
	.align	4
.Lc_spsize:	.long	SP_SIZE
.Lc_overhead:	.long	STACK_FRAME_OVERHEAD
.Lc_pactive:	.long	PREEMPT_ACTIVE
.Lnr_syscalls:	.long	NR_syscalls
.L0x018:	.short	0x018
.L0x020:	.short	0x020
.L0x028:	.short	0x028
.L0x030:	.short	0x030
.L0x038:	.short	0x038
.Lc_1:		.long	1

/*
 * Symbol constants (literal pool, addressed via BASED())
 */
.Ls390_mcck:	.long	s390_do_machine_check
.Ls390_handle_mcck: .long s390_handle_mcck
.Lmck_old_psw:	.long	__LC_MCK_OLD_PSW
.Ldo_IRQ:	.long	do_IRQ
.Ldo_extint:	.long	do_extint
.Ldo_signal:	.long	do_signal
.Lhandle_per:	.long	do_single_step
.Ljump_table:	.long	pgm_check_table
.Lschedule:	.long	schedule
.Lclone:	.long	sys_clone
.Lexecve:	.long	sys_execve
.Lfork:		.long	sys_fork
.Lrt_sigreturn:	.long	sys_rt_sigreturn
.Lrt_sigsuspend: .long	sys_rt_sigsuspend
.Lsigreturn:	.long	sys_sigreturn
.Lsigsuspend:	.long	sys_sigsuspend
.Lsigaltstack:	.long	sys_sigaltstack
.Ltrace:	.long	syscall_trace
.Lvfork:	.long	sys_vfork
.Lschedtail:	.long	schedule_tail

.Lcritical_start: .long	__critical_start + 0x80000000
.Lcritical_end:	.long	__critical_end + 0x80000000
.Lcleanup_critical: .long cleanup_critical

/*
 * System call table: each SYSCALL() entry in syscalls.S expands here to
 * a .long of its 31-bit (esa) entry point.
 */
#define SYSCALL(esa,esame,emu)	.long esa
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -