📄 entry.S
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
#endif
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        l       %r3,__LC_PGM_ILC        # load program interruption code
        la      %r8,0x7f
        nr      %r8,%r3
pgm_do_call:
        l       %r7,BASED(.Ljump_table)
        sll     %r8,2
        l       %r7,0(%r8,%r7)          # load address of handler routine
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        la      %r14,BASED(sysc_return)
        br      %r7                     # branch to interrupt-handler

#
# handle per exception
#
pgm_per:
        tm      __LC_PGM_OLD_PSW,0x40   # test if per event recording is on
        bnz     BASED(pgm_per_std)      # ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
        clc     __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
        be      BASED(pgm_svcper)
# no interesting special case, ignore PER event
        lm      %r12,%r15,__LC_SAVE_AREA
        lpsw    0x28

#
# Normal per exception
#
pgm_per_std:
        SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(pgm_no_vtime2)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
#endif
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        l       %r1,__TI_task(%r9)
        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
        mvc     __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
        oi      __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
        l       %r3,__LC_PGM_ILC        # load program interruption code
        la      %r8,0x7f
        nr      %r8,%r3                 # clear per-event-bit and ilc
        be      BASED(sysc_return)      # only per or per+check ?
        b       BASED(pgm_do_call)

#
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
        SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(pgm_no_vtime3)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime3:
#endif
        lh      %r7,0x8a                # get svc number from lowcore
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        l       %r1,__TI_task(%r9)
        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
        mvc     __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
        oi      __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        b       BASED(sysc_do_svc)

/*
 * IO interrupt handler routine
 */

        .globl io_int_handler
io_int_handler:
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
        stck    __LC_INT_CLOCK
        SAVE_ALL_BASE __LC_SAVE_AREA+16
        SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(io_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
#endif
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        l       %r1,BASED(.Ldo_IRQ)     # load address of do_IRQ
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        basr    %r14,%r1                # branch to standard irq handler

io_return:
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
#ifdef CONFIG_PREEMPT
        bno     BASED(io_preempt)       # no -> check for preemptive scheduling
#else
        bno     BASED(io_leave)         # no -> skip resched & signal
#endif
        tm      __TI_flags+3(%r9),_TIF_WORK_INT
        bnz     BASED(io_work)          # there is work to do (signals etc.)
io_leave:
        RESTORE_ALL 0

#ifdef CONFIG_PREEMPT
io_preempt:
        icm     %r0,15,__TI_precount(%r9)
        bnz     BASED(io_leave)
        l       %r1,SP_R15(%r15)
        s       %r1,BASED(.Lc_spsize)
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lr      %r15,%r1
io_resume_loop:
        tm      __TI_flags+3(%r9),_TIF_NEED_RESCHED
        bno     BASED(io_leave)
        mvc     __TI_precount(4,%r9),BASED(.Lc_pactive)
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        l       %r1,BASED(.Lschedule)
        basr    %r14,%r1                # call schedule
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        xc      __TI_precount(4,%r9),__TI_precount(%r9)
        b       BASED(io_resume_loop)
#endif

#
# switch to kernel stack, then check the TIF bits
#
io_work:
        l       %r1,__LC_KERNEL_STACK
        s       %r1,BASED(.Lc_spsize)
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lr      %r15,%r1
#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED
#
io_work_loop:
        tm      __TI_flags+3(%r9),_TIF_NEED_RESCHED
        bo      BASED(io_reschedule)
        tm      __TI_flags+3(%r9),_TIF_SIGPENDING
        bo      BASED(io_sigpending)
        b       BASED(io_leave)

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
        l       %r1,BASED(.Lschedule)
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        basr    %r14,%r1                # call scheduler
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        tm      __TI_flags+3(%r9),_TIF_WORK_INT
        bz      BASED(io_leave)         # there is no work to do
        b       BASED(io_work_loop)

#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        sr      %r3,%r3                 # clear *oldset
        l       %r1,BASED(.Ldo_signal)
        basr    %r14,%r1                # call do_signal
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        b       BASED(io_leave)         # out of here, do NOT recheck

/*
 * External interrupt handler routine
 */

        .globl  ext_int_handler
ext_int_handler:
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
        stck    __LC_INT_CLOCK
        SAVE_ALL_BASE __LC_SAVE_AREA+16
        SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(ext_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
#endif
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        lh      %r3,__LC_EXT_INT_CODE   # get interruption code
        l       %r1,BASED(.Ldo_extint)
        basr    %r14,%r1
        b       BASED(io_return)

/*
 * Machine check handler routines
 */

        .globl mcck_int_handler
mcck_int_handler:
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
        SAVE_ALL_BASE __LC_SAVE_AREA+32
        SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(mcck_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime:
#endif
        l       %r1,BASED(.Ls390_mcck)
        basr    %r14,%r1                # call machine check handler
mcck_return:
        RESTORE_ALL 0

#ifdef CONFIG_SMP
/*
 * Restart interruption handler, kick starter for additional CPUs
 */
        .globl restart_int_handler
restart_int_handler:
        l       %r15,__LC_SAVE_AREA+60  # load ksp
        lctl    %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
        lam     %a0,%a15,__LC_AREGS_SAVE_AREA
        lm      %r6,%r15,__SF_GPRS(%r15) # load registers from clone
        stosm   __SF_EMPTY(%r15),0x04   # now we can turn dat on
        basr    %r14,0
        l       %r14,restart_addr-.(%r14)
        br      %r14                    # branch to start_secondary
restart_addr:
        .long   start_secondary
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
        .globl restart_int_handler
restart_int_handler:
        basr    %r1,0
restart_base:
        lpsw    restart_crash-restart_base(%r1)
        .align  8
restart_crash:
        .long   0x000a0000,0x00000000
restart_go:
#endif

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
        l       %r15,__LC_PANIC_STACK   # change to panic stack
        sl      %r15,BASED(.Lc_spsize)
        mvc     SP_PSW(8,%r15),0(%r12)  # move user PSW to stack
        stm     %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
        la      %r1,__LC_SAVE_AREA
        ch      %r12,BASED(.L0x020)     # old psw addr == __LC_SVC_OLD_PSW ?
        be      BASED(0f)
        ch      %r12,BASED(.L0x028)     # old psw addr == __LC_PGM_OLD_PSW ?
        be      BASED(0f)
        la      %r1,__LC_SAVE_AREA+16
0:      mvc     SP_R12(16,%r15),0(%r1)  # move %r12-%r15 to stack
        xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
        l       %r1,BASED(1f)           # branch to kernel_stack_overflow
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        br      %r1
1:      .long   kernel_stack_overflow
#endif

cleanup_table_system_call:
        .long   system_call + 0x80000000, sysc_do_svc + 0x80000000
cleanup_table_sysc_return:
        .long   sysc_return + 0x80000000, sysc_leave + 0x80000000
cleanup_table_sysc_leave:
        .long   sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
cleanup_table_sysc_work_loop:
        .long   sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000

cleanup_critical:
        clc     4(4,%r12),BASED(cleanup_table_system_call)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_system_call+4)
        bl      BASED(cleanup_system_call)
0:
        clc     4(4,%r12),BASED(cleanup_table_sysc_return)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_sysc_return+4)
        bl      BASED(cleanup_sysc_return)
0:
        clc     4(4,%r12),BASED(cleanup_table_sysc_leave)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_sysc_leave+4)
        bl      BASED(cleanup_sysc_leave)
0:
        clc     4(4,%r12),BASED(cleanup_table_sysc_work_loop)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
        bl      BASED(cleanup_sysc_leave)
0:
        br      %r14

cleanup_system_call:
        mvc     __LC_RETURN_PSW(8),0(%r12)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
        bh      BASED(0f)
        mvc     __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:      clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
        bhe     BASED(cleanup_vtime)
#endif
        clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
        bh      BASED(0f)
        mvc     __LC_SAVE_AREA(16),__LC_SAVE_AREA+16
0:      st      %r13,__LC_SAVE_AREA+20
        SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
        st      %r15,__LC_SAVE_AREA+28
        lh      %r7,0x8a
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime:
        clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
        bhe     BASED(cleanup_stime)
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(cleanup_novtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
        clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
        bh      BASED(cleanup_update)
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
cleanup_novtime:
#endif
        mvc     __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
        la      %r12,__LC_RETURN_PSW
        br      %r14
cleanup_system_call_insn:
        .long   sysc_saveall + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .long   system_call + 0x80000000
        .long   sysc_vtime + 0x80000000
        .long   sysc_stime + 0x80000000
        .long   sysc_update + 0x80000000
#endif

cleanup_sysc_return:
        mvc     __LC_RETURN_PSW(4),0(%r12)
        mvc     __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
        la      %r12,__LC_RETURN_PSW
        br      %r14

cleanup_sysc_leave:
        clc     4(4,%r12),BASED(cleanup_sysc_leave_insn)
        be      BASED(0f)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        mvc     __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
        clc     4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
        be      BASED(0f)
#endif
        mvc     __LC_RETURN_PSW(8),SP_PSW(%r15)
        mvc     __LC_SAVE_AREA+16(16),SP_R12(%r15)
        lm      %r0,%r11,SP_R0(%r15)
        l       %r15,SP_R15(%r15)
0:      la      %r12,__LC_RETURN_PSW
        br      %r14
cleanup_sysc_leave_insn:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .long   sysc_leave + 14 + 0x80000000
#endif
        .long   sysc_leave + 10 + 0x80000000

/*
 * Integer constants
 */
        .align  4
.Lc_spsize:     .long   SP_SIZE
.Lc_overhead:   .long   STACK_FRAME_OVERHEAD
.Lc_pactive:    .long   PREEMPT_ACTIVE
.Lnr_syscalls:  .long   NR_syscalls
.L0x018:        .short  0x018
.L0x020:        .short  0x020
.L0x028:        .short  0x028
.L0x030:        .short  0x030
.L0x038:        .short  0x038
.Lc_1:          .long   1

/*
 * Symbol constants
 */
.Ls390_mcck:    .long   s390_do_machine_check
.Ldo_IRQ:       .long   do_IRQ
.Ldo_extint:    .long   do_extint
.Ldo_signal:    .long   do_signal
.Lhandle_per:   .long   do_single_step
.Ljump_table:   .long   pgm_check_table
.Lschedule:     .long   schedule
.Lclone:        .long   sys_clone
.Lexecve:       .long   sys_execve
.Lfork:         .long   sys_fork
.Lrt_sigreturn: .long   sys_rt_sigreturn
.Lrt_sigsuspend:
                .long   sys_rt_sigsuspend
.Lsigreturn:    .long   sys_sigreturn
.Lsigsuspend:   .long   sys_sigsuspend
.Lsigaltstack:  .long   sys_sigaltstack
.Ltrace:        .long   syscall_trace
.Lvfork:        .long   sys_vfork
.Lschedtail:    .long   schedule_tail

.Lcritical_start:
                .long   __critical_start + 0x80000000
.Lcritical_end:
                .long   __critical_end + 0x80000000
.Lcleanup_critical:
                .long   cleanup_critical

#define SYSCALL(esa,esame,emu)  .long esa
        .globl  sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL