/*
 *  arch/s390/kernel/entry64.S
 *    S390 low-level entry points.
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Hartmut Penner (hp@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>

/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 */
SP_PTREGS    = STACK_FRAME_OVERHEAD
SP_ARGS      = STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW       = STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0        = STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1        = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R2        = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R3        = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R4        = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R5        = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R6        = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R7        = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R8        = STACK_FRAME_OVERHEAD + __PT_GPRS + 64
SP_R9        = STACK_FRAME_OVERHEAD + __PT_GPRS + 72
SP_R10       = STACK_FRAME_OVERHEAD + __PT_GPRS + 80
SP_R11       = STACK_FRAME_OVERHEAD + __PT_GPRS + 88
SP_R12       = STACK_FRAME_OVERHEAD + __PT_GPRS + 96
SP_R13       = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
SP_R14       = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
SP_R15       = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
SP_ORIG_R2   = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC       = STACK_FRAME_OVERHEAD + __PT_ILC
SP_TRAP      = STACK_FRAME_OVERHEAD + __PT_TRAP
SP_SIZE      = STACK_FRAME_OVERHEAD + __PT_SIZE

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING)

#define BASED(name) name-system_call(%r13)

#ifdef CONFIG_TRACE_IRQFLAGS
        .macro  TRACE_IRQS_ON
        brasl   %r14,trace_hardirqs_on
        .endm

        .macro  TRACE_IRQS_OFF
        brasl   %r14,trace_hardirqs_off
        .endm

        .macro  TRACE_IRQS_CHECK
        tm      SP_PSW(%r15),0x03       # irqs enabled?
        jz      0f
        brasl   %r14,trace_hardirqs_on
        j       1f
0:      brasl   %r14,trace_hardirqs_off
1:
        .endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
#define TRACE_IRQS_CHECK
#endif

#ifdef CONFIG_LOCKDEP
        .macro  LOCKDEP_SYS_EXIT
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
        jz      0f
        brasl   %r14,lockdep_sys_exit
0:
        .endm
#else
#define LOCKDEP_SYS_EXIT
#endif

        .macro  STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        stpt    \lc_offset
#endif
        .endm

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .macro  UPDATE_VTIME lc_from,lc_to,lc_sum
        lg      %r10,\lc_from
        slg     %r10,\lc_to
        alg     %r10,\lc_sum
        stg     %r10,\lc_sum
        .endm
#endif

/*
 * Register usage in interrupt handlers:
 *    R9  - pointer to current task structure
 *    R13 - pointer to literal pool
 *    R14 - return register for function calls
 *    R15 - kernel stack pointer
 */

        .macro  SAVE_ALL_BASE savearea
        stmg    %r12,%r15,\savearea
        larl    %r13,system_call
        .endm

        .macro  SAVE_ALL_SVC psworg,savearea
        la      %r12,\psworg
        lg      %r15,__LC_KERNEL_STACK  # problem state -> load ksp
        .endm

        .macro  SAVE_ALL_SYNC psworg,savearea
        la      %r12,\psworg
        tm      \psworg+1,0x01          # test problem state bit
        jz      2f                      # skip stack setup save
        lg      %r15,__LC_KERNEL_STACK  # problem state -> load ksp
#ifdef CONFIG_CHECK_STACK
        j       3f
2:      tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
        jz      stack_overflow
3:
#endif
2:
        .endm

        .macro  SAVE_ALL_ASYNC psworg,savearea
        la      %r12,\psworg
        tm      \psworg+1,0x01          # test problem state bit
        jnz     1f                      # from user -> load kernel stack
        clc     \psworg+8(8),BASED(.Lcritical_end)
        jhe     0f
        clc     \psworg+8(8),BASED(.Lcritical_start)
        jl      0f
        brasl   %r14,cleanup_critical
        tm      1(%r12),0x01            # retest problem state after cleanup
        jnz     1f
0:      lg      %r14,__LC_ASYNC_STACK   # are we already on the async. stack ?
        slgr    %r14,%r15
        srag    %r14,%r14,STACK_SHIFT
        jz      2f
1:      lg      %r15,__LC_ASYNC_STACK   # load async stack
#ifdef CONFIG_CHECK_STACK
        j       3f
2:      tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
        jz      stack_overflow
3:
#endif
2:
        .endm

        .macro  CREATE_STACK_FRAME psworg,savearea
        aghi    %r15,-SP_SIZE           # make room for registers & psw
        mvc     SP_PSW(16,%r15),0(%r12) # move user PSW to stack
        la      %r12,\psworg
        stg     %r2,SP_ORIG_R2(%r15)    # store original content of gpr 2
        icm     %r12,12,__LC_SVC_ILC
        stmg    %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
        st      %r12,SP_ILC(%r15)
        mvc     SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
        la      %r12,0
        stg     %r12,__SF_BACKCHAIN(%r15)
        .endm

        .macro  RESTORE_ALL psworg,sync
        mvc     \psworg(16),SP_PSW(%r15) # move user PSW to lowcore
        .if !\sync
        ni      \psworg+1,0xfd          # clear wait state bit
        .endif
        lmg     %r0,%r15,SP_R0(%r15)    # load gprs 0-15 of user
        STORE_TIMER __LC_EXIT_TIMER
        lpswe   \psworg                 # back to caller
        .endm

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
        .globl  __switch_to
__switch_to:
        tm      __THREAD_per+4(%r3),0xe8 # is the new process using per ?
        jz      __switch_to_noper       # if not we're fine
        stctg   %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
        clc     __THREAD_per(24,%r3),__SF_EMPTY(%r15)
        je      __switch_to_noper       # we got away without bashing TLB's
        lctlg   %c9,%c11,__THREAD_per(%r3)      # Nope we didn't
__switch_to_noper:
        lg      %r4,__THREAD_info(%r2)  # get thread_info of prev
        tm      __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
        jz      __switch_to_no_mcck
        ni      __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
        lg      %r4,__THREAD_info(%r3)  # get thread_info of next
        oi      __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next
__switch_to_no_mcck:
        stmg    %r6,%r15,__SF_GPRS(%r15) # store __switch_to registers of prev task
        stg     %r15,__THREAD_ksp(%r2)  # store kernel stack to prev->tss.ksp
        lg      %r15,__THREAD_ksp(%r3)  # load kernel stack from next->tss.ksp
        lmg     %r6,%r15,__SF_GPRS(%r15) # load __switch_to registers of next task
        stg     %r3,__LC_CURRENT        # __LC_CURRENT = current task struct
        lctl    %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
        lg      %r3,__THREAD_info(%r3)  # load thread_info from task struct
        stg     %r3,__LC_THREAD_INFO
        aghi    %r3,STACK_SIZE
        stg     %r3,__LC_KERNEL_STACK   # __LC_KERNEL_STACK = new kernel stack
        br      %r14

__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

        .globl  system_call
system_call:
        STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
        SAVE_ALL_BASE __LC_SAVE_AREA
        SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        llgh    %r7,__LC_SVC_INT_CODE   # get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
sysc_do_svc:
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        slag    %r7,%r7,2               # *4 and test for svc 0
        jnz     sysc_nr_ok
        # svc 0: system call number in %r1
        cl      %r1,BASED(.Lnr_syscalls)
        jnl     sysc_nr_ok
        lgfr    %r7,%r1                 # clear high word in r1
        slag    %r7,%r7,2               # svc 0: system call number in %r1
sysc_nr_ok:
        mvc     SP_ARGS(8,%r15),SP_R7(%r15)
sysc_do_restart:
        larl    %r10,sys_call_table
#ifdef CONFIG_COMPAT
        tm      __TI_flags+5(%r9),(_TIF_31BIT>>16)  # running in 31 bit mode ?
        jno     sysc_noemu
        larl    %r10,sys_call_table_emu # use 31 bit emulation system calls
sysc_noemu:
#endif
        tm      __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
        lgf     %r8,0(%r7,%r10)         # load address of system call routine
        jnz     sysc_tracesys
        basr    %r14,%r8                # call sys_xxxx
        stg     %r2,SP_R2(%r15)         # store return value (change R2 on stack)

sysc_return:
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
        jno     sysc_restore
        tm      __TI_flags+7(%r9),_TIF_WORK_SVC
        jnz     sysc_work               # there is work to do (signals etc.)
sysc_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
        larl    %r1,sysc_restore_trace_psw
        lpswe   0(%r1)
sysc_restore_trace:
        TRACE_IRQS_CHECK
        LOCKDEP_SYS_EXIT
#endif
sysc_leave:
        RESTORE_ALL __LC_RETURN_PSW,1
sysc_done:

#ifdef CONFIG_TRACE_IRQFLAGS
        .align  8
        .globl  sysc_restore_trace_psw
sysc_restore_trace_psw:
        .quad   0, sysc_restore_trace
#endif

#
# recheck if there is more work to do
#
sysc_work_loop:
        tm      __TI_flags+7(%r9),_TIF_WORK_SVC
        jz      sysc_restore            # there is no work to do
#
# One of the work bits is on. Find out which one.
#
sysc_work:
        tm      __TI_flags+7(%r9),_TIF_MCCK_PENDING
        jo      sysc_mcck_pending
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
        jo      sysc_reschedule
        tm      __TI_flags+7(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
        jnz     sysc_sigpending
        tm      __TI_flags+7(%r9),_TIF_RESTART_SVC
        jo      sysc_restart
        tm      __TI_flags+7(%r9),_TIF_SINGLE_STEP
        jo      sysc_singlestep
        j       sysc_restore
sysc_work_done:

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
        larl    %r14,sysc_work_loop
        jg      schedule                # return point is sysc_return

#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
        larl    %r14,sysc_work_loop
        jg      s390_handle_mcck        # TIF bit will be cleared by handler

#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
sysc_sigpending:
        ni      __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        brasl   %r14,do_signal          # call do_signal
        tm      __TI_flags+7(%r9),_TIF_RESTART_SVC
        jo      sysc_restart
        tm      __TI_flags+7(%r9),_TIF_SINGLE_STEP
        jo      sysc_singlestep
        j       sysc_work_loop

#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
        ni      __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
        lg      %r7,SP_R2(%r15)         # load new svc number
        slag    %r7,%r7,2               # *4
        mvc     SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
        lmg     %r2,%r6,SP_R2(%r15)     # load svc arguments
        j       sysc_do_restart         # restart svc

#
# _TIF_SINGLE_STEP is set, call do_single_step
#
sysc_singlestep:
        ni      __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
        lhi     %r0,__LC_PGM_OLD_PSW
        sth     %r0,SP_TRAP(%r15)       # set trap indication to pgm check
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        larl    %r14,sysc_return        # load adr. of system return
        jg      do_single_step          # branch to do_sigtrap

#
# call syscall_trace before and after system call
# special linkage: %r12 contains the return address for trace_svc
#
sysc_tracesys:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        la      %r3,0
        srl     %r7,2
        stg     %r7,SP_R2(%r15)
        brasl   %r14,syscall_trace
        lghi    %r0,NR_syscalls
        clg     %r0,SP_R2(%r15)
        jnh     sysc_tracenogo
        lg      %r7,SP_R2(%r15)         # strace might have changed the
        sll     %r7,2                   # system call
        lgf     %r8,0(%r7,%r10)
sysc_tracego:
        lmg     %r3,%r6,SP_R3(%r15)
        lg      %r2,SP_ORIG_R2(%r15)
        basr    %r14,%r8                # call sys_xxx
        stg     %r2,SP_R2(%r15)         # store return value
sysc_tracenogo:
        tm      __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
        jz      sysc_return
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        la      %r3,1
        larl    %r14,sysc_return        # return point is sysc_return
        jg      syscall_trace

#
# a new process exits the kernel with ret_from_fork
#
        .globl  ret_from_fork
ret_from_fork:
        lg      %r13,__LC_SVC_NEW_PSW+8
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        tm      SP_PSW+1(%r15),0x01     # forking a kernel thread ?
        jo      0f
        stg     %r15,SP_R15(%r15)       # store stack pointer for new kthread
0:      brasl   %r14,schedule_tail
        TRACE_IRQS_ON
        stosm   24(%r15),0x03           # reenable interrupts
        j       sysc_return

#
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
#
        .globl  kernel_execve
kernel_execve:
        stmg    %r12,%r15,96(%r15)
        lgr     %r14,%r15
        aghi    %r15,-SP_SIZE
        stg     %r14,__SF_BACKCHAIN(%r15)
        la      %r12,SP_PTREGS(%r15)
        xc      0(__PT_SIZE,%r12),0(%r12)
        lgr     %r5,%r12
        brasl   %r14,do_execve
        ltgfr   %r2,%r2
        je      0f
        aghi    %r15,SP_SIZE
        lmg     %r12,%r15,96(%r15)
        br      %r14
        # execve succeeded.
0:      stnsm   __SF_EMPTY(%r15),0xfc   # disable interrupts
        lg      %r15,__LC_KERNEL_STACK  # load ksp
        aghi    %r15,-SP_SIZE           # make room for registers & psw
        lg      %r13,__LC_SVC_NEW_PSW+8
        lg      %r9,__LC_THREAD_INFO
        mvc     SP_PTREGS(__PT_SIZE,%r15),0(%r12)       # copy pt_regs
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        brasl   %r14,execve_tail
        j       sysc_return

/*
 * Program check handler routine
 */

        .globl  pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
        STORE_TIMER __LC_SYNC_ENTER_TIMER
        SAVE_ALL_BASE __LC_SAVE_AREA
        tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
        jnz     pgm_per                 # got per exception -> special case
        SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        jz      pgm_no_vtime
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
#endif
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        TRACE_IRQS_OFF
        lgf     %r3,__LC_PGM_ILC        # load program interruption code
        lghi    %r8,0x7f
        ngr     %r8,%r3
pgm_do_call:
        sll     %r8,3
        larl    %r1,pgm_check_table
        lg      %r1,0(%r8,%r1)          # load address of handler routine
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        larl    %r14,sysc_return
        br      %r1                     # branch to interrupt-handler

#
# handle per exception
#
pgm_per:
        tm      __LC_PGM_OLD_PSW,0x40   # test if per event recording is on
        jnz     pgm_per_std             # ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
        clc     __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
        je      pgm_svcper
# no interesting special case, ignore PER event
        lmg     %r12,%r15,__LC_SAVE_AREA
        lpswe   __LC_PGM_OLD_PSW

#
# Normal per exception
#
pgm_per_std:
        SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        jz      pgm_no_vtime2
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
#endif
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        TRACE_IRQS_OFF
        lg      %r1,__TI_task(%r9)
        tm      SP_PSW+1(%r15),0x01     # kernel per event ?
        jz      kernel_per
        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
        mvc     __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
        oi      __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
        lgf     %r3,__LC_PGM_ILC        # load program interruption code