📄 entry.s
        .long  SYSCALL(sys_mlockall,sys32_mlockall_wrapper)
        .long  SYSCALL(sys_munlockall,sys_munlockall)
        .long  SYSCALL(sys_sched_setparam,sys32_sched_setparam_wrapper)
        .long  SYSCALL(sys_sched_getparam,sys32_sched_getparam_wrapper)        /* 155 */
        .long  SYSCALL(sys_sched_setscheduler,sys32_sched_setscheduler_wrapper)
        .long  SYSCALL(sys_sched_getscheduler,sys32_sched_getscheduler_wrapper)
        .long  SYSCALL(sys_sched_yield,sys_sched_yield)
        .long  SYSCALL(sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper)
        .long  SYSCALL(sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper)
        .long  SYSCALL(sys_sched_rr_get_interval,sys32_sched_rr_get_interval_wrapper)
        .long  SYSCALL(sys_nanosleep,sys32_nanosleep_wrapper)
        .long  SYSCALL(sys_mremap,sys32_mremap_wrapper)
        .long  SYSCALL(sys_ni_syscall,sys32_setresuid16_wrapper)        /* old setresuid16 syscall */
        .long  SYSCALL(sys_ni_syscall,sys32_getresuid16_wrapper)        /* old getresuid16 syscall */
        .long  SYSCALL(sys_ni_syscall,sys_ni_syscall)    /* for vm86 */
        .long  SYSCALL(sys_query_module,sys32_query_module_wrapper)
        .long  SYSCALL(sys_poll,sys32_poll_wrapper)
        .long  SYSCALL(sys_nfsservctl,sys32_nfsservctl_wrapper)
        .long  SYSCALL(sys_ni_syscall,sys32_setresgid16_wrapper)        /* old setresgid16 syscall */
        .long  SYSCALL(sys_ni_syscall,sys32_getresgid16_wrapper)        /* old getresgid16 syscall */
        .long  SYSCALL(sys_prctl,sys32_prctl_wrapper)
        .long  SYSCALL(sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue)
        .long  SYSCALL(sys_rt_sigaction,sys32_rt_sigaction_wrapper)
        .long  SYSCALL(sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper)        /* 175 */
        .long  SYSCALL(sys_rt_sigpending,sys32_rt_sigpending_wrapper)
        .long  SYSCALL(sys_rt_sigtimedwait,sys32_rt_sigtimedwait_wrapper)
        .long  SYSCALL(sys_rt_sigqueueinfo,sys32_rt_sigqueueinfo_wrapper)
        .long  SYSCALL(sys_rt_sigsuspend_glue,sys32_rt_sigsuspend_glue)
        .long  SYSCALL(sys_pread,sys32_pread_wrapper)    /* 180 */
        .long  SYSCALL(sys_pwrite,sys32_pwrite_wrapper)
        .long  SYSCALL(sys_ni_syscall,sys32_chown16_wrapper)    /* old chown16 syscall */
        .long  SYSCALL(sys_getcwd,sys32_getcwd_wrapper)
        .long  SYSCALL(sys_capget,sys32_capget_wrapper)
        .long  SYSCALL(sys_capset,sys32_capset_wrapper)         /* 185 */
        .long  SYSCALL(sys_sigaltstack_glue,sys32_sigaltstack_glue)
        .long  SYSCALL(sys_sendfile,sys32_sendfile_wrapper)
        .long  SYSCALL(sys_ni_syscall,sys_ni_syscall)    /* streams1 */
        .long  SYSCALL(sys_ni_syscall,sys_ni_syscall)    /* streams2 */
        .long  SYSCALL(sys_vfork_glue,sys_vfork_glue)    /* 190 */
        .long  SYSCALL(sys_getrlimit,sys32_old_getrlimit_wrapper)
        .long  SYSCALL(sys_mmap2,sys32_mmap2_wrapper)
        .long  SYSCALL(sys_ni_syscall,sys32_truncate64_wrapper)
        .long  SYSCALL(sys_ni_syscall,sys32_ftruncate64_wrapper)
        .long  SYSCALL(sys_ni_syscall,sys32_stat64_wrapper)     /* 195 */
        .long  SYSCALL(sys_ni_syscall,sys32_lstat64_wrapper)
        .long  SYSCALL(sys_ni_syscall,sys32_fstat64_wrapper)
        .long  SYSCALL(sys_lchown,sys32_lchown_wrapper)
        .long  SYSCALL(sys_getuid,sys_getuid)
        .long  SYSCALL(sys_getgid,sys_getgid)    /* 200 */
        .long  SYSCALL(sys_geteuid,sys_geteuid)
        .long  SYSCALL(sys_getegid,sys_getegid)
        .long  SYSCALL(sys_setreuid,sys32_setreuid_wrapper)
        .long  SYSCALL(sys_setregid,sys32_setregid_wrapper)
        .long  SYSCALL(sys_getgroups,sys32_getgroups_wrapper)   /* 205 */
        .long  SYSCALL(sys_setgroups,sys32_setgroups_wrapper)
        .long  SYSCALL(sys_fchown,sys32_fchown_wrapper)
        .long  SYSCALL(sys_setresuid,sys32_setresuid_wrapper)
        .long  SYSCALL(sys_getresuid,sys32_getresuid_wrapper)
        .long  SYSCALL(sys_setresgid,sys32_setresgid_wrapper)   /* 210 */
        .long  SYSCALL(sys_getresgid,sys32_getresgid_wrapper)
        .long  SYSCALL(sys_chown,sys32_chown_wrapper)
        .long  SYSCALL(sys_setuid,sys32_setuid_wrapper)
        .long  SYSCALL(sys_setgid,sys32_setgid_wrapper)
        .long  SYSCALL(sys_setfsuid,sys32_setfsuid_wrapper)     /* 215 */
        .long  SYSCALL(sys_setfsgid,sys32_setfsgid_wrapper)
        .long  SYSCALL(sys_pivot_root,sys32_pivot_root_wrapper)
        .long  SYSCALL(sys_mincore,sys32_mincore_wrapper)
        .long  SYSCALL(sys_madvise,sys32_madvise_wrapper)
        .long  SYSCALL(sys_getdents64,sys32_getdents64_wrapper) /* 220 */
        .long  SYSCALL(sys_ni_syscall,sys32_fcntl64_wrapper)
        .long  SYSCALL(sys_ni_syscall,sys_ni_syscall)    /* 222 - reserved for posix_acl */
        .long  SYSCALL(sys_ni_syscall,sys_ni_syscall)    /* 223 - reserved for posix_acl */
        .long  SYSCALL(sys_ni_syscall,sys_ni_syscall)    /* 224 - reserved for posix_acl */
        .rept  255-224
        .long  SYSCALL(sys_ni_syscall,sys_ni_syscall)
        .endr

/*
 * Program check handler routine
 */
        .globl pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause of a PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
        tm     __LC_PGM_INT_CODE+1,0x80  # check whether we got a per exception
        jz     pgm_sv                    # skip if not
        tm     __LC_PGM_OLD_PSW,0x40     # test if per event recording is on
        jnz    pgm_sv                    # skip if it is
# ok, it's one of the special cases, now we need to find out which one
        clc    __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
        je     pgm_svcper
# no interesting special case, ignore PER event
        lpswe  __LC_PGM_OLD_PSW
# it was a single stepped SVC that is causing all the trouble
pgm_svcper:
        SAVE_ALL __LC_SVC_OLD_PSW,1
        mvc    SP_PGM_OLD_ILC(4,%r15),__LC_PGM_ILC  # save program check information
        j      pgm_system_call           # now do the svc
pgm_svcret:
        lhi    %r0,__LC_PGM_OLD_PSW      # set trap indication back to pgm_chk
        st     %r0,SP_TRAP(%r15)
        llgh   %r7,SP_PGM_OLD_ILC(%r15)  # get ilc from stack
        mvi    SP_PGM_OLD_ILC(%r15),1    # mark PGM_OLD_ILC as invalid
        j      pgm_no_sv
pgm_sv:
        SAVE_ALL __LC_PGM_OLD_PSW,1
        mvi    SP_PGM_OLD_ILC(%r15),1    # mark PGM_OLD_ILC as invalid
        llgh   %r7,__LC_PGM_ILC          # load instruction length
        GET_CURRENT
pgm_no_sv:
        llgh   %r8,__LC_PGM_INT_CODE     # N.B. saved int code used later, KEEP it
        lghi   %r3,0x7f
        nr     %r3,%r8                   # clear per-event-bit & move to r3
        je     pgm_dn                    # none of Martin's exceptions occurred, bypass
        sll    %r3,3
        larl   %r1,pgm_check_table
        lg     %r1,0(%r3,%r1)            # load address of handler routine
        srl    %r3,3
        la     %r2,SP_PTREGS(%r15)       # address of register-save area
        chi    %r3,0x4                   # protection-exception ?
        jne    pgm_go                    # if not,
        lg     %r5,SP_PSW+8(15)          # load psw addr
        slgr   %r5,%r7                   # subtract ilc from psw
        stg    %r5,SP_PSW+8(15)          # store corrected psw addr
pgm_go:
        basr   %r14,%r1                  # branch to interrupt-handler
pgm_dn:
        nill   %r8,0x80                  # check for per exception
        je     sysc_return
        la     %r2,SP_PTREGS(15)         # address of register-save area
        larl   %r14,sysc_return          # load adr. of system return
        jg     handle_per_exception
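As an aside, the PER special-casing at the top of pgm_check_handler is easier to see as plain control flow than as test-and-branch instructions. The C fragment below is an illustrative sketch only, not kernel code: the struct, field names and classify_pgm_check itself are invented for the example, and it merely models the four decisions made against the lowcore fields above (assuming the usual z/Architecture PSW layout, where the PER mask is bit 1 of the PSW).

/*
 * Illustrative-only C sketch of the decision made at the top of
 * pgm_check_handler.  All names here are hypothetical; the real code
 * operates directly on the lowcore PSWs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct psw {
        uint64_t mask;
        uint64_t addr;
};

struct pgm_lowcore {                     /* hypothetical snapshot of lowcore fields */
        uint16_t   pgm_int_code;         /* __LC_PGM_INT_CODE */
        struct psw pgm_old_psw;          /* __LC_PGM_OLD_PSW  */
        struct psw svc_new_psw;          /* __LC_SVC_NEW_PSW  */
};

enum pgm_action { ORDINARY_PGM_CHECK, RUN_SVC_FIRST, IGNORE_PER_EVENT };

static enum pgm_action classify_pgm_check(const struct pgm_lowcore *lc)
{
        bool per_event   = lc->pgm_int_code & 0x0080;           /* tm __LC_PGM_INT_CODE+1,0x80 */
        bool per_mask_on = lc->pgm_old_psw.mask & (1ULL << 62);  /* tm __LC_PGM_OLD_PSW,0x40    */

        if (!per_event || per_mask_on)
                return ORDINARY_PGM_CHECK;                       /* pgm_sv: normal program check */

        /* PER event although PER is now off: was it a single-stepped SVC? */
        if (memcmp(&lc->pgm_old_psw, &lc->svc_new_psw, sizeof(struct psw)) == 0)
                return RUN_SVC_FIRST;                            /* pgm_svcper: do the SVC, PER later */

        return IGNORE_PER_EVENT;                                 /* lpswe __LC_PGM_OLD_PSW */
}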
/*
 * IO interrupt handler routine
 */
        .globl io_int_handler
io_int_handler:
        SAVE_ALL __LC_IO_OLD_PSW,0
        GET_CURRENT                      # load pointer to task_struct to R9
        la     %r2,SP_PTREGS(%r15)       # address of register-save area
        llgh   %r3,__LC_SUBCHANNEL_NR    # load subchannel number
        llgf   %r4,__LC_IO_INT_PARM      # load interruption parm
        llgf   %r5,__LC_IO_INT_WORD      # load interruption word
        brasl  %r14,do_IRQ               # call standard irq handler
io_return:
#
# check, if bottom-half has to be done
#
        lgf    %r1,processor(%r9)        # get cpu number from task structure
        larl   %r2,irq_stat
        sll    %r1,L1_CACHE_SHIFT
        la     %r1,0(%r1,%r2)
        icm    %r0,15,0(%r1)             # test irq_stat[#cpu].__softirq_pending
        jnz    io_handle_bottom_half
io_return_bh:
        tm     SP_PSW+1(%r15),0x01       # returning to user ?
        jno    io_leave                  # no -> skip resched & signal
        stosm  48(%r15),0x03             # reenable interrupts
#
# check, if reschedule is needed
#
        lg     %r0,need_resched(%r9)     # get need_resched from task_struct
        ltgr   %r0,%r0
        jnz    io_reschedule
        icm    %r0,15,sigpending(%r9)    # get sigpending from task_struct
        jnz    io_signal_return
io_leave:
        stnsm  48(%r15),0xfc             # disable I/O and ext. interrupts
        RESTORE_ALL 0
#
# call do_softirq and return from syscall, if interrupt-level
# is zero
#
io_handle_bottom_half:
        larl   %r14,io_return_bh
        jg     do_softirq                # return point is io_return_bh
#
# call schedule with io_return as return-address
#
io_reschedule:
        larl   %r14,io_return
        jg     schedule                  # call scheduler, return to io_return
#
# call do_signal before return
#
io_signal_return:
        la     %r2,SP_PTREGS(%r15)       # load pt_regs
        slgr   %r3,%r3                   # clear *oldset
        larl   %r14,io_leave
        jg     do_signal                 # return point is io_leave

/*
 * External interrupt handler routine
 */
        .globl ext_int_handler
ext_int_handler:
        SAVE_ALL __LC_EXT_OLD_PSW,0
        GET_CURRENT                      # load pointer to task_struct to R9
        la     %r2,SP_PTREGS(%r15)       # address of register-save area
        llgh   %r3,__LC_EXT_INT_CODE     # error code
        lgr    %r1,%r3                   # calculate index = code & 0xff
        nill   %r1,0xff
        sll    %r1,3
        larl   %r4,ext_int_hash
        lg     %r4,0(%r1,%r4)            # get first list entry for hash value
        ltgr   %r4,%r4                   # == NULL ?
        jz     io_return                 # yes, nothing to do, exit
ext_int_loop:
        ch     %r3,16(%r4)               # compare external interrupt code
        je     ext_int_found
        lg     %r4,0(%r4)                # next list entry
        ltgr   %r4,%r4
        jnz    ext_int_loop
        j      io_return
ext_int_found:
        lg     %r4,8(%r4)                # get handler address
        larl   %r14,io_return
        br     %r4                       # branch to ext call handler

/*
 * Machine check handler routines
 */
        .globl mcck_int_handler
mcck_int_handler:
        SAVE_ALL __LC_MCK_OLD_PSW,0
        brasl  %r14,s390_do_machine_check
mcck_return:
        RESTORE_ALL 0

#ifdef CONFIG_SMP
/*
 * Restart interruption handler, kick starter for additional CPUs
 */
        .globl restart_int_handler
restart_int_handler:
        lg     %r15,__LC_SAVE_AREA+120   # load ksp
        lghi   %r10,__LC_CREGS_SAVE_AREA
        lctlg  %c0,%c15,0(%r10)          # get new ctl regs
        lghi   %r10,__LC_AREGS_SAVE_AREA
        lam    %a0,%a15,0(%r10)
        stosm  0(%r15),0x04              # now we can turn dat on
        lmg    %r6,%r15,48(%r15)         # load registers from clone
        jg     start_secondary
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
        .globl restart_int_handler
restart_int_handler:
        basr   %r1,0
restart_base:
        lpswe  restart_crash-restart_base(%r1)
        .align 8
restart_crash:
        .long  0x000a0000,0x00000000,0x00000000,0x00000000
restart_go:
#endif

/*
 * Integer constants
 */
        .align 4
.Lc_ac: .long  0,0,1
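The list walk in ext_int_handler/ext_int_loop is compact enough that the node layout is easy to miss. The following C fragment is an illustrative model only, not the kernel's actual declarations: the struct and find_ext_handler are invented for the example, with field offsets chosen to mirror the displacements used by the assembly (next pointer at 0, handler at 8, interrupt code at 16) and the hash table indexed by the low byte of the interrupt code.

/*
 * Illustrative-only C model of the ext_int_hash lookup above.
 * All names are hypothetical; only the offsets follow the assembly.
 */
#include <stddef.h>
#include <stdint.h>

struct pt_regs;                                  /* opaque here; defined by the kernel */

typedef void (*ext_int_handler_t)(struct pt_regs *regs, uint16_t code);

struct ext_int_info {                            /* hypothetical node layout */
        struct ext_int_info *next;               /* offset 0:  lg %r4,0(%r4)  */
        ext_int_handler_t    handler;            /* offset 8:  lg %r4,8(%r4)  */
        uint16_t             code;               /* offset 16: ch %r3,16(%r4) */
};

struct ext_int_info *ext_int_hash[256];          /* indexed by code & 0xff, as in the assembly */

static ext_int_handler_t find_ext_handler(uint16_t code)
{
        struct ext_int_info *p = ext_int_hash[code & 0xff];

        for (; p != NULL; p = p->next)           /* ext_int_loop */
                if (p->code == code)             /* halfword compare */
                        return p->handler;       /* ext_int_found */

        return NULL;                             /* no match: fall back to io_return */
}

If a matching entry is found, the assembly loads %r14 with io_return before branching to the handler, so the handler returns directly into the common interrupt-exit path.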