📄 entry_32.s
字号:
/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 * When the value no longer fits in a 16-bit signed immediate we have
 * to build it with a lis/ori pair instead of a single li.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif

#ifdef CONFIG_BOOKE
#include "head_booke.h"
/*
 * Fix up the r10/r11 save slots for an exception-level (MCHECK/DEBUG/
 * CRIT) entry: load the values the first-level prolog stored at
 * GPR10/GPR11 minus one frame below r8 and store them into the
 * exception frame at r11.  r8 is preserved across the helper via the
 * per-level SPRG.
 * NOTE(review): the exact stack layout relied on here comes from
 * BOOKE_LOAD_EXC_LEVEL_STACK in head_booke.h — confirm against that
 * file if changing INT_FRAME_SIZE or the prolog.
 */
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	/* On 40x the critical-exception prolog stashed r10/r11 at the
	 * fixed low-memory locations crit_r10/crit_r11 (defined
	 * elsewhere); copy them into the exception frame at r11. */
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 *
 * On entry (established by the per-vector exception prologs):
 *   r11 = pointer to the partially-filled pt_regs exception frame
 *   r12 = interrupted NIP, r9 = interrupted MSR (stored below)
 *   r10 = MSR to resume with (used by FIX_SRR1 / SRR1 at "3:")
 *   LR  = points at a two-word table: handler address, return address
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)		/* full frame: save r13-r31 too */
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR		/* cr0.eq <- came from kernel (PR=0) */
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3		/* SPRG3 holds phys &current->thread */
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* from kernel: check DOZE/NAP, stack */
	/* from user: fix up THREAD.regs to point at this frame */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)			/* MMU still off: use phys address */
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)		/* second word: use count */
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f
2:	/* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11			/* HID0 bits into CR for bt below */
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:
	/* LR points at a pair of words laid down by the exception vector:
	 * the handler's virtual address and the address to return to. */
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r11,_end@h
	ori	r11,r11,_end@l
	cmplw	r1,r11
	ble	3b			/* r1 <= &_end is OK */
	/* NOTE(review): r11 was just overwritten with &_end above, so this
	 * SAVE_NVGPRS stores r13-r31 relative to _end rather than to the
	 * exception frame — looks suspect; later kernels use a different
	 * scratch register for the &_end compare.  Confirm upstream. */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* arg: regs */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

/*
 * System call entry.  On entry the exception frame is set up on the
 * kernel stack (r1), r0 holds the syscall number and r3-r8 the
 * arguments; r2 is current.  The result is returned in r3 with the
 * CR SO bit signalling an error to the C library.
 */
_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)	/* remember syscall number */
	stw	r3,ORIG_GPR3(r1)	/* keep arg0 for restart/trace */
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	li	r11,0
	stb	r11,TI_SC_NOERR(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace		/* tracer wants to see entry */
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2			/* word index into the table */
	bge-	66f			/* out-of-range number -> ENOSYS */
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */

	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	li	r11,-_LAST_ERRNO
	cmplw	0,r3,r11	/* unsigned: errors are -_LAST_ERRNO..-1 */
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	blt+	30f		/* not in the error window: plain result */
	lbz	r11,TI_SC_NOERR(r12)
	cmpwi	r11,0
	bne	30f		/* syscall asked to suppress error munging */
	neg	r3,r3		/* return positive errno ... */
	lwz	r10,_CCR(r1)	/* ... and set SO bit in CR to flag it */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)

	/* disable interrupts so current_thread_info()->flags can't change */
30:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The
	   internal debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)		/* restore user stack pointer last */
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI				/* return to user mode */
66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0			/* child returns 0 from fork/clone */
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)			/* tracer may inspect full regs */
	li	r0,0xc00		/* mark frame as a syscall trap */
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers (tracer may
				   have modified number and arguments) */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

/*
 * Slow syscall-exit path: handle tracing, pending signals and
 * rescheduling.  On entry r9 = TI_FLAGS, r6 = raw result, r3 =
 * (possibly errno-adjusted) return value, interrupts are disabled.
 */
syscall_exit_work:
	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	beq	5f
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1		/* NVGPRS not saved yet? (LSB set) */
	beq	4f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
4:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	REST_NVGPRS(r1)
2:
	lwz	r3,GPR3(r1)
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)		/* disable interrupts again */
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)	/* re-read flags with irqs off */
5:
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne	1f
	lwz	r5,_MSR(r1)
	andi.	r5,r5,MSR_PR
	beq	syscall_exit_cont	/* kernel "syscall": no signals */
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	b	do_user_signal
1:
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	bl	schedule
	b	2b		/* then re-check the flags */

#ifdef SHOW_SYSCALLS
/* Debug helper: printk the syscall number and arguments on entry.
 * Preserves all syscall argument registers around the printk calls. */
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11	/* only trace the selected task */
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31		/* r31 is nonvolatile: safe across printk */
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)	/* restore clobbered argument registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

/* Debug helper: printk the syscall result, preserving it in RESULT. */
do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1		/* -1 = trace every task */
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 */
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_sigsuspend

	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_rt_sigsuspend

/*
 * fork/vfork/clone/swapcontext also need the full nonvolatile
 * register set saved (the child's state is built from this frame),
 * so each wrapper saves r13-r31 and clears the _TRAP LSB before
 * tail-branching to the C implementation.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 *
 * On entry r4 holds the faulting address (stored to _DAR below);
 * do_page_fault returning non-zero means the fault could not be
 * handled, and its return value is passed on to bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except		/* handled: normal exception return */
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1			/* LSB off: full register set saved */
	stw	r0,_TRAP(r1)
	mr	r5,r3			/* signal number from do_page_fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
* * Note: there are two ways to get to the "going out" portion * of this code; either by coming in via the entry (_switch) * or via "fork" which must set up an environment equivalent * to the "_switch" path. If you change this , you'll have to * change the fork code also. * * The code which creates the new task context is in 'copy_thread' * in arch/ppc/kernel/process.c */_GLOBAL(_switch)
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -