entry.S
	movi	a1, 0

	wsr	a0, WINDOWSTART
	wsr	a1, WINDOWBASE
	rsync

	movi	a0, 0

	movi	a3, exc_table
	l32i	a1, a3, EXC_TABLE_KSTK
	wsr	a3, EXCSAVE_1

	movi	a4, PS_WOE_MASK | 1
	wsr	a4, PS
	rsync

	movi	a6, SIGSEGV
	movi	a4, do_exit
	callx4	a4

1:	/* Kernel space: PANIC! */

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 *   http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, EXCVADDR		# fault address
	_PGD_OFFSET(a0, a1, a1)
	l32i	a0, a0, 0		# read pmdval
	//beqi	a0, _PAGE_USER, 2f
	beqz	a0, 2f

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 *	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 *	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_KERNEL
	 */

	movi	a1, -PAGE_OFFSET
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1

	movi	a1, PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	rsr	a1, PTEVADDR
	srli	a1, a1, PAGE_SHIFT
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a1, a1, DTLB_WAY_PGTABLE	# ... + way_number

	wdtlb	a0, a1
	dsync

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a2, a2, PT_DEPC
	xsr	a3, EXCSAVE_1

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, DEPC
	rfe

	/* Return from double exception. */

1:	xsr	a2, DEPC
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* Invalid PGD, default exception handling */

	rsr	a1, DEPC
	xsr	a3, EXCSAVE_1
	s32i	a1, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	rsr	a2, PS
	bbsi.l	a2, PS_UM_SHIFT, 1f
	j	_kernel_exception
1:	j	_user_exception
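
The DTLB fixup in fast_second_level_miss boils down to two values: a PTE that maps the page-table page, and the virtual page number (derived from PTEVADDR) at which that PTE is installed in the DTLB way reserved for page tables. The C sketch below restates that computation in the style of the old C version quoted in the comments; it assumes only the names the file itself refers to (PAGE_OFFSET, PAGE_MASK, PAGE_DIRECTORY, DTLB_WAY_PGTABLE, write_dtlb_entry), and the wrapper name map_pte_page is illustrative, not a kernel symbol.

/* Sketch only: mirrors the "pteval" simplification described in
 * fast_second_level_miss. Note the assembly ORs in PAGE_DIRECTORY
 * where the older comment writes PAGE_KERNEL.
 */
static void map_pte_page(unsigned long pmdval, unsigned long ptevaddr)
{
	/* pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY */
	unsigned long pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY;

	/* vpnval = (ptevaddr & PAGE_MASK) + DTLB_WAY_PGTABLE */
	unsigned long vpnval = (ptevaddr & PAGE_MASK) + DTLB_WAY_PGTABLE;

	write_dtlb_entry(pteval, vpnval);	/* wdtlb a0, a1 / dsync above */
}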

/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the itlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a4. */

	s32i	a1, a2, PT_AREG1
	s32i	a4, a2, PT_AREG4

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, EXCVADDR		# fault address
	_PGD_OFFSET(a0, a1, a4)
	l32i	a0, a0, 0
	//beqi	a0, _PAGE_USER, 2f	# FIXME use _PAGE_INVALID
	beqz	a0, 2f

	_PTE_OFFSET(a0, a1, a4)
	l32i	a4, a0, 0		# read pteval
	movi	a1, _PAGE_VALID | _PAGE_RW
	bnall	a4, a1, 2f

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
	or	a4, a4, a1
	rsr	a1, EXCVADDR
	s32i	a4, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	beqz	a0, 1f
	idtlb	a0		// FIXME do we need this?
	wdtlb	a4, a0
1:
	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a4, a2, PT_AREG4
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	/* Restore excsave1 and a3. */

	xsr	a3, EXCSAVE_1
	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, DEPC
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, DEPC
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a4, DEPC	# still holds a2
	xsr	a3, EXCSAVE_1
	s32i	a4, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	l32i	a4, a2, PT_AREG4
	mov	a1, a2

	rsr	a2, PS
	bbsi.l	a2, PS_UM_SHIFT, 1f
	j	_kernel_exception
1:	j	_user_exception
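
fast_store_prohibited is a software dirty/accessed update: if the PTE for the faulting address is valid and writable but not yet write-enabled in hardware, it sets _PAGE_ACCESSED, _PAGE_DIRTY and _PAGE_WRENABLE, writes the PTE back, and refreshes any existing DTLB entry; otherwise it falls through to the C fault handler. A hedged C sketch of that fast path follows; the _PAGE_* flags are the ones used in the assembly, while the function name and return convention are illustrative only.

/* Sketch only: the fast path of fast_store_prohibited. A nonzero return
 * stands for the "2:" branch, where the fault is handed to the C fault
 * handler via _kernel_exception/_user_exception.
 */
static int fast_store_fixup(unsigned long *pte, unsigned long excvaddr)
{
	unsigned long pteval = *pte;

	/* bnall a4, a1, 2f: both VALID and RW must already be set. */
	if ((pteval & (_PAGE_VALID | _PAGE_RW)) != (_PAGE_VALID | _PAGE_RW))
		return 1;

	pteval |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE;
	*pte = pteval;

	/* The assembly then writes the cache line back if page coloring
	 * requires it (dhwb), probes the DTLB for the faulting address
	 * (pdtlb) and, on a hit, invalidates and rewrites the entry with
	 * the updated PTE (idtlb/wdtlb).
	 */
	return 0;
}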

#if XCHAL_EXTRA_SA_SIZE

#warning fast_coprocessor untested

/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_coprocessor_double)

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0

ENTRY(fast_coprocessor)

	/* Fatal if we are in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double

	/* Save some registers a1, a3, a4, SAR */

	xsr	a3, EXCSAVE_1
	s32i	a3, a2, PT_AREG3
	rsr	a3, SAR
	s32i	a4, a2, PT_AREG4
	s32i	a1, a2, PT_AREG1
	s32i	a5, a1, PT_AREG5
	s32i	a3, a2, PT_SAR
	mov	a1, a2

	/* Currently, the HAL macros only guarantee saving a0 and a1.
	 * These can and will be refined in the future, but for now,
	 * just save the remaining registers of a2...a15.
	 */

	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

	rsr	a0, EXCCAUSE
	addi	a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED

	/* Set corresponding CPENABLE bit */

	movi	a4, 1
	ssl	a3			# SAR: 32 - coprocessor_number
	rsr	a5, CPENABLE
	sll	a4, a4
	or	a4, a5, a4
	wsr	a4, CPENABLE
	rsync
	movi	a5, coprocessor_info	# list of owner and offset into cp_save
	addx8	a0, a4, a5		# entry for CP

	bne	a4, a5, .Lload		# bit wasn't set before, cp not in use

	/* Now compare the current task with the owner of the coprocessor.
	 * If they are the same, there is no reason to save or restore any
	 * coprocessor state. Having already enabled the coprocessor,
	 * branch ahead to return.
	 */

	GET_CURRENT(a5,a1)
	l32i	a4, a0, COPROCESSOR_INFO_OWNER	# a4: current owner for this CP
	beq	a4, a5, .Ldone

	/* Find location to dump current coprocessor state:
	 *  task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
	 *
	 * Note: a0 pointer to the entry in the coprocessor owner table,
	 *	 a3 coprocessor number,
	 *	 a4 current owner of coprocessor.
	 */

	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
	addi	a2, a4, THREAD_CP_SAVE
	add	a2, a2, a5

	/* Store current coprocessor states. (a5 still has CP number) */

	xchal_cpi_store_funcbody

	/* The macro might have destroyed a3 (coprocessor number), but
	 * SAR still has 32 - coprocessor_number!
	 */

	movi	a3, 32
	rsr	a4, SAR
	sub	a3, a3, a4

.Lload:	/* A new task now owns the coprocessors. Save its TCB pointer into
	 * the coprocessor owner table.
	 *
	 * Note: a0 pointer to the entry in the coprocessor owner table,
	 *	 a3 coprocessor number.
	 */

	GET_CURRENT(a4,a1)
	s32i	a4, a0, 0

	/* Find location from where to restore the current coprocessor state.*/

	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
	addi	a2, a4, THREAD_CP_SAVE
	add	a2, a2, a4

	xchal_cpi_load_funcbody

	/* We must assume that the xchal_cpi_store_funcbody macro destroyed
	 * registers a2..a15.
	 */

.Ldone:	l32i	a15, a1, PT_AREG15
	l32i	a14, a1, PT_AREG14
	l32i	a13, a1, PT_AREG13
	l32i	a12, a1, PT_AREG12
	l32i	a11, a1, PT_AREG11
	l32i	a10, a1, PT_AREG10
	l32i	a9, a1, PT_AREG9
	l32i	a8, a1, PT_AREG8
	l32i	a7, a1, PT_AREG7
	l32i	a6, a1, PT_AREG6
	l32i	a5, a1, PT_AREG5
	l32i	a4, a1, PT_AREG4
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

#endif	/* XCHAL_EXTRA_SA_SIZE */

/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                          a2                 a3
 */

ENTRY(_switch_to)

	entry	a1, 16

	mov	a4, a3			# preserve a3

	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer

	/* Disable ints while we manipulate the stack pointer; spill regs. */

	movi	a5, PS_EXCM_MASK | LOCKLEVEL
	xsr	a5, PS
	rsr	a3, EXCSAVE_1
	rsync
	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */

	call0	_spill_registers

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be loaded again after
	 *       we return from kernel space.
	 */

	l32i	a0, a4, TASK_THREAD_INFO
	rsr	a3, EXCSAVE_1		# exc_table
	movi	a1, 0
	addi	a0, a0, PT_REGS_OFFSET
	s32i	a1, a3, EXC_TABLE_FIXUP
	s32i	a0, a3, EXC_TABLE_KSTK

	/* restore context of the task that 'next' addresses */

	l32i	a0, a4, THREAD_RA	/* restore return address */
	l32i	a1, a4, THREAD_SP	/* restore stack pointer */

	wsr	a5, PS
	rsync

	retw

ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in a6 (return value from fake call4 frame)
	 */

	movi	a4, schedule_tail
	callx4	a4

	movi	a4, do_syscall_trace
	callx4	a4

	j	common_exception_return

/*
 * Table of syscalls
 */

.data
.align	4
.global	sys_call_table
sys_call_table:

#define SYSCALL(call, narg) .word call
#include "syscalls.h"

/*
 * Number of arguments of each syscall
 */

.global	sys_narg_table
sys_narg_table:

#undef SYSCALL
#define SYSCALL(call, narg) .byte narg
#include "syscalls.h"
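
The two tables at the end of the file are generated by including "syscalls.h" twice with different expansions of the SYSCALL() macro: the first pass emits a .word with each handler address into sys_call_table, the second a .byte with each argument count into sys_narg_table, so both tables stay index-aligned. Below is a small, self-contained C illustration of the same X-macro idea; the entries and names are made up for the example and are not the kernel's syscall list.

/* Sketch only: the table-generation pattern, restated in C. The real
 * syscalls.h supplies the SYSCALL(call, narg) entries; here a local
 * SYSCALLS() macro stands in for it, and the first table holds names
 * rather than handler addresses.
 */
#define SYSCALLS(X)	\
	X(sys_read, 3)	\
	X(sys_write, 3)	\
	X(sys_open, 3)

/* First expansion: one entry per syscall. */
#define SYSCALL_NAME(call, narg)	#call,
static const char *sys_call_names[] = { SYSCALLS(SYSCALL_NAME) };

/* Second expansion: argument counts, index-aligned with the table above. */
#define SYSCALL_NARG(call, narg)	narg,
static const unsigned char sys_narg[] = { SYSCALLS(SYSCALL_NARG) };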