📄 entry.s
字号:
 * 0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
 * 2   -> way 8	shared libraries (2000.0000)
 * 3   -> way 0	stack (3000.0000)
 */

	/* Select the DTLB way from bits 28..29 of the faulting address,
	 * then combine it with the page-aligned PTEVADDR to form the
	 * DTLB entry address.
	 */

	extui	a3, a3, 28, 2		# addr. bits 28 and 29 -> 0,1,2,3
	rsr	a1, PTEVADDR
	addx2	a3, a3, a3		# -> 0,3,6,9
	srli	a1, a1, PAGE_SHIFT
	extui	a3, a3, 2, 2		# -> 0,0,1,2
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a3, a3, DTLB_WAY_PGD
	add	a1, a1, a3		# ... + way_number

3:	wdtlb	a0, a1			# write PGD entry (a0) into the DTLB
	dsync

	/* Exit critical section. */

4:	movi	a3, exc_table		# restore a3
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a2, a2, PT_DEPC
	xsr	a3, EXCSAVE_1

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Regular exception: restore excsave1 and return. */

	rsr	a2, DEPC
	rfe

	/* Return from double exception. */

1:	xsr	a2, DEPC
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

2:	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page
	 * or the aliased cache flush functions got preemptively interrupted
	 * by another task. Re-establish temporary mapping to the
	 * TLBTEMP_BASE areas.
	 */

	/* We shouldn't be in a double exception */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f

	/* Make sure the exception originated in the special functions */

	movi	a0, __tlbtemp_mapping_start
	rsr	a3, EPC_1
	bltu	a3, a0, 2f
	movi	a0, __tlbtemp_mapping_end
	bgeu	a3, a0, 2f

	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

	movi	a3, TLBTEMP_BASE_1
	rsr	a0, EXCVADDR
	bltu	a0, a3, 2f

	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
	bgeu	a1, a3, 2f		# beyond the two TLBTEMP windows

	/* Check if we have to restore an ITLB mapping. */

	movi	a1, __tlbtemp_mapping_itlb
	rsr	a3, EPC_1
	sub	a3, a3, a1		# >= 0 iff EPC_1 is in the ITLB part

	/* Calculate VPN */

	movi	a1, PAGE_MASK
	and	a1, a1, a0

	/* Jump for ITLB entry */

	bgez	a3, 1f

	/* We can use up to two TLBTEMP areas, one for src and one for dst. */

	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
	add	a1, a3, a1

	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

	mov	a0, a6
	movnez	a0, a7, a3
	j	3b			# rewrite the DTLB entry above

	/* ITLB entry. We only use dst in a6. */

1:	witlb	a6, a1
	isync
	j	4b			# exit critical section above

#endif	// DCACHE_WAY_SIZE > PAGE_SIZE

2:	/* Invalid PGD, default exception handling */

	movi	a3, exc_table
	rsr	a1, DEPC
	xsr	a3, EXCSAVE_1
	s32i	a1, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	rsr	a2, PS
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception


/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the itlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a4. */

	s32i	a1, a2, PT_AREG1
	s32i	a4, a2, PT_AREG4

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f			# kernel thread: use active_mm instead

8:	rsr	a1, EXCVADDR		# fault address
	_PGD_OFFSET(a0, a1, a4)
	l32i	a0, a0, 0		# read pgd entry
	beqz	a0, 2f			# no PGD -> handle fault in C

	/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/

	_PTE_OFFSET(a0, a1, a4)
	l32i	a4, a0, 0		# read pteval
	bbci.l	a4, _PAGE_WRITABLE_BIT, 2f	# not writable -> fault in C

	/* Mark the pte accessed/dirty/hw-writable and write it back. */

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a4, a4, a1
	rsr	a1, EXCVADDR
	s32i	a4, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0			# write back the cache line holding the pte
#endif
	pdtlb	a0, a1			# probe DTLB for the fault address
	wdtlb	a4, a0			# update the matching DTLB entry

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a4, a2, PT_AREG4
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	/* Restore excsave1 and a3. */

	xsr	a3, EXCSAVE_1
	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, DEPC
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, DEPC
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a4, DEPC		# still holds a2
	xsr	a3, EXCSAVE_1
	s32i	a4, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	l32i	a4, a2, PT_AREG4
	mov	a1, a2

	rsr	a2, PS
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

#if XCHAL_EXTRA_SA_SIZE

#warning fast_coprocessor untested

/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_coprocessor_double)
	/* Coprocessor exception inside a double exception is unrecoverable. */
	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0

ENTRY(fast_coprocessor)

	/* Fatal if we are in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double

	/* Save some registers a1, a3, a4, SAR */

	xsr	a3, EXCSAVE_1
	s32i	a3, a2, PT_AREG3
	rsr	a3, SAR
	s32i	a4, a2, PT_AREG4
	s32i	a1, a2, PT_AREG1
	s32i	a5, a1, PT_AREG5	# NOTE(review): stores relative to the OLD
					# a1, before "mov a1, a2" below — looks
					# like it should use a2; verify (code is
					# flagged untested by the #warning above)
	s32i	a3, a2, PT_SAR
	mov	a1, a2

	/* Currently, the HAL macros only guarantee saving a0 and a1.
	 * These can and will be refined in the future, but for now,
	 * just save the remaining registers of a2...a15.
	 */

	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

	rsr	a0, EXCCAUSE
	addi	a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED

	/* Set corresponding CPENABLE bit */

	movi	a4, 1
	ssl	a3			# SAR: 32 - coprocessor_number
	rsr	a5, CPENABLE
	sll	a4, a4
	or	a4, a5, a4
	wsr	a4, CPENABLE
	rsync
	movi	a5, coprocessor_info	# list of owner and offset into cp_save
	addx8	a0, a4, a5		# entry for CP
					# NOTE(review): a4 holds the new CPENABLE
					# value here, not the CP index in a3 —
					# this indexing and the compare below
					# look wrong; confirm against a known-good
					# version before relying on this path

	bne	a4, a5, .Lload		# bit wasn't set before, cp not in use

	/* Now compare the current task with the owner of the coprocessor.
	 * If they are the same, there is no reason to save or restore any
	 * coprocessor state. Having already enabled the coprocessor,
	 * branch ahead to return.
	 */

	GET_CURRENT(a5,a1)
	l32i	a4, a0, COPROCESSOR_INFO_OWNER	# a4: current owner for this CP
	beq	a4, a5, .Ldone

	/* Find location to dump current coprocessor state:
	 *  task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
	 *
	 * Note: a0 pointer to the entry in the coprocessor owner table,
	 *	 a3 coprocessor number,
	 *	 a4 current owner of coprocessor.
	 */

	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
	addi	a2, a4, THREAD_CP_SAVE
	add	a2, a2, a5

	/* Store current coprocessor states. (a5 still has CP number) */

	xchal_cpi_store_funcbody

	/* The macro might have destroyed a3 (coprocessor number), but
	 * SAR still has 32 - coprocessor_number!
	 */

	movi	a3, 32
	rsr	a4, SAR
	sub	a3, a3, a4

.Lload:	/* A new task now owns the coprocessors. Save its TCB pointer into
	 * the coprocessor owner table.
	 *
	 * Note: a0 pointer to the entry in the coprocessor owner table,
	 *	 a3 coprocessor number.
	 */

	GET_CURRENT(a4,a1)
	s32i	a4, a0, 0

	/* Find location from where to restore the current coprocessor state.*/

	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
	addi	a2, a4, THREAD_CP_SAVE
	add	a2, a2, a4		# NOTE(review): store path above adds a5
					# (the offset); adding a4 (the task ptr)
					# here looks like a typo — verify

	xchal_cpi_load_funcbody

	/* We must assume that the xchal_cpi_store_funcbody macro destroyed
	 * registers a2..a15.
	 */

	/* Restore all working registers saved on entry, then return. */

.Ldone:	l32i	a15, a1, PT_AREG15
	l32i	a14, a1, PT_AREG14
	l32i	a13, a1, PT_AREG13
	l32i	a12, a1, PT_AREG12
	l32i	a11, a1, PT_AREG11
	l32i	a10, a1, PT_AREG10
	l32i	a9, a1, PT_AREG9
	l32i	a8, a1, PT_AREG8
	l32i	a7, a1, PT_AREG7
	l32i	a6, a1, PT_AREG6
	l32i	a5, a1, PT_AREG5
	l32i	a4, a1, PT_AREG4
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1	# restore a1 last; it is the frame base
	rfe

#endif /* XCHAL_EXTRA_SA_SIZE */

/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                            a2                 a3
 */

ENTRY(system_call)
	entry	a1, 32

	/* regs->syscall = regs->areg[2] */

	l32i	a3, a2, PT_AREG2	# a3 = syscall number
	mov	a6, a2			# arg0 for do_syscall_trace_enter: regs
	movi	a4, do_syscall_trace_enter
	s32i	a3, a2, PT_SYSCALL
	callx4	a4

	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table;
	movi	a5, __NR_syscall_count
	movi	a6, -ENOSYS		# default return value if nr is bad
	bgeu	a3, a5, 1f		# out-of-range syscall number

	addx4	a4, a3, a4		# a4 = &sys_call_table[nr]
	l32i	a4, a4, 0
	movi	a5, sys_ni_syscall;
	beq	a4, a5, 1f		# unimplemented syscall -> -ENOSYS

	/* Load args: arg0 - arg5 are passed via regs. */

	l32i	a6, a2, PT_AREG6
	l32i	a7, a2, PT_AREG3
	l32i	a8, a2, PT_AREG4
	l32i	a9, a2, PT_AREG5
	l32i	a10, a2, PT_AREG8
	l32i	a11, a2, PT_AREG9

	/* Pass one additional argument to the syscall: pt_regs (on stack) */

	s32i	a2, a1, 0

	callx4	a4			# invoke the syscall handler

1:	/* regs->areg[2] = return_value */

	s32i	a6, a2, PT_AREG2
	movi	a4, do_syscall_trace_leave
	mov	a6, a2
	callx4	a4
	retw

/*
 * Create a kernel thread
 *
 * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *        a2                  a2           a3              a4
 */

ENTRY(kernel_thread)
	entry	a1, 16

	mov	a5, a2			# preserve fn over syscall
	mov	a7, a3			# preserve args over syscall

	movi	a3, _CLONE_VM | _CLONE_UNTRACED
	movi	a2, __NR_clone
	or	a6, a4, a3		# arg0: flags
	mov	a3, a1			# arg1: sp
	syscall

	beq	a3, a1, 1f		# branch if parent
	mov	a6, a7			# child: args
	callx4	a5			# fn(args)

	movi	a2, __NR_exit
	syscall				# return value of fn(args) still in a6

1:	retw

/*
 * Do a system call from kernel instead of calling sys_execve, so we end up
 * with proper pt_regs.
 *
 * int kernel_execve(const char *fname, char *const argv[], char *const envp[])
 *        a2                  a2                 a3                  a4
 */

ENTRY(kernel_execve)
	entry	a1, 16
	mov	a6, a2			# arg0 is in a6
	movi	a2, __NR_execve
	syscall

	retw

/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                        a2                 a3
 */

ENTRY(_switch_to)
	entry	a1, 16

	mov	a4, a3			# preserve a3 ('next') across calls
	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer

	/* Disable ints while we manipulate the stack pointer; spill regs. */

	movi	a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
	xsr	a5, PS			# a5 = old PS, restored before retw
	rsr	a3, EXCSAVE_1
	rsync
	s32i	a3, a3, EXC_TABLE_FIXUP /* enter critical section */

	call0	_spill_registers

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *	 because the kernel stack will only be loaded again after
	 *	 we return from kernel space.
	 */

	l32i	a0, a4, TASK_THREAD_INFO
	rsr	a3, EXCSAVE_1		# exc_table
	movi	a1, 0
	addi	a0, a0, PT_REGS_OFFSET
	s32i	a1, a3, EXC_TABLE_FIXUP	# FIXUP = 0: leave critical section
	s32i	a0, a3, EXC_TABLE_KSTK

	/* restore context of the task that 'next' addresses */

	l32i	a0, a4, THREAD_RA	/* restore return address */
	l32i	a1, a4, THREAD_SP	/* restore stack pointer */

	wsr	a5, PS			# re-enable interrupts (old PS)
	rsync
	retw

ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in a6 (return value from fake call4 frame)
	 */
	movi	a4, schedule_tail
	callx4	a4

	movi	a4, do_syscall_trace_leave
	mov	a6, a1
	callx4	a4

	j	common_exception_return
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -