📄 cpu_context.c
  }

  /* Should never get here */
  return -SOS_EFATAL;
}


#if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
void
sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
                                                   sos_vaddr_t stack_bottom,
                                                   sos_size_t stack_size)
{
  sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;
  if (poison_size > stack_size)
    poison_size = stack_size;

  memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size);
}


void
sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
                                           sos_vaddr_t stack_bottom,
                                           sos_size_t stack_size)
{
  unsigned char *c;
  int i;

  /* On SOS, "ctxt" corresponds to the address of the esp register of
     the saved context in Kernel mode (always, even for the interrupted
     context of a user thread). Here we make sure that this stack
     pointer is within the allowed stack area */
  SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom);
  SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate)
                   <= stack_bottom + stack_size);

  /* Check that the bottom of the stack has not been altered */
  for (c = (unsigned char*) stack_bottom, i = 0 ;
       (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ;
       c++, i++)
    {
      SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c);
    }
}
#endif


/* =======================================================================
 * Public Accessor functions
 */


sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt)
{
  SOS_ASSERT_FATAL(NULL != ctxt);

  /* This is the PC of the interrupted context (ie kernel or user
     context). */
  return ctxt->eip;
}


sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt)
{
  SOS_ASSERT_FATAL(NULL != ctxt);

  /* 'ctxt' corresponds to the SP of the interrupted context, in Kernel
     mode. We have to test whether the original interrupted context was
     that of a kernel or user thread */
  if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
    {
      struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt;
      return uctxt->cpl3_esp;
    }

  /* On SOS, "ctxt" corresponds to the address of the esp register of
     the saved context in Kernel mode (always, even for the interrupted
     context of a user thread). */
  return (sos_vaddr_t)ctxt;
}


sos_ret_t
sos_cpu_context_set_EX_return_address(struct sos_cpu_state *ctxt,
                                      sos_vaddr_t ret_vaddr)
{
  ctxt->eip = ret_vaddr;
  return SOS_OK;
}


void sos_cpu_context_dump(const struct sos_cpu_state *ctxt)
{
  char buf[128];

  snprintf(buf, sizeof(buf),
           "CPU: eip=%x esp0=%x eflags=%x cs=%x ds=%x ss0=%x err=%x",
           (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags,
           (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds,
           (unsigned)ctxt->cpl0_ss, (unsigned)ctxt->error_code);

  if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
    {
      struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt;
      snprintf(buf, sizeof(buf), "%s esp3=%x ss3=%x",
               buf, (unsigned)uctxt->cpl3_esp, (unsigned)uctxt->cpl3_ss);
    }
  else
    snprintf(buf, sizeof(buf), "%s [KERNEL MODE]", buf);

  sos_bochs_putstring(buf);
  sos_bochs_putstring("\n");
  sos_x86_videomem_putstring(23, 0,
                             SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY,
                             buf);
}


/* =======================================================================
 * Public Accessor functions TO BE USED ONLY BY Exception handlers
 */


sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt)
{
  SOS_ASSERT_FATAL(NULL != ctxt);
  return ctxt->error_code;
}


sos_vaddr_t
sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt)
{
  sos_ui32_t cr2;

  /*
   * See Intel Vol 3 (section 5.14): the faulting virtual address of a
   * page fault is stored in the cr2 register.
   *
   * Actually, we do not store the cr2 register in a saved kernel
   * thread's context. So we retrieve cr2's value directly from the
   * processor. The value we retrieve in an exception handler is
   * actually the correct one because an exception is synchronous with
   * the code causing the fault, and cannot be interrupted since the
   * IDT entries in SOS are "interrupt gates" (ie IRQs are disabled).
   */
  asm volatile ("movl %%cr2, %0"
                : "=r"(cr2)
                : );

  return cr2;
}


/* =======================================================================
 * Public Accessor functions TO BE USED ONLY BY the SYSCALL handler
 */


/*
 * By convention, USER SOS programs always pass 4 arguments to the
 * kernel syscall handler: in eax/../edx. For fewer arguments, the
 * unused registers are filled with 0s. For more arguments, the 4th
 * syscall parameter gives the address of the array containing the
 * remaining arguments. In any case, eax corresponds to the syscall
 * IDentifier.
 */


inline
sos_ret_t sos_syscall_get3args(const struct sos_cpu_state *user_ctxt,
                               /* out */unsigned int *arg1,
                               /* out */unsigned int *arg2,
                               /* out */unsigned int *arg3)
{
  *arg1 = user_ctxt->ebx;
  *arg2 = user_ctxt->ecx;
  *arg3 = user_ctxt->edx;
  return SOS_OK;
}


sos_ret_t sos_syscall_get1arg(const struct sos_cpu_state *user_ctxt,
                              /* out */unsigned int *arg1)
{
  unsigned int unused;
  return sos_syscall_get3args(user_ctxt, arg1, & unused, & unused);
}


sos_ret_t sos_syscall_get2args(const struct sos_cpu_state *user_ctxt,
                               /* out */unsigned int *arg1,
                               /* out */unsigned int *arg2)
{
  unsigned int unused;
  return sos_syscall_get3args(user_ctxt, arg1, arg2, & unused);
}


/*
 * sos_syscall_get3args() is defined in cpu_context.c because it needs
 * to know the structure of a struct sos_cpu_state
 */


sos_ret_t sos_syscall_get4args(const struct sos_cpu_state *user_ctxt,
                               /* out */unsigned int *arg1,
                               /* out */unsigned int *arg2,
                               /* out */unsigned int *arg3,
                               /* out */unsigned int *arg4)
{
  sos_uaddr_t  uaddr_other_args;
  unsigned int other_args[2];
  sos_ret_t    retval;

  /* Retrieve the 3 arguments. The last one is an array containing the
     remaining arguments */
  retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
                                (unsigned int *)& uaddr_other_args);
  if (SOS_OK != retval)
    return retval;

  /* Copy the array containing the remaining arguments from user
     space */
  retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
                                (sos_uaddr_t)uaddr_other_args,
                                sizeof(other_args));
  if (sizeof(other_args) != retval)
    return -SOS_EFAULT;

  *arg3 = other_args[0];
  *arg4 = other_args[1];
  return SOS_OK;
}


sos_ret_t sos_syscall_get5args(const struct sos_cpu_state *user_ctxt,
                               /* out */unsigned int *arg1,
                               /* out */unsigned int *arg2,
                               /* out */unsigned int *arg3,
                               /* out */unsigned int *arg4,
                               /* out */unsigned int *arg5)
{
  sos_uaddr_t  uaddr_other_args;
  unsigned int other_args[3];
  sos_ret_t    retval;

  /* Retrieve the 3 arguments. The last one is an array containing the
     remaining arguments */
  retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
                                (unsigned int *)& uaddr_other_args);
  if (SOS_OK != retval)
    return retval;

  /* Copy the array containing the remaining arguments from user
     space */
  retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
                                (sos_uaddr_t)uaddr_other_args,
                                sizeof(other_args));
  if (sizeof(other_args) != retval)
    return -SOS_EFAULT;

  *arg3 = other_args[0];
  *arg4 = other_args[1];
  *arg5 = other_args[2];
  return SOS_OK;
}


sos_ret_t sos_syscall_get6args(const struct sos_cpu_state *user_ctxt,
                               /* out */unsigned int *arg1,
                               /* out */unsigned int *arg2,
                               /* out */unsigned int *arg3,
                               /* out */unsigned int *arg4,
                               /* out */unsigned int *arg5,
                               /* out */unsigned int *arg6)
{
  sos_uaddr_t  uaddr_other_args;
  unsigned int other_args[4];
  sos_ret_t    retval;

  /* Retrieve the 3 arguments. The last one is an array containing the
     remaining arguments */
  retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
                                (unsigned int *)& uaddr_other_args);
  if (SOS_OK != retval)
    return retval;

  /* Copy the array containing the remaining arguments from user
     space */
  retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
                                (sos_uaddr_t)uaddr_other_args,
                                sizeof(other_args));
  if (sizeof(other_args) != retval)
    return -SOS_EFAULT;

  *arg3 = other_args[0];
  *arg4 = other_args[1];
  *arg5 = other_args[2];
  *arg6 = other_args[3];
  return SOS_OK;
}


sos_ret_t sos_syscall_get7args(const struct sos_cpu_state *user_ctxt,
                               /* out */unsigned int *arg1,
                               /* out */unsigned int *arg2,
                               /* out */unsigned int *arg3,
                               /* out */unsigned int *arg4,
                               /* out */unsigned int *arg5,
                               /* out */unsigned int *arg6,
                               /* out */unsigned int *arg7)
{
  sos_uaddr_t  uaddr_other_args;
  unsigned int other_args[5];
  sos_ret_t    retval;

  /* Retrieve the 3 arguments. The last one is an array containing the
     remaining arguments */
  retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
                                (unsigned int *)& uaddr_other_args);
  if (SOS_OK != retval)
    return retval;

  /* Copy the array containing the remaining arguments from user
     space */
  retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
                                (sos_uaddr_t)uaddr_other_args,
                                sizeof(other_args));
  if (sizeof(other_args) != retval)
    return -SOS_EFAULT;

  *arg3 = other_args[0];
  *arg4 = other_args[1];
  *arg5 = other_args[2];
  *arg6 = other_args[3];
  *arg7 = other_args[4];
  return SOS_OK;
}


sos_ret_t sos_syscall_get8args(const struct sos_cpu_state *user_ctxt,
                               /* out */unsigned int *arg1,
                               /* out */unsigned int *arg2,
                               /* out */unsigned int *arg3,
                               /* out */unsigned int *arg4,
                               /* out */unsigned int *arg5,
                               /* out */unsigned int *arg6,
                               /* out */unsigned int *arg7,
                               /* out */unsigned int *arg8)
{
  sos_uaddr_t  uaddr_other_args;
  unsigned int other_args[6];
  sos_ret_t    retval;

  /* Retrieve the 3 arguments. The last one is an array containing the
     remaining arguments */
  retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
                                (unsigned int *)& uaddr_other_args);
  if (SOS_OK != retval)
    return retval;

  /* Copy the array containing the remaining arguments from user
     space */
  retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
                                (sos_uaddr_t)uaddr_other_args,
                                sizeof(other_args));
  if (sizeof(other_args) != retval)
    return -SOS_EFAULT;

  *arg3 = other_args[0];
  *arg4 = other_args[1];
  *arg5 = other_args[2];
  *arg6 = other_args[3];
  *arg7 = other_args[4];
  *arg8 = other_args[5];
  return SOS_OK;
}


/* =======================================================================
 * Backtrace facility. To be used for DEBUGging purpose ONLY.
 */


sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state,
                         sos_ui32_t max_depth,
                         sos_vaddr_t stack_bottom,
                         sos_size_t stack_size,
                         sos_backtrace_callback_t * backtracer,
                         void *custom_arg)
{
  int depth;
  sos_vaddr_t callee_PC, caller_frame;

  /* Cannot backtrace an interrupted user thread ! */
  if ((NULL != cpu_state)
      && (TRUE == sos_cpu_context_is_in_user_mode(cpu_state)))
    {
      return 0;
    }

  /*
   * Layout of a frame on the x86 (compiler=gcc):
   *
   * funcA calls funcB calls funcC
   *
   *         ....
   *         funcB Argument 2
   *         funcB Argument 1
   *         funcA Return eip
   * frameB: funcA ebp (ie previous stack frame)
   *         ....
   *         (funcB local variables)
   *         ....
   *         funcC Argument 2
   *         funcC Argument 1
   *         funcB Return eip
   * frameC: funcB ebp (ie previous stack frame == A0)  <---- a frame address
   *         ....
   *         (funcC local variables)
   *         ....
   *
   * The presence of "ebp" on the stack depends on 2 things:
   *   + the compiler is gcc
   *   + the source is compiled WITHOUT the -fomit-frame-pointer option
   * In the absence of "ebp", chances are high that the value pushed
   * at that address is outside the stack boundaries, meaning that the
   * function will return -SOS_ENOSUP.
   */

  if (cpu_state)
    {
      callee_PC    = cpu_state->eip;
      caller_frame = cpu_state->ebp;
    }
  else
    {
      /* Skip the sos_backtrace() frame */
      callee_PC    = (sos_vaddr_t)__builtin_return_address(0);
      caller_frame = (sos_vaddr_t)__builtin_frame_address(1);
    }

  for (depth = 0 ; depth < max_depth ; depth++)
    {
      /* Call the callback */
      backtracer(callee_PC, caller_frame + 8, depth, custom_arg);

      /* If the frame address is funky, don't go further */
      if ( (caller_frame < stack_bottom)
           || (caller_frame + 4 >= stack_bottom + stack_size) )
        return depth;

      /* Go to caller frame */
      callee_PC    = *((sos_vaddr_t*) (caller_frame + 4));
      caller_frame = *((sos_vaddr_t*) caller_frame);
    }

  return depth;
}


/* *************************************************************
 * Function to manage the TSS. This function is not really "public":
 * it is reserved to the assembler routines defined in
 * cpu_context_switch.S
 *
 * Update the kernel stack address so that IRQs, syscalls and
 * exceptions return to a correct stack location when coming back into
 * kernel mode.
 */
void
sos_cpu_context_update_kernel_tss(struct sos_cpu_state *next_ctxt)
{
  /* next_ctxt corresponds to an interrupted user thread ? */
  if (sos_cpu_context_is_in_user_mode(next_ctxt))
    {
      /*
       * Yes: "next_ctxt" is an interrupted user thread => we are going
       * to switch to user mode ! Set up the stack address so that the
       * user thread "next_ctxt" can come back to the correct stack
       * location when returning in kernel mode.
       *
       * This stack location corresponds to the SP of the next user
       * thread once its context has been transferred on the CPU, ie
       * once the CPU has executed all the pop/iret instructions of the
       * context switch with privilege change.
       */
      kernel_tss.esp0 = ((sos_vaddr_t)next_ctxt)
                        + sizeof(struct sos_cpu_ustate);
      /* Note: no need to protect this against IRQs because IRQs are
         not allowed to update it by themselves, and they are not
         allowed to block */
    }
  else
    {
      /* No: no need to update the kernel TSS when we stay in kernel
         mode */
    }
}
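
For reference, a minimal user-side sketch of the syscall argument convention documented in the file: eax carries the syscall identifier, ebx/ecx hold the first two arguments, and for more than three arguments edx points to an array containing the rest, which the kernel fetches with sos_memcpy_from_user() in sos_syscall_get5args(). The wrapper name and the interrupt vector (0x42) are assumptions made for this example only; they are not part of the listing above.

/* Illustrative sketch ONLY, not part of cpu_context.c */
static inline int example_syscall5(int id,
                                   unsigned int a1, unsigned int a2,
                                   unsigned int a3, unsigned int a4,
                                   unsigned int a5)
{
  /* Arguments 3..5 go through memory; the kernel copies this array
     back with sos_memcpy_from_user() (see sos_syscall_get5args()) */
  unsigned int rest[3] = { a3, a4, a5 };
  int ret;
  asm volatile ("int $0x42"                 /* assumed SOS syscall vector */
                : "=a" (ret)                /* return value in eax */
                : "0" (id),                 /* syscall identifier in eax */
                  "b" (a1), "c" (a2),       /* first two arguments */
                  "d" (rest)                /* pointer to the remaining ones */
                : "memory");
  return ret;
}

Likewise, a possible callback for sos_backtrace(), written to match the way the backtracer is invoked above (callee PC, address of its arguments on the stack, depth, opaque argument); the exact sos_backtrace_callback_t typedef lives in the corresponding header and is assumed here to match that call site.

/* Illustrative sketch ONLY: dump each frame to the Bochs console */
static void example_backtracer(sos_vaddr_t PC, sos_vaddr_t params,
                               sos_ui32_t depth, void *custom_arg)
{
  char buf[64];
  snprintf(buf, sizeof(buf), "  #%u: PC=0x%x args@0x%x\n",
           (unsigned)depth, (unsigned)PC, (unsigned)params);
  sos_bochs_putstring(buf);
}

/* Possible use, walking the current kernel stack:
   sos_backtrace(NULL, 16, stack_bottom, stack_size, example_backtracer, NULL); */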