📄 cpu_context.c
字号:
/* Copyright (C) 2005 David Decotigny Copyright (C) 2000-2004, The KOS team This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */#include <sos/assert.h>#include <sos/klibc.h>#include <drivers/bochs.h>#include <drivers/x86_videomem.h>#include <hwcore/segment.h>#include <hwcore/gdt.h>#include <sos/uaccess.h>#include "cpu_context.h"/** * Here is the definition of a CPU context for IA32 processors. This * is a SOS convention, not a specification given by the IA32 * spec. However there is a strong constraint related to the x86 * interrupt handling specification: the top of the stack MUST be * compatible with the 'iret' instruction, ie there must be the * err_code (might be 0), eip, cs and eflags of the destination * context in that order (see Intel x86 specs vol 3, figure 5-4). * * @note IMPORTANT: This definition MUST be consistent with the way * the registers are stored on the stack in * irq_wrappers.S/exception_wrappers.S !!! Hence the constraint above. 
*/
struct sos_cpu_state {
  /* (Lower addresses) */

  /* These are SOS convention */
  sos_ui16_t  gs;
  sos_ui16_t  fs;
  sos_ui16_t  es;
  sos_ui16_t  ds;
  sos_ui16_t  cpl0_ss;           /* This is ALWAYS the Stack Segment of
                                    the Kernel context (CPL0) of the
                                    interrupted thread, even for a user
                                    thread */
  sos_ui16_t  alignment_padding; /* unused */
  sos_ui32_t  eax;
  sos_ui32_t  ebx;
  sos_ui32_t  ecx;
  sos_ui32_t  edx;
  sos_ui32_t  esi;
  sos_ui32_t  edi;
  sos_ui32_t  ebp;

  /* MUST NEVER CHANGE (dependent on the IA32 iret instruction) */
  sos_ui32_t  error_code;
  sos_vaddr_t eip;
  sos_ui32_t  cs;     /* 32bits according to the specs ! However, the
                         CS register is really 16bits long */
  sos_ui32_t  eflags;

  /* (Higher addresses) */
} __attribute__((packed));


/**
 * The CS value pushed on the stack by the CPU upon interrupt, and
 * needed by the iret instruction, is 32bits long while the real CPU
 * CS register is 16bits only: this macro simply retrieves the CPU
 * "CS" register value from the CS value pushed on the stack by the
 * CPU upon interrupt.
 *
 * The remaining 16bits pushed by the CPU should be considered
 * "reserved" and architecture dependent. IMHO, the specs don't say
 * anything about them. Considering that some architectures generate
 * non-zero values for these 16bits (at least Cyrix), we'd better
 * ignore them.
 */
#define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \
  ( (pushed_ui32_cs_value) & 0xffff )


/**
 * Structure of an interrupted Kernel thread's context: nothing more
 * than the basic saved CPU state above.
 */
struct sos_cpu_kstate
{
  struct sos_cpu_state regs;
} __attribute__((packed));


/**
 * Structure of an interrupted User thread's context. This is almost
 * the same as a kernel context, except that 2 additional values are
 * pushed on the stack before the eflags/cs/eip of the interrupted
 * context: the stack configuration of the interrupted user context.
 *
 * @see Section 6.4.1 of Intel x86 vol 1
 */
struct sos_cpu_ustate
{
  struct sos_cpu_state regs;

  /* Anonymous struct member (gcc extension / C11): its fields are
     accessed directly as uctxt->cpl3_esp / uctxt->cpl3_ss */
  struct
  {
    sos_ui32_t cpl3_esp; /* User (CPL3) stack pointer at restore time */
    sos_ui16_t cpl3_ss;  /* User (CPL3) stack segment */
  };
} __attribute__((packed));


/*
 * Structure of a Task State Segment on the x86 Architecture.
 *
 * @see Intel x86 spec vol 3, figure 6-2
 *
 * @note Such a data structure should not cross any page boundary (see
 * end of section 6.2.1 of Intel spec vol 3). This is the reason why
 * we tell gcc to align it on a 128B boundary (its size is 104B, which
 * is <= 128).
 */
struct x86_tss {

  /**
   * Intel provides a way for a task to switch to another in an
   * automatic way (call gates). In this case, the back_link field
   * stores the source TSS of the context switch. This allows to
   * easily implement coroutines, task backtracking, ... In SOS we
   * don't use TSS for the context switch purpouse, so we always
   * ignore this field.
   * (+0)
   */
  sos_ui16_t back_link;

  sos_ui16_t reserved1;

  /* CPL0 saved context. (+4) */
  sos_vaddr_t esp0;
  sos_ui16_t ss0;

  sos_ui16_t reserved2;

  /* CPL1 saved context. (+12) */
  sos_vaddr_t esp1;
  sos_ui16_t ss1;

  sos_ui16_t reserved3;

  /* CPL2 saved context. (+20) */
  sos_vaddr_t esp2;
  sos_ui16_t ss2;

  sos_ui16_t reserved4;

  /* Interrupted context's saved registers.
(+28) */
  sos_vaddr_t cr3;
  sos_vaddr_t eip;
  sos_ui32_t eflags;
  sos_ui32_t eax;
  sos_ui32_t ecx;
  sos_ui32_t edx;
  sos_ui32_t ebx;
  sos_ui32_t esp;
  sos_ui32_t ebp;
  sos_ui32_t esi;
  sos_ui32_t edi;

  /* +72 */
  sos_ui16_t es;
  sos_ui16_t reserved5;

  /* +76 */
  sos_ui16_t cs;
  sos_ui16_t reserved6;

  /* +80 */
  sos_ui16_t ss;
  sos_ui16_t reserved7;

  /* +84 */
  sos_ui16_t ds;
  sos_ui16_t reserved8;

  /* +88 */
  sos_ui16_t fs;
  sos_ui16_t reserved9;

  /* +92 */
  sos_ui16_t gs;
  sos_ui16_t reserved10;

  /* +96 */
  sos_ui16_t ldtr;
  sos_ui16_t reserved11;

  /* +100 */
  sos_ui16_t debug_trap_flag :1;
  sos_ui16_t reserved12 :15;
  sos_ui16_t iomap_base_addr;

  /* 104 */
} __attribute__((packed, aligned(128)));


/* The one and only TSS of the system: only its ss0 (and, later,
   esp0) fields are ever used — see sos_cpu_context_subsystem_setup()
   below */
static struct x86_tss kernel_tss;


/**
 * Initialize the CPU-context subsystem: reset the kernel TSS, set its
 * CPL0 stack segment, and register it in the GDT.
 *
 * @return SOS_OK (always succeeds)
 */
sos_ret_t sos_cpu_context_subsystem_setup()
{
  /* Reset the kernel TSS */
  memset(&kernel_tss, 0x0, sizeof(kernel_tss));

  /**
   * Now setup the kernel TSS.
   *
   * Considering the privilege change method we choose (cpl3 -> cpl0
   * through a software interrupt), we don't need to initialize a
   * full-fledged TSS. See section 6.4.1 of Intel x86 vol 1. Actually,
   * only a correct value for the kernel esp and ss are required (aka
   * "ss0" and "esp0" fields). Since the esp0 will have to be updated
   * at privilege change time, we don't have to set it up now.
   */
  kernel_tss.ss0 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);

  /* Register this TSS into the gdt */
  sos_gdt_register_kernel_tss((sos_vaddr_t) &kernel_tss);

  return SOS_OK;
}


/**
 * THE main operation of a kernel thread. This routine calls the
 * kernel thread function start_func and calls exit_func when
 * start_func returns.
 */
static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
                          sos_ui32_t start_arg,
                          sos_cpu_kstate_function_arg1_t *exit_func,
                          sos_ui32_t exit_arg)
     __attribute__((noreturn));

static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
                          sos_ui32_t start_arg,
                          sos_cpu_kstate_function_arg1_t *exit_func,
                          sos_ui32_t exit_arg)
{
  start_func(start_arg);
  exit_func(exit_arg);

  /* exit_func is expected never to return: reaching this point is a
     fatal kernel bug (the `! "string"` idiom always evaluates to 0) */
  SOS_ASSERT_FATAL(! "The exit function of the thread should NOT return !");
  for(;;);
}


/**
 * Build the initial CPU context of a new Kernel thread so that, once
 * restored on CPU, it starts executing core_routine(start_func,
 * start_arg, exit_func, exit_arg) on the given stack.
 *
 * @param ctxt         Out: receives the address of the prepared context
 *                     (located inside the given stack area)
 * @param start_func   Thread entry function
 * @param start_arg    Argument passed to start_func
 * @param stack_bottom Lowest address of the thread's kernel stack
 * @param stack_size   Size (bytes) of the thread's kernel stack
 * @param exit_func    Function called when start_func returns
 * @param exit_arg     Argument passed to exit_func
 * @return SOS_OK (always succeeds; arguments are trusted)
 */
sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt,
                              sos_cpu_kstate_function_arg1_t *start_func,
                              sos_ui32_t start_arg,
                              sos_vaddr_t stack_bottom,
                              sos_size_t stack_size,
                              sos_cpu_kstate_function_arg1_t *exit_func,
                              sos_ui32_t exit_arg)
{
  /* We are initializing a Kernel thread's context */
  struct sos_cpu_kstate *kctxt;

  /* This is a critical internal function, so that it is assumed that
     the caller knows what he does: we legitimally assume that values
     for ctxt, start_func, stack_* and exit_func are allways VALID ! */

  /* Setup the stack.
   *
   * On x86, the stack goes downward. Each frame is configured this
   * way (higher addresses first):
   *
   *  - (optional unused space. As of gcc 3.3, this space is 24 bytes)
   *  - arg n
   *  - arg n-1
   *  - ...
   *  - arg 1
   *  - return instruction address: The address the function returns to
   *    once finished
   *  - local variables
   *
   * The remaining of the code should be read from the end upward to
   * understand how the processor will handle it.
   */
  sos_vaddr_t tmp_vaddr = stack_bottom + stack_size;
  sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr;

  /* If needed, poison the stack */
#ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
  memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size);
#elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
  /* NOTE(review): the definition of
     sos_cpu_state_prepare_detect_kernel_stack_overflow() at the end
     of this file takes a leading `ctxt` parameter — this 2-argument
     call looks inconsistent; verify against cpu_context.h */
  sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size);
#endif

  /* Simulate a call to the core_routine() function: prepare its
     arguments */
  *(--stack) = exit_arg;
  *(--stack) = (sos_ui32_t)exit_func;
  *(--stack) = start_arg;
  *(--stack) = (sos_ui32_t)start_func;
  *(--stack) = 0; /* Return address of core_routine => force page fault */

  /*
   * Setup the initial context structure, so that the CPU will execute
   * the function core_routine() once this new context has been
   * restored on CPU
   */

  /* Compute the base address of the structure, which must be located
     below the previous elements */
  tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate);
  kctxt = (struct sos_cpu_kstate*)tmp_vaddr;

  /* Initialize the CPU context structure */
  memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate));

  /* Tell the CPU context structure that the first instruction to
     execute will be that of the core_routine() function */
  kctxt->regs.eip = (sos_ui32_t)core_routine;

  /* Setup the segment registers */
  kctxt->regs.cs
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE); /* Code */
  kctxt->regs.ds
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Data */
  kctxt->regs.es
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Data */
  kctxt->regs.cpl0_ss
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Stack */
  /* fs and gs unused for the moment.
*/

  /* The newly created context is initially interruptible */
  kctxt->regs.eflags = (1 << 9); /* set IF bit */

  /* Finally, update the generic kernel/user thread context */
  *ctxt = (struct sos_cpu_state*) kctxt;

  return SOS_OK;
}


/**
 * Build the initial CPU context of a new User thread. The context is
 * stored at the top of the given kernel stack; once restored on CPU
 * it switches to user mode and starts executing at user_start_PC with
 * the user stack pointer set to user_initial_SP.
 *
 * @param ctxt                Out: receives the address of the prepared
 *                            context (inside the kernel stack area)
 * @param user_start_PC       User-space address of the first instruction
 * @param user_start_arg1     First start argument (passed in EAX)
 * @param user_start_arg2     Second start argument (passed in EBX)
 * @param user_initial_SP     Initial user-space (CPL3) stack pointer
 * @param kernel_stack_bottom Lowest address of the thread's kernel stack
 * @param kernel_stack_size   Size (bytes) of the thread's kernel stack
 * @return SOS_OK (always succeeds; arguments are trusted)
 */
sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **ctxt,
                              sos_uaddr_t user_start_PC,
                              sos_ui32_t user_start_arg1,
                              sos_ui32_t user_start_arg2,
                              sos_uaddr_t user_initial_SP,
                              sos_vaddr_t kernel_stack_bottom,
                              sos_size_t kernel_stack_size)
{
  /* We are initializing a User thread's context */
  struct sos_cpu_ustate *uctxt;

  /* This is a critical internal function, so that it is assumed that
     the caller knows what he does: we legitimally assume that values
     for ctxt, etc. are allways VALID ! */

  /* Compute the address of the CPU state to restore on CPU when
     switching to this new user thread */
  sos_vaddr_t uctxt_vaddr = kernel_stack_bottom
                            + kernel_stack_size
                            - sizeof(struct sos_cpu_ustate);
  uctxt = (struct sos_cpu_ustate*)uctxt_vaddr;

  /* If needed, poison the kernel stack */
#ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
  memset((void*)kernel_stack_bottom,
         SOS_CPU_STATE_STACK_POISON,
         kernel_stack_size);
#elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
  /* NOTE(review): the definition of
     sos_cpu_state_prepare_detect_kernel_stack_overflow() at the end
     of this file takes a leading `ctxt` parameter — this 2-argument
     call looks inconsistent; verify against cpu_context.h */
  sos_cpu_state_prepare_detect_kernel_stack_overflow(kernel_stack_bottom,
                                                     kernel_stack_size);
#endif

  /*
   * Setup the initial context structure, so that the CPU will restore
   * the initial registers' value for the user thread. The
   * user thread argument is passed in the EAX register.
   */
  memset(uctxt, 0x0, sizeof(struct sos_cpu_ustate));

  /* Tell the CPU context structure that the first instruction to
     execute will be located at user_start_PC (in user space) */
  uctxt->regs.eip = (sos_ui32_t)user_start_PC;

  /* Tell the CPU where will be the user stack */
  uctxt->cpl3_esp = user_initial_SP;

  /* The parameter to the start function is not passed by the stack to
     avoid a possible page fault */
  uctxt->regs.eax = user_start_arg1;
  uctxt->regs.ebx = user_start_arg2;

  /* Setup the segment registers */
  uctxt->regs.cs
    = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE); /* Code */
  uctxt->regs.ds
    = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA); /* Data */
  uctxt->regs.es
    = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA); /* Data */
  uctxt->cpl3_ss
    = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA); /* User Stack */

  /* We need also to update the segment for the kernel stack
     segment. It will be used when this context will be restored on
     CPU: initially it will be executing in kernel mode and will
     switch immediatly to user mode */
  uctxt->regs.cpl0_ss
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Kernel Stack */

  /* fs and gs unused for the moment. */

  /* The newly created context is initially interruptible */
  uctxt->regs.eflags = (1 << 9); /* set IF bit */

  /* Finally, update the generic kernel/user thread context */
  *ctxt = (struct sos_cpu_state*) uctxt;

  return SOS_OK;
}


/**
 * Tell whether the given saved context was interrupted while running
 * in user mode (CPL3) or kernel mode (CPL0), by looking at its saved
 * CS register. Any other CS value is a fatal error.
 *
 * @return TRUE for a user context, FALSE for a kernel context
 */
sos_ret_t
sos_cpu_context_is_in_user_mode(const struct sos_cpu_state *ctxt)
{
  /* An interrupted user thread has its CS register set to that of the
     User code segment */
  switch (GET_CPU_CS_REGISTER_VALUE(ctxt->cs))
    {
    case SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE):
      return TRUE;
      break;

    case SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE):
      return FALSE;
      break;

    default:
      SOS_FATAL_ERROR("Invalid saved context Code segment register: 0x%x (k=%x, u=%x) !",
                      (unsigned) GET_CPU_CS_REGISTER_VALUE(ctxt->cs),
                      SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE),
                      SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE));
      break;
    }

  /* Should never get here */
  return -SOS_EFATAL;
}


#if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
/**
 * Poison the bottom of the given kernel stack so that a later scan
 * (see sos_cpu_state_detect_kernel_stack_overflow) can detect whether
 * the poisoned area was overwritten.
 *
 * At most SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW bytes are
 * poisoned, clamped to the actual stack size.
 */
void
sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
                                                   sos_vaddr_t stack_bottom,
                                                   sos_size_t stack_size)
{
  sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;

  /* Never poison more than the stack actually holds */
  if (poison_size > stack_size)
    poison_size = stack_size;

  memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size);
}


void
sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
                                           sos_vaddr_t stack_bottom,
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -