thread.h
/* Copyright (C) 2004,2005 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
   02111-1307, USA.
*/
#ifndef _SOS_THREAD_H_
#define _SOS_THREAD_H_

/**
 * @file thread.h
 *
 * SOS Thread management API
 */

#include <sos/errno.h>

/* Forward declaration */
struct sos_thread;

#include <hwcore/cpu_context.h>
#include <sos/sched.h>
#include <sos/kwaitq.h>
#include <sos/time.h>
#include <sos/process.h>
#include <sos/umem_vmm.h>

/**
 * The possible states of a valid thread
 */
typedef enum { SOS_THR_CREATED, /**< Thread created, not fully initialized */
               SOS_THR_READY,   /**< Thread fully initialized or waiting
                                     for CPU after having been blocked or
                                     preempted */
               SOS_THR_RUNNING, /**< Thread currently running on CPU */
               SOS_THR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST
                                     one kwaitq) and/or sleeping (+ in NO
                                     kwaitq) */
               SOS_THR_ZOMBIE,  /**< Thread terminated execution, waiting
                                     to be deleted by kernel */
             } sos_thread_state_t;


/**
 * TCB (Thread Control Block): structure describing a thread. Don't
 * access these fields directly: prefer using the accessor functions
 * below.
 */
struct sos_thread
{
#define SOS_THR_MAX_NAMELEN 32
  char name[SOS_THR_MAX_NAMELEN];

  sos_thread_state_t   state;
  sos_sched_priority_t priority;

  /**
   * The hardware context of the thread.
   *
   * It will reflect the CPU state of the thread:
   *  - From an interrupt handler: the state of the thread at the time
   *    of the OUTERMOST irq. An IRQ is not allowed to make context
   *    switches, so this context will remain valid from the beginning
   *    of the outermost IRQ handler to the end of it, no matter
   *    whether other IRQ handlers nest in one another. You may safely
   *    use it from IRQ handlers to query the state of the interrupted
   *    thread, no matter if other IRQ handlers have executed
   *    meanwhile.
   *  - From normal kernel code, exceptions and syscalls: the state of
   *    the thread the last time there was a context switch from this
   *    thread to another one. Thus this field WON'T reflect the
   *    current thread's cpu_state in these cases. So, in these cases,
   *    simply DO NOT USE IT outside thread.c ! Note: for syscall and
   *    exception handlers, the VALID state of the interrupted thread
   *    is passed as an argument to the handlers.
   */
  struct sos_cpu_state *cpu_state;

  /* Kernel stack parameters */
  sos_vaddr_t kernel_stack_base_addr;
  sos_size_t  kernel_stack_size;

  /* Process this thread belongs to. Always NULL for a kernel thread */
  struct sos_process *process;

  /**
   * Address space currently "squatted" by the thread, or the one that
   * was active when the thread was interrupted/preempted. This is the
   * MMU configuration expected before the cpu_state of the thread is
   * restored on CPU.
   *  - For kernel threads: should normally be NULL, meaning that the
   *    thread will squat the mm_context currently set in the MMU.
   *    Might be NON NULL when a kernel thread squats a given process
   *    to manipulate its address space.
   *  - For user threads: should normally be NULL. More precisely:
   *     - in user mode: the thread->process.mm_context is ALWAYS set
   *       on MMU. squatted_mm_context is ALWAYS NULL in this
   *       situation, meaning that the thread in user mode uses its
   *       process-space as expected
   *     - in kernel mode: NULL means that we keep on using the
   *       mm_context currently set on MMU, which might be the
   *       mm_context of another process. This is natural since a
   *       thread in kernel mode normally only uses data in kernel
   *       space. BTW, this limits the number of TLB flushes. However,
   *       there are exceptions where this squatted_mm_context will
   *       NOT be NULL. One is the copy_from/to_user API, which can
   *       force the effective mm_context so that the MMU will be
   *       (re)configured upon every context switch to the thread to
   *       match the squatted_mm_context. Another exception is when a
   *       parent thread creates the address space of a child process,
   *       in which case the parent thread might temporarily decide to
   *       switch to the child's process space.
   *
   * This is the SOS implementation of the Linux "Lazy TLB" and
   * address-space loaning.
   */
  struct sos_mm_context *squatted_mm_context;

  /* Data specific to each state */
  union
  {
    struct
    {
      struct sos_sched_queue *rdy_queue;
      struct sos_thread      *rdy_prev, *rdy_next;
    } ready;
  }; /* Anonymous union (gcc extension) */

  struct sos_time user_time_spent_in_slice;

  /**
   * When a thread in kernel mode is accessing the user space, it may
   * page fault in the usual way only if return_vaddr below is
   * set. This structure holds information regarding what to do when a
   * page fault from kernel into user space could not be resolved.
   *
   * @note the fields below should be considered read-only. @see
   * sos_thread_prepare_user_space_access() and @see
   * sos_thread_end_user_space_access() to modify them.
   */
  struct
  {
    /** This is the address (in kernel code) to return to when a
        user-space page fault from a kernel-mode thread could not be
        resolved. @see sos_thread_prepare_user_space_access() */
    sos_vaddr_t return_vaddr;

    /** This is the user-space address that caused the unresolved
        page fault (set by the page fault handler) */
    sos_uaddr_t faulted_uaddr;
  } fixup_uaccess;

  /*
   * Data used by the kwaitq subsystem: list of kwaitqueues the thread
   * is waiting for.
   *
   * @note: a RUNNING or READY thread might be in one or more
   * waitqueues ! The only property we have is that, among these
   * waitqueues (if any), _at least_ one has woken the thread.
   */
  struct sos_kwaitq_entry *kwaitq_list;

  /**
   * Some statistics
   */
  struct rusage
  {
    /* Updated by sched.c */
    struct sos_time ru_utime; /* Time spent in user mode */
    struct sos_time ru_stime; /* Time spent in kernel mode */
  } rusage;

  /**
   * Chaining pointers for the list of threads in the parent process
   */
  struct sos_thread *prev_in_process, *next_in_process;

  /**
   * Chaining pointers for the global ("gbl") list of threads (debug)
   */
  struct sos_thread *gbl_prev, *gbl_next;
};


/**
 * Definition of the function executed by a kernel thread
 */
typedef void (*sos_kernel_thread_start_routine_t)(void *arg);


/**
 * Initialize the subsystem responsible for thread management
 *
 * Initialize the primary kernel thread so that it can be handled the
 * same way as an ordinary thread created by sos_thread_create().
 */
sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
                                     sos_size_t init_thread_stack_size);


/**
 * Create a new kernel thread
 */
struct sos_thread *
sos_create_kernel_thread(const char *name,
                         sos_kernel_thread_start_routine_t start_func,
                         void *start_arg,
                         sos_sched_priority_t priority);


/**
 * Create a new user thread
 */
struct sos_thread *
sos_create_user_thread(const char *name,
                       struct sos_process *process,
                       sos_uaddr_t user_initial_PC,
                       sos_ui32_t  user_start_arg1,
                       sos_ui32_t  user_start_arg2,
                       sos_uaddr_t user_initial_SP,
                       sos_sched_priority_t priority);


/**
 * Create a new user thread, copy of the given user thread with the
 * given user context
 */
struct sos_thread *
sos_duplicate_user_thread(const char *name,
                          struct sos_process *process,
                          const struct sos_thread * model_thread,
                          const struct sos_cpu_state * model_uctxt,
                          sos_ui32_t retval);


/**
 * Terminate the execution of the current thread. For kernel threads,
 * it is called by default when the start routine returns.
 */
void sos_thread_exit() __attribute__((noreturn));


/**
 * Get the identifier of the thread currently running on CPU. Trivial
 * function.
 */
struct sos_thread *sos_thread_get_current();


/**
 * If thr == NULL, get the priority of the current thread. Trivial
 * function.
 *
 * @note NOT protected against interrupts
 */
sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr);


/**
 * If thr == NULL, get the state of the current thread. Trivial
 * function.
 *
 * @note NOT protected against interrupts
 */
sos_thread_state_t sos_thread_get_state(struct sos_thread *thr);


/**
 * If thr == NULL, set the priority of the current thread
 *
 * @note NO context-switch ever occurs in this function !
 */
sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                  sos_sched_priority_t priority);


/**
 * Yield CPU to another ready thread.
 *
 * @note This is a BLOCKING FUNCTION
 */
sos_ret_t sos_thread_yield();


/**
 * Release the CPU for (at least) the given delay.
 *
 * @param delay The delay to wait for. If delay == NULL then wait
 * forever, until any event occurs.
 *
 * @return SOS_OK when the delay expired (and delay is reset to zero),
 * -SOS_EINTR otherwise (and delay contains the amount of time
 * remaining).
 *
 * @note This is a BLOCKING FUNCTION
 */
sos_ret_t sos_thread_sleep(/* in/out */struct sos_time *delay);


/**
 * Mark the given thread as READY (if not already ready) even if it is
 * blocked in a kwaitq or in a sleep ! As a result, the interrupted
 * kwaitq/sleep function call of the thread will return with
 * -SOS_EINTR.
 *
 * @return -SOS_EINVAL if the thread does not exist, or -SOS_EFATAL if
 * it is marked ZOMBIE.
 *
 * @note As a result, the return values of the
 * semaphore/mutex/condition/... functions SHOULD ALWAYS be checked !
 * If they are != SOS_OK, then the caller should consider that the
 * resource was not acquired, because somebody woke the thread up by
 * some other means.
 */
sos_ret_t sos_thread_force_unblock(struct sos_thread *thread);


/**
 * Dump the backtrace of the current thread to console and/or bochs
 */
void sos_thread_dump_backtrace(sos_bool_t on_console,
                               sos_bool_t on_bochs);



/* **********************************************
 * Restricted functions
 */


/**
 * Restricted function to indicate that we are about to access the
 * given user address space from inside the kernel.
 *
 * @param dest_as The address space we want to access, or NULL to
 * access the current thread's address space
 *
 * @param fixup_retvaddr When != 0, then dest_as MUST BE NULL (we
 * don't allow controlled access from kernel into user space from a
 * foreign thread). In this case, the page fault handler should accept
 * page faults from the kernel in user space, and resolve them in the
 * usual way. The value in retvaddr is where the page fault handler
 * has to return to in case the page fault remains unresolved. The
 * faulting user-space address is kept in
 * thread->fixup_uaccess.faulted_uaddr
 *
 * @note typical values for fixup_retvaddr are obtained by "Labels as
 * values" (see gcc's doc: operator "&&"). See uaccess.c for example
 * code.
 */
sos_ret_t
sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,
                                     sos_vaddr_t fixup_retvaddr);


/**
 * Restricted function to signal that we are not accessing any user
 * address space anymore
 */
sos_ret_t
sos_thread_end_user_space_access(void);


/**
 * Restricted callback called when a syscall goes back in user mode,
 * to reconfigure the MMU to match the current thread's process MMU
 * context.
 *
 * @note The use of this function is RESERVED to the syscall wrapper
 */
void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state);


/**
 * Restricted callback called when an exception handler goes back to
 * the interrupted thread, to reconfigure the MMU to match the current
 * thread's process MMU context.
 *
 * @note The use of this function is RESERVED to the exception wrappers
 */
void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state);


/**
 * Restricted callback called when an IRQ is entered while the CPU was
 * NOT already servicing any other IRQ (ie the outermost IRQ handler
 * is entered). This callback simply updates the "cpu_state" field so
 * that IRQ handlers always know the state of the interrupted thread,
 * even if they are nested inside other IRQ handlers.
 *
 * @note The use of this function is RESERVED to the irq wrappers
 */
void
sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state);


/**
 * Restricted callback called when the outermost IRQ handler returns,
 * to select the thread to return to. This callback implements:
 *  - preemption of user threads in user mode (time sharing / FIFO)
 *  - non-preemption of user threads in kernel mode (the interrupted
 *    thread is restored on CPU "as is")
 *  - non-preemption of kernel threads (same remark)
 * The MMU is reconfigured correctly to match the address space of the
 * selected thread.
 *
 * @return The CPU context of the thread to return to
 *
 * @note The use of this function is RESERVED to the irq wrappers
 */
struct sos_cpu_state *
sos_thread_prepare_irq_switch_back(void);

#endif /* _SOS_THREAD_H_ */
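
Two short usage sketches follow. They are not part of the original SOS file; they only illustrate how the declarations above are meant to be combined. Identifiers prefixed with demo_ are hypothetical, and the include path <sos/thread.h> as well as the sec/nanosec field names of struct sos_time are assumptions to be checked against the actual SOS tree.

/* Sketch 1 (assumptions noted above): a kernel thread that sleeps in a
   loop and checks the sos_thread_sleep() return value, as the comments
   in thread.h require. */
#include <sos/errno.h>
#include <sos/time.h>
#include <sos/thread.h>   /* assumed install path of this header */

static void demo_thread_body(void *unused_arg)
{
  while (1)
    {
      /* Ask to sleep for ~1 second. The sec/nanosec field names are an
         assumption about struct sos_time. */
      struct sos_time delay;
      delay.sec     = 1;
      delay.nanosec = 0;

      if (sos_thread_sleep(& delay) != SOS_OK)
        {
          /* -SOS_EINTR: sos_thread_force_unblock() woke us before the
             delay expired; `delay' now holds the remaining time */
          break;
        }
      /* Delay fully elapsed: delay has been reset to zero */
    }

  /* Returning from the start routine would also trigger this by default */
  sos_thread_exit();
}

/* The caller supplies the priority, so no scheduler constant is assumed */
static struct sos_thread * demo_spawn(sos_sched_priority_t prio)
{
  return sos_create_kernel_thread("demo", demo_thread_body, NULL, prio);
}

The second sketch shows the "labels as values" pattern hinted at for fixup_retvaddr. The authoritative code is in uaccess.c; this is a simplified illustration, and the -SOS_EINVAL error code on the unresolved-fault path is chosen only for the example.

/* Sketch 2: controlled copy from user space into kernel space.
   Passing NULL as dest_as means "current thread's address space". */
static sos_ret_t demo_copy_from_user(sos_vaddr_t kernel_dest,
                                     sos_uaddr_t user_src,
                                     sos_size_t size)
{
  sos_ret_t retval = SOS_OK;
  char *dst        = (char*) kernel_dest;
  const char *src  = (const char*) user_src;
  sos_size_t i;

  /* Tell the page fault handler where execution must resume if a fault
     on user space cannot be resolved: the address of the local label
     below, taken with gcc's unary "&&" operator */
  sos_thread_prepare_user_space_access(NULL,
                                       (sos_vaddr_t) &&unresolved_fault);

  for (i = 0 ; i < size ; i++)
    dst[i] = src[i];
  goto out;

 unresolved_fault:
  /* Execution resumes here upon an unresolved fault; the faulting
     address is in sos_thread_get_current()->fixup_uaccess.faulted_uaddr */
  retval = -SOS_EINVAL; /* illustrative error code only */

 out:
  sos_thread_end_user_space_access();
  return retval;
}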