⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 thread.c

📁 Simple Operating Systems (简称SOS)是一个可以运行在X86平台上(包括QEMU模拟器)的简单操作系统。
💻 C
📖 第 1 页 / 共 2 页
字号:
  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);  /* Interrupt handlers are NOT allowed to block ! */  SOS_ASSERT_FATAL(! sos_servicing_irq());  myself = (struct sos_thread*)current_thread;  /* Make sure that if we are to be marked "BLOCKED", we have any     reason of effectively being blocked */  if (BLOCK_MYSELF == operation)    {      myself->state = SOS_THR_BLOCKED;    }  /* Identify the next thread */  next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);  /* Avoid context switch if the context does not change */  if (myself != next_thread)    {      /* Sanity checks for the next thread */      sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,						 next_thread->kernel_stack_base_addr,						 next_thread->kernel_stack_size);      /*       * Perform an MMU context switch if needed       */      _prepare_mm_context(next_thread);      /*       * Actual CPU context switch       */      _set_current(next_thread);      sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);            /* Back here ! */      SOS_ASSERT_FATAL(current_thread == myself);      SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);    }  else    {      /* No context switch but still update ID of current thread */      _set_current(next_thread);    }  return SOS_OK;}/** * Helper function to change the thread's priority in all the * waitqueues associated with the thread. 
*/static sos_ret_t _change_waitq_priorities(struct sos_thread *thr,					  sos_sched_priority_t priority){  struct sos_kwaitq_entry *kwq_entry;  int nb_waitqs;  list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs,			     prev_entry_for_thread, next_entry_for_thread)    {      SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq,							    kwq_entry,							    priority));    }  return SOS_OK;}sos_ret_t sos_thread_set_priority(struct sos_thread *thr,				   sos_sched_priority_t priority){  __label__ exit_set_prio;  sos_ui32_t flags;  sos_ret_t retval;  if (! SOS_SCHED_PRIO_IS_VALID(priority))    return -SOS_EINVAL;  if (! thr)    thr = (struct sos_thread*)current_thread;  sos_disable_IRQs(flags);  /* Signal kwaitq subsystem that the priority of the thread in all     the waitq it is waiting in should be updated */  retval = _change_waitq_priorities(thr, priority);  if (SOS_OK != retval)    goto exit_set_prio;  /* Signal scheduler that the thread, currently in a waiting list,     should take into account the change of priority */  if (SOS_THR_READY == thr->state)    retval = sos_sched_change_priority(thr, priority);  /* Update priority */  thr->priority = priority; exit_set_prio:  sos_restore_IRQs(flags);  return retval;}sos_ret_t sos_thread_yield(){  sos_ui32_t flags;  sos_ret_t retval;  sos_disable_IRQs(flags);  retval = _switch_to_next_thread(YIELD_MYSELF);  sos_restore_IRQs(flags);  return retval;}/** * Internal sleep timeout management */struct sleep_timeout_params{  struct sos_thread *thread_to_wakeup;  sos_bool_t timeout_triggered;};/** * Callback called when a timeout happened */static void sleep_timeout(struct sos_timeout_action *act){  struct sleep_timeout_params *sleep_timeout_params    = (struct sleep_timeout_params*) act->routine_data;  /* Signal that we have been woken up by the timeout */  sleep_timeout_params->timeout_triggered = TRUE;  /* Mark the thread ready */  SOS_ASSERT_FATAL(SOS_OK ==		   
sos_thread_force_unblock(sleep_timeout_params					     ->thread_to_wakeup));}sos_ret_t sos_thread_sleep(struct sos_time *timeout){  sos_ui32_t flags;  struct sleep_timeout_params sleep_timeout_params;  struct sos_timeout_action timeout_action;  sos_ret_t retval;  /* Block forever if no timeout is given */  if (NULL == timeout)    {      sos_disable_IRQs(flags);      retval = _switch_to_next_thread(BLOCK_MYSELF);      sos_restore_IRQs(flags);      return retval;    }  /* Initialize the timeout action */  sos_time_init_action(& timeout_action);  /* Prepare parameters used by the sleep timeout callback */  sleep_timeout_params.thread_to_wakeup     = (struct sos_thread*)current_thread;  sleep_timeout_params.timeout_triggered = FALSE;  sos_disable_IRQs(flags);  /* Now program the timeout ! */  SOS_ASSERT_FATAL(SOS_OK ==		   sos_time_register_action_relative(& timeout_action,						     timeout,						     sleep_timeout,						     & sleep_timeout_params));  /* Prepare to block: wait for sleep_timeout() to wakeup us in the     timeout kwaitq, or for someone to wake us up in any other     waitq */  retval = _switch_to_next_thread(BLOCK_MYSELF);  /* Unblocked by something ! */  /* Unblocked by timeout ? */  if (sleep_timeout_params.timeout_triggered)    {      /* Yes */      SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));      retval = SOS_OK;    }  else    {      /* No: We have probably been woken up while in some other	 kwaitq */      SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));      retval = -SOS_EINTR;    }  sos_restore_IRQs(flags);  /* Update the remaining timeout */  memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));  return retval;}sos_ret_t sos_thread_force_unblock(struct sos_thread *thread){  sos_ret_t retval;  sos_ui32_t flags;  if (! thread)    return -SOS_EINVAL;    sos_disable_IRQs(flags);  /* Thread already woken up ? 
*/  retval = SOS_OK;  switch(sos_thread_get_state(thread))    {    case SOS_THR_RUNNING:    case SOS_THR_READY:      /* Do nothing */      break;    case SOS_THR_ZOMBIE:      retval = -SOS_EFATAL;      break;    default:      retval = sos_sched_set_ready(thread);      break;    }  sos_restore_IRQs(flags);  return retval;}void sos_thread_dump_backtrace(sos_bool_t on_console,			       sos_bool_t on_bochs){  sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr;  sos_size_t stack_size    = current_thread->kernel_stack_size;  void backtracer(sos_vaddr_t PC,		  sos_vaddr_t params,		  sos_ui32_t depth,		  void *custom_arg)    {      sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;      /* Get the address of the first 3 arguments from the	 frame. Among these arguments, 0, 1, 2, 3 arguments might be	 meaningful (depending on how many arguments the function may	 take). */      arg1 = (sos_ui32_t*)params;      arg2 = (sos_ui32_t*)(params+4);      arg3 = (sos_ui32_t*)(params+8);      arg4 = (sos_ui32_t*)(params+12);      /* Make sure the addresses of these arguments fit inside the	 stack boundaries */#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \                             && ((sos_vaddr_t)(v) < (u)) )      if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))	arg1 = &invalid;      if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))	arg2 = &invalid;      if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))	arg3 = &invalid;      if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))	arg4 = &invalid;      /* Print the function context for this frame */      if (on_bochs)	sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",			 (unsigned)depth, (unsigned)PC,			 (unsigned)*arg1, (unsigned)*arg2,			 (unsigned)*arg3);      if (on_console)	sos_x86_videomem_printf(23-depth, 3,				SOS_X86_VIDEO_BG_BLUE				  | SOS_X86_VIDEO_FG_LTGREEN,				"[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",			
	(unsigned)depth, PC,				(unsigned)*arg1, (unsigned)*arg2,				(unsigned)*arg3, (unsigned)*arg4);          }  sos_backtrace(NULL, 15, stack_bottom, stack_size,		backtracer, NULL);}/* ********************************************** * Restricted functions */static sos_ret_tchange_current_mm_context(struct sos_mm_context *mm_ctxt){  /* Retrieve the previous mm context */  struct sos_mm_context * prev_mm_ctxt    = current_thread->squatted_mm_context;  /* Update current thread's squatted mm context */  current_thread->squatted_mm_context = mm_ctxt;  /* Update the reference counts and switch the MMU configuration if     needed */  if (mm_ctxt != NULL)    {      sos_mm_context_ref(mm_ctxt); /* Because it is now referenced as				      the squatted_mm_context field of				      the thread */      sos_mm_context_switch_to(mm_ctxt);    }  else    sos_mm_context_unref(prev_mm_ctxt); /* Because it is not referenced as					   the squatted_mm_context field of					   the thread any more */  return SOS_OK;}sos_ret_tsos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,				     sos_vaddr_t fixup_retvaddr){  sos_ret_t  retval;  sos_ui32_t flags;  if (! dest_as)    {      /* Thread is not a user thread: do nothing */      if (! current_thread->process)	return -SOS_EINVAL;      dest_as = sos_process_get_address_space(current_thread->process);    }  else    /* Don't allow to access to an address space different than that       of the current thread if the page fault are allowed ! */    SOS_ASSERT_FATAL(! 
fixup_retvaddr);  sos_disable_IRQs(flags);  SOS_ASSERT_FATAL(NULL == current_thread->squatted_mm_context);  SOS_ASSERT_FATAL(0 == current_thread->fixup_uaccess.return_vaddr);  /* Change the MMU configuration and init the fixup return address */  retval = change_current_mm_context(sos_umem_vmm_get_mm_context(dest_as));  if (SOS_OK == retval)    {      current_thread->fixup_uaccess.return_vaddr  = fixup_retvaddr;      current_thread->fixup_uaccess.faulted_uaddr = 0;          }  sos_restore_IRQs(flags);  return retval;}sos_ret_tsos_thread_end_user_space_access(void){  sos_ret_t  retval;  sos_ui32_t flags;  sos_disable_IRQs(flags);  SOS_ASSERT_FATAL(NULL != current_thread->squatted_mm_context);  /* Don't impose anything regarding the current MMU configuration anymore */  retval = change_current_mm_context(NULL);  current_thread->fixup_uaccess.return_vaddr  = 0;  current_thread->fixup_uaccess.faulted_uaddr = 0;  sos_restore_IRQs(flags);  return retval;}void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state){  /* Don't preempt the current thread */  /*   * Save the state of the interrupted context to make sure that:   *   - The list of threads correctly reflects that the thread is back   *     in user mode   *   - _prepare_mm_context() deals with the correct mm_context   */  current_thread->cpu_state = cpu_state;  /* Perform an MMU context switch if needed */  _prepare_mm_context((struct sos_thread*) current_thread);}void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state){  /* Don't preempt the current thread */  /*   * Save the state of the interrupted context to make sure that:   *   - The list of threads correctly reflects that the thread is   *     running in user or kernel mode   *   - _prepare_mm_context() deals with the correct mm_context   */  current_thread->cpu_state = cpu_state;  /* Perform an MMU context switch if needed */  _prepare_mm_context((struct sos_thread*) 
current_thread);}voidsos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state){  current_thread->cpu_state = interrupted_state;}struct sos_cpu_state *sos_thread_prepare_irq_switch_back(void){  struct sos_thread *myself, *next_thread;  /* In SOS, threads in kernel mode are NEVER preempted from the     interrupt handlers ! */  if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state))    return current_thread->cpu_state;  /*   * Here we are dealing only with possible preemption of user threads   * in user context !   */  /* Make sure the thread actually is a user thread */  SOS_ASSERT_FATAL(current_thread->process != NULL);  /* Save the state of the interrupted context */  myself = (struct sos_thread*)current_thread;  /* Select the next thread to run */  next_thread = sos_reschedule(myself, FALSE);  /* Perform an MMU context switch if needed */  _prepare_mm_context(next_thread);  /* Setup the next_thread's context into the CPU */  _set_current(next_thread);  return next_thread->cpu_state;}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -