
pod.c

Source code of rtai-3.1-test3 (Real-Time Application Interface)
Language: C
Page 1 of 5
 * release such mutex and switch in another thread atomically. The
 * incoming thread can then grab this mutex while the initial holder
 * is suspended. The nanokernel automatically reacquires the mutex on
 * behalf of the initial holder when it eventually resumes execution.
 * This is a desirable feature which provides a simple and safe way
 * for the upper interfaces to deal with scheduling points inside
 * critical sections. Since the aforementioned mutex is usually
 * defined by a client real-time interface to protect from races when
 * concurrent threads access its internal data structures, it is
 * dubbed the "interface mutex" in the Xenomai documentation.
 *
 * The rescheduling procedure always leads to a null-effect if the
 * scheduler is locked (XNLOCK bit set in the status mask of the
 * running thread), or if it is called on behalf of an interrupt
 * service thread.  (ISVC threads have a separate internal
 * rescheduling procedure named xnpod_schedule_runnable()).
 *
 * Calling this procedure with no applicable context switch pending is
 * harmless and simply leads to a null-effect.
 *
 * Side-effects:
 * - If an asynchronous service routine exists, the pending
 * asynchronous signals are delivered to a resuming thread or on
 * behalf of the caller before it returns from the procedure if no
 * context switch has taken place. This behaviour can be disabled by
 * setting the XNASDI flag in the thread's status mask by calling
 * xnpod_set_thread_mode().
 *
 * - The switch hooks are called on behalf of the resuming thread.
 *
 * - This call may affect the ready queue and switch thread contexts.
 *
 * Context: This routine can be called on behalf of a thread or IST
 * context.
 */

void xnpod_schedule (xnmutex_t *imutex)
{
    xnthread_t *threadout, *threadin, *runthread;
    atomic_counter_t imutexval;
    int doswitch, simutex = 0;
    xnsched_t *sched;
    spl_t s;
#ifdef __KERNEL__
    int shadow;
#endif /* __KERNEL__ */

    /* No immediate rescheduling is possible if an interrupt service
       thread or callout context is active or if the scheduler is
       locked. */

    if (xnpod_callout_p())
        return;

    sched = xnpod_current_sched();
    runthread = sched->runthread;

    /* We have to deal with xnpod_schedule() being called by a regular
       thread that has been resumed through a mutex wakeup operation
       so it can release a mutex claimed by an interrupt service
       thread. In such a case, we just let it release the interface
       mutex as needed on entry; when it later resumes, it will simply
       execute the rescheduling procedure. Interrupt service threads
       can _never_ execute the standard rescheduling code, they always
       use the xnpod_schedule_runnable() routine to switch to the next
       runnable thread. */

    if (xnpod_interrupt_p())
        {
        if (testbits(runthread->status,XNISVC))
            return;
        }
    else if (testbits(runthread->status,XNLOCK))
        {
        /* The running thread has locked the scheduler and is
           still ready to run. Just check for (self-posted)
           pending signals, then exit the procedure. In this
           particular case, we must make sure that the interface
           mutex is free while the ASR is running, since the
           thread might self-delete from the routine.
           */

        if (runthread->signals)
            {
            splhigh(s);

            if (imutex)
                {
                simutex = xnmutex_clear_lock(imutex,&imutexval);

                if (simutex < 0)
                    xnpod_schedule_runnable(runthread,XNPOD_SCHEDLIFO);
                }

            xnpod_dispatch_signals();

            if (simutex)
                xnmutex_set_lock(imutex,&imutexval);

            splexit(s);
            }

        return;
        }

    splhigh(s);

    /* The rescheduling proc automagically releases the interface
       mutex (if given) before switching the runthread out then
       reacquires it/them after switching the thread in so that
       callers can save tricky critical section management. The
       atomicity of the operation is kept while releasing the locks by
       xnmutex_clear_lock() which reschedules but does not switch. */

    if (imutex)
        {
        simutex = xnmutex_clear_lock(imutex,&imutexval);

        if (simutex < 0)
            xnpod_schedule_runnable(runthread,XNPOD_SCHEDLIFO);
        }

    doswitch = 0;

    if (!testbits(runthread->status,XNTHREAD_BLOCK_BITS|XNZOMBIE))
        {
        if (countpq(&sched->readyq) > 0)
            {
            xnthread_t *head = link2thread(getheadpq(&sched->readyq),rlink);

            if (head == runthread)
                doswitch++;
            else if (xnpod_priocompare(head->cprio,runthread->cprio) > 0)
                {
                if (!testbits(runthread->status,XNREADY))
                    /* Preempt the running thread */
                    xnpod_preempt_current_thread();

                doswitch++;
                }
            else if (testbits(runthread->status,XNREADY))
                doswitch++;
            }
        }
    else
        doswitch++;

    /* Clear the rescheduling bit */
    clrbits(nkpod->status,XNSCHED);

    if (!doswitch)
        {
noswitch:
        /* Check for signals (self-posted or posted from an interrupt
           context) in case the current thread keeps
           running. Interface mutex must be released while ASR is
           executed just in case the thread self-deletes from the
           routine. */

        if (runthread->signals)
            xnpod_dispatch_signals();

        if (simutex)
            xnmutex_set_lock(imutex,&imutexval);

        splexit(s);

        return;
        }

    threadout = runthread;
    threadin = link2thread(getpq(&sched->readyq),rlink);

#ifdef CONFIG_RTAI_XENOMAI_DEBUG
    if (!threadin)
        xnpod_fatal("schedule: no thread to schedule?!");
#endif /* CONFIG_RTAI_XENOMAI_DEBUG */

    clrbits(threadin->status,XNREADY);

    if (threadout == threadin &&
        /* Note: the root thread never restarts. */
        !testbits(threadout->status,XNRESTART))
        goto noswitch;

#ifdef __KERNEL__
    shadow = testbits(threadout->status,XNSHADOW);
#endif /* __KERNEL__ */

    if (testbits(threadout->status,XNZOMBIE))
        {
        splexit(s);

        if (countq(&nkpod->tdeleteq) > 0 &&
            !testbits(threadout->status,XNTHREAD_SYSTEM_BITS))
            xnpod_fire_callouts(&nkpod->tdeleteq,threadout);

        splhigh(s);

        sched->runthread = threadin;
        sched->usrthread = threadin;

        if (testbits(threadin->status,XNROOT))
            xnarch_enter_root(xnthread_archtcb(threadin));

        xnthread_cleanup_tcb(threadout);

        xnarch_finalize_and_switch(xnthread_archtcb(threadout),
                                   xnthread_archtcb(threadin));

#ifdef __KERNEL__
        if (shadow)
            /* Reap the user-space mate of a deleted real-time shadow.
               The Linux task has resumed into the Linux domain at the
               last code location executed by the shadow. Remember
               that both sides use the Linux task's stack.
               */
            xnshadow_exit();
#endif /* __KERNEL__ */

        xnpod_fatal("zombie thread %s (%p) will not die...",threadout->name,threadout);
        }

    sched->runthread = threadin;
    sched->usrthread = threadin;

    if (testbits(threadout->status,XNROOT))
        xnarch_leave_root(xnthread_archtcb(threadout));
    else if (testbits(threadin->status,XNROOT))
        xnarch_enter_root(xnthread_archtcb(threadin));

    xnarch_switch_to(xnthread_archtcb(threadout),
                     xnthread_archtcb(threadin));

    runthread = sched->runthread;

#ifdef CONFIG_RTAI_FPU_SUPPORT
    xnpod_switch_fpu();
#endif /* CONFIG_RTAI_FPU_SUPPORT */

#ifdef __KERNEL__
    /* Shadow on entry and root without shadow extension on exit?
       Mmmm... This must be the user-space mate of a deleted real-time
       shadow we've just rescheduled in the Linux domain to have it
       exit properly.  Reap it now. */

    if (shadow &&
        testbits(runthread->status,XNROOT) &&
        xnshadow_ptd(current) == NULL)
        {
        splexit(s);
        xnshadow_exit();
        }
#endif /* __KERNEL__ */

    if (runthread->signals)
        xnpod_dispatch_signals();

    if (simutex)
        xnmutex_set_lock(imutex,&imutexval);

    splexit(s);

    if (nkpod->schedhook)
        nkpod->schedhook(runthread,XNRUNNING);

    if (countq(&nkpod->tswitchq) > 0 &&
        !testbits(runthread->status,XNTHREAD_SYSTEM_BITS))
        xnpod_fire_callouts(&nkpod->tswitchq,runthread);
}

/*!
 * \fn void xnpod_schedule_runnable(xnthread_t *thread, int flags);
 * \brief Hidden rescheduling procedure - INTERNAL.
 *
 * This internal routine should NEVER be used directly by the upper
 * interfaces. It reinserts the given thread into the ready queue then
 * switches to the highest priority runnable thread. It must be called
 * interrupts off.
 *
 * @param thread The descriptor address of the thread to reinsert into
 * the ready queue.
 *
 * @param flags A bitmask composed as follows:
 *
 *        - XNPOD_SCHEDLIFO causes the target thread to be inserted at
 *        front of its priority group in the ready queue. Otherwise,
 *        the FIFO ordering is applied.
 *
 *        - XNPOD_NOSWITCH reorders the ready queue without switching
 *        contexts. This feature is used by the nanokernel mutex code
 *        to preserve the atomicity of some operations.
 */

void xnpod_schedule_runnable (xnthread_t *thread, int flags)
{
    xnsched_t *sched = thread->sched;
    xnthread_t *runthread = sched->runthread, *threadin;

    if (thread != runthread)
        {
        removepq(&sched->readyq,&thread->rlink);

        /* The running thread might be in the process of being blocked
           or reniced but not (un/re)scheduled yet.  Therefore, we
           have to be careful about not spuriously inserting this
           thread into the readyq. */

        if (!testbits(runthread->status,XNTHREAD_BLOCK_BITS|XNREADY))
            {
            /* Since the runthread is preempted, it must be put at
               _front_ of its priority group so that no spurious
               round-robin effect can occur, unless it holds the
               scheduler lock, in which case it is put at front of the
               readyq, regardless of its priority. */

            if (testbits(runthread->status,XNLOCK))
                prependpq(&sched->readyq,&runthread->rlink);
            else
                insertpql(&sched->readyq,&runthread->rlink,runthread->cprio);

            setbits(runthread->status,XNREADY);
            }
        }
    else if (testbits(thread->status,XNTHREAD_BLOCK_BITS))
        /* Same remark as before in the case this routine is called
           with a soon-to-be-blocked running thread as argument.
           */
        goto maybe_switch;

    if (flags & XNPOD_SCHEDLIFO)
        /* Insert LIFO inside priority group */
        insertpql(&sched->readyq,&thread->rlink,thread->cprio);
    else
        /* Insert FIFO inside priority group */
        insertpqf(&sched->readyq,&thread->rlink,thread->cprio);

    setbits(thread->status,XNREADY);

maybe_switch:

    if (flags & XNPOD_NOSWITCH)
        {
        if (testbits(runthread->status,XNREADY))
            {
            removepq(&sched->readyq,&runthread->rlink);
            clrbits(runthread->status,XNREADY);
            }

        return;
        }

    threadin = link2thread(getpq(&sched->readyq),rlink);

#ifdef CONFIG_RTAI_XENOMAI_DEBUG
    if (!threadin)
        xnpod_fatal("schedule_runnable: no thread to schedule?!");
#endif /* CONFIG_RTAI_XENOMAI_DEBUG */

    clrbits(threadin->status,XNREADY);

    if (threadin == runthread)
        return; /* No switch. */

    sched->runthread = threadin;

    if (!testbits(threadin->status,XNTHREAD_SYSTEM_BITS))
        sched->usrthread = threadin;

    if (testbits(runthread->status,XNROOT))
        xnarch_leave_root(xnthread_archtcb(runthread));
    else if (testbits(threadin->status,XNROOT))
        xnarch_enter_root(xnthread_archtcb(threadin));

    if (nkpod->schedhook)
        nkpod->schedhook(runthread,XNREADY);

    xnarch_switch_to(xnthread_archtcb(runthread),
                     xnthread_archtcb(threadin));

#ifdef CONFIG_RTAI_FPU_SUPPORT
    xnpod_switch_fpu();
#endif /* CONFIG_RTAI_FPU_SUPPORT */

    if (nkpod->schedhook && runthread == sched->runthread)
        nkpod->schedhook(runthread,XNRUNNING);
}

/*!
 * \fn void xnpod_set_time(xnticks_t newtime);
 * \brief Set the nanokernel idea of time.
 *
 * The nanokernel tracks the current time as a monotonically increasing
 * count of ticks announced by the timer source since the epoch. The
 * epoch is initially defined by the time the nanokernel has started.
 * This service changes the epoch. Running timers use a different time
 * base and thus are not affected by this operation. The nanokernel time
 * is only accounted when the system timer runs in periodic mode.
 *
 * Side-effect:
 *
 * - This routine does not call the rescheduling procedure.
 *
 * Context: This routine can be called on behalf of a thread or IST
 * context.
 */

void xnpod_set_time (xnticks_t newtime)
{
    spl_t s;

    splhigh(s);
    nkpod->wallclock = newtime;
    setbits(nkpod->status,XNTMSET);
    splexit(s);
}

/*!
 * \fn xnticks_t xnpod_get_time(void);
 * \brief Get the nanokernel idea of time.
 *
 * This service gets the nanokernel (external) clock time.
 *
 * @return The current nanokernel time (in ticks) if the underlying
 * time source runs in periodic mode, or the CPU tick count if the
 * aperiodic mode is in effect, or no timer is running.
 *
 * Side-effect: This routine d
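To make the "interface mutex" behaviour documented above concrete, here is a minimal caller sketch; it is not part of pod.c. Only xnpod_schedule() and the xnmutex_t type come from the listing. The header path, the my_sem structure, iface_mutex, and the helpers iface_mutex_acquire(), iface_mutex_release() and make_waiter_runnable() are hypothetical placeholders standing in for whatever locking and wakeup primitives a real client interface would use.

/* Hypothetical client-interface sketch; not taken from pod.c. */

#include "xenomai/pod.h"   /* assumed header exporting xnpod_schedule() and xnmutex_t */

struct my_sem {
    int count;             /* hypothetical semaphore counter */
    /* ... queue of threads sleeping on the semaphore ... */
};

/* Placeholder primitives standing in for the interface's real ones. */
static void iface_mutex_acquire(xnmutex_t *mutex);
static void iface_mutex_release(xnmutex_t *mutex);
static void make_waiter_runnable(struct my_sem *sem);

static xnmutex_t iface_mutex;  /* guards the interface's internal data */

void my_sem_signal (struct my_sem *sem)
{
    iface_mutex_acquire(&iface_mutex);  /* enter the critical section */

    sem->count++;
    make_waiter_runnable(sem);          /* may ready a higher priority sleeper */

    /* Scheduling point inside the critical section: passing the held
       interface mutex lets the nanokernel release it atomically before
       switching this thread out, then reacquire it on our behalf when
       we resume, as described in the xnpod_schedule() comments above. */
    xnpod_schedule(&iface_mutex);

    iface_mutex_release(&iface_mutex);  /* leave the critical section */
}

If no higher-priority thread became runnable, the call falls into the null-effect path described in the comments and the caller keeps running with the interface mutex still held.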
