
📄 sched_up.c

📁 Source code of rtai-3.1-test3 (Real-Time Application Interface)
💻 C
📖 Page 1 / 4
	} else {
		return -0x7FFFFFFF;
	}
}

#if ALLOW_RR
/* Round robin policy: when the current task has exhausted its time
   quantum, reload the quantum and rotate the task behind its
   equal-priority peers in the ready list. */
#define RR_YIELD() \
if (rt_current->policy > 0) { \
	rt_current->rr_remaining = rt_current->yield_time - rt_times.tick_time; \
	if (rt_current->rr_remaining <= 0) { \
		rt_current->rr_remaining = rt_current->rr_quantum; \
		if (rt_current->state == RT_SCHED_READY) { \
			RT_TASK *task; \
			task = rt_current->rnext; \
			while (rt_current->priority == task->priority) { \
				task = task->rnext; \
			} \
			if (task != rt_current->rnext) { \
				(rt_current->rprev)->rnext = rt_current->rnext; \
				(rt_current->rnext)->rprev = rt_current->rprev; \
				task->rprev = (rt_current->rprev = task->rprev)->rnext = rt_current; \
				rt_current->rnext = task; \
			} \
		} \
	} \
}

#define RR_SETYT() \
	if (new_task->policy > 0) { \
		new_task->yield_time = rt_time_h + new_task->rr_remaining; \
	}

#define RR_SPREMP() \
	if (new_task->policy > 0) { \
		preempt = 1; \
		if (new_task->yield_time < intr_time) { \
			intr_time = new_task->yield_time; \
		} \
	} else { \
		preempt = 0; \
	}

#define RR_TPREMP() \
	if (new_task->policy > 0) { \
		preempt = 1; \
		if (new_task->yield_time < rt_times.intr_time) { \
			rt_times.intr_time = new_task->yield_time; \
		} \
	} else { \
		preempt = (preempt_always || prio == RT_SCHED_LINUX_PRIORITY); \
	}
#else
#define RR_YIELD()
#define RR_SETYT()
#define RR_SPREMP() \
	do { preempt = 0; } while (0)
#define RR_TPREMP() \
	do { preempt = (preempt_always || prio == RT_SCHED_LINUX_PRIORITY); } while (0)
#endif

#define ANTICIPATE

#define EXECTIME
#ifdef EXECTIME
RTIME switch_time;
/* Charge the time elapsed since the last switch to the outgoing
   kernel-space task. */
#define KEXECTIME() \
do { \
	RTIME now; \
	now = rdtsc(); \
	if (!rt_current->lnxtsk) { \
		rt_current->exectime[0] += (now - switch_time); \
	} \
	switch_time = now; \
} while (0)
#else
#define KEXECTIME()
#endif

void rt_schedule(void)
{
	RT_TASK *task, *new_task;
	RTIME intr_time, now;
	int prio, delay, preempt;

	sched_rqsted = 1;
	prio = RT_SCHED_LINUX_PRIORITY;
	task = new_task = &rt_linux_task;

	RR_YIELD();
	if (oneshot_running) {
#ifdef ANTICIPATE
		rt_time_h = rdtsc() + (RTIME)rt_half_tick;
		wake_up_timed_tasks(0);
#endif
		TASK_TO_SCHEDULE();
		RR_SETYT();

		intr_time = shot_fired ? rt_times.intr_time :
			    rt_times.intr_time + (RTIME)rt_times.linux_tick;
		RR_SPREMP();
		task = &rt_linux_task;
		while ((task = task->tnext) != &rt_linux_task) {
			if (task->priority <= prio && task->resume_time < intr_time) {
				intr_time = task->resume_time;
				preempt = 1;
				break;
			}
		}
		if (preempt || (!shot_fired && prio == RT_SCHED_LINUX_PRIORITY)) {
			shot_fired = 1;
			if (preempt) {
				rt_times.intr_time = intr_time;
			}
			delay = (int)(rt_times.intr_time - (now = rdtsc())) - tuned.latency;
			if (delay >= tuned.setup_time_TIMER_CPUNIT) {
				delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
			} else {
				delay = tuned.setup_time_TIMER_UNIT;
				rt_times.intr_time = now + (RTIME)tuned.setup_time_TIMER_CPUNIT;
			}
			rt_set_timer_delay(delay);
		}
	} else {
		TASK_TO_SCHEDULE();
		RR_SETYT();
	}
	if (new_task != rt_current) {
		TRACE_RTAI_SCHED_CHANGE(rt_current->tid, new_task->tid, rt_current->state);
		if (rt_current == &rt_linux_task) {
			rt_switch_to_real_time(0);
			save_cr0_and_clts(linux_cr0);
		}
		if (new_task->uses_fpu) {
			enable_fpu();
			if (new_task != fpu_task) {
				save_fpenv(fpu_task->fpu_reg);
				fpu_task = new_task;
				restore_fpenv(fpu_task->fpu_reg);
			}
		}
		KEXECTIME();
		if (new_task == &rt_linux_task) {
			restore_cr0(linux_cr0);
			rt_switch_to_linux(0);
			/* From now on, the Linux stage is re-enabled,
			   but not sync'ed until we have actually
			   switched to the Linux task, so that we
			   don't end up running the Linux IRQ handlers
			   on behalf of a non-Linux stack
			   context... */
		}
		rt_switch_to(new_task);
		if (rt_current->signal) {
			(*rt_current->signal)();
		}
	}
}
/* Rate Monotonic Scheduling supervisor: reassign base priorities so
   that tasks with shorter periods get higher (numerically lower)
   priorities.  The outer loop merely bounds the number of passes to
   the number of registered tasks; the inner declaration of "task"
   deliberately shadows the outer one. */
void rt_spv_RMS(int cpuid)
{
	RT_TASK *task;
	int prio;

	prio = 0;
	task = &rt_linux_task;
	while ((task = task->next)) {
		RT_TASK *task, *htask;
		RTIME period;
		htask = 0;
		task = &rt_linux_task;
		period = RT_TIME_END;
		while ((task = task->next)) {
			if (task->priority >= 0 && task->policy >= 0 && task->period && task->period < period) {
				period = (htask = task)->period;
			}
		}
		if (htask) {
			htask->priority = -1;
			htask->base_priority = prio++;
		} else {
			goto ret;
		}
	}
ret:	task = &rt_linux_task;
	while ((task = task->next)) {
		if (task->priority < 0) {
			task->priority = task->base_priority;
		}
	}
	return;
}

/**
 * @ingroup tasks
 * @anchor rt_sched_lock
 * @brief Lock the scheduling of tasks.
 *
 * rt_sched_lock locks, on the CPU on which it is called, any
 * scheduler activity, thus preventing a higher priority task from
 * preempting a lower priority one. Locks can be nested, provided
 * unlocks are paired to locks in reversed order. It can be used to
 * synchronize access to data shared among tasks. Note however that
 * under MP the lock is active only for the CPU on which it has been
 * issued, so it cannot be used to avoid races with tasks that can
 * run on any other available CPU.
 * Interrupts are not affected by such calls. Any task that needs
 * rescheduling while a scheduler lock is in place will be scheduled
 * only at the issuing of the last unlock.
 *
 * @note To be used only with RTAI24.x.xx.
 *
 * See also: @ref rt_sched_unlock().
 */
void rt_sched_lock(void)
{
	unsigned long flags;

	hard_save_flags_and_cli(flags);
	if (rt_current->priority >= 0) {
		rt_current->sched_lock_priority = rt_current->priority;
		sched_rqsted = rt_current->priority = -1;
	} else {
		rt_current->priority--;
	}
	hard_restore_flags(flags);
}

/**
 * @ingroup tasks
 * @anchor rt_sched_unlock
 * @brief Unlock the scheduling of tasks.
 *
 * rt_sched_unlock releases, on the CPU on which it is called, a
 * scheduling lock previously placed by rt_sched_lock(). Locks can be
 * nested, provided unlocks are paired to locks in reversed order;
 * scheduler activity resumes only when the outermost lock is
 * released. Note however that under MP the lock is active only for
 * the CPU on which it has been issued, so it cannot be used to avoid
 * races with tasks that can run on any other available CPU.
 * Interrupts are not affected by such calls. Any task that needed
 * rescheduling while the scheduler lock was in place is scheduled at
 * the issuing of the last unlock.
 *
 * @note To be used only with RTAI24.x.xx.
 *
 * See also: @ref rt_sched_lock().
 */
void rt_sched_unlock(void)
{
	unsigned long flags;

	hard_save_flags_and_cli(flags);
	if (rt_current->priority < 0 && !(++rt_current->priority)) {
		if ((rt_current->priority = rt_current->sched_lock_priority) != RT_SCHED_LINUX_PRIORITY) {
			(rt_current->rprev)->rnext = rt_current->rnext;
			(rt_current->rnext)->rprev = rt_current->rprev;
			enq_ready_task(rt_current);
		}
		if (sched_rqsted > 0) {
			rt_schedule();
		}
	}
	hard_restore_flags(flags);
}
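/* A minimal usage sketch (not part of this file): pairing
   rt_sched_lock()/rt_sched_unlock() so that two updates to data
   shared among same-CPU real-time tasks are never split by a
   preemption.  The variables shared_a/shared_b and the helper name
   are hypothetical. */
static int shared_a, shared_b;	/* hypothetical shared state */

static void update_shared_pair(int a, int b)
{
	rt_sched_lock();	/* no task on this CPU can preempt us now */
	shared_a = a;		/* both updates become visible together... */
	shared_b = b;		/* ...to any other task on this CPU */
	rt_sched_unlock();	/* any pending reschedule happens here */
}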
/**
 * @ingroup tasks
 * @anchor rt_task_delete
 * Delete a real time task.
 *
 * rt_task_delete deletes a real time task previously created by
 * @ref rt_task_init() or @ref rt_task_init_cpuid().
 *
 * @param task is the pointer to the task structure. If the task was
 *	  waiting on a queue, i.e. semaphore, mailbox, etc., it is
 *	  removed from such a queue and messaging tasks pending on its
 *	  message queue are unblocked with an error return.
 *
 * @return 0 on success. A negative value on failure as described
 * below:
 * - @b EINVAL: task does not refer to a valid task.
 */
int rt_task_delete(RT_TASK *task)
{
	unsigned long flags;
	QUEUE *q;

	if (task->magic != RT_TASK_MAGIC || task->priority == RT_SCHED_LINUX_PRIORITY) {
		return -EINVAL;
	}

	TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_DELETE, task->tid, 0, 0);

	hard_save_flags_and_cli(flags);
	if (!(task->owndres & SEMHLF) || task == rt_current || rt_current->priority == RT_SCHED_LINUX_PRIORITY) {
		call_exit_handlers(task);
		rem_timed_task(task);
		if (task->blocked_on) {
			(task->queue.prev)->next = task->queue.next;
			(task->queue.next)->prev = task->queue.prev;
			if (task->state & RT_SCHED_SEMAPHORE) {
				if (!((SEM *)(task->blocked_on))->type) {
					((SEM *)(task->blocked_on))->count++;
				} else {
					((SEM *)(task->blocked_on))->count = 1;
				}
			}
		}
		q = &(task->msg_queue);
		while ((q = q->next) != &(task->msg_queue)) {
			rem_timed_task(q->task);
			if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(q->task);
			}
			(q->task)->blocked_on = 0;
		}
		q = &(task->ret_queue);
		while ((q = q->next) != &(task->ret_queue)) {
			rem_timed_task(q->task);
			if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_RETURN | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(q->task);
			}
			(q->task)->blocked_on = 0;
		}
		if (!((task->prev)->next = task->next)) {
			rt_linux_task.prev = task->prev;
		} else {
			(task->next)->prev = task->prev;
		}
		if (fpu_task == task) {
			/* XXX Don't we lose the linux FPU context here? */
			fpu_task = &rt_linux_task;
		}
		frstk_srq.mp[frstk_srq.in] = task->stack_bottom;
		frstk_srq.in = (frstk_srq.in + 1) & (MAX_FRESTK_SRQ - 1);
		task->magic = 0;
		rt_pend_linux_srq(frstk_srq.srq);
		rem_ready_task(task);
		task->state = 0;
		if (task == rt_current) {
			rt_schedule();
		}
	} else {
		task->suspdepth = -0x7FFFFFFF;
	}
	hard_restore_flags(flags);
	return 0;
}
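/* A minimal usage sketch (not part of this file): creating a task and
   tearing it down with rt_task_delete(), e.g. from a module cleanup
   path.  Assumes the usual RTAI rt_task_init() signature from
   rtai_sched.h; my_task, my_thread and the init arguments are
   hypothetical. */
static RT_TASK my_task;

static void my_thread(long arg)
{
	/* ... real-time work ... */
}

static void example_setup_and_teardown(void)
{
	/* 2000-byte stack, priority 0 (highest), no FPU use, no signal handler */
	if (!rt_task_init(&my_task, my_thread, 0, 2000, 0, 0, 0)) {
		/* ... run for a while ... */
		rt_task_delete(&my_task);	/* -EINVAL on a stale/invalid task */
	}
}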
/* The timer interrupt handler: account the tick, propagate the
   periodic tick to Linux when due, wake timed tasks and dispatch the
   highest priority ready task. */
static void rt_timer_handler(void)
{
	RT_TASK *task, *new_task;
	RTIME now;
	int prio, delay, preempt;

	TRACE_RTAI_TIMER(TRACE_RTAI_EV_TIMER_HANDLE_EXPIRY, 0, 0);

	sched_rqsted = 1;
	DO_TIMER_PROPER_OP();
	prio = RT_SCHED_LINUX_PRIORITY;
	task = new_task = &rt_linux_task;

	rt_times.tick_time = oneshot_timer ? rdtsc() : rt_times.intr_time;
	rt_time_h = rt_times.tick_time + (RTIME)rt_half_tick;
	if (rt_times.tick_time >= rt_times.linux_time) {
		rt_times.linux_time += (RTIME)rt_times.linux_tick;
		rt_pend_linux_irq(TIMER_8254_IRQ);
	}
	wake_up_timed_tasks(0);
	RR_YIELD();
	TASK_TO_SCHEDULE();
	RR_SETYT();

	if (oneshot_timer) {
		rt_times.intr_time = rt_times.linux_time > rt_times.tick_time ?
		rt_times.linux_time : rt_times.tick_time + (RTIME)rt_times.linux_tick;
		RR_TPREMP();
		task = &rt_linux_task;
		while ((task = task->tnext) != &rt_linux_task) {
			if (task->priority <= prio && task->resume_time < rt_times.intr_time) {
				rt_times.intr_time = task->resume_time;
				preempt = 1;
				break;
			}
		}
		if ((shot_fired = preempt)) {
			delay = (int)(rt_times.intr_time - (now = rdtsc())) - tuned.latency;
			if (delay >= tuned.setup_time_TIMER_CPUNIT) {
				delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
			} else {
				delay = tuned.setup_time_TIMER_UNIT;
				rt_times.intr_time = now + (RTIME)tuned.setup_time_TIMER_CPUNIT;
			}
			rt_set_timer_delay(delay);
		}
	} else {
		rt_times.intr_time += (RTIME)rt_times.periodic_tick;
		rt_set_timer_delay(0);
	}
	if (new_task != rt_current) {
		if (rt_current == &rt_linux_task) {
			rt_switch_to_real_time(0);
			save_cr0_and_clts(linux_cr0);
		}
		if (new_task->uses_fpu) {
			enable_fpu();
			if (new_task != fpu_task) {
				save_fpenv(fpu_task->fpu_reg);
				fpu_task = new_task;
				restore_fpenv(fpu_task->fpu_reg);
			}
		}
		TRACE_RTAI_SCHED_CHANGE(rt_current->tid, new_task->tid, rt_current->state);
		KEXECTIME();
		rt_switch_to(new_task);
		if (rt_current->signal) {
			(*rt_current->signal)();
		}
	}
	return;
}

/* Catch up Linux timekeeping after long stretches of real-time
   activity by re-pending the Linux timer interrupt when a tick is
   owed. */
static irqreturn_t recover_jiffies(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;

	hard_save_flags_and_cli(flags);
	if (rt_times.tick_time >= rt_times.linux_time) {
		rt_times.linux_time += rt_times.linux_tick;
		rt_pend_linux_irq(TIMER_8254_IRQ);
	}
	hard_restore_flags(flags);
	return RTAI_LINUX_IRQ_HANDLED;
}

int rt_is_hard_timer_running(void)
{
	return (rt_time_h > 0);
}
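/* A minimal usage sketch (not part of this file): the RR_* macros
   above only act on tasks whose policy field is positive.  Assuming
   the usual RTAI rt_set_sched_policy() API is available in this
   build, round robin is enabled per task as below; the task pointer
   and quantum are hypothetical. */
static void example_enable_round_robin(RT_TASK *task)
{
	/* RT_SCHED_RR with a 1 ms quantum (in nanoseconds); equal-priority
	   peers then rotate as implemented in RR_YIELD() above */
	rt_set_sched_policy(task, RT_SCHED_RR, 1000000);
}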
