
📄 sched.c

📁 GNU Mach microkernel source code, based on the Mach research project at Carnegie Mellon University
💻 C
📖 Page 1 of 3
 * processes.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * This routine must execute atomically.
 */
static inline int waking_non_zero(struct semaphore *sem)
{
	int	ret ;
	long	flags ;

	get_buzz_lock(&sem->lock) ;
	save_flags(flags) ;
	cli() ;

	if ((ret = (sem->waking > 0)))
		sem->waking-- ;

	restore_flags(flags) ;
	give_buzz_lock(&sem->lock) ;
	return(ret) ;
}

/*
 * When __up() is called, the count was negative before
 * incrementing it, and we need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to
 * wake up and exit.  ALL waiting processes actually wake up but
 * only the one that gets to the "waking" field first will gate
 * through and acquire the semaphore.  The others will go back
 * to sleep.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
void __up(struct semaphore *sem)
{
	atomic_inc(&sem->waking) ;
	wake_up(&sem->wait);
}

/*
 * Perform the "down" function.  Return zero for semaphore acquired,
 * return negative for signalled out of the function.
 *
 * If called from __down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from __down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */
int __do_down(struct semaphore * sem, int task_state)
{
	struct task_struct *tsk = current;
	struct wait_queue wait = { tsk, NULL };
	int		  ret = 0 ;

	tsk->state = task_state;
	add_wait_queue(&sem->wait, &wait);

	/*
	 * Ok, we're set up.  sem->count is known to be less than zero
	 * so we must wait.
	 *
	 * We can let go the lock for purposes of waiting.
	 * We re-acquire it after awaking so as to protect
	 * all semaphore operations.
	 *
	 * If "up()" is called before we call waking_non_zero() then
	 * we will catch it right away.  If it is called later then
	 * we will have to go through a wakeup cycle to catch it.
	 *
	 * Multiple waiters contend for the semaphore lock to see
	 * who gets to gate through and who has to wait some more.
	 */
	for (;;)
	{
		if (waking_non_zero(sem))	/* are we waking up? */
		    break ;			/* yes, exit loop */

		if (   task_state == TASK_INTERRUPTIBLE
		    && (tsk->signal & ~tsk->blocked)	/* signalled */
		   )
		{
		    ret = -EINTR ;		/* interrupted */
		    atomic_inc(&sem->count) ;	/* give up on down operation */
		    break ;
		}

		schedule();
		tsk->state = task_state;
	}

	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	return(ret) ;
} /* __do_down */

void __down(struct semaphore * sem)
{
	__do_down(sem,TASK_UNINTERRUPTIBLE) ;
}

int __down_interruptible(struct semaphore * sem)
{
	return(__do_down(sem,TASK_INTERRUPTIBLE)) ;
}
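The wake-all, gate-one protocol that the comments above describe can be studied outside the kernel. Below is a minimal userspace sketch built on POSIX threads; the names gate_sem, gate_down, and gate_up are invented for illustration, pthread_cond_broadcast() stands in for wake_up(&sem->wait), and the count handling that the kernel keeps atomic in <asm/semaphore.h> is simplified to run under a single mutex.

/*
 * Userspace sketch of the "waking" gate: wake every waiter, but let
 * only one of them through per up().  Illustrative names throughout.
 */
#include <pthread.h>

struct gate_sem {
	pthread_mutex_t lock;
	pthread_cond_t  wait;	/* plays the role of sem->wait */
	int count;		/* negative => tasks are sleeping */
	int waking;		/* tickets handed out by gate_up() */
};

static void gate_down(struct gate_sem *s)
{
	pthread_mutex_lock(&s->lock);
	if (--s->count < 0) {
		/* Contended: sleep until waking_non_zero() would succeed. */
		while (s->waking == 0)
			pthread_cond_wait(&s->wait, &s->lock);
		s->waking--;	/* gate through; the rest sleep again */
	}
	pthread_mutex_unlock(&s->lock);
}

static void gate_up(struct gate_sem *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->count++ < 0) {
		s->waking++;				/* cf. atomic_inc(&sem->waking) */
		pthread_cond_broadcast(&s->wait);	/* cf. wake_up(&sem->wait) */
	}
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct gate_sem s = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1, 0
	};
	gate_down(&s);	/* acquires immediately: count goes 1 -> 0 */
	gate_up(&s);	/* no sleepers, so no waking ticket is issued */
	return 0;
}

As with __up(), the broadcast wakes every sleeper, but the waking ticket is consumed under the lock, so exactly one caller of gate_down() leaves its wait loop per gate_up().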
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	save_flags(flags);
	cli();
	__add_wait_queue(p, &wait);
	sti();
	schedule();
	cli();
	__remove_wait_queue(p, &wait);
	restore_flags(flags);
}

void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}

void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}

#define TVN_BITS 6
#define TVR_BITS 8
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

#define SLOW_BUT_DEBUGGING_TIMERS 0

struct timer_vec {
	int index;
	struct timer_list *vec[TVN_SIZE];
};

struct timer_vec_root {
	int index;
	struct timer_list *vec[TVR_SIZE];
};

static struct timer_vec tv5 = { 0 };
static struct timer_vec tv4 = { 0 };
static struct timer_vec tv3 = { 0 };
static struct timer_vec tv2 = { 0 };
static struct timer_vec_root tv1 = { 0 };

static struct timer_vec * const tvecs[] = {
	(struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
};

#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))

static unsigned long timer_jiffies = 0;

static inline void insert_timer(struct timer_list *timer,
				struct timer_list **vec, int idx)
{
	if ((timer->next = vec[idx]))
		vec[idx]->prev = timer;
	vec[idx] = timer;
	timer->prev = (struct timer_list *)&vec[idx];
}

static inline void internal_add_timer(struct timer_list *timer)
{
	/*
	 * must be cli-ed when calling this
	 */
	unsigned long expires = timer->expires;
	unsigned long idx = expires - timer_jiffies;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		insert_timer(timer, tv1.vec, i);
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		insert_timer(timer, tv2.vec, i);
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		insert_timer(timer, tv3.vec, i);
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		insert_timer(timer, tv4.vec, i);
	} else if (expires < timer_jiffies) {
		/* can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		insert_timer(timer, tv1.vec, tv1.index);
	} else if (idx < 0xffffffffUL) {
		int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		insert_timer(timer, tv5.vec, i);
	} else {
		/* Can only get here on architectures with 64-bit jiffies */
		timer->next = timer->prev = timer;
	}
}

void add_timer(struct timer_list *timer)
{
	unsigned long flags;

	save_flags(flags);
	cli();
#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
		       __builtin_return_address(0));
		goto out;
	}
#endif
	internal_add_timer(timer);
#if SLOW_BUT_DEBUGGING_TIMERS
out:
#endif
	restore_flags(flags);
}
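To see what internal_add_timer() is doing: tv1 resolves the next TVR_SIZE (256) ticks at single-tick granularity, and each of tv2 through tv5 spans 64 times the range of the level below it, so a timer's slot is just a bit-field of its expiry. The standalone sketch below reproduces only that index arithmetic; the where() helper is invented for illustration and is not part of this file.

/* Which vector and slot would internal_add_timer() pick? */
#include <stdio.h>

#define TVN_BITS 6
#define TVR_BITS 8
#define TVN_MASK ((1 << TVN_BITS) - 1)
#define TVR_MASK ((1 << TVR_BITS) - 1)

static void where(unsigned long timer_jiffies, unsigned long expires)
{
	unsigned long idx = expires - timer_jiffies;

	if (idx < (1UL << TVR_BITS))
		printf("tv1 slot %lu\n", expires & TVR_MASK);
	else if (idx < (1UL << (TVR_BITS + TVN_BITS)))
		printf("tv2 slot %lu\n", (expires >> TVR_BITS) & TVN_MASK);
	else if (idx < (1UL << (TVR_BITS + 2 * TVN_BITS)))
		printf("tv3 slot %lu\n",
		       (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK);
	else if (idx < (1UL << (TVR_BITS + 3 * TVN_BITS)))
		printf("tv4 slot %lu\n",
		       (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK);
	else
		printf("tv5 slot %lu\n",
		       (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK);
}

int main(void)
{
	where(1000, 1005);			/* 5 ticks out    -> tv1 */
	where(1000, 1000 + 300);		/* 300 ticks out  -> tv2 */
	where(1000, 1000 + (1UL << 20));	/* ~1M ticks out  -> tv4 */
	return 0;
}

Because the slot is derived from the expiry's own bits, a timer never has to be re-sorted: run_timer_list() only cascades an outer vector's current slot down one level when tv1.index wraps around to zero.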
static inline int detach_timer(struct timer_list *timer)
{
	int ret = 0;
	struct timer_list *next, *prev;

	next = timer->next;
	prev = timer->prev;
	if (next) {
		next->prev = prev;
	}
	if (prev) {
		ret = 1;
		prev->next = next;
	}
	return ret;
}

int del_timer(struct timer_list * timer)
{
	int ret;
	unsigned long flags;

	save_flags(flags);
	cli();
	ret = detach_timer(timer);
	timer->next = timer->prev = 0;
	restore_flags(flags);
	return ret;
}

static inline void cascade_timers(struct timer_vec *tv)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer;

	timer = tv->vec[tv->index];
	/*
	 * We are removing _all_ timers from the list, so we don't have to
	 * detach them individually, just clear the list afterwards.
	 */
	while (timer) {
		struct timer_list *tmp = timer;
		timer = timer->next;
		internal_add_timer(tmp);
	}
	tv->vec[tv->index] = NULL;
	tv->index = (tv->index + 1) & TVN_MASK;
}

static inline void run_timer_list(void)
{
	cli();
	while ((long)(jiffies - timer_jiffies) >= 0) {
		struct timer_list *timer;

		if (!tv1.index) {
			int n = 1;
			do {
				cascade_timers(tvecs[n]);
			} while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
		}
		while ((timer = tv1.vec[tv1.index])) {
			void (*fn)(unsigned long) = timer->function;
			unsigned long data = timer->data;

			detach_timer(timer);
			timer->next = timer->prev = NULL;
			sti();
			fn(data);
			cli();
		}
		++timer_jiffies;
		tv1.index = (tv1.index + 1) & TVR_MASK;
	}
	sti();
}

static inline void run_old_timers(void)
{
	struct timer_struct *tp;
	unsigned long mask;

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}

void tqueue_bh(void)
{
	run_task_queue(&tq_timer);
}

void immediate_bh(void)
{
	run_task_queue(&tq_immediate);
}

unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 */
unsigned long avenrun[3] = { 0,0,0 };

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	struct task_struct **p;
	unsigned long nr = 0;

	for(p = &LAST_TASK; p > &FIRST_TASK; --p)
		if (*p && ((*p)->state == TASK_RUNNING ||
			   (*p)->state == TASK_UNINTERRUPTIBLE ||
			   (*p)->state == TASK_SWAPPING))
			nr += FIXED_1;
#ifdef __SMP__
	nr -= (smp_num_cpus-1)*FIXED_1;
#endif
	return nr;
}

static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
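calc_load() above folds the sampled task count into avenrun[] through the CALC_LOAD macro, a fixed-point exponential moving average taken every LOAD_FREQ ticks. The sketch below assumes the classic <linux/sched.h> constants (FSHIFT = 11, so FIXED_1 = 2048, and EXP_1 = 1884 for the one-minute decay); treat those exact values as an assumption rather than something this page shows.

/* Standalone sketch of the avenrun[] fixed-point average. */
#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1 << FSHIFT)
#define EXP_1	1884	/* 1/exp(5sec/1min) in fixed-point (assumed value) */

#define CALC_LOAD(load, exp, n) \
	load *= exp; \
	load += (n)*(FIXED_1 - (exp)); \
	load >>= FSHIFT;

int main(void)
{
	unsigned long load = 0;
	unsigned long active = 3 * FIXED_1;	/* three runnable tasks */

	/* Feed the same sample for a minute of 5-second intervals: the
	 * one-minute average climbs toward 3.00, reaching about 63% of
	 * the way after a full minute, as an exponential average should. */
	for (int i = 0; i < 12; i++) {
		CALC_LOAD(load, EXP_1, active);
		printf("after %2ds: %lu.%02lu\n", (i + 1) * 5,
		       load >> FSHIFT,
		       ((load & (FIXED_1 - 1)) * 100) >> FSHIFT);
	}
	return 0;
}

Everything stays in integers: the load is kept scaled up by FIXED_1, and the final shift by FSHIFT divides the weighted sum back down, which is why count_active_tasks() counts in units of FIXED_1 rather than 1.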
 */
static void second_overflow(void)
{
    long ltemp;

    /* Bump the maxerror field */
    time_maxerror += time_tolerance >> SHIFT_USEC;
    if ( time_maxerror > NTP_PHASE_LIMIT ) {
	time_maxerror = NTP_PHASE_LIMIT;
	time_state = TIME_ERROR;	/* p. 17, sect. 4.3, (b) */
	time_status |= STA_UNSYNC;
    }

    /*
     * Leap second processing. If in leap-insert state at
     * the end of the day, the system clock is set back one
     * second; if in leap-delete state, the system clock is
     * set ahead one second. The microtime() routine or
     * external clock driver will insure that reported time
     * is always monotonic. The ugly divides should be
     * replaced.
     */
    switch (time_state) {
    case TIME_OK:
	if (time_status & STA_INS)
	    time_state = TIME_INS;
	else if (time_status & STA_DEL)
	    time_state = TIME_DEL;
	break;

    case TIME_INS:
	if (xtime.tv_sec % 86400 == 0) {
	    xtime.tv_sec--;
	    time_state = TIME_OOP;
	    printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
	}
	break;

    case TIME_DEL:
	if ((xtime.tv_sec + 1) % 86400 == 0) {
	    xtime.tv_sec++;
	    time_state = TIME_WAIT;
	    printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
	}
	break;

    case TIME_OOP:
	time_state = TIME_WAIT;
	break;

    case TIME_WAIT:
	if (!(time_status & (STA_INS | STA_DEL)))
	    time_state = TIME_OK;
    }

    /*
     * Compute the phase adjustment for the next second. In
     * PLL mode, the offset is reduced by a fixed factor
     * times the time constant. In FLL mode the offset is
     * used directly. In either mode, the maximum phase
     * adjustment for each second is clamped so as to spread
     * the adjustment over not more than the number of
     * seconds between updates.
     */
    if (time_offset < 0) {
	ltemp = -time_offset;
	if (!(time_status & STA_FLL))
	    ltemp >>= SHIFT_KG + time_constant;
	if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
	    ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
	time_offset += ltemp;
	time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
    } else {
	ltemp = time_offset;
	if (!(time_status & STA_FLL))
	    ltemp >>= SHIFT_KG + time_constant;
	if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
	    ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
	time_offset -= ltemp;
	time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
    }

    /*
     * Compute the frequency estimate and additional phase
     * adjustment due to frequency error for the next
     * second. When the PPS signal is engaged, gnaw on the
     * watchdog counter and update the frequency computed by
     * the pll and the PPS signal.
     */
    pps_valid++;
    if (pps_valid == PPS_VALID) {	/* PPS signal lost */
	pps_jitter = MAXTIME;
	pps_stabil = MAXFREQ;
	time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			 STA_PPSWANDER | STA_PPSERROR);
    }
    ltemp = time_freq + pps_freq;
    if (ltemp < 0)
	time_adj -= -ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
    else
	time_adj += ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
    /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
     * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
     */
    if (time_adj < 0)
	time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
    else
	time_adj += (time_adj >> 2) + (time_adj >> 5);
#endif
}
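The #if HZ == 100 block closing second_overflow() exists because the surrounding math assumes ticks-per-second is 1 << SHIFT_HZ = 128; two shifts approximate the missing 128/100 rescale. A quick standalone check of that arithmetic:

/* x + x/4 + x/32 = 1.28125 * x, against the wanted 1.28 * x. */
#include <stdio.h>

int main(void)
{
	long time_adj = 100000;
	long shifted = time_adj + (time_adj >> 2) + (time_adj >> 5);

	printf("shifted: %ld, exact 128/100: %ld\n",
	       shifted, time_adj * 128 / 100);	/* 128125 vs 128000 */
	return 0;
}

Using shifts here keeps the per-second path free of a division, at the cost of the small error the comment cites.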
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
	if ( (time_adjust_step = time_adjust) != 0 ) {
	    /* We are doing an adjtime thing.
	     *
	     * Prepare time_adjust_step to be within bounds.
	     * Note that a positive time_adjust means we want the clock
	     * to run faster.
	     *
	     * Limit the amount of the step to be in the range
	     * -tickadj .. +tickadj
	     */
	     if (time_adjust > tickadj)
		time_adjust_step = tickadj;
	     else if (time_adjust < -tickadj)
		time_adjust_step = -tickadj;

	    /* Reduce by this step the amount of time left */
	    time_adjust -= time_adjust_step;
	}
	xtime.tv_usec += tick + time_adjust_step;

	/*
	 * Advance the phase, once it gets to one microsecond, then
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		long ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec -= ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		long ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += ltemp;
	}
}

/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks,
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks)
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
	} while (ticks);

	if (xtime.tv_usec >= 1000000) {
	    xtime.tv_usec -= 1000000;
	    xtime.tv_sec++;
	    second_overflow();
	}
}

static inline void do_process_times(struct task_struct *p,
	unsigned long user, unsigned long system)
{
	long psecs;

	p->utime += user;
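For the adjtime() slew at the top of update_wall_time_one_tick(): the pending offset is paid out at most tickadj microseconds per tick, so the wall clock drifts smoothly to the requested time instead of jumping. Below is a minimal userspace sketch of just that clamping loop; TICKADJ and the 10000-microsecond tick are illustrative stand-ins, not the kernel's values.

/* Pay out a 23-microsecond adjustment at <= TICKADJ usec per tick. */
#include <stdio.h>

#define TICKADJ 5	/* max adjustment per tick (illustrative) */

int main(void)
{
	long time_adjust = 23;	/* microseconds still owed to the clock */
	long tick = 10000;	/* nominal usec per tick at HZ=100 */
	long usec = 0;

	while (time_adjust != 0) {
		long step = time_adjust;

		/* Clamp to -TICKADJ .. +TICKADJ, as the kernel does. */
		if (step > TICKADJ)
			step = TICKADJ;
		else if (step < -TICKADJ)
			step = -TICKADJ;

		time_adjust -= step;
		usec += tick + step;
		printf("tick advanced %ld usec, %ld still owed\n",
		       tick + step, time_adjust);
	}
	printf("total advanced: %ld usec over the slew\n", usec);
	return 0;
}

The sign handling means a negative adjustment slows the clock by shaving at most TICKADJ microseconds off each tick, never running it backwards, which preserves monotonic time just as the NTP comments above require.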
