
📄 sched.c

📁 GNU Mach microkernel source code, based on Carnegie Mellon University's Mach research project
💻 C
📖 Page 1 of 3
/*
 *  linux/kernel/sched.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1996-04-21	Modified by Ulrich Windl to make NTP work
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *  1997-09-10	Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 */

/*
 * 'sched.c' is the main kernel file. It contains scheduling primitives
 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
 * call functions (type getpid()), which just extract a field from
 * current-task
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fdreg.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/tqueue.h>
#include <linux/resource.h>
#include <linux/mm.h>
#include <linux/smp.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

#include <linux/timex.h>

/*
 * kernel variables
 */

int securelevel = 0;			/* system security level */

long tick = (1000000 + HZ/2) / HZ;	/* timer interrupt period */
volatile struct timeval xtime;		/* The current time */
int tickadj = 500/HZ ? 500/HZ : 1;	/* microsecs */

DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_ERROR;	/* clock synchronization status */
int time_status = STA_UNSYNC;	/* clock status bits */
long time_offset = 0;		/* time adjustment (us) */
long time_constant = 2;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us) */
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us) */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC;	/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjust (scaled 1 / HZ) */
long time_reftime = 0;		/* time at last adjustment (s) */

long time_adjust = 0;
long time_adjust_step = 0;

int need_resched = 0;
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned int * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

extern unsigned long get_wchan(struct task_struct *);

static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;

struct task_struct *current_set[NR_CPUS];
struct task_struct *last_task_used_math = NULL;

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };

static inline void add_to_runqueue(struct task_struct * p)
{
#ifdef __SMP__
	int cpu=smp_processor_id();
#endif
#if 1	/* sanity tests */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	if (p->policy != SCHED_OTHER || p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	(p->prev_run = init_task.prev_run)->next_run = p;
	p->next_run = &init_task;
	init_task.prev_run = p;
#ifdef __SMP__
	/* this is safe only if called with cli()*/
	while(set_bit(31,&smp_process_available))
	{
		while(test_bit(31,&smp_process_available))
		{
			if(clear_bit(cpu,&smp_invalidate_needed))
			{
				local_flush_tlb();
				set_bit(cpu,&cpu_callin_map[0]);
			}
		}
	}
	smp_process_available++;
	clear_bit(31,&smp_process_available);
	if ((0!=p->pid) && smp_threads_ready)
	{
		int i;
		for (i=0;i<smp_num_cpus;i++)
		{
			if (0==current_set[cpu_logical_map[i]]->pid)
			{
				smp_message_pass(cpu_logical_map[i], MSG_RESCHEDULE, 0L, 0);
				break;
			}
		}
	}
#endif
}

static inline void del_from_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

#if 1	/* sanity tests */
	if (!next || !prev) {
		printk("task not on run-queue\n");
		return;
	}
#endif
	if (p == &init_task) {
		static int nr = 0;
		if (nr < 5) {
			nr++;
			printk("idle task may not sleep\n");
		}
		return;
	}
	nr_running--;
	next->prev_run = prev;
	prev->next_run = next;
	p->next_run = NULL;
	p->prev_run = NULL;
}

static inline void move_last_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

	/* remove from list */
	next->prev_run = prev;
	prev->next_run = next;
	/* add back to list */
	p->next_run = &init_task;
	prev = init_task.prev_run;
	init_task.prev_run = p;
	p->prev_run = prev;
	prev->next_run = p;
}

/*
 * Wake up a process. Put it on the run-queue if it's not
 * already there.  The "current" process is always on the
 * run-queue (except when the actual re-schedule is in
 * progress), and as such you're allowed to do the simpler
 * "current->state = TASK_RUNNING" to mark yourself runnable
 * without the overhead of this.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	p->state = TASK_RUNNING;
	if (!p->next_run)
		add_to_runqueue(p);
	restore_flags(flags);
}

static void process_timeout(unsigned long __data)
{
	struct task_struct * p = (struct task_struct *) __data;

	p->timeout = 0;
	wake_up_process(p);
}

/*
 * This is the function that decides how desirable a process is..
 * You can weigh different processes against each other depending
 * on what CPU they've run on lately etc to try to handle cache
 * and TLB miss penalties.
 *
 * Return values:
 *	 -1000: never select this
 *	     0: out of time, recalculate counters (but it might still be
 *		selected)
 *	   +ve: "goodness" value (the larger, the better)
 *	 +1000: realtime process, select this.
 */
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
	int weight;

#ifdef __SMP__
	/* We are not permitted to run a task someone else is running */
	if (p->processor != NO_PROC_ID)
		return -1000;
#ifdef PAST_2_0
	/* This process is locked to a processor group */
	if (p->processor_mask && !(p->processor_mask & (1<<this_cpu)))
		return -1000;
#endif
#endif

	/*
	 * Realtime process, select the first one on the
	 * runqueue (taking priorities within processes
	 * into account).
	 */
	if (p->policy != SCHED_OTHER)
		return 1000 + p->rt_priority;

	/*
	 * Give the process a first-approximation goodness value
	 * according to the number of clock-ticks it has left.
	 *
	 * Don't do any other calculations if the time slice is
	 * over..
	 */
	weight = p->counter;
	if (weight) {

#ifdef __SMP__
		/* Give a largish advantage to the same processor...   */
		/* (this is equivalent to penalizing other processors) */
		if (p->last_processor == this_cpu)
			weight += PROC_CHANGE_PENALTY;
#endif

		/* .. and a slight advantage to the current process */
		if (p == prev)
			weight += 1;
	}

	return weight;
}

/*
  The following allow_interrupts function is used to workaround a rare but
  nasty deadlock situation that is possible for 2.0.x Intel SMP because it uses
  a single kernel lock and interrupts are only routed to the boot CPU.  There
  are two deadlock scenarios this code protects against.

  The first scenario is that if a CPU other than the boot CPU holds the kernel
  lock and needs to wait for an operation to complete that itself requires an
  interrupt, there is a deadlock since the boot CPU may be able to accept the
  interrupt but will not be able to acquire the kernel lock to process it.

  The workaround for this deadlock requires adding calls to allow_interrupts to
  places where this deadlock is possible.  These places are known to be present
  in buffer.c and keyboard.c.  It is also possible that there are other such
  places which have not been identified yet.  In order to break the deadlock,
  the code in allow_interrupts temporarily yields the kernel lock directly to
  the boot CPU to allow the interrupt to be processed.  The boot CPU interrupt
  entry code indicates that it is spinning waiting for the kernel lock by
  setting the smp_blocked_interrupt_pending variable.  This code notices that
  and manipulates the active_kernel_processor variable to yield the kernel lock
  without ever clearing it.  When the interrupt has been processed, the
  saved_active_kernel_processor variable contains the value for the interrupt
  exit code to restore, either the APICID of the CPU that granted it the kernel
  lock, or NO_PROC_ID in the normal case where no yielding occurred.  Restoring
  active_kernel_processor from saved_active_kernel_processor returns the kernel
  lock back to the CPU that yielded it.

  The second form of deadlock is even more insidious.  Suppose the boot CPU
  takes a page fault and then the previous scenario ensues.  In this case, the
  boot CPU would spin with interrupts disabled waiting to acquire the kernel
  lock.  To resolve this deadlock, the kernel lock acquisition code must enable
  interrupts briefly so that the pending interrupt can be handled as in the
  case above.

  An additional form of deadlock is where kernel code running on a non-boot CPU
  waits for the jiffies variable to be incremented.  This deadlock is avoided
  by having the spin loops in ENTER_KERNEL increment jiffies approximately
  every 10 milliseconds.  Finally, if approximately 60 seconds elapse waiting
  for the kernel lock, a message will be printed if possible to indicate that a
  deadlock has been detected.
		Leonard N. Zubkoff
		   4 August 1997
*/

#if defined(__SMP__) && defined(__i386__)

volatile unsigned char smp_blocked_interrupt_pending = 0;
volatile unsigned char saved_active_kernel_processor = NO_PROC_ID;

void allow_interrupts(void)
{
  if (smp_processor_id() == boot_cpu_id) return;
  if (smp_blocked_interrupt_pending)
    {
      unsigned long saved_kernel_counter;
      long timeout_counter;
      saved_active_kernel_processor = active_kernel_processor;
      saved_kernel_counter = kernel_counter;
      kernel_counter = 0;
      active_kernel_processor = boot_cpu_id;
      timeout_counter = 6000000;
      while (active_kernel_processor != saved_active_kernel_processor &&
	     --timeout_counter >= 0)
	{
	  udelay(10);
	  barrier();
	}
      if (timeout_counter < 0)
	panic("FORWARDED INTERRUPT TIMEOUT (AKP = %d, Saved AKP = %d)\n",
	      active_kernel_processor, saved_active_kernel_processor);
      kernel_counter = saved_kernel_counter;
      saved_active_kernel_processor = NO_PROC_ID;
    }
}

#else

void allow_interrupts(void) {}

#endif

/*
 *  'schedule()' is the scheduler function. It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * The goto is "interesting".
 *
 *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 * information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * prev, * next;
	unsigned long timeout = 0;
	int this_cpu=smp_processor_id();
/* check alarm, wake up any interruptible tasks that have got a signal */

	allow_interrupts();

	if (intr_count)
		goto scheduling_in_interrupt;

	if (bh_active & bh_mask) {
		intr_count = 1;
		do_bottom_half();
		intr_count = 0;
	}

	run_task_queue(&tq_scheduler);

	need_resched = 0;
	prev = current;
	cli();
	/* move an exhausted RR process to be last.. */
	if (!prev->counter && prev->policy == SCHED_RR) {
		prev->counter = prev->priority;
		move_last_runqueue(prev);
	}
	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			if (prev->signal & ~prev->blocked)
				goto makerunnable;
			timeout = prev->timeout;
			if (timeout && (timeout <= jiffies)) {
				prev->timeout = 0;
				timeout = 0;
		makerunnable:
				prev->state = TASK_RUNNING;
				break;
			}
		default:
			del_from_runqueue(prev);
		case TASK_RUNNING:
	}
	p = init_task.next_run;
	sti();

#ifdef __SMP__
	/*
	 *	This is safe as we do not permit re-entry of schedule()
	 */
	prev->processor = NO_PROC_ID;
#define idle_task (task[cpu_number_map[this_cpu]])
#else
#define idle_task (&init_task)
#endif

/*
 * Note! there may appear new tasks on the run-queue during this, as
 * interrupts are enabled. However, they will be put on front of the
 * list, so our list starting at "p" is essentially fixed.
 */

/* this is the scheduler proper: */
	c = -1000;
	next = idle_task;
	while (p != &init_task) {
		int weight = goodness(p, prev, this_cpu);
		if (weight > c)
			c = weight, next = p;
		p = p->next_run;
	}

	/* if all runnable processes have "counter == 0", re-calculate counters */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
#ifdef __SMP__
	/*
	 *	Allocate process to CPU
	 */

	 next->processor = this_cpu;
	 next->last_processor = this_cpu;

#endif
#ifdef __SMP_PROF__
	/* mark processor running an idle thread */
	if (0==next->pid)
		set_bit(this_cpu,&smp_idle_map);
	else
		clear_bit(this_cpu,&smp_idle_map);
#endif
	if (prev != next) {
		struct timer_list timer;

		kstat.context_swtch++;
		if (timeout) {
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) prev;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		get_mmu_context(next);
		switch_to(prev,next);
		if (timeout)
			del_timer(&timer);
	}
	return;

scheduling_in_interrupt:
	printk("Aiee: scheduling in interrupt %p\n",
		__builtin_return_address(0));
}

#ifndef __alpha__

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

/*
 * wake_up doesn't wake up stopped processes - they have to be awakened
 * with signals or similar.
 *
 * Note that this doesn't need cli-sti pairs: interrupts may not change
 * the wait-queue structures directly, but only call wake_up() to wake
 * a process. The process itself must remove the queue once it has woken.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *next;
	struct wait_queue *head;

	if (!q || !(next = *q))
		return;
	head = WAIT_QUEUE_HEAD(q);
	while (next != head) {
		struct task_struct *p = next->task;
		next = next->next;
		if (p != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE))
				wake_up_process(p);
		}
		if (!next)
			goto bad;
	}
	return;
bad:
	printk("wait_queue is bad (eip = %p)\n",
		__builtin_return_address(0));
	printk("        q = %p\n",q);
	printk("       *q = %p\n",*q);
}

void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *next;
	struct wait_queue *head;

	if (!q || !(next = *q))
		return;
	head = WAIT_QUEUE_HEAD(q);
	while (next != head) {
		struct task_struct *p = next->task;
		next = next->next;
		if (p != NULL) {
			if (p->state == TASK_INTERRUPTIBLE)
				wake_up_process(p);
		}
		if (!next)
			goto bad;
	}
	return;
bad:
	printk("wait_queue is bad (eip = %p)\n",
		__builtin_return_address(0));
	printk("        q = %p\n",q);
	printk("       *q = %p\n",*q);
}

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to sleep, while the "waking" variable is
 * incremented when the "up()" code goes to wake up waiting
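The selection loop in schedule() above picks the runnable task with the highest goodness() value, and refills every task's counter from its priority once all normal tasks have used up their time slices. The sketch below is a minimal user-space model of that policy, not kernel code: struct task, pick_next() and the two sample tasks are hypothetical stand-ins for the real kernel structures, and the SMP and current-process bonuses in goodness() are omitted.

/* Minimal user-space model of the goodness()-based selection in schedule().
 * The struct and helpers are illustrative stand-ins, not kernel code. */
#include <stdio.h>

#define SCHED_OTHER 0

struct task {
	const char *name;
	int policy;      /* SCHED_OTHER or a realtime policy */
	int rt_priority; /* only meaningful for realtime tasks */
	int counter;     /* remaining time slice, in ticks */
	int priority;    /* static priority, used to refill counter */
};

/* Same shape as the kernel's goodness(): realtime tasks win outright,
 * otherwise the remaining time slice is the weight. */
static int goodness(const struct task *p)
{
	if (p->policy != SCHED_OTHER)
		return 1000 + p->rt_priority;
	return p->counter;
}

/* One scheduling decision: pick the highest goodness; if every normal
 * task is out of ticks, refill counters the way schedule() does:
 * counter = counter/2 + priority. */
static struct task *pick_next(struct task *rq, int n)
{
	int i, c = -1000;
	struct task *next = NULL;

	for (i = 0; i < n; i++) {
		int w = goodness(&rq[i]);
		if (w > c) {
			c = w;
			next = &rq[i];
		}
	}
	if (c == 0)
		for (i = 0; i < n; i++)
			rq[i].counter = (rq[i].counter >> 1) + rq[i].priority;
	return next;
}

int main(void)
{
	struct task rq[] = {
		{ "editor",   SCHED_OTHER, 0, 3, 20 },
		{ "compiler", SCHED_OTHER, 0, 7, 20 },
	};
	struct task *next = pick_next(rq, 2);

	printf("next: %s (goodness %d)\n", next->name, goodness(next));
	return 0;
}

Under these assumptions, "compiler" is chosen because it has more ticks left; once both counters reach zero, the refill step restores them from the static priority, which is what keeps interactive tasks from being starved in the real scheduler.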
