📄 sched.c
        p->stime += system;
        psecs = (p->stime + p->utime) / HZ;
        if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
                /* Send SIGXCPU every second.. */
                if (psecs * HZ == p->stime + p->utime)
                        send_sig(SIGXCPU, p, 1);
                /* and SIGKILL when we go over max.. */
                if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
                        send_sig(SIGKILL, p, 1);
        }
}

static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
        unsigned long it_virt = p->it_virt_value;

        if (it_virt) {
                if (it_virt <= ticks) {
                        it_virt = ticks + p->it_virt_incr;
                        send_sig(SIGVTALRM, p, 1);
                }
                p->it_virt_value = it_virt - ticks;
        }
}

static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
{
        unsigned long it_prof = p->it_prof_value;

        if (it_prof) {
                if (it_prof <= ticks) {
                        it_prof = ticks + p->it_prof_incr;
                        send_sig(SIGPROF, p, 1);
                }
                p->it_prof_value = it_prof - ticks;
        }
}

static __inline__ void update_one_process(struct task_struct *p,
        unsigned long ticks, unsigned long user, unsigned long system)
{
        do_process_times(p, user, system);
        do_it_virt(p, user);
        do_it_prof(p, ticks);
}

static void update_process_times(unsigned long ticks, unsigned long system)
{
#ifndef __SMP__
        struct task_struct * p = current;
        unsigned long user = ticks - system;
        if (p->pid) {
                p->counter -= ticks;
                if (p->counter < 0) {
                        p->counter = 0;
                        need_resched = 1;
                }
                if (p->priority < DEF_PRIORITY)
                        kstat.cpu_nice += user;
                else
                        kstat.cpu_user += user;
                kstat.cpu_system += system;
        }
        update_one_process(p, ticks, user, system);
#else
        int cpu,j;
        cpu = smp_processor_id();
        for (j=0;j<smp_num_cpus;j++)
        {
                int i = cpu_logical_map[j];
                struct task_struct *p;

#ifdef __SMP_PROF__
                if (test_bit(i,&smp_idle_map))
                        smp_idle_count[i]++;
#endif
                p = current_set[i];
                /*
                 * Do we have a real process?
                 */
                if (p->pid) {
                        /* assume user-mode process */
                        unsigned long utime = ticks;
                        unsigned long stime = 0;
                        if (cpu == i) {
                                utime = ticks-system;
                                stime = system;
                        } else if (smp_proc_in_lock[j]) {
                                utime = 0;
                                stime = ticks;
                        }
                        update_one_process(p, ticks, utime, stime);

                        if (p->priority < DEF_PRIORITY)
                                kstat.cpu_nice += utime;
                        else
                                kstat.cpu_user += utime;
                        kstat.cpu_system += stime;

                        p->counter -= ticks;
                        if (p->counter >= 0)
                                continue;
                        p->counter = 0;
                } else {
                        /*
                         * Idle processor found, do we have anything
                         * we could run?
                         */
                        if (!(0x7fffffff & smp_process_available))
                                continue;
                }
                /* Ok, we should reschedule, do the magic */
                if (i==cpu)
                        need_resched = 1;
                else
                        smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
        }
#endif
}

static unsigned long lost_ticks = 0;
static unsigned long lost_ticks_system = 0;

static inline void update_times(void)
{
        unsigned long ticks;

        ticks = xchg(&lost_ticks, 0);

        if (ticks) {
                unsigned long system;

                system = xchg(&lost_ticks_system, 0);
                calc_load(ticks);
                update_wall_time(ticks);
                update_process_times(ticks, system);
        }
}

static void timer_bh(void)
{
        update_times();
        run_old_timers();
        run_timer_list();
}

void do_timer(struct pt_regs * regs)
{
        (*(unsigned long *)&jiffies)++;
        lost_ticks++;
        mark_bh(TIMER_BH);
        if (!user_mode(regs)) {
                lost_ticks_system++;
                if (prof_buffer && current->pid) {
                        extern int _stext;
                        unsigned long ip = instruction_pointer(regs);
                        ip -= (unsigned long) &_stext;
                        ip >>= prof_shift;
                        if (ip < prof_len)
                                prof_buffer[ip]++;
                }
        }
        if (tq_timer)
                mark_bh(TQUEUE_BH);
}

#ifndef __alpha__

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned int sys_alarm(unsigned int seconds)
{
        struct itimerval it_new, it_old;
        unsigned int oldalarm;

        it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
        it_new.it_value.tv_sec = seconds;
        it_new.it_value.tv_usec = 0;
        _setitimer(ITIMER_REAL, &it_new, &it_old);
        oldalarm = it_old.it_value.tv_sec;
        /* ehhh.. We can't return 0 if we have an alarm pending.. */
        /* And we'd better return too much than too little anyway */
        if (it_old.it_value.tv_usec)
                oldalarm++;
        return oldalarm;
}

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */
asmlinkage int sys_getpid(void)
{
        return current->pid;
}

asmlinkage int sys_getppid(void)
{
        return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
        return current->uid;
}

asmlinkage int sys_geteuid(void)
{
        return current->euid;
}

asmlinkage int sys_getgid(void)
{
        return current->gid;
}

asmlinkage int sys_getegid(void)
{
        return current->egid;
}

/*
 * This has been replaced by sys_setpriority.  Maybe it should be
 * moved into the arch dependent tree for those ports that require
 * it for backward compatibility?
 */
asmlinkage int sys_nice(int increment)
{
        unsigned long newprio;
        int increase = 0;

        newprio = increment;
        if (increment < 0) {
                if (!suser())
                        return -EPERM;
                newprio = -increment;
                increase = 1;
        }
        if (newprio > 40)
                newprio = 40;
        /*
         * do a "normalization" of the priority (traditionally
         * unix nice values are -20..20, linux doesn't really
         * use that kind of thing, but uses the length of the
         * timeslice instead (default 150 msec). The rounding is
         * why we want to avoid negative values.
         */
        newprio = (newprio * DEF_PRIORITY + 10) / 20;
        increment = newprio;
        if (increase)
                increment = -increment;
        newprio = current->priority - increment;
        if ((signed) newprio < 1)
                newprio = 1;
        if (newprio > DEF_PRIORITY*2)
                newprio = DEF_PRIORITY*2;
        current->priority = newprio;
        return 0;
}

#endif

static struct task_struct *find_process_by_pid(pid_t pid)
{
        struct task_struct *p, *q;

        if (pid == 0)
                p = current;
        else {
                p = 0;
                for_each_task(q) {
                        if (q && q->pid == pid) {
                                p = q;
                                break;
                        }
                }
        }
        return p;
}

static int setscheduler(pid_t pid, int policy,
                        struct sched_param *param)
{
        int error;
        struct sched_param lp;
        struct task_struct *p;

        if (!param || pid < 0)
                return -EINVAL;

        error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
        if (error)
                return error;
        memcpy_fromfs(&lp, param, sizeof(struct sched_param));

        p = find_process_by_pid(pid);
        if (!p)
                return -ESRCH;

        if (policy < 0)
                policy = p->policy;
        else if (policy != SCHED_FIFO && policy != SCHED_RR &&
                 policy != SCHED_OTHER)
                return -EINVAL;

        /*
         * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
         * priority for SCHED_OTHER is 0.
         */
        if (lp.sched_priority < 0 || lp.sched_priority > 99)
                return -EINVAL;
        if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
                return -EINVAL;

        if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
                return -EPERM;
        if ((current->euid != p->euid) && (current->euid != p->uid) &&
            !suser())
                return -EPERM;

        p->policy = policy;
        p->rt_priority = lp.sched_priority;
        cli();
        if (p->next_run)
                move_last_runqueue(p);
        sti();
        need_resched = 1;
        return 0;
}

asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
                                      struct sched_param *param)
{
        return setscheduler(pid, policy, param);
}

asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
{
        return setscheduler(pid, -1, param);
}

asmlinkage int sys_sched_getscheduler(pid_t pid)
{
        struct task_struct *p;

        if (pid < 0)
                return -EINVAL;

        p = find_process_by_pid(pid);
        if (!p)
                return -ESRCH;

        return p->policy;
}

asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
        int error;
        struct task_struct *p;
        struct sched_param lp;

        if (!param || pid < 0)
                return -EINVAL;

        error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
        if (error)
                return error;

        p = find_process_by_pid(pid);
        if (!p)
                return -ESRCH;

        lp.sched_priority = p->rt_priority;
        memcpy_tofs(param, &lp, sizeof(struct sched_param));

        return 0;
}

asmlinkage int sys_sched_yield(void)
{
        cli();
        move_last_runqueue(current);
        current->counter = 0;
        need_resched = 1;
        sti();
        return 0;
}

asmlinkage int sys_sched_get_priority_max(int policy)
{
        switch (policy) {
        case SCHED_FIFO:
        case SCHED_RR:
                return 99;
        case SCHED_OTHER:
                return 0;
        }

        return -EINVAL;
}

asmlinkage int sys_sched_get_priority_min(int policy)
{
        switch (policy) {
        case SCHED_FIFO:
        case SCHED_RR:
                return 1;
        case SCHED_OTHER:
                return 0;
        }

        return -EINVAL;
}

asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
        int error;
        struct timespec t;

        error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
        if (error)
                return error;

        /* Values taken from 2.1.38 */
        t.tv_sec = 0;
        t.tv_nsec = 150000;     /* is this right for non-intel architecture too? */

        memcpy_tofs(interval, &t, sizeof(struct timespec));

        return 0;
}

/*
 * change timeval to jiffies, trying to avoid the
 * most obvious overflows..
 */
static unsigned long timespectojiffies(struct timespec *value)
{
        unsigned long sec = (unsigned) value->tv_sec;
        long nsec = value->tv_nsec;

        if (sec > (LONG_MAX / HZ))
                return LONG_MAX;
        nsec += 1000000000L / HZ - 1;
        nsec /= 1000000000L / HZ;
        return HZ * sec + nsec;
}

static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
{
        value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
        value->tv_sec = jiffies / HZ;
        return;
}

asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
        int error;
        struct timespec t;
        unsigned long expire;

        error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
        if (error)
                return error;
        memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
        if (rmtp) {
                error = verify_area(VERIFY_WRITE, rmtp, sizeof(struct timespec));
                if (error)
                        return error;
        }

        if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
                return -EINVAL;

        if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
            current->policy != SCHED_OTHER) {
                /*
                 * Short delay requests up to 2 ms will be handled with
                 * high precision by a busy wait for all real-time processes.
                 */
                udelay((t.tv_nsec + 999) / 1000);
                return 0;
        }

        expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
        current->timeout = expire;
        current->state = TASK_INTERRUPTIBLE;
        schedule();

        if (expire > jiffies) {
                if (rmtp) {
                        jiffiestotimespec(expire - jiffies -
                                          (expire > jiffies + 1), &t);
                        memcpy_tofs(rmtp, &t, sizeof(struct timespec));
                }
                return -EINTR;
        }

        return 0;
}

static void show_task(int nr,struct task_struct * p)
{
        unsigned long free;
        static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

        printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
        if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
                printk(stat_nam[p->state]);
        else
                printk(" ");
#if ((~0UL) == 0xffffffff)
        if (p == current)
                printk(" current  ");
        else
                printk(" %08lX ", thread_saved_pc(&p->tss));
        printk("%08lX ", get_wchan(p));
#else
        if (p == current)
                printk("   current task   ");
        else
                printk(" %016lx ", thread_saved_pc(&p->tss));
        printk("%08lX ", get_wchan(p) & 0xffffffffL);
#endif
        for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
                if (((unsigned long *)p->kernel_stack_page)[free])
                        break;
        }
        printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
        if (p->p_cptr)
                printk("%5d ", p->p_cptr->pid);
        else
                printk("      ");
        if (p->p_ysptr)
                printk("%7d", p->p_ysptr->pid);
        else
                printk("       ");
        if (p->p_osptr)
                printk(" %5d\n", p->p_osptr->pid);
        else
                printk("\n");
}

void show_state(void)
{
        int i;

#if ((~0UL) == 0xffffffff)
        printk("\n"
               "                        free                         sibling\n");
        printk("  task            PC     wchan    stack  pid  father child younger older\n");
#else
        printk("\n"
               "                                free                         sibling\n");
        printk("  task                PC         wchan    stack  pid  father child younger older\n");
#endif
        for (i=0 ; i<NR_TASKS ; i++)
                if (task[i])
                        show_task(i,task[i]);
}

void sched_init(void)
{
        /*
         *      We have to do a little magic to get the first
         *      process right in SMP mode.
         */
        int cpu=smp_processor_id();
#ifndef __SMP__
        current_set[cpu]=&init_task;
#else
        init_task.processor=cpu;
        for(cpu = 0; cpu < NR_CPUS; cpu++)
                current_set[cpu] = &init_task;
#endif
        init_bh(TIMER_BH, timer_bh);
        init_bh(TQUEUE_BH, tqueue_bh);
        init_bh(IMMEDIATE_BH, immediate_bh);
}
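
The sched_* and nanosleep system calls above are normally reached through the C library. The following user-space sketch is not part of sched.c; it assumes the standard glibc wrappers (sched_setscheduler(), sched_rr_get_interval(), nanosleep()) and uses an arbitrary example priority of 10.

/*
 * User-space sketch (not kernel code): exercises the syscalls defined above.
 * Requires root, since setscheduler() checks suser() for SCHED_FIFO/SCHED_RR.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 10 };   /* 1..99 for SCHED_RR, as enforced by setscheduler() */
        struct timespec slice;
        struct timespec req = { 0, 2000000L };              /* 2 ms request */

        /* pid 0 means "the calling process" (see find_process_by_pid()) */
        if (sched_setscheduler(0, SCHED_RR, &sp) == -1)
                perror("sched_setscheduler");

        /* Reports the round-robin timeslice filled in by sys_sched_rr_get_interval() */
        if (sched_rr_get_interval(0, &slice) == 0)
                printf("RR timeslice: %ld.%09ld s\n", (long)slice.tv_sec, slice.tv_nsec);

        /* If the RT policy took effect, sleeps of <= 2 ms hit the udelay() busy-wait path above */
        nanosleep(&req, NULL);
        return 0;
}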