📄 timer.c
	ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
	time_offset -= ltemp;
	time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
    }

    /*
     * Compute the frequency estimate and additional phase
     * adjustment due to frequency error for the next
     * second. When the PPS signal is engaged, gnaw on the
     * watchdog counter and update the frequency computed by
     * the pll and the PPS signal.
     */
    pps_valid++;
    if (pps_valid == PPS_VALID) {	/* PPS signal lost */
	pps_jitter = MAXTIME;
	pps_stabil = MAXFREQ;
	time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			 STA_PPSWANDER | STA_PPSERROR);
    }
    ltemp = time_freq + pps_freq;
    if (ltemp < 0)
	time_adj -= -ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
    else
	time_adj += ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
    /*
     * Compensate for (HZ==100) != (1 << SHIFT_HZ).
     * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
     */
    if (time_adj < 0)
	time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
    else
	time_adj += (time_adj >> 2) + (time_adj >> 5);
#endif
}
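/*
 * A minimal userspace sketch (compiled out, not part of timer.c) of the
 * HZ==100 compensation above: time_adj is scaled assuming 1 << SHIFT_HZ =
 * 128 ticks per second, but the real tick rate is 100, so the value must
 * grow by a factor of 128/100 = 1.28. The shift form x + (x >> 2) + (x >> 5)
 * computes x * (1 + 1/4 + 1/32) = x * 1.28125, i.e. roughly 0.1% error,
 * with no division. The demo below only illustrates the arithmetic.
 */
#if 0
#include <stdio.h>

int main(void)
{
	long x;

	for (x = 1000; x <= 10000000; x *= 10) {
		long approx = x + (x >> 2) + (x >> 5);	/* x * 1.28125 */
		double exact = x * 1.28;		/* x * 128/100 */
		printf("x=%9ld approx=%9ld exact=%11.1f\n", x, approx, exact);
	}
	return 0;
}
#endif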
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
	if ( (time_adjust_step = time_adjust) != 0 ) {
		/*
		 * We are doing an adjtime thing.
		 *
		 * Prepare time_adjust_step to be within bounds.
		 * Note that a positive time_adjust means we want the clock
		 * to run faster.
		 *
		 * Limit the amount of the step to be in the range
		 * -tickadj .. +tickadj
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;

		/* Reduce by this step the amount of time left */
		time_adjust -= time_adjust_step;
	}
	xtime.tv_usec += tick + time_adjust_step;
	/*
	 * Advance the phase; once it gets to one microsecond,
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		long ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec -= ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		long ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += ltemp;
	}
}

/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks;
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks).
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
	} while (ticks);

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}
}

static inline void do_process_times(struct task_struct *p,
	unsigned long user, unsigned long system)
{
	unsigned long psecs;

	psecs = (p->times.tms_utime += user);
	psecs += (p->times.tms_stime += system);
	if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
		/* Send SIGXCPU every second.. */
		if (!(psecs % HZ))
			send_sig(SIGXCPU, p, 1);
		/* and SIGKILL when we go over max.. */
		if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
			send_sig(SIGKILL, p, 1);
	}
}

static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_virt = p->it_virt_value;

	if (it_virt) {
		it_virt -= ticks;
		if (!it_virt) {
			it_virt = p->it_virt_incr;
			send_sig(SIGVTALRM, p, 1);
		}
		p->it_virt_value = it_virt;
	}
}

static inline void do_it_prof(struct task_struct *p)
{
	unsigned long it_prof = p->it_prof_value;

	if (it_prof) {
		if (--it_prof == 0) {
			it_prof = p->it_prof_incr;
			send_sig(SIGPROF, p, 1);
		}
		p->it_prof_value = it_prof;
	}
}

void update_one_process(struct task_struct *p, unsigned long user,
			unsigned long system, int cpu)
{
	p->per_cpu_utime[cpu] += user;
	p->per_cpu_stime[cpu] += system;
	do_process_times(p, user, system);
	do_it_virt(p, user);
	do_it_prof(p);
}

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id(), system = user_tick ^ 1;

	update_one_process(p, user_tick, system, cpu);
	if (p->pid) {
		if (--p->counter <= 0) {
			p->counter = 0;
			p->need_resched = 1;
		}
		if (p->nice > 0)
			kstat.per_cpu_nice[cpu] += user_tick;
		else
			kstat.per_cpu_user[cpu] += user_tick;
		kstat.per_cpu_system[cpu] += system;
	} else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
		kstat.per_cpu_system[cpu] += system;
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	struct task_struct *p;
	unsigned long nr = 0;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if ((p->state == TASK_RUNNING ||
		     (p->state & TASK_UNINTERRUPTIBLE)))
			nr += FIXED_1;
	}
	read_unlock(&tasklist_lock);
	return nr;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc.
 * all seem to differ on different machines.
 */
unsigned long avenrun[3];

static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
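/*
 * A self-contained userspace sketch (compiled out) of the fixed-point
 * exponential decay behind calc_load() above. The constants mirror the
 * ones the 2.4 kernel defines in <linux/sched.h>: every LOAD_FREQ (5 s)
 * the average moves toward the instantaneous runnable count n by a
 * weight derived from exp(-5/60) in 11-bit fixed point; EXP_5/EXP_15
 * work the same way with longer time constants. Only EXP_1 is shown.
 */
#if 0
#include <stdio.h>

#define FSHIFT	11			/* nr of bits of precision */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* ~exp(-5s/1min) * FIXED_1 */

#define CALC_LOAD(load, exp, n)		\
	load *= exp;			\
	load += n * (FIXED_1 - exp);	\
	load >>= FSHIFT;

int main(void)
{
	unsigned long avenrun_1 = 0;		/* 1-minute average */
	unsigned long active = 2 * FIXED_1;	/* two runnable tasks */
	int i;

	/* 24 samples at 5 s each = 2 minutes of simulated uptime */
	for (i = 0; i < 24; i++) {
		CALC_LOAD(avenrun_1, EXP_1, active);
		printf("t=%3ds load=%lu.%02lu\n", (i + 1) * 5,
		       avenrun_1 >> FSHIFT,
		       (avenrun_1 & (FIXED_1 - 1)) * 100 / FIXED_1);
	}
	return 0;	/* the printed load climbs toward 2.00 */
}
#endif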
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies;

/*
 * This spinlock protects us from races in SMP while playing with xtime. -arca
 */
rwlock_t xtime_lock = RW_LOCK_UNLOCKED;

static inline void update_times(void)
{
	unsigned long ticks;

	/*
	 * update_times() is run from the raw timer_bh handler so we
	 * just know that the irqs are locally enabled and so we don't
	 * need to save/restore the flags of the local CPU here. -arca
	 */
	write_lock_irq(&xtime_lock);

	ticks = jiffies - wall_jiffies;
	if (ticks) {
		wall_jiffies += ticks;
		update_wall_time(ticks);
	}
	write_unlock_irq(&xtime_lock);

	calc_load(ticks);
}

void timer_bh(void)
{
	update_times();
	run_timer_list();
}

void do_timer(struct pt_regs *regs)
{
	(*(unsigned long *)&jiffies)++;
#ifndef CONFIG_SMP
	/* SMP process accounting uses the local APIC timer */
	update_process_times(user_mode(regs));
#endif
	mark_bh(TIMER_BH);
	if (TQ_ACTIVE(tq_timer))
		mark_bh(TQUEUE_BH);
}

#if !defined(__alpha__) && !defined(__ia64__)

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	do_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending.. */
	/* And we'd better return too much than too little anyway */
	if (it_old.it_value.tv_usec)
		oldalarm++;
	return oldalarm;
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
 * should be moved into arch/i386 instead?
 */
asmlinkage long sys_getpid(void)
{
	/* This is SMP safe - current->pid doesn't change */
	return current->tgid;
}

/*
 * This is not strictly SMP safe: p_opptr could change
 * from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer: we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * The "mb()" macro is a memory barrier - a synchronizing
 * event. It also makes sure that gcc doesn't optimize
 * away the necessary memory references.. The barrier doesn't
 * have to have all that strong semantics: on x86 we don't
 * really require a synchronizing instruction, for example.
 * The barrier is more important for code generation than
 * for any real memory ordering semantics (even if there is
 * a small window for a race, using the old pointer is
 * harmless for a while).
 */
asmlinkage long sys_getppid(void)
{
	int pid;
	struct task_struct * me = current;
	struct task_struct * parent;

	parent = me->p_opptr;
	for (;;) {
		pid = parent->pid;
#if CONFIG_SMP
		{
			struct task_struct *old = parent;
			mb();
			parent = me->p_opptr;
			if (old != parent)
				continue;
		}
#endif
		break;
	}
	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif

/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}

asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
	struct timespec t;
	unsigned long expire;

	if (copy_from_user(&t, rqtp, sizeof(struct timespec)))
		return -EFAULT;

	if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
		return -EINVAL;

	if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
	    current->policy != SCHED_OTHER) {
		/*
		 * Short delay requests up to 2 ms will be handled with
		 * high precision by a busy wait for all real-time processes.
		 *
		 * It's important on SMP not to do this holding locks.
		 */
		udelay((t.tv_nsec + 999) / 1000);
		return 0;
	}

	expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);

	current->state = TASK_INTERRUPTIBLE;
	expire = schedule_timeout(expire);

	if (expire) {
		if (rmtp) {
			jiffies_to_timespec(expire, &t);
			if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
				return -EFAULT;
		}
		return -EINTR;
	}
	return 0;
}
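/*
 * A hypothetical userspace analogue (compiled out) of the optimistic,
 * lock-free read in sys_getppid() above, with a C11 atomic fence standing
 * in for the kernel's mb(): read the pointer, read the field through it,
 * then re-read the pointer; if it changed in between, the field may be
 * stale, so retry. The names here (struct task, read_parent_pid) are
 * illustrative only, not kernel API.
 */
#if 0
#include <stdatomic.h>

struct task {
	int pid;
};

int read_parent_pid(struct task * _Atomic *pparent)
{
	struct task *parent, *recheck;
	int pid;

	for (;;) {
		parent = atomic_load(pparent);
		pid = parent->pid;	/* may race with a re-parent */
		/* analogue of the kernel's mb() barrier above */
		atomic_thread_fence(memory_order_seq_cst);
		recheck = atomic_load(pparent);
		if (recheck == parent)
			break;		/* pointer was stable: pid usable */
	}
	return pid;
}
#endif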