ntp_loopfilter.c
    }
#endif /* STA_NANO */
        sys_tai = i + TAI_1972 - 1;
    }
#endif /* OPENSSL */
#ifdef KERNEL_PLL
    /*
     * This code segment works when clock adjustments are made using
     * precision time kernel support and the ntp_adjtime() system
     * call. This support is available in Solaris 2.6 and later,
     * Digital Unix 4.0 and later, FreeBSD, Linux and specially
     * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
     * DECstation 5000/240 and Alpha AXP, additional kernel
     * modifications provide a true microsecond clock and nanosecond
     * clock, respectively.
     *
     * Important note: The kernel discipline is used only if the
     * offset is less than 0.5 s, as anything higher can lead to
     * overflow problems. This might occur if some misguided lad set
     * the step threshold to something ridiculous. No problem; use
     * the ntp discipline until the residual offset sinks beneath
     * the waves.
     */
    if (pll_control && kern_enable && fabs(clock_offset) < .5) {

        /*
         * We initialize the structure for the ntp_adjtime()
         * system call. We have to convert everything to
         * microseconds or nanoseconds first. Do not update the
         * system variables if the ext_enable flag is set. In
         * this case, the external clock driver will update the
         * variables, which will be read later by the local
         * clock driver. Afterwards, remember the time and
         * frequency offsets for jitter and stability values and
         * to update the drift file.
         */
        memset(&ntv, 0, sizeof(ntv));
        if (ext_enable) {
            ntv.modes = MOD_STATUS;
        } else {
            struct tm *tm = NULL;
            time_t tstamp;

#ifdef STA_NANO
            ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
            ntv.modes = MOD_BITS;
#endif /* STA_NANO */
            if (clock_offset < 0)
                dtemp = -.5;
            else
                dtemp = .5;
#ifdef STA_NANO
            ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
            ntv.constant = sys_poll;
#else /* STA_NANO */
            ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
            ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
            if (clock_frequency != 0) {
                ntv.modes |= MOD_FREQUENCY;
                ntv.freq = (int32)((clock_frequency +
                    drift_comp) * 65536e6);
            }
            ntv.esterror = (u_int32)(clock_jitter * 1e6);
            ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
                sys_rootdispersion) * 1e6);
            ntv.status = STA_PLL;

            /*
             * Set the leap bits in the status word, but
             * only on the last day of June or December.
             */
            tstamp = peer->rec.l_ui - JAN_1970;
            tm = gmtime(&tstamp);
            if (tm != NULL) {
                if ((tm->tm_mon + 1 == 6 &&
                    tm->tm_mday == 30) ||
                    (tm->tm_mon + 1 == 12 &&
                    tm->tm_mday == 31)) {
                    if (leap_next & LEAP_ADDSECOND)
                        ntv.status |= STA_INS;
                    else if (leap_next & LEAP_DELSECOND)
                        ntv.status |= STA_DEL;
                }
            }

            /*
             * Switch to FLL mode if the poll interval is
             * greater than MAXDPOLL, so that the kernel
             * loop behaves as the daemon loop; viz.,
             * selects the FLL when necessary, etc. For
             * legacy only.
             */
            if (sys_poll > NTP_MAXDPOLL)
                ntv.status |= STA_FLL;

            /*
             * If the PPS signal is up and enabled, light
             * the frequency bit. If the PPS driver is
             * working, light the phase bit as well. If not,
             * douse the lights, since somebody else may
             * have left the switch on.
             */
            if (pps_enable && pll_status & STA_PPSSIGNAL) {
                ntv.status |= STA_PPSFREQ;
                if (pps_stratum < STRATUM_UNSPEC)
                    ntv.status |= STA_PPSTIME;
            } else {
                ntv.status &= ~(STA_PPSFREQ | STA_PPSTIME);
            }
        }

        /*
         * Pass the stuff to the kernel. If it squeals, turn off
         * the pigs. In any case, fetch the kernel offset and
         * frequency and pretend we did it here.
         */
        if (ntp_adjtime(&ntv) == TIME_ERROR) {
            if (ntv.status != pll_status)
                NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
                    msyslog(LOG_NOTICE,
                    "kernel time sync disabled %04x",
                    ntv.status);
            ntv.status &= ~(STA_PPSFREQ | STA_PPSTIME);
        } else {
            if (ntv.status != pll_status)
                NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
                    msyslog(LOG_NOTICE,
                    "kernel time sync enabled %04x",
                    ntv.status);
        }
        pll_status = ntv.status;
#ifdef STA_NANO
        clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
        clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
        clock_frequency = ntv.freq / 65536e6 - drift_comp;
        flladj = plladj = 0;

        /*
         * If the kernel PPS is lit, monitor its performance.
         */
        if (ntv.status & STA_PPSTIME) {
            pps_control = current_time;
#ifdef STA_NANO
            clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
            clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
        }
    }
#endif /* KERNEL_PLL */

    /*
     * Adjust the clock frequency and calculate the stability. If
     * kernel support is available, we use the results of the kernel
     * discipline instead of the PLL/FLL discipline. In this case,
     * drift_comp is a sham and used only for updating the drift
     * file and for billboard eye candy.
     */
    dtemp = clock_frequency + flladj + plladj;
    etemp = drift_comp + dtemp;
    if (etemp > NTP_MAXFREQ)
        drift_comp = NTP_MAXFREQ;
    else if (etemp <= -NTP_MAXFREQ)
        drift_comp = -NTP_MAXFREQ;
    else
        drift_comp = etemp;
    if (fabs(etemp) > NTP_MAXFREQ)
        NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
            msyslog(LOG_NOTICE,
            "frequency error %.0f PPM exceeds tolerance %.0f PPM",
            etemp * 1e6, NTP_MAXFREQ * 1e6);
    etemp = SQUARE(clock_stability);
    dtemp = SQUARE(dtemp);
    clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);

    /*
     * Here we adjust the poll interval by comparing the current
     * offset with the clock jitter. If the offset is less than the
     * clock jitter times a constant, then the averaging interval is
     * increased, otherwise it is decreased. A bit of hysteresis
     * helps calm the dance. Works best using burst mode.
     */
    if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
        tc_counter += sys_poll;
        if (tc_counter > CLOCK_LIMIT) {
            tc_counter = CLOCK_LIMIT;
            if (sys_poll < peer->maxpoll) {
                tc_counter = 0;
                sys_poll++;
            }
        }
    } else {
        tc_counter -= sys_poll << 1;
        if (tc_counter < -CLOCK_LIMIT) {
            tc_counter = -CLOCK_LIMIT;
            if (sys_poll > peer->minpoll) {
                tc_counter = 0;
                sys_poll--;
            }
        }
    }

    /*
     * Yibbidy, yibbbidy, yibbidy; that'h all folks.
     */
    record_loop_stats(clock_offset, drift_comp, clock_jitter,
        clock_stability, sys_poll);
#ifdef DEBUG
    if (debug)
        printf(
            "local_clock: mu %lu jitr %.6f freq %.3f stab %.6f poll %d count %d\n",
            mu, clock_jitter, drift_comp * 1e6,
            clock_stability * 1e6, sys_poll, tc_counter);
#endif /* DEBUG */
    return (rval);
#endif /* LOCKCLOCK */
}


/*
 * adj_host_clock - Called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdispersion variable.
 */
void
adj_host_clock(
    void
    )
{
    double adjustment;

    /*
     * Update the dispersion since the last update. In contrast to
     * NTPv3, NTPv4 does not declare unsynchronized after one day,
     * since the dispersion check serves this function. Also,
     * since the poll interval can exceed one day, the old test
     * would be counterproductive. Note we do this even with
     * external clocks, since the clock driver will recompute the
     * maximum error and the local clock driver will pick it up and
     * pass to the common refclock routines. Very elegant.
     */
    sys_rootdispersion += clock_phi;

#ifndef LOCKCLOCK
    /*
     * Declare PPS kernel unsync if the pps signal has not been
     * heard for a few minutes.
     */
    if (pps_control && current_time - pps_control > PPS_MAXAGE) {
        if (pps_control)
            NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
                msyslog(LOG_NOTICE, "pps sync disabled");
        pps_control = 0;
    }

    /*
     * If NTP is disabled or ntpdate mode enabled or the kernel
     * discipline is enabled, we have no business going further.
     */
    if (!ntp_enable || mode_ntpdate || (pll_control && kern_enable))
        return;

    /*
     * Implement the phase and frequency adjustments. The gain
     * factor (denominator) is not allowed to increase beyond the
     * Allan intercept. It doesn't make sense to average phase noise
     * beyond this point and it helps to damp residual offset at the
     * longer poll intervals.
     */
    adjustment = clock_offset / (CLOCK_PLL * min(ULOGTOD(sys_poll),
        allan_xpt));
    clock_offset -= adjustment;
    adj_systime(adjustment + drift_comp);
#endif /* LOCKCLOCK */
}


/*
 * Clock state machine. Enter new state and set state variables. Note we
 * use the time of the last clock filter sample, which may be earlier
 * than the current time.
 */
static void
rstclock(
    int trans,            /* new state */
    u_long update,        /* new update time */
    double offset         /* new offset */
    )
{
    state = trans;
    sys_clocktime = update;
    last_base = offset - clock_offset;
    last_offset = clock_offset = offset;
#ifdef DEBUG
    if (debug)
        printf("local_clock: time %lu base %.6f offset %.6f freq %.3f state %d\n",
            sys_clocktime, last_base, last_offset,
            drift_comp * 1e6, trans);
#endif
}


/*
 * huff-n'-puff filter
 */
void
huffpuff()
{
    int i;

    if (sys_huffpuff == NULL)
        return;

    sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
    sys_huffpuff[sys_huffptr] = 1e9;
    sys_mindly = 1e9;
    for (i = 0; i < sys_hufflen; i++) {
        if (sys_huffpuff[i] < sys_mindly)
            sys_mindly = sys_huffpuff[i];
    }
}


/*
 * loop_config - configure the loop filter
 *
 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
 */
void
loop_config(
    int item,
    double freq
    )
{
    int i;

    switch (item) {

    case LOOP_DRIFTINIT:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
        /*
         * Assume the kernel supports the ntp_adjtime() syscall.
         * If that syscall works, initialize the kernel time
         * variables. Otherwise, continue leaving no harm
         * behind. While at it, ask to set nanosecond mode. If
         * the kernel agrees, rejoice; otherwise, it does only
         * microseconds.
         *
         * Call out the safety patrol. If ntpdate mode or if the
         * step threshold has been increased by the -x option or
         * tinker command, kernel discipline is unsafe, so don't
         * do any of this stuff. Otherwise, initialize the
         * kernel to appear unsynchronized until the first
         * update is received.
         */
        if (mode_ntpdate || clock_max > CLOCK_MAX)
            break;

        pll_control = 1;
        memset(&ntv, 0, sizeof(ntv));
#ifdef STA_NANO
        ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
        ntv.modes = MOD_BITS;
#endif /* STA_NANO */
        ntv.maxerror = MAXDISPERSE;
        ntv.esterror = MAXDISPERSE;
        ntv.status = STA_UNSYNC;
#ifdef SIGSYS
        /*
         * Use sigsetjmp() to save state and then call
         * ntp_adjtime(); if it fails, then siglongjmp() is used
         * to return control
         */
        newsigsys.sa_handler = pll_trap;
        newsigsys.sa_flags = 0;
        if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
            msyslog(LOG_ERR,
                "sigaction() fails to save SIGSYS trap: %m");
            pll_control = 0;
        }
        if (sigsetjmp(env, 1) == 0)
            ntp_adjtime(&ntv);
        if ((sigaction(SIGSYS, &sigsys,
            (struct sigaction *)NULL))) {
            msyslog(LOG_ERR,
                "sigaction() fails to restore SIGSYS trap: %m");
            pll_control = 0;
        }
#else /* SIGSYS */
        ntp_adjtime(&ntv);
#endif /* SIGSYS */

        /*
         * Save the result status and light up an external clock
         * if available.
         */
        pll_status = ntv.status;
        if (pll_control) {
#ifdef STA_NANO
            if (pll_status & STA_CLK)
                ext_enable = 1;
#endif /* STA_NANO */
            NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
                msyslog(LOG_INFO,
                "kernel time sync status %04x", pll_status);
        }
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
        break;

    case LOOP_DRIFTCOMP:
#ifndef LOCKCLOCK
        /*
         * If the frequency value is reasonable, set the initial
         * frequency to the given value and the state to S_FSET.
         * Otherwise, the drift file may be missing or broken,
         * so set the frequency to zero. This erases past
         * history should somebody break something.
         */
        if (freq <= NTP_MAXFREQ && freq >= -NTP_MAXFREQ) {
            drift_comp = freq;
            rstclock(S_FSET, 0, 0);
        } else {
            drift_comp = 0;
        }

#ifdef KERNEL_PLL
        /*
         * Sanity check. If the kernel is available, load the
         * frequency and light up the loop. Make sure the offset
         * is zero to cancel any previous nonsense. If you don't
         * want this initialization, remove the ntp.drift file.
         */
        if (pll_control && kern_enable) {
            memset((char *)&ntv, 0, sizeof(ntv));
            ntv.modes = MOD_FREQUENCY;
            ntv.freq = (int32)(drift_comp * 65536e6);
            ntp_adjtime(&ntv);
        }
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
        break;

    /*
     * Special tinker variables for Ulrich Windl. Very dangerous.
     */
    case LOOP_MAX:          /* step threshold */
        clock_max = freq;
        break;

    case LOOP_PANIC:        /* panic threshold */
        clock_panic = freq;
        break;

    case LOOP_PHI:          /* dispersion rate */
        clock_phi = freq;
        break;

    case LOOP_MINSTEP:      /* watchdog bark */
        clock_minstep = freq;
        break;

    case LOOP_ALLAN:        /* Allan intercept */
        allan_xpt = freq;
        break;

    case LOOP_HUFFPUFF:     /* huff-n'-puff filter length */
        if (freq < HUFFPUFF)
            freq = HUFFPUFF;
        sys_hufflen = (int)(freq / HUFFPUFF);
        sys_huffpuff = (double *)emalloc(sizeof(double) *
            sys_hufflen);
        for (i = 0; i < sys_hufflen; i++)
            sys_huffpuff[i] = 1e9;
        sys_mindly = 1e9;
        break;

    case LOOP_FREQ:         /* initial frequency */
        drift_comp = freq / 1e6;
        rstclock(S_FSET, 0, 0);
        break;
    }
}


#if defined(KERNEL_PLL) && defined(SIGSYS)
/*
 * _trap - trap processor for undefined syscalls
 *
 * This nugget is called by the kernel when the SYS_ntp_adjtime()
 * syscall bombs because the silly thing has not been implemented in
 * the kernel. In this case the phase-lock loop is emulated by
 * the stock adjtime() syscall and a lot of indelicate abuse.
 */
static RETSIGTYPE
pll_trap(
    int arg
    )
{
    pll_control = 0;
    siglongjmp(env, 1);
}
#endif /* KERNEL_PLL && SIGSYS */
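
/*
 * Editor's sketch, not part of ntp_loopfilter.c: a minimal standalone
 * program showing how the kernel discipline state that local_clock()
 * programs through ntp_adjtime() can be read back from user space. It
 * assumes a system that provides <sys/timex.h> and the ntp_adjtime()
 * interface (e.g. Linux, FreeBSD, Solaris); build and run it on its own.
 */
#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

int
main(void)
{
    struct timex tx;
    int state;

    memset(&tx, 0, sizeof(tx));
    tx.modes = 0;           /* no MOD_* bits set: read-only query */
    state = ntp_adjtime(&tx);
    if (state < 0) {
        perror("ntp_adjtime");
        return (1);
    }

    /*
     * A return of TIME_ERROR is the condition local_clock() reports
     * as "kernel time sync disabled"; tx.freq carries PPM scaled by
     * 2^16, matching the 65536e6 conversions used above.
     */
    printf("state %d status %04x freq %.3f PPM offset %ld\n",
        state, (unsigned int)tx.status, tx.freq / 65536.0,
        (long)tx.offset);
    if (tx.status & STA_UNSYNC)
        printf("kernel clock is unsynchronized\n");
    return (0);
}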
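
/*
 * Editor's sketch, not part of ntp_loopfilter.c: the poll-interval
 * hysteresis from local_clock() above, isolated as a pure function so
 * the credit/debit behaviour of the counter can be experimented with in
 * a test harness. poll_adjust(), PGATE and LIMIT are hypothetical
 * stand-ins for the daemon's sys_poll/tc_counter handling and for the
 * CLOCK_PGATE and CLOCK_LIMIT constants; the values below are assumed.
 */
#include <math.h>

#define PGATE   4.0     /* assumed gate multiplier, cf. CLOCK_PGATE */
#define LIMIT   30      /* assumed hysteresis limit, cf. CLOCK_LIMIT */

int
poll_adjust(double offset, double jitter, int *tc, int poll,
    int minpoll, int maxpoll)
{
    if (fabs(offset) < PGATE * jitter) {

        /* offset small relative to jitter: credit the counter */
        *tc += poll;
        if (*tc > LIMIT) {
            *tc = LIMIT;
            if (poll < maxpoll) {
                *tc = 0;
                poll++;         /* lengthen the averaging interval */
            }
        }
    } else {

        /* offset large relative to jitter: debit twice as fast */
        *tc -= poll << 1;
        if (*tc < -LIMIT) {
            *tc = -LIMIT;
            if (poll > minpoll) {
                *tc = 0;
                poll--;         /* shorten the averaging interval */
            }
        }
    }
    return (poll);
}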