/* ntp_proto.c */
*/
	/*
	 * NOTE(review): this span begins mid-function -- it is the tail of
	 * clock_update(); the function's opening and the start of the
	 * comment closed above are outside this view.
	 *
	 * If the update came from the currently selected peer and we are
	 * not in a post-step hold-off, copy the peer variables into the
	 * system variables and steer the local clock.
	 */
	if ((peer == sys.peer) && (sys.hold == 0)) {
		/*
		 * Update the local system variables
		 */
		sys.leap = peer->leap;
#ifndef REFCLOCK
		/* We synchronize one stratum below our source. */
		sys.stratum = peer->stratum + 1;
		if (peer->src.type == AF_INET) {
			sys.refid.rid_type = RID_INET;
			sys.refid.rid_inet = peer->src.inet_ad.sin_addr.s_addr;
		} else if (peer->src.type == AF_OSI) {
			sys.refid.rid_type = RID_PSAP;
			sys.refid.rid_psap = peer->src.psap_ad;
		}
#else
		if (peer->flags & PEER_FL_REFCLOCK) {
			/* once we re-map the stratums so that stratum 0 is
			   better than stratum 1, some of this foolishness
			   can go away */
			sys.stratum = peer->stratum;
			sys.refid = peer->refid;
		} else {
			sys.stratum = peer->stratum + 1;
			if (peer->src.type == AF_INET) {
				sys.refid.rid_type = RID_INET;
				sys.refid.rid_inet = peer->src.inet_ad.sin_addr.s_addr;
			} else if (peer->src.type == AF_OSI) {
				sys.refid.rid_type = RID_PSAP;
				sys.refid.rid_psap = peer->src.psap_ad;
			}
		}
#endif
		/* System distance/dispersion = peer's plus the measured
		   delay/dispersion of this association. */
		temp = s_fixed_to_double(&peer->distance) + peer->estdelay;
		double_to_s_fixed(&sys.distance, temp);
		temp = s_fixed_to_double(&peer->dispersion) + peer->estdisp;
		double_to_s_fixed(&sys.dispersion, temp);
		sys.reftime = peer->rec;
		TRACE (3, ("clock_update: synced to peer, adj clock"));
		/*
		 * Sanity check: is computed offset insane?
		 */
		if (peer->estoffset > WayTooBig || peer->estoffset < -WayTooBig) {
			advise (LLOG_EXCEPTIONS, NULLCP, "Clock is too far off %f sec. [%s]",
				peer->estoffset, paddr (&peer->src));
			return;
		}
		clock_watchdog = 0;	/* reset watchdog timer */
		/* adj_logical() returning > 0 means the clock was STEPped
		   rather than slewed; all filter state is then stale. */
		if (adj_logical(peer->estoffset) > 0) {
			register struct ntp_peer *p = peer_list.head;
			advise (LLOG_NOTICE, NULLCP,
				"adjust: STEP %s st %d off %f drft %f cmpl %f",
				paddr (&peer->src), peer->stratum,
				peer->estoffset, drift_comp, compliance);
			TRACE (1, ("Clockset from %s stratum %d offset %f",
				   paddr (&peer->src), peer->stratum, peer->estoffset));
			/* Clear every peer's filter; the old samples predate
			   the step and are meaningless now. */
			while (p) {
				clear(p);
				p = p->next;
			}
			/* Hold off further updates while fresh samples
			   accumulate. */
			sys.hold = PEER_SHIFT * (1 << NTP_MINPOLL);
			TRACE (3, ("clock_updates: STEP ADJ"));
		} else {
			if (logstats) {
				advise (LLOG_NOTICE, NULLCP,
					"adjust: SLEW %s st %d off %f drft %f cmpl %f",
					paddr (&peer->src), peer->stratum,
					peer->estoffset, drift_comp, compliance);
			}
		}
	}
}

/* 3.4.6 Initialization procedure */
/*
 * initialize - reset all system variables to the unsynchronized state
 * (leap = ALARM, stratum 0, zero distance/dispersion/refid/reftime,
 * no selected peer).
 */
void
initialize()
{
	sys.leap = ALARM;	/* indicate unsynchronized */
	sys.stratum = 0;
	sys.precision = 0;	/* may be specified in the config file;
				   if not, gets set in init_kern_vars() */
#if 0
	/* under construction */
	sys.keyid = 0;
	sys.keys = ??;
#endif
	sys.distance.int_part = sys.distance.fraction = 0;
	sys.dispersion.int_part = sys.dispersion.fraction = 0;
	sys.refid.rid_type = 0;
	sys.refid.rid_inet = 0;
	sys.reftime.int_part = sys.reftime.fraction = 0;
	sys.hold = 0;
	sys.peer = NULL;
}

/* 3.4.7 Clear Procedure */
/*
 * clear - empty a peer's filter register and reset its association
 * state, then re-run clock selection (the cleared peer may have been
 * sys.peer).
 */
void
clear(peer)
	register struct ntp_peer *peer;
{
	register int i;

	TRACE (3, ("clear: emptied filter for %s", paddr (&peer->src)));
	/* Only touch the poll interval of peers we have actually heard
	   from. */
	if (peer->reach != 0)
		peer->hpoll = NTP_MINPOLL;
	peer->estdisp = PEER_MAXDISP;
	/* NOTE(review): loop bound is NTP_WINDOW while the shift loops in
	   clock_filter() use PEER_SHIFT -- presumably equal; confirm in
	   the header. */
	for (i = 0; i < NTP_WINDOW; i++)
		peer->filter.offset[i] = 0.0;
	peer->filter.samples = 0;
	/* Implementation specific */
	peer->valid = 0;
	peer->org.int_part = peer->org.fraction = 0;
	peer->rec.int_part = peer->rec.fraction = 0;
	peer->xmt.int_part = peer->xmt.fraction = 0;
	if (peer->reach != 0)
		poll_update(peer, NTP_MINPOLL);
	select_clock();
	if (sys.peer != NULL)
		poll_update(sys.peer, NTP_MINPOLL);
}

/* 3.4.8 Poll Update Procedure */
/*
 * poll_update - recompute a peer's polling interval from the host and
 * peer poll exponents, clamped to [NTP_MINPOLL, NTP_MAXPOLL], and
 * randomize the countdown timer when the interval shrinks.
 */
void
poll_update(peer, new_hpoll)
	register struct ntp_peer *peer;
	int new_hpoll;
{
	int interval;

	peer->hpoll = MAX(NTP_MINPOLL, MIN(NTP_MAXPOLL, new_hpoll));
#if XTAL
	/* if crystal controlled clock */
	if (peer == sys.peer)
#endif
		peer->hpoll = NTP_MINPOLL;
	/* Interval is 2**min(ppoll, hpoll), clamped to the legal range. */
	interval = 1 << (MAX(MIN((int)peer->ppoll,
				 MIN((int)peer->hpoll, NTP_MAXPOLL)),
			     NTP_MINPOLL));
#ifdef REFCLOCK
	if (peer->flags & PEER_FL_REFCLOCK)
		interval = 1 << NTP_MINPOLL;
#endif
	if (interval == peer->timer)
		return;
	/* only randomize when poll interval changes */
	if (interval < peer->timer) {
		/* Scale the new interval by a random fraction in
		   [0.00, 0.99] so peers don't fire in lockstep.
		   NOTE(review): when random() % 100 == 0 this sets the
		   timer to 0 -- confirm the timer code treats 0 sanely. */
		interval = (double)interval * (double)(random () % 100 / 100.0);
		peer->timer = interval;
	}
	TRACE (3, ("poll_update: timer %d, poll=%d", peer->timer, interval));
}

/* 3.4.9 Authentication Procedures */
#if 0
encrypt() {}
decrypt() {}
#endif

/* 4.1 Clock Filter Procedure */
/*
 * The previous incarnation of this code made the assumption that
 * the value of PEER_FILTER was a power of two and used shifting.
 * This version has been generalized, so that experimenting with
 * different PEER_FILTER values should be much easier.
 */
/*
 * clock_filter - push a new (delay, offset) sample into the peer's
 * shift register, pick the sample with minimum delay as the peer's
 * estimated delay/offset, and compute the filter dispersion as a
 * weighted sum of offset differences.
 */
void
clock_filter(peer, new_delay, new_offset)
	register struct ntp_peer *peer;
	double new_delay, new_offset;
{
	double offset[PEER_SHIFT], delay[PEER_SHIFT];
	register double temp, d, w;
	register int i, j, samples;

	if (peer->filter.samples < PEER_SHIFT)
		peer->filter.samples++;
	/*
	 * Too bad C doesn't have a barrel shifter...
	 */
	for (i = PEER_SHIFT - 1; i; i--) {
		peer->filter.offset[i] = peer->filter.offset[i - 1];
		peer->filter.delay[i] = peer->filter.delay[i - 1];
	}
	peer->filter.offset[0] = new_offset;
	peer->filter.delay[0] = new_delay;
	samples = 0;
	/*
	 * Now sort the valid (non-zero delay) samples into a temporary
	 * list by delay.
	 *
	 * First, build the temp list...
	 */
	for (i = 0; i < peer->filter.samples; i++) {
		if (peer->filter.delay[i] != 0.0) {
			offset[samples] = peer->filter.offset[i];
			delay[samples++] = peer->filter.delay[i];
		}
	}
	/* ..and now sort it. */
	if (samples) {
		/* Simple exchange sort; PEER_SHIFT is small. */
		for (i = 0; i < samples - 1; i++) {
			for (j = i + 1; j < samples; j++) {
				if (delay[i] > delay[j]) {
					temp = delay[i];
					delay[i] = delay[j];
					delay[j] = temp;
					temp = offset[i];
					offset[i] = offset[j];
					offset[j] = temp;
				}
			}
		}
		/* samples are now sorted by delay */
		peer->estdelay = delay[0];
		peer->estoffset = offset[0];
	}
	/* Dispersion: sum over the register of |offset[i] - offset[0]|
	   (missing slots count as PEER_MAXDISP), each weighted by
	   PEER_FILTER**i. */
	temp = 0.0;
	w = 1.0;
	for (i = 0; i < PEER_SHIFT; i++) {
		if (i >= samples)
			d = PEER_MAXDISP;
		else {
			if ((d = offset[i] - offset[0]) < 0)
				d = -d;
			if (d > PEER_MAXDISP)
				d = PEER_MAXDISP;
		}
		temp += d * w;
		/* compute PEER_FILTER**i as we go along */
		w *= PEER_FILTER;
	}
	peer->estdisp = temp;
	TRACE (3, ("clock_filter: estdelay %f, estoffset %f, estdisp %f",
		   peer->estdelay, peer->estoffset, peer->estdisp));
}

/* 4.2 Clock Select Procedure */
/*
 * select_clock - choose sys.peer from all known peers: collect the
 * peers that pass sanity_check(), sort and truncate the candidate
 * list, iteratively discard the candidate with the worst relative
 * dispersion (falseticker elimination), then keep the current peer if
 * it survived, otherwise switch to the best remaining candidate.
 */
void
select_clock()
{
	struct ntp_peer *ptmp, *peer = peer_list.head;
	struct sel_lst {
		struct ntp_peer *peer;
		double distance;
		double precision;
	} sel_lst[X_NTP_CANDIDATES];
	int i, j, stratums, candidates;
	int sanity_check();
	double dtmp;

	candidates = 0;
	stratums = 0;
	while (peer != NULL && candidates < X_NTP_CANDIDATES) {
		/*
		 * Check if this is a candidate for "sys.peer"
		 */
		peer->flags &= ~(PEER_FL_SANE | PEER_FL_CANDIDATE);
		if (sanity_check(peer)) {
			sel_lst[candidates].peer = peer;
			sel_lst[candidates].distance =
				peer->estdisp + s_fixed_to_double(&peer->dispersion);
			peer->flags |= PEER_FL_SANE;
			candidates++;
		}
		peer = peer->next;
	}
	TRACE (3, ("select_clock: step1 %d candidates", candidates));
	/*
	 * If no candidates passed the sanity check, then give up.
	 */
	if (!candidates) {
		if (sys.peer != NULL) {
			advise (LLOG_NOTICE, NULLCP, "Lost NTP peer %s",
				paddr (&sys.peer->src));
		}
		TRACE (3, ("select_clock: no candidates"));
		sys.peer = NULL;
		/*
		 * leave sys.stratum and sys.refid intact after losing
		 * reachability to all clocks. After 24 hours, we'll
		 * set the alarm condition if we didn't get any clock
		 * updates.
		 */
		return;
	}
	/*
	 * Sort the list. We assume that sanity_check() above trashed any
	 * peers which were stratum 0, so we can safely compare stratums
	 * below. Sort the list by stratum. Where stratums are equal, the
	 * peer with the lowest (peer.estdisp + peer.dispersion) is preferred.
	 */
	for (i = 0; i < candidates - 1; i++) {
		for (j = i + 1; j < candidates; j++) {
			if ((sel_lst[i].peer->stratum > sel_lst[j].peer->stratum) ||
			    ((sel_lst[i].peer->stratum == sel_lst[j].peer->stratum) &&
			     (sel_lst[i].distance > sel_lst[j].distance))) {
				ptmp = sel_lst[i].peer;
				dtmp = sel_lst[i].distance;
				sel_lst[i].peer = sel_lst[j].peer;
				sel_lst[i].distance = sel_lst[j].distance;
				sel_lst[j].peer = ptmp;
				sel_lst[j].distance = dtmp;
			}
		}
	}
	TRACE (3, ("select_clock: step2 %d candidates", candidates));
	/* truncate the list at NTP_MAXLIST peers */
	if (candidates > NTP_MAXLIST)
		candidates = NTP_MAXLIST;
	TRACE (3, ("select_clock: step3 %d candidates", candidates));
	/* truncate list where number of different strata exceeds NTP_MAXSTRA */
	for (stratums = 0, i = 1; i < candidates; i++) {
		if (sel_lst[i - 1].peer->stratum != sel_lst[i].peer->stratum) {
			if (++stratums > NTP_MAXSTRA) {
				TRACE (2, ("select_clock: truncated to %d peers", i));
				candidates = i;
				break;
			}
		}
	}
	TRACE (3, ("select_clock: step4 %d candidates", candidates));
	/*
	 * Kick out falsetickers
	 */
	/* now, re-sort the list by peer.stratum and peer.estdelay */
	for (i = 0; i < candidates - 1; i++) {
		for (j = i + 1; j < candidates; j++) {
			if ((sel_lst[i].peer->stratum > sel_lst[j].peer->stratum) ||
			    ((sel_lst[i].peer->stratum == sel_lst[j].peer->stratum) &&
			     (sel_lst[i].peer->estdelay > sel_lst[j].peer->estdelay))) {
				ptmp = sel_lst[i].peer;
				sel_lst[i].peer = sel_lst[j].peer;
				sel_lst[j].peer = ptmp;
			}
		}
	}
	while (candidates > 1) {
		double maxdispersion = 0.0, dispersion, weight;
		double min_precision_thres = 10e20, precision_thres;
		short worst = 0;	/* shut up GNU CC about unused var */

		TRACE (3, ("select_clock: step5 %d candidates", candidates));
		for (i = 0; i < candidates; i++) {
			/* compute dispersion of candidate `i' relative to
			   the rest of the candidates */
			dispersion = 0.0;
			weight = 1.0;
			sel_lst[i].peer->flags |= PEER_FL_CANDIDATE;
			for (j = 0; j < candidates; j++) {
				dtmp = sel_lst[j].peer->estoffset -
					sel_lst[i].peer->estoffset;
				if (dtmp < 0)
					dtmp = -dtmp;
				dispersion += dtmp * weight;
				weight *= NTP_SELECT;
			}
			/* since we just happen to have this double floating
			   around.. */
			sel_lst[i].distance = dispersion;
			/* NOTE(review): 1<<-sys.precision is undefined if
			   sys.precision is positive or <= -(word width);
			   the peer precision two lines below is guarded
			   but sys.precision is not -- confirm sys.precision
			   is always in a safe negative range. */
			precision_thres = NTP_MAXSKW + 1.0/(1<<-sys.precision);
			if (sel_lst[i].peer->precision < 0 &&
			    -sel_lst[i].peer->precision < sizeof(long)*NBBY)
				precision_thres += 1.0/(1<<-sel_lst[i].peer->precision);
			sel_lst[i].precision = precision_thres;
			if (dispersion >= maxdispersion) {
				maxdispersion = dispersion;
				worst = i;
			}
			if (precision_thres < min_precision_thres) {
				min_precision_thres = precision_thres;
			}
			TRACE (4, (" peer %s => disp %f prec_th %f",
				   paddr(&sel_lst[i].peer->src),
				   dispersion, precision_thres));
		}
		/*
		 * Now check to see if the max dispersion is greater than
		 * the min dispersion limit. If so, crank again, otherwise
		 * bail out.
		 */
		if (! (maxdispersion > min_precision_thres)) {
			TRACE (4, (" %d left valid", candidates));
			break;
		}
		TRACE (4, (" peer %s => TOSS", paddr(&sel_lst[worst].peer->src)));
		/*
		 * now, we need to trash the peer with the worst dispersion
		 * and iterate until there is only one candidate peer left.
		 */
		if (worst != candidates - 1) {
			sel_lst[worst].peer->flags &= ~PEER_FL_CANDIDATE;
			for (i = worst, j = worst + 1; j < candidates; )
				sel_lst[i++].peer = sel_lst[j++].peer;
		}
		candidates--;
		/* one more time.. */
	}
	TRACE (3, ("select_clock: step6 %d candidates", candidates));
	/*
	 * Check to see if current peer is on the list of candidate peers.
	 * If so, don't change sys.peer. Note that if the first selected
	 * clock is at a lower stratum, don't even bother; we're going to
	 * want to switch to it.
	 */
	if (sys.peer != NULL && (sys.peer->stratum <= sel_lst[0].peer->stratum)) {
		for (i = 0; i < candidates; i++) {
			if (sys.peer == sel_lst[i].peer) {
				/*
				 * The clock we're currently synchronized to
				 * is among the candidate peers. Don't switch.
				 */
				if (i != 0) {
					/*
					 * Count instances where the best
					 * candidate is different from the
					 * current clock, thus inhibiting
					 * clockhopping.
					 */
					peer_sw_inhibited++;
				}
				return;
			}
		}
	}
	/*
	 * The currently selected peer (if any) isn't on the candidate list.
	 * Grab the first one and let it be.
	 */
	if (sys.peer != sel_lst[0].peer) {
		if (sys.peer != NULL)
			advise (LLOG_NOTICE, NULLCP,
				"clock: select peer %s stratum %d was %s stratum %d",
				paddr (&sel_lst[0].peer->src),
				sel_lst[0].peer->stratum,
				paddr (&sys.peer->src), sys.peer->stratum);
		else
			advise (LLOG_NOTICE, NULLCP,
				"clock: select peer %s stratum %d was UNSYNCED",
				paddr (&sel_lst[0].peer->src),
				sel_lst[0].peer->stratum);
		sys.peer = sel_lst[0].peer;
		peer_switches++;
	}
}

/*
 * sanity_check - decide whether a peer may be considered for clock
 * selection.  Returns 1 if the peer passes all checks, 0 otherwise.
 */
int
sanity_check(peer)
	struct ntp_peer *peer;
{
	TRACE (7, ("Checking peer %s stratum %d",
		   paddr (&peer->src), peer->stratum));
	/* Sanity check -1 - not really in consideration */
	if (peer->flags & PEER_FL_SNOOZE)
		return 0;
	/* Sanity check 0. ?? */
	if (!(peer->flags & PEER_FL_SYNC))
		return(0);
	/* Sanity check 1. Stratum must be in (0, NTP_INFIN). */
	if (peer->stratum <= 0 || peer->stratum >= NTP_INFIN)
		return(0);
	/* Sanity check 2. if peer.stratum is greater than one (synchronized
	   via NTP), peer.refid must not match peer.dstadr -- i.e. the peer
	   must not be synchronizing off one of our own addresses. */
	if (peer->stratum > 1) {
		register int i;

		for (i = 1; i < nintf; i++) {
			if ((addrs[i].flags & INTF_VALID) == 0)
				continue;
			if (addrs[i].addr.type == AF_INET &&
			    peer->refid.rid_type == RID_INET &&
			    addrs[i].addr.inet_ad.sin_addr.s_addr ==
			    peer->refid.rid_inet)
				return (0);
			if (addrs[i].addr.type == AF_OSI &&
			    peer->refid.rid_type == RID_PSAP &&
			    psapaddr_cmp (&peer->refid.rid_psap,
					  &addrs[i].addr.psap_ad))
				return 0;
		}
	}
	/* Sanity check 3. Both peer.estdelay and peer.estdisp must be less
	   than NTP_MAXWGT, which insures that the filter register is at
	   least half full, yet avoids using data from very noisy
	   associations or broken implementations. */
	if (peer->estdisp > (float)NTP_MAXWGT ||
	    peer->estdelay > (float)NTP_MAXWGT)
		return(0);
	/* Sanity check 4. The peer clock must be synchronized... and the
	   interval since the peer clock was last updated must satisfy
	   peer.org - peer.reftime < NTP.MAXAGE */
	if (peer->leap == ALARM ||
	    (ul_fixed_to_double(&peer->org) -
	     ul_fixed_to_double(&peer->reftime)) >= NTP_MAXAGE)
		return(0);
	TRACE (7, ("That one is certainly qualified %s", paddr (&peer->src)));
	return(1);
}
/* (end of extracted ntp_proto.c fragment; web-viewer chrome removed) */