/* iprouting.nc — IP routing engine (TinyOS blip stack) */
default_route_failures++;
if ((e != &(neigh_table[0])) &&
// we have higher confidence and lower cost
(((getConfidence(e) > CONF_PROM_THRESHOLD) && // getConfidence(e - 1)) &&
(checkThresh(getMetric(e), getMetric(e-1), PATH_COST_DIFF_THRESH) == BELOW_THRESH)) ||
// we have similar cost and sufficient confidenceIP
((checkThresh(getMetric(e), getMetric(e-1), PATH_COST_DIFF_THRESH) == WITHIN_THRESH) &&
(getConfidence(e) > CONF_PROM_THRESHOLD)))) {
dbg("IPRouting", "Promoting node 0x%x over node 0x%x\n", e->neighbor, (e-1)->neighbor);
swapNodes((e - 1), e);
}
// stats.successes += 1;
// stats.transmissions += policy->actRetries;
} else {
dbg("IPRouting", "FAILURE!!!!!\n");
}
}
}
/*
 * @returns TRUE if the routing engine has established a default route.
 */
command bool IPRouting.hasRoute() {
  // The table is kept sorted, so a default route exists exactly when the
  // best-ranked slot holds a valid neighbor.
  struct neigh_entry *best = &neigh_table[0];
  return IS_NEIGH_VALID(best);
}
/*
 * Build an IPv6 source-routing header from the flow entry's recorded path
 * and push it onto the front of the message's header chain.
 *
 * @returns a pointer to the (statically allocated) routing header.
 */
struct ip6_route *insertSourceHeader(struct split_ip_msg *msg, struct flow_entry *entry) {
  // Static storage: the header and its chain link must outlive this call,
  // since msg->headers keeps pointing at them until the message is sent.
  static uint8_t source_buf[sizeof(struct ip6_route) + MAX_PATH_LENGTH * sizeof(uint16_t)];
  static struct generic_header g_sh;
  struct ip6_route *route = (struct ip6_route *)source_buf;
  uint8_t hop;

  // Splice the routing header in front of the existing next-header value.
  route->nxt_hdr = msg->hdr.nxt_hdr;
  msg->hdr.nxt_hdr = IPV6_ROUTING;

  route->len = sizeof(struct ip6_route) + entry->entries[0].pathE->path_len * sizeof(uint16_t);
  route->type = IP6ROUTE_TYPE_SOURCE;
  route->segs_remain = entry->entries[0].pathE->path_len;

  // Copy the recorded hop list, converting each address to host order.
  for (hop = 0; hop < entry->entries[0].pathE->path_len; hop++) {
    route->hops[hop] = ntohs(entry->entries[0].pathE->path[hop]);
  }

  // Prepend to the message's generic header chain.
  g_sh.hdr.ext = (struct ip6_ext *)route;
  g_sh.len = route->len;
  g_sh.next = msg->headers;
  msg->headers = &g_sh;

  dbg("Install", "Inserted source header with length 0x%x and next hop: 0x%x\n",
      entry->entries[0].pathE->path_len, entry->entries[0].pathE->path[0]);
  return route;
}
#ifdef CENTRALIZED_ROUTING
command void IPRouting.clearFlows() {
  // Invalidate every flow-table slot and all of its per-choice entries,
  // then drop all cached full-path records.
  int flow, choice;
  for (flow = 0; flow < N_FLOW_ENT; flow++) {
    SET_INVALID_SLOT((&(flow_table[flow])));
    flow_table[flow].count = N_FLOW_ENT;
    for (choice = 0; choice < N_FLOW_CHOICES; choice++) {
      SET_INVALID_ENTRY(flow_table[flow].entries[choice]);
    }
  }
  for (flow = 0; flow < N_FULL_PATH_ENTRIES; flow++) {
    full_path_entries[flow].path_len = 0;
  }
}
#endif
// Saturate a wider unsigned value into 8 bits: anything above 0xff clamps to 0xff.
// NOTE(review): X is evaluated twice — do not pass expressions with side effects.
#define convertTo8(X) ((X) > 0xff ? 0xff : (X))
/*
* Inserts all necessary routing headers for the packet
*
* If packet is going to the root, inserts a topology information
* collection header
* XXX : SDH : the detection of whether it's going to the root is
* very broken...
*
*/
/*
 * Optionally supply a destination-options TLV carrying local topology
 * information (neighbor address, link ETX, confidence) so packets headed
 * toward an edge router piggy-back link-state data.
 *
 * @returns a pointer to a statically allocated TLV, or NULL when no header
 *          should be attached (multicast destination, non-payload next
 *          header, topology already reported, or no reportable neighbors).
 */
event struct tlv_hdr *DestinationExt.getHeader(int label,int nxt_hdr,
struct ip6_hdr *iph) {
// Static: the TLV must remain valid after this event returns; the buffer is
// reused on every invocation.
static uint8_t sh_buf[sizeof(struct tlv_hdr) +
sizeof(struct topology_header) +
(sizeof(struct topology_entry) * N_NEIGH)];
struct tlv_hdr *tlv = (struct tlv_hdr *)sh_buf;
struct topology_header *th = (struct topology_header *)(tlv + 1);
// Base length; grows by one topology_entry per neighbor reported below.
tlv->len = sizeof(struct tlv_hdr) + sizeof(struct topology_header);
tlv->type = TLV_TYPE_TOPOLOGY;
// Skip multicast destinations with small scope (ff0x..., scope <= 3).
// NOTE(review): confirm the intended scope cutoff — masking s6_addr[1]
// extracts the 4-bit multicast scope field.
if (iph->ip6_dst.s6_addr[0] == 0xff &&
(iph->ip6_dst.s6_addr[1] & 0xf) <= 3) {
return NULL;
}
printfUART("inserting destination options header\n");
// AT: We theoretically only want to attach this topology header if we're
// sending this message to a controller. Isn't it easier to just check
// to see if the dest address matches that of the sink?
// SDH: how do you know what the address of the sink is?
// some how we need to check if we're using a default route and
// only attach the topology information if we are. This still isn't
// perfect since somebody further down the tree may have a route and the
// packet might not get to the controller.
// Only piggy-back on actual payload packets (UDP, or no next header).
if (iph->nxt_hdr == IANA_UDP ||
iph->nxt_hdr == IPV6_NONEXT) {
int i,j = 0;
if (iph->ip6_dst.s6_addr16[0] == htons(0xff02)) return NULL;
// Rate limit: report at most once until traffic_sent is reset elsewhere
// (presumably by restartTrafficGen — verify).
if (traffic_sent) return NULL;
traffic_sent = TRUE;
// only add topology information directly behind actual payload
// headers.
// SDH : TODO : check that this will not fragment the packet...
// AT: Why do we care about the number of hops? Debugging purposes?
// Sequence number is transmitted in network byte order.
th->seqno = reportSeqno++;
th->seqno = htons(th->seqno);
// For all these 16-bit values, we're only using 8 bit values
// Report up to 4 neighbors: valid AND (mature OR the current default).
for (i = 0; i < N_NEIGH; i++) {
if (IS_NEIGH_VALID(&neigh_table[i]) && j < 4 &&
(IS_MATURE(&neigh_table[i]) || default_route == &neigh_table[i])) {
th->topo[j].etx = convertTo8(getLinkCost(&neigh_table[i]));
th->topo[j].conf = convertTo8(getConfidence(&neigh_table[i]));
th->topo[j].hwaddr = htons(neigh_table[i].neighbor);
j++;
tlv->len += sizeof(struct topology_entry);
dbg("Lqi", "link est: 0x%x hops: 0x%x\n",
neigh_table[i].linkEstimate, neigh_table[i].hops);
}
}
// Only attach the header if at least one neighbor was reported.
if (j > 0) {
return tlv;
}
}
return NULL;
}
// Nothing to release: the TLV returned by getHeader lives in a static buffer.
event void DestinationExt.free() {
}
/*
 * Insert a source-routing header for this packet if centralized routing is
 * enabled and a full-path flow entry exists for it.
 *
 * @returns the inserted routing header, or NULL when none applies.
 */
command struct ip6_route *IPRouting.insertRoutingHeader(struct split_ip_msg *msg) {
#ifdef CENTRALIZED_ROUTING
  // The source header must be first in the header list, so it is added here
  // rather than alongside the topology header — even a packet destined for
  // the root could theoretically be source routed.
  struct flow_entry *match = getFlowEntry_Header(&msg->hdr);
  if (match != NULL &&
      IS_FULL_TYPE(match->entries[0]) &&
      match->entries[0].pathE->path_len > 1) {
    dbg("IPRouting", "Inserting a source routing header for a full path!\n");
    updateFlowCounts(match);
    return insertSourceHeader(msg, match);
  }
#endif
  return NULL;
}
/*
 * Sort timer will no longer be used only for sorting, but rather to expire
 * an epoch and change entry statistics.
 *
 * Per epoch: re-solicit if routeless, advertise on significant quality or
 * hop-count change, roll short-epoch stats into long-epoch stats, and
 * occasionally explore an alternative default route.
 */
event void SortTimer.fired() {
  dbg("IPRouting", "Epoch ended!\n");
  printTable();
  // Keep soliciting until a default route is established.
  if (!call IPRouting.hasRoute() && !soliciting) {
    call ICMP.sendSolicitations();
    soliciting = TRUE;
  }
  // Advertise when route quality moved by more than 5 or the hop count changed.
  if (checkThresh(call IPRouting.getQuality(), last_qual, 5) != WITHIN_THRESH ||
      last_hops != call IPRouting.getHopLimit()) {
    call ICMP.sendAdvertisements();
    last_qual = call IPRouting.getQuality();
    last_hops = call IPRouting.getHopLimit();
  }
  updateRankings();
  // With probability 8/32, explore a random alternative default route;
  // otherwise keep the current default and reset its failure count.
  if (call Random.rand16() % 32 < 8) {
    dbg("IPRouting", "Attempting exploration\n");  // fixed typo: was "Attemting"
    chooseNewRandomDefault(FALSE);
  } else {
    // default_route = &neigh_table[0];
    default_route_failures = 0;
  }
}
/*
 * This is called when the ICMP engine finishes sending out router solicitations.
 *
 * We will keep sending solicitations so long as we have not
 * established a default route.
 *
 */
event void ICMP.solicitationDone() {
//int i;
dbg("IPRouting", "done soliciting\n");
// Clear the in-progress flag first, then restart immediately if we still
// have no default route.
soliciting = FALSE;
if (!call IPRouting.hasRoute()) {
call ICMP.sendSolicitations();
soliciting = TRUE;
}
}
/*
 * Fill in a snapshot of current routing state: hop limit, default parent,
 * overall route quality, and the parent link's ETX metric.
 */
command void Statistics.get(route_statistics_t *statistics) {
  struct neigh_entry *parent = default_route;
  statistics->parent = (uint16_t) parent->neighbor;
  statistics->parent_etx = getMetric(parent);
  statistics->hop_limit = call IPRouting.getHopLimit();
  statistics->parent_metric = call IPRouting.getQuality();
}
// No-op: statistics are filled on demand in Statistics.get(), so there is no
// accumulated state to reset.  NOTE(review): the old `stats` aggregate appears
// to have been removed — confirm nothing else still expects a real clear.
command void Statistics.clear() {
// ip_memclr((uint8_t *)&stats, sizeof(route_statistics_t));
}
/*
 * Remove a neighbor from the routing table.
 *
 * The freed slot is closed up by bubbling every following valid entry one
 * position toward the front (swapNodes keeps default_route tracking the
 * entry it pointed at).  If the evicted neighbor was the default route,
 * fall back to the best-ranked slot and restart traffic generation so an
 * edge router learns about the change quickly.
 */
void evictNeighbor(struct neigh_entry *neigh) {
struct neigh_entry *iterator;
bool reset_default = FALSE;
dbg("IPRouting", "Evicting neighbor 0x%x\n", neigh->neighbor);
dbg("Evictions", "evict: 0x%x\n", neigh->neighbor);
SET_NEIGH_INVALID(neigh);
// Remember whether we evicted the default route before the slot is wiped.
if (neigh == default_route) {
reset_default = TRUE;
}
// Zero the slot (this also clears the flag set above; an all-zero entry is
// treated as invalid — NOTE(review): confirm IS_NEIGH_VALID fails on zeros).
ip_memclr((uint8_t *)(neigh), sizeof(struct neigh_entry));
// Compact: shift each following valid entry up one slot, stopping at the
// first invalid one, so valid entries stay contiguous from the front.
for (iterator = neigh; iterator < &(neigh_table[N_NEIGH - 1]); iterator++) {
if (!IS_NEIGH_VALID(iterator + 1)) break;
swapNodes(iterator, iterator + 1);
}
if (reset_default) {
// send new topology updates quickly to let an edge router know
// that something happened.
restartTrafficGen();
default_route = &neigh_table[0];
default_route_failures = 0;
}
printTable();
}
// Typically called after an epoch change.
//
// Folds each neighbor's short-epoch counters into its long-epoch totals,
// promotes entries to "mature" once enough traffic has been observed, marks
// mature entries with a bad short-epoch link cost for eviction, and finally
// evicts the marked entries (re-soliciting if any were removed).
void updateRankings() {
uint8_t i;
bool evicted = FALSE;
for (i = 0; i < N_NEIGH; i++) {
UNSET_EVICT(neigh_table[i]);
if (!IS_NEIGH_VALID(&neigh_table[i])) continue;
// Accumulate this epoch's counters into the long-term totals.
neigh_table[i].stats[LONG_EPOCH].total += neigh_table[i].stats[SHORT_EPOCH].total;
neigh_table[i].stats[LONG_EPOCH].receptions += neigh_table[i].stats[SHORT_EPOCH].receptions;
neigh_table[i].stats[LONG_EPOCH].success += neigh_table[i].stats[SHORT_EPOCH].success;
if (neigh_table[i].stats[LONG_EPOCH].total & (0xf000)) {
// if we're this big, the etx computation might overflow.
// Make it smaller by dividing top and bottom by 2.
neigh_table[i].stats[LONG_EPOCH].total >>= 1;
neigh_table[i].stats[LONG_EPOCH].success >>= 1;
}
// Enough observed traffic makes the link "mature" (eligible for eviction).
if (neigh_table[i].stats[LONG_EPOCH].total > CONF_EVICT_THRESHOLD)
SET_MATURE(&neigh_table[i]);
if (IS_MATURE(&(neigh_table[i]))) {
uint16_t cost;
// if we didn't try the link, don't evict it
if (neigh_table[i].stats[SHORT_EPOCH].total == 0) goto done_iter;
// cost = 10 * tries / successes; all failures pins it to the maximum.
if (neigh_table[i].stats[SHORT_EPOCH].success == 0) {
cost = 0xff;
} else {
cost = (10 * neigh_table[i].stats[SHORT_EPOCH].total) /
neigh_table[i].stats[SHORT_EPOCH].success;
}
if (cost > LINK_EVICT_THRESH) {
dbg("Evictions", "cost: 0x%x, slot %i\n", cost, i);
SET_EVICT(neigh_table[i]);
}
}
done_iter:
// Short-epoch counters are reset every epoch regardless of the outcome.
neigh_table[i].stats[SHORT_EPOCH].total = 0;
neigh_table[i].stats[SHORT_EPOCH].receptions = 0;
neigh_table[i].stats[SHORT_EPOCH].success = 0;
}
for (i = 0; i < N_NEIGH; i++) {
if (IS_NEIGH_VALID(&neigh_table[i]) &&
SHOULD_EVICT(neigh_table[i])) {
// #if 0
// SDH : because of the overflow bug, this was never being
// triggered. I'm not sure it's actually a good idea because
// it seems to increase path lengths for heavily used routes.
// Let's disable it for now.
dbg("Evictions", "performing evict: %i\n", i);
evictNeighbor(&neigh_table[i]);
// evictNeighbor compacted the table, so slot i now holds the next
// entry; step back to re-examine it (uint8_t wrap at i==0 is undone
// by the loop's i++).
i --;
// #endif
evicted = TRUE;
}
}
if (evicted)
call ICMP.sendSolicitations();
}
/*
 * Exchange the contents of two neighbor-table slots, keeping default_route
 * aimed at the same logical neighbor after the move.
 */
void swapNodes(struct neigh_entry *highNode, struct neigh_entry *lowNode) {
  struct neigh_entry scratch;
  if (highNode == NULL || lowNode == NULL) {
    return;
  }
  ip_memcpy(&scratch, highNode, sizeof(struct neigh_entry));
  ip_memcpy(highNode, lowNode, sizeof(struct neigh_entry));
  ip_memcpy(lowNode, &scratch, sizeof(struct neigh_entry));
  // Follow the default route pointer through the swap.
  if (default_route == highNode) {
    default_route = lowNode;
  } else if (default_route == lowNode) {
    default_route = highNode;
  }
}
/*
 * Compare two values against a threshold.
 *
 * @returns WITHIN_THRESH when |firstVal - secondVal| <= thresh,
 *          ABOVE_THRESH  when firstVal exceeds secondVal by more than thresh,
 *          BELOW_THRESH  otherwise (secondVal exceeds firstVal by more than thresh).
 */
uint8_t checkThresh(uint32_t firstVal, uint32_t secondVal, uint16_t thresh) {
  uint32_t diff = (firstVal > secondVal) ? (firstVal - secondVal)
                                         : (secondVal - firstVal);
  if (diff <= thresh) {
    return WITHIN_THRESH;
  }
  return (firstVal > secondVal) ? ABOVE_THRESH : BELOW_THRESH;
}
}