// $Id: NetworkM.nc,v 1.1.4.5 2003/08/26 09:08:14 cssharp Exp $

/*									tab:4
 * "Copyright (c) 2000-2003 The Regents of the University of California.
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation for any purpose, without fee, and without written agreement is
 * hereby granted, provided that the above copyright notice, the following
 * two paragraphs and the author appear in all copies of this software.
 *
 * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
 * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF
 * CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
 *
 * Copyright (c) 2002-2003 Intel Corporation
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached INTEL-LICENSE
 * file. If you do not find these files, copies can be found by writing to
 * Intel Research Berkeley, 2150 Shattuck Avenue, Suite 1300, Berkeley, CA,
 * 94704.  Attention:  Intel License Inquiry.
 */

/*
 * Authors:  Sam Madden
 *           Design by Sam Madden, Wei Hong, and Joe Hellerstein
 * Date last modified:  6/26/02
 */

/* Routing component for TinyDB.  Can receive and send data and query
   messages.  Data messages are tuples or aggregate values.  Queries
   represent portions of queries being injected into the network.

   In the TAG world, query messages flood down the routing tree, and data
   messages are sent up the network (to the node's parent) towards the
   root.  Obviously, there are other routing mechanisms that might make
   sense, but the routing system needs to be careful to do duplicate
   elimination -- which may be hard since this API doesn't export the
   contents of the data messages.

   Note that the send routines in this component depend on the higher
   level tuple router in several ugly ways:

   1) They expect that the TOS_MsgPtrs they deliver have space for the
      appropriate TINYDB_NETWORK header (type DbMsgHdr) at the top.

   The basic routing algorithm implemented by this component works as
   follows:

   1) Each mote maintains a small buffer of "neighbor" motes it can hear.
   2) For each neighbor, the mote tracks the signal quality by observing
      sequence numbers on messages from that neighbor, degrading the
      quality when messages are missed.
   3) Each mote picks a parent from its neighbors; ideally, parents will
      be motes that have a high signal quality and are close to the root.
   4) Once a parent has been selected, motes stick to that parent unless
      the quality of the parent degrades substantially (even if another
      parent that is closer to the root with similar quality appears) --
      this "topology stability" property is important for ensuring
      correct computation of aggregates.

   These data structures have been updated to be parameterized by
   ROOT_ID, which allows us to maintain several routing trees.  Note that
   we keep only one neighbor list (and estimate of quality per neighbor),
   but that we keep multiple levels for each neighbor, ourself, and our
   parents.
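
   Illustrative note, not part of the original comment: the "topology
   stability" rule in (4) can be read as a hysteresis test on the 8-bit
   per-neighbor link quality estimates, roughly

     if (candidateQuality > parentQuality + MAX_PROB_THRESH)
       switch to the candidate parent;

   i.e. a candidate must be substantially more reliable (MAX_PROB_THRESH
   is 127 on a 0..255 scale) before the current parent is abandoned.  The
   exact test is in the parent-selection code further down this file.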
   Authors:  Sam Madden -- basic structure, current maintainer
             Joe Hellerstein -- initial implementation of neighbor
                                tracking and parent selection
*/

/**
 * @author Sam Madden
 * @author Design by Sam Madden
 * @author Wei Hong
 * @author and Joe Hellerstein
 */

includes TinyDB;

module NetworkM {
  provides {
    interface Network;
    interface StdControl;
    interface NetworkMonitor;
  }

  uses {
    interface SendMsg as SendDataMsg;
    interface SendMsg as SendQueryMsg;
    interface SendMsg as SendQueryRequest;
    interface SendMsg as DebugMsg;
    interface SendMsg as SchemaMsg;
#ifdef kSUPPORTS_EVENTS
    interface SendMsg as EventMsg;
#endif
#ifdef kSTATUS
    interface SendMsg as SendStatusMessage;
#endif

    interface ReceiveMsg as RcvDataMsg;
    interface ReceiveMsg as RcvQueryMsg;
#ifdef kQUERY_SHARING
    interface ReceiveMsg as RcvRequestMsg;
#endif
    interface ReceiveMsg as RcvCommandMsg;
#ifdef kSUPPORTS_EVENTS
    interface ReceiveMsg as RcvEventMsg;
#endif
#ifdef kSTATUS
    interface ReceiveMsg as RcvStatusMessage;
#endif

    interface CommandUse;
#ifdef kSUPPORTS_EVENTS
    interface EventUse;
#endif
    interface Leds;
    interface Random;
    interface QueryProcessor;
    interface StdControl as SubControl;
  }
}

implementation {

  enum {
    LEVEL_UNKNOWN = -1,
    PARENT_UNKNOWN = -1,
    BAD_IX = -1,
    UNKNOWN_ROOT = -1,
    PARENT_RESELECT_INTERVAL = 5,   // 5 epochs
    PARENT_LOST_INTERVAL = 10,      // 10 epochs
    TIME_OF_FLIGHT = 0,
    NUM_RELATIVES = 5,
    MAX_PROB_THRESH = 127,  // only switch parents if there's a node that's a lot more reliable
    ALPHABITS = 2,          // We do a moving average at time t where
                            //   avg_t = avg_{t-1}*(1-alpha) + alpha*newvalue
                            // Since we're using integers, we shift by ALPHABITS
                            // rather than multiplying by a fraction, so
                            // ALPHABITS = 2 is like alpha = 0.25 (i.e. divide by 4).
    NACKALPHA = 4,          // alpha bits to use on a failed ack
    QUERY_RETRIES = 5,
    NUM_ROOTS = 4,
    DATA_RETRIES = 0,       // don't retry data messages for now, since we have
                            // no way of eliminating duplicates
  };

  uint16_t mSendCount;  // increment each epoch

  // We track NUM_RELATIVES potential parents.  For each, we will track
  // the mean # of drops of msgs from that "relative".
  char mRelatives[NUM_RELATIVES];  // an array of senders we're tracking as potential parents
  char mRelOccupiedBits;           // occupancy bitmap for slots in the relatives array

  short mParentIx[NUM_ROOTS];      // the current parent's index in the "relatives" array
                                   // invariant: parentIx < NUM_RELATIVES
                                   // invariant: relOccupiedBits is on for parentIx

  unsigned short mLastIdxRelative[NUM_RELATIVES];  // last idx # from this rel
  unsigned char mCommProb[NUM_RELATIVES];          // an 8-bit weighted moving average of
                                                   // delivery success (delivered = 255,
                                                   // dropped = 0)
  short mRoots[NUM_ROOTS];
  char mRelLevel[NUM_ROOTS][NUM_RELATIVES];        // current level of rel

  TOS_MsgPtr mMsg;
  TOS_Msg mDbg;
  uint8_t mAmId;
  uint8_t mIdx;
  bool mIsRoot;
  bool mForceTopology;
  bool mUart;          // is uart in use?
  bool mLocal;         // was this message sent locally, or do our children need to see completion events?
  bool mWasCommand;    // is there a command that needs to be executed in dbg?
  bool mRadio;         // is radio in use? (pending flag for radio)
  char mFanout;        // fanout of routing tree if forceTopology is true
  bool mCentralized;   // all messages should be routed to the root (no aggregation?)
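
  // Illustrative sketch, not part of the original component: the
  // shift-based moving average described in the ALPHABITS comment above.
  // With ALPHABITS = 2 this computes avg <- avg*(3/4) + sample*(1/4)
  // using only shifts, the integer form of
  //   avg_t = avg_{t-1}*(1-alpha) + alpha*newvalue.
  // A delivered message would be folded in as sample 255 and a missed
  // sequence number as sample 0, matching the mCommProb[] convention.
  // The helper name is hypothetical and nothing shown here calls it;
  // the component's real update code appears further down the file.
  uint8_t movingAvgSketch(uint8_t avg, uint8_t sample) {
    return (uint8_t)(avg - (avg >> ALPHABITS) + (sample >> ALPHABITS));
  }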

  short mMinparent;  // min parent id number when we force topology
  short mMaxparent;  // max parent id number when we force topology
  short mParentCand1, mParentCand2, mParentCand3;
  short mLastCheck, mLastHeard;
  uint16_t mContention;  // a measure of the amount of contention on the
                         // radio -- measured via failed ACKs
  uint16_t mRem;

  enum { NUM_RECENT_MSGS = 8 };
  long mRecentMsgs[NUM_RECENT_MSGS];
  uint8_t mNextMsg;

  short mRetryCnt;
  SchemaErrorNo errorNo;

  typedef enum {
    QUERY_TYPE = 0,
    DATA_TYPE = 1
  } MsgType;

  typedef struct {
    short nodeid;
    short msgcount;
  } NodeMsgCount;

  void initHeader(DbMsgHdr *header, bool amRoot, uint8_t rootId);
  bool processHeader(DbMsgHdr header, MsgType type, uint8_t rootId);
  void setParentRange();
  void setRoot(uint8_t rootId);
  bool checkRoot(TOS_MsgPtr msg, uint8_t *rootId);
  void degradeLinkQuality(short neighborId);

  uint8_t myLevel(uint8_t rootId) {
    if (mRoots[rootId] == TOS_LOCAL_ADDRESS)
      return 0;
    if (mParentIx[rootId] == PARENT_UNKNOWN ||
        mRelLevel[rootId][mParentIx[rootId]] == LEVEL_UNKNOWN)
      return LEVEL_UNKNOWN;
    return mRelLevel[rootId][mParentIx[rootId]] + 1;
  }

  command result_t StdControl.init() {
    int i;

    mSendCount = 0;
    mLastCheck = 0;
    mLastHeard = 0;
    mContention = 0;
    mRem = 0;
    mNextMsg = 0;
    for (i = 0; i < NUM_RECENT_MSGS; i++)
      mRecentMsgs[i] = 0xFFFFFFFF;  // mark statistics invalid

    mRelOccupiedBits = 0;
    for (i = 0; i < NUM_ROOTS; i++) {
      mParentIx[i] = PARENT_UNKNOWN;
      mRoots[i] = UNKNOWN_ROOT;
    }

    mIdx = 0;
    mFanout = 0xFF;  // default -- no fanout
    mCentralized = FALSE;
    mFanout = 0;
    mRadio = FALSE;
    mForceTopology = FALSE;
    mWasCommand = FALSE;
    mUart = FALSE;
    mRetryCnt = 0;

    return call SubControl.init();
  }

  // set up data structures as though we're the root of this network
  void setRoot(uint8_t rootId) {
    //mRelOccupiedBits = 1;
    //mRelatives[0] = 0;
    //mRelLevel[rootId][0] = LEVEL_UNKNOWN;
    //mParentIx[rootId] = 0;
  }

  /** Given the node id of a routing tree root, determine the routing tree
      id (the root parameter of the data structures to use).

      Returns -1 if no more routing trees are available.
  */
  uint8_t getRootId(short rootNode) {
    int i;
    int firstUnknown = -1;

    for (i = 0; i < NUM_ROOTS; i++) {
      if (mRoots[i] != UNKNOWN_ROOT) {
        if (mRoots[i] == rootNode) {
          return i;
        }
      } else if (firstUnknown == -1) {
        firstUnknown = i;
      }
    }

    if (firstUnknown != -1) {
      mRoots[firstUnknown] = rootNode;
      return firstUnknown;
    }

    // HACK -- no more routing trees are available!
    return -1;  // something, anyway...
  }

  // check the message to see if it says we're supposed to be root;
  // if so, set up the root and return TRUE, otherwise return FALSE
  bool checkRoot(TOS_MsgPtr msg, uint8_t *rootIdPtr) {
    short root = call QueryProcessor.msgToQueryRoot(msg);
    uint8_t rootId;

    if (root == -1) {
      *rootIdPtr = 0;  // default...
      return FALSE;
    }
    rootId = getRootId(root);
    *rootIdPtr = rootId;
    if (root == TOS_LOCAL_ADDRESS)
      setRoot(rootId);
    return root == TOS_LOCAL_ADDRESS;
  }

  command result_t StdControl.start() {
    return call SubControl.start();
  }

  command result_t StdControl.stop() {
    return call SubControl.stop();
  }

  command QueryResultPtr Network.getDataPayLoad(TOS_MsgPtr msg) {
    return (QueryResultPtr)&((NetworkMessage *)msg->data)->data[0];
  }
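
  /* Illustrative usage sketch, not from the original file: a client
     component wired to the Network interface (such as the tuple router)
     is expected to fill in the payload obtained from getDataPayLoad and
     then hand the same buffer to sendDataMessage, keeping the buffer
     untouched until the send-done event described below is signalled.
     The buffer name and surrounding module are hypothetical:

       TOS_Msg buf;
       QueryResultPtr qr = call Network.getDataPayLoad(&buf);
       // ... fill in *qr ...
       if (call Network.sendDataMessage(&buf) != err_NoError) {
         // the send was not started; the caller still owns the buffer
       }
  */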

  /* Send a 'data' message -- data messages contain information about data
     tuples that should be sent up the routing tree.

     REQUIRES: msg is a message buffer of which the first entry is of type
               DbMsgHdr
     SIGNALS:  TINYDB_NETWORK_SUB_MSG_SEND_DONE after message is sent,
               unless an error is returned.
     RETURNS:  err_NoError if no error,
               err_UnknownError if transmission fails.
  */
  command TinyDBError Network.sendDataMessage(TOS_MsgPtr msg) {
    uint8_t rootId = 0;
    bool amRoot;

    amRoot = checkRoot(msg, &rootId);
    return call Network.sendDataMessageTo(msg, mRelatives[mParentIx[rootId]]);
  }

  // note that the "sendMessageTo" interface doesn't change the behavior of
  // this thing at all...
  command TinyDBError Network.sendDataMessageTo(TOS_MsgPtr msg, uint16_t dest) {
    NetworkMessage *nw = (NetworkMessage *)msg->data;
    bool amRoot;
    uint8_t rootId = 0;

    mMsg = msg;
    mAmId = kDATA_MESSAGE_ID;

    amRoot = checkRoot(msg, &rootId);

    // send message bcast, filter at app level
    // amRoot == a base station (connected directly to the pc via the uart)
    if (!mRadio) {
      mRadio = TRUE;
      initHeader(&nw->hdr, amRoot, rootId);
      if (amRoot) {
        mIdx--;  // no one else will see this message -- reset the sequence counter
        if (call SendDataMsg.send(TOS_UART_ADDR, kMSG_LEN, msg) == SUCCESS) {
          return err_NoError;
        } else {
          mRadio = FALSE;
          return err_MessageSendFailed;
        }
      } else {
        mRetryCnt = DATA_RETRIES;
        if (call SendDataMsg.send(TOS_BCAST_ADDR, kMSG_LEN, msg) == SUCCESS) {
          return err_NoError;
        } else {
          mIdx--;  // failure -- reuse this index on the next try
          mRadio = FALSE;
          return err_MessageSendFailed;
        }
      }
    }
    return err_MessageSendFailed;
  }

  command QueryMessagePtr Network.getQueryPayLoad(TOS_MsgPtr msg) {
    return (QueryMessagePtr)&((NetworkMessage *)msg->data)->data[0];
  }

  /* Send a 'query' message -- query messages contain information about
     queries that should be sent to neighbors in the network.

     REQUIRES: msg is a message buffer of which the first entry is of type
               DbMsgHdr
     SIGNALS:  TINYDB_NETWORK_SUB_MSG_SEND_DONE after message is sent,
               unless an error is returned.
     RETURNS:  err_NoError if no error,
               err_UnknownError if transmission fails.
  */
  command TinyDBError Network.sendQueryMessage(TOS_MsgPtr msg) {
    NetworkMessage *nw = (NetworkMessage *)msg->data;