// route.c
    Time Now)
{
    NDIS_STATUS Status;
    LARGE_INTEGER Timestamp;
    LARGE_INTEGER Frequency;
    KIRQL OldIrql;
    Time NextTimeout = Now + MIN_RTE_TIMOUT_INTERVAL;
    RouteTableEntry *RTE;
    RouteTableEntry *NextRTE;
#if DBG
    uint OldRTCount;
    uint CheckCount = 0;
#endif
    KeAcquireSpinLock(&VA->RT.Lock, &OldIrql);
#if DBG
    OldRTCount = VA->RT.Count;
#endif
    for (RTE = VA->RT.FirstRTE;
         RTE != SentinelRTE(&VA->RT);
         RTE = NextRTE) {
        Time RTETimeout = RTE->Timeout;
#if DBG
        CheckCount++;
#endif
        VRRASSERT(RTE->RefCnt >= VRR_MIN_RTE_REFCNT);
        VRRASSERT(VRR_RTE_STATE_ORPHAN != RTE->State);
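        //
        // Capture the forward link now; RemoveRTE below may unlink this RTE.
        //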
        NextRTE = RTE->Next;
        if (RTETimeout != 0 && RTETimeout < Now) {
            //
            // A timer has expired on this RTE.
            //
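            // Expiry advances the entry through its lifecycle: an ACTIVE
            // entry is retired, and an already retired entry is removed.
            //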
            if (VRR_RTE_STATE_RETIRED == RTE->State) {
                //
                // Finally release a retired RTE.
                //
                RemoveRTE(&VA->RT, RTE);
            }
            else if (VRR_RTE_STATE_ACTIVE == RTE->State) {
                //
                // Retire this RTE.
                //
                RTE->State = VRR_RTE_STATE_RETIRED;
                RTE->Timeout = RTERetireTimeout(RTE->Flags);
            }
        }
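        //
        // Track the earliest pending timeout; the returned value presumably
        // tells the caller when this scan next needs to run.
        //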
        if (RTETimeout > Now && RTETimeout < NextTimeout)
            NextTimeout = RTETimeout;
    }
#if DBG
    VRRASSERT(CheckCount == OldRTCount);
#endif
    KeReleaseSpinLock(&VA->RT.Lock, OldIrql);
    return NextTimeout;
}
//* RouteResetNTE
//
// The state of a node in the NodeTable has changed.
// Update the Route Table to reflect the changed node.
//
// Caller must hold a ref on the NTE around the call to this procedure.
// Caller must not hold the Route Table lock.
//
void
RouteResetNTE(
    RouteTable *RT,
    VirtualAddress NTEAddress,
    uchar NTEState,
    uchar NTEFlags,
    NodeTableEntry *NTE)
{
    KIRQL OldIrql;
    RouteTableEntry *RTE;
    RouteTableEntry *NextRTE;
    KeAcquireSpinLock(&RT->Lock, &OldIrql);
    for (RTE = RT->FirstRTE;
         RTE != SentinelRTE(RT);
         RTE = NextRTE) {
        NextRTE = RTE->Next;
        VRRASSERT(RTE->RefCnt >= VRR_MIN_RTE_REFCNT);
        VRRASSERT(VRR_RTE_STATE_ORPHAN != RTE->State);
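        //
        // The RTE caches an NTE pointer for each of its two endpoints;
        // refresh whichever endpoint matches the changed node.
        //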
        //
        // Update links for EndpointA.
        //
        if (VirtualAddressEqual(NTEAddress, RTE->A.Address)) {
            if (VRR_NTE_STATE_ORPHAN == NTEState) {
                //
                // The NTE is being removed. Drop RTE state for it.
                //
                if (NULL != RTE->A.NTE) {
                    ReleaseNTE(RTE->A.NTE);
                    RTE->A.NTE = NULL;
                }
            } else {
                if (NULL == RTE->A.NTE) {
                    //
                    // Create a link to the NTE.
                    //
                    AddRefNTE(NTE);
                    RTE->A.NTE = NTE;
                }
            }
        }
        //
        // Update links for EndpointB.
        //
        if (VirtualAddressEqual(NTEAddress, RTE->B.Address)) {
            if (VRR_NTE_STATE_ORPHAN == NTEState) {
                //
                // The NTE is being removed. Drop RTE state for it.
                //
                if (NULL != RTE->B.NTE) {
                    ReleaseNTE(RTE->B.NTE);
                    RTE->B.NTE = NULL;
                }
            } else {
                if (NULL == RTE->B.NTE) {
                    //
                    // Create a link to the NTE.
                    //
                    AddRefNTE(NTE);
                    RTE->B.NTE = NTE;
                }
            }
        }
    }
    KeReleaseSpinLock(&RT->Lock, OldIrql);
}
//* RouteUpdateNCE
//
// Called to update the route table with state change of a specific NCE.
// The caller must hold a reference on the NCE across this call.
// The caller must not hold RT->Lock.
//
void
RouteUpdateNCE(
    MiniportAdapter *VA,
    NeighborCacheEntry *NCE,
    uchar State)
{
    KIRQL OldIrql;
    RouteTable *RT = &VA->RT;
    RouteTableEntry *RTE;
    RouteTableEntry *NextRTE;
    Time Now = KeQueryInterruptTime();
    TeardownInformQueue *InformQueue = NULL;
    uint TearDownSize = 0;
    VrrGlobalPathId *TDBuff = NULL;
    VrrGlobalPathId *PPId = NULL;
    uint Overflow = FALSE;
    uint NumPId = 0;
    uint NumPIdCopied = 0;
    VSetRepairQueue *VSetRepair = NULL;
    VRRASSERT(NCE != NULL);
    KeAcquireSpinLock(&RT->Lock, &OldIrql);
    //
    // Mark which RTE to tear down, and count them.
    //
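    // Counting in this first pass lets the PathId buffer be sized before
    // the second pass below fills it in.
    //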
    for (RTE = RT->FirstRTE; RTE != SentinelRTE(RT); RTE = RTE->Next) {
        if (RTE->State == VRR_RTE_STATE_ACTIVE)
            if (RTE->A.Next == NCE || RTE->B.Next == NCE) {
                RTE->State = VRR_RTE_STATE_TEARDOWN;
                if (RTE->Flags.VSet == 1)
                    if (++NumPId == VRRTEARDOWN_MAX_PID(VA)) {
                        Overflow = TRUE;
                        break;
                    }
            }
    }
    if (NumPId != 0) {
        //
        // Allocate memory to hold list of global PathIds being torn down.
        //
        TearDownSize = NumPId * sizeof(VrrGlobalPathId);
        TDBuff = ExAllocatePool(NonPagedPool, TearDownSize);
        if (TDBuff == NULL) {
            VrrKdPrint("RouteUpdNCE: failed to allocate TDBuff\n", NULL, NULL);
            goto ReleaseAndReturn;
        }
        PPId = TDBuff;
    }
    //
    // Retire all RTE that know about the failing NCE.
    //
    for (RTE = RT->FirstRTE; RTE != SentinelRTE(RT); RTE = RTE->Next) {
        TeardownInformQueue *TIQ;
        VirtualAddress *Inform;
        if (RTE->State != VRR_RTE_STATE_TEARDOWN)
            continue;
        //
        // Leave the RTE disabled and available for garbage collection.
        //
        RTE->State = VRR_RTE_STATE_RETIRED;
        RTE->Timeout = RTERetireTimeout(RTE->Flags);
        //
        // Schedule vset repair if we retired a path to a virtual neighbor.
        // Ref "or marks the next hop in the path failed"
        //
        VSetRepair = ScheduleVSetRepair(VA, VSetRepair, RTE);
        //
        // Skip forward if no neighbors need know about this path.
        //
        if (TearDownSize == 0)
            continue;
        //
        // If we have PathId to signal, and space permits, add the
        // PathId to buffer of PathIds in the TearDown.
        //
        if (NumPIdCopied < NumPId)
            if (RTE->Flags.VSet == 1) {
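                //
                // The PathId is stored byte-swapped here, presumably into
                // network (big-endian) byte order for the wire format.
                //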
                RtlCopyMemory(PPId->Address, RTE->A.Address, sizeof(VirtualAddress));
                PPId->PathId = RtlUlongByteSwap(RTE->PathId);
                PPId++;
                NumPIdCopied++;
            }
        //
        // Teardown must be routed along exact path, i.e. cannot choose an
        // arbitrary next hop towards the path endpoint.
        //
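        // Inform the surviving next hop on the far side of the failed
        // neighbor, falling back to self if this node terminates the path.
        //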
        if (RTE->A.Next == NCE)
            Inform = (RTE->B.Next != NULL) ? &RTE->B.Next->VAddress : &VA->Address;
        else
            Inform = (RTE->A.Next != NULL) ? &RTE->A.Next->VAddress : &VA->Address;
        //
        // No need to tell ourself about the tear down.
        //
        if (VirtualAddressEqual(VA->Address, *Inform))
            continue;
        //
        // Check if neighbor already on list of TearDown recipients.
        //
        for (TIQ = InformQueue; TIQ != NULL; TIQ = TIQ->Next)
            if (VirtualAddressEqual(TIQ->SendTo, *Inform))
                break;
        if (TIQ != NULL)
            continue;
        //
        // Add neighbor to list of TearDown recipients.
        //
        if ((TIQ = ExAllocatePool(NonPagedPool, sizeof *TIQ)) == NULL) {
            VrrKdPrint("RouteUpdNCE: failed to allocate TIQ\n", NULL, NULL);
            continue;
        }
        RtlCopyMemory(TIQ->SendTo, *Inform, sizeof(VirtualAddress));
        TIQ->Next = InformQueue;
        InformQueue = TIQ;
    }
    VRRASSERT(NumPIdCopied == NumPId);
  ReleaseAndReturn:
    KeReleaseSpinLock(&RT->Lock, OldIrql);
    //
    // Schedule a TearDown message to each node in InformQueue.
    // Note for a multi-homed self and/or neighbor that we send the
    // TD for all relevant IFs in one bundle via an arbitrary
    // pair of IFs.
    //
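    // Every recipient gets the same PathId bundle; CreateTearDownOpt
    // presumably copies it, since TDBuff is freed right after this loop.
    //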
    while (NULL != InformQueue) {
        TeardownInformQueue *TIQ = InformQueue;
        InternalVrrTearDown *TDO;
        InformQueue = InformQueue->Next;
        TDO = CreateTearDownOpt(VA, TIQ->SendTo, NumPId, TearDownSize, TDBuff, FALSE, FALSE);
        MsgQueueMessage(VA, TIQ->SendTo, (InternalOption *) TDO, VRR_IFID_UNSPECIFIED,
                        VRR_IFID_UNSPECIFIED, TDOWN_DELAY, NULL);
        ExFreePool(TIQ);
    }
    if (TDBuff != NULL)
        ExFreePool(TDBuff);
    //
    // Perform any required vset repair.
    //
    ProcessVSetRepairQueue(VA, VSetRepair);
    //
    // Deal with overflow by calling self again.
    //
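    // The marking loop above stops at VRRTEARDOWN_MAX_PID, so the re-entry
    // picks up any matching RTEs that were left unmarked.
    //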
    if (Overflow == TRUE) {
        VrrKdPrint("RouteUpdNCE: re-enter self to deal with overflow", NCE->VAddress, NULL);
        RouteUpdateNCE(VA, NCE, State);
    }
}
//* RouteEvictNTE
//
// Called to update the route table when an NTE is evicted.
// The caller must hold a reference on the NTE across this call.
//
// The caller must not hold RT->Lock.
//
void
RouteEvictNTE(
    MiniportAdapter *VA,
    NodeTableEntry *NTE)
{
    KIRQL OldIrql;
    RouteTable *RT = &VA->RT;
    RouteTableEntry *RTE;
    RouteTableEntry *NextRTE;
    Time Now = KeQueryInterruptTime();
    TeardownInformQueue *InformQueue = NULL;
    uint TearDownSize = 0;
    VrrGlobalPathId *TDBuff = NULL;
    VrrGlobalPathId *PPId = NULL;
    uint Overflow = FALSE;
    uint NumPId = 0;
    VRRASSERT(NTE != NULL);
    VrrTrace(VA, 3, "RtEvictNTE: remove and TD routes for NTE(s)",
             NTE->Address, NULL, NULL, NULL, 0, NULL, 0);
    KeAcquireSpinLock(&RT->Lock, &OldIrql);
    VRRASSERT(NTE->State == VRR_NTE_STATE_ORPHAN);
    //
    // Mark which RTE to tear down, and count them.
    // We tear down every "setup" route that terminates at self and
    // NTE. Physical neighbor RTE are left intact.
    //
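    // The Flags.VSet test below is presumably what screens out the
    // physical-neighbor RTE.
    //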
    for (RTE = RT->FirstRTE; RTE != SentinelRTE(RT); RTE = RTE->Next) {
        if (RTE->State == VRR_RTE_STATE_ACTIVE)
            if (VirtualAddressEqual(VA->Address, RTE->A.Address) ||
                VirtualAddressEqual(VA->Address, RTE->B.Address))
                if (RTE->Flags.VSet == 1)
                    if (RTE->B.NTE == NTE || RTE->A.NTE == NTE) {
                        RTE->State = VRR_RTE_STATE_TEARDOWN;
                        if (++NumPId == VRRTEARDOWN_MAX_PID(VA)) {
                            Overflow = TRUE;
                            break;
                        }
                    }
    }
    if (NumPId == 0)
        goto ReleaseAndReturn;
    //
    // Allocate memory to hold list of global PathIds being torn down.
    //
    TearDownSize = NumPId * sizeof(VrrGlobalPathId);
    TDBuff = ExAllocatePool(NonPagedPool, TearDownSize);
    if (TDBuff == NULL) {
        VrrKdPrint("RtEvictNTE: failed to allocate TDBuff\n", NULL, NULL);
        goto ReleaseAndReturn;
    }
    PPId = TDBuff;
    //
    // Retire all RTE that know about the failing NTE.
    //
    for (RTE = RT->FirstRTE; RTE != SentinelRTE(RT); RTE = RTE->Next) {
        TeardownInformQueue *TIQ;
        VirtualAddress *Inform;
        if (RTE->State != VRR_RTE_STATE_TEARDOWN)
            continue;
        //
        // Leave the RTE disabled and available for garbage collection.
        //
        RTE->State = VRR_RTE_STATE_RETIRED;
        RTE->Timeout = RTERetireTimeout(RTE->Flags);
        //
        // Skip forward if no neighbors need know about this path.
        //
        if (TearDownSize == 0)
            continue;
        //
        // Add to buffer of PathIds being torn down.
        //
        RtlCopyMemory(PPId->Address, RTE->A.Address, sizeof(VirtualAddress));
        PPId->PathId = RtlUlongByteSwap(RTE->PathId);
        PPId++;
        //
        // Schedule TearDown for this RTE.
        // Note: no need to tell ourself about the tear down.
        //
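        // Self is one endpoint of every path marked above, so the TearDown
        // is sent toward the evicted endpoint's side of the path.
        //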
        if (RTE->A.NTE == NTE)
            Inform = (RTE->A.Next != NULL) ? &RTE->A.Next->VAddress : &VA->Address;
        else
            Inform = (RTE->B.Next != NULL) ? &RTE->B.Next->VAddress : &VA->Address;
        if (VirtualAddressEqual(VA->Address, *Inform))
            continue;
        if ((TIQ = ExAllocatePool(NonPagedPool, sizeof *TIQ)) == NULL) {
            VrrKdPrint("RtEvictNTE: failed to allocate TIQ\n", NULL, NULL);
            continue;
        }
        RtlCopyMemory(TIQ->SendTo, *Inform, sizeof(VirtualAddress));
        TIQ->Next = InformQueue;
        InformQueue = TIQ;
    }
  ReleaseAndReturn:
    KeReleaseSpinLock(&RT->Lock, OldIrql);
    //
    // Schedule a TearDown message to each node in InformQueue.
    //
    while (NULL != InformQueue) {
        TeardownInformQueue *TIQ = InformQueue;
        InternalVrrTearDown *TDO;
        InformQueue = InformQueue->Next;
        TDO = CreateTearDownOpt(VA, TIQ->SendTo, NumPId, TearDownSize, TDBuff, FALSE, FALSE);
        MsgQueueMessage(VA, TIQ->SendTo, (InternalOption *) TDO, VRR_IFID_UNSPECIFIED,
                        VRR_IFID_UNSPECIFIED, TDOWN_DELAY, NULL);
        ExFreePool(TIQ);
    }
    if (TDBuff != NULL)
        ExFreePool(TDBuff);
    //
    // Deal with overflow by calling self again.
    //
    if (Overflow == TRUE) {
        VrrKdPrint("RtEvictNTE: re-enter self to deal with overflow", NTE->Address, NULL);
        RouteEvictNTE(VA, NTE);
    }
}
typedef struct RouteFailLinkContext {
    PIO_WORKITEM Item;           // IoWorkQueueItem to be freed when done.
    MiniportAdapter *VA;         // VA to which target NCE belongs.
    NeighborCacheEntry *NCE;     // NCE whose state has changed. Holds a ref.
} RouteFailLinkContext;
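//
// The producer side (RouteFailLink, not shown here) presumably allocates
// this context together with an IoWorkItem, takes a reference on the NCE,
// and queues RouteFailLinkWorker so the update runs at PASSIVE_LEVEL.
//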
//* RouteFailLinkWorker
//
// IoWorkQueueItem routine. Ref RouteFailLink.
//
void
RouteFailLinkWorker(
    DEVICE_OBJECT *DeviceObject,
    void *Context)
{
    RouteFailLinkContext *RDDC;
    PIO_WORKITEM Item;
    MiniportAdapter *VA;
    NeighborCacheEntry *NCE;
    RDDC = (RouteFailLinkContext *)Context;
    Item = RDDC->Item;
    VA = RDDC->VA;
    NCE = RDDC->NCE;
    //
    // Downgrade routes.
    //
    RouteUpdateNCE(VA, NCE, NCE->State);
    //