//
// node.c
//
//
if (IsJoiningOnHold(VA) == FALSE &&
IsDriverInitialized(VA) == TRUE &&
SnapPLECount == 0 &&
SnapVSetIsComplete == FALSE &&
DirectedProbeFail == TRUE) {
VrrTrace(VA,2,"gregos: NTE call FailAllAndActivateSelf",NULL,NULL,NULL,NULL,0,NULL,0);
FailAllNeigboursAndChangeState(VA,IsPartitionRepairEnabled(VA));
InterlockedIncrement((PLONG)&VA->CountExitNTE);
SendHellos(VA, Now);
}
//
// Test aid wrt behaviour during initial convergence.
//
if (SnapPLECount > VA->CountMaxPLE)
VA->CountMaxPLE = SnapPLECount;
//
// If there are SetupRequests pending, send them now.
//
while (SRList != NULL) {
VirtualAddress Proxy;
ProbeListSRList *SRL;
NeighborCacheEntry *NCE;
uint FrameSeqNo;
SRL = SRList;
SRList = SRL->Next;
//
// Omit use of proxy if destination is an active physical neighbor.
//
KeAcquireSpinLock(&VA->NC.Lock, &OldIrql);
NCE = FindNCE(&VA->NC,
SRL->Address,
VRR_IFID_UNSPECIFIED,
VRR_IFID_UNSPECIFIED,
VRR_NCE_STATE_LINKED);
if (NCE != NULL)
RtlCopyMemory(Proxy, NCE->VAddress, sizeof(VirtualAddress));
KeReleaseSpinLock(&VA->NC.Lock, OldIrql);
//
// Send the SetupReq via a random physical neighbor.
//
if (NCE == NULL && RandomPhysicalNeighbor(VA,Proxy) == FALSE) {
VrrTrace(VA,1,"NT:PL=TO RandPhysNeighbor() fails to find proxy",NULL,NULL,NULL,NULL,0,NULL,0);
}
else if (AttemptingToJoin == TRUE)
//
// Special case when attempting to join the ring.
//
JoinSendSetupReq(VA, VA->Address, Proxy, SRL->FrameSeqNo, SRL->SRcAntiRoute);
else {
//
// Standard path for sending a SetupRequest.
//
InternalVrrSetupReq *ISR;
ISR = CreateSetupReqOpt(VA,
SRL->Address,
VRR_SR_TYPE_REQUEST,
Proxy,
SRL->FrameSeqNo,
SRL->Address,
SRL->SRcAntiRoute);
if (ISR == NULL) {
VrrKdPrint("PLTimeout: cannot allocate ISR",NULL,NULL);
}
else {
//
// Send the SetupReq message via the proxy.
//
TxToken Token;
if (FindNextHop(VA,
Proxy,
VRR_IFID_UNSPECIFIED,
VRR_IFID_UNSPECIFIED,
&Token,
SRL->SRcAntiRoute,
VRR_SR_DIRECT_EITHER)==SuccessFwd) {
VrrTrace(VA,3,"NT:SR=2MQ_",VA->Address,Proxy,SRL->Address,NULL,0,"SeqNo",SRL->FrameSeqNo);
ISR->Opt.Type |= SRL->SRDirection;
MsgQueueMessage(VA, Proxy, (InternalOption *) ISR, VRR_IFID_UNSPECIFIED,VRR_IFID_UNSPECIFIED,SR_DELAY,&Token);
}
else {
VrrTrace(VA,2,"NT:SR=Drop(FNH!=Fwd))",VA->Address,NULL,SRL->Address,NULL,0,"SeqNo",SRL->FrameSeqNo);
}
}
}
AddressListFree(SRL->SRcAntiRoute);
ExFreePool(SRL);
}
//
// If there are vset evictions pending, do them now.
//
if (EvictList != NULL) {
SetVSetChanged(VA);
Evictions = TRUE;
}
while (EvictList != NULL) {
ProbeListEvictList *Evict;
Evict = EvictList;
EvictList = Evict->Next;
RouteEvictNTE(VA,Evict->NTE);
ReleaseNTE(Evict->NTE);
ExFreePool(Evict);
}
//
// Evictions may warrant immediate transition of NT->Self state.
//
if (Evictions)
NodeTableTimeout(VA,Now);
return NextTimeout;
}
//* NodeTableInit
//
// Prepares a Node cache for use: initializes the spin lock and sets
// up an empty circular list in which the sentinel is both first and
// last entry.
//
void
NodeTableInit(NodeTable *NT)
{
#if DBG
    //
    // Sanity-check algebraic and numeric properties of the virtual
    // address space before the table is ever used.
    //
    const VirtualAddress MaxAddr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    const VirtualAddress TopBit = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00 };

    VRRASSERT(VirtualAddressDistance(MaxAddr, NullAddress) == 1);
    VRRASSERT(VirtualAddressDistance(NullAddress, MaxAddr) == 1);
    VRRASSERT(DistanceRight(NullAddress, MaxAddr) == VirtualAddressToUint64(MaxAddr));
    VRRASSERT(DistanceRight(NullAddress, MaxAddr) +
              DistanceLeft(NullAddress, MaxAddr) == VRR_RINGSIZE);
    VRRASSERT(VirtualAddressDistance(NullAddress, TopBit) == VRR_MAX_ADDR_DISTANCE);
#endif
    KeInitializeSpinLock(&NT->Lock);
    NT->FirstNTE = NT->LastNTE = SentinelNTE(NT);
}
//* AddRefNTE
//
// Takes a reference on an NTE; balanced by ReleaseNTE.
//
void
AddRefNTE(NodeTableEntry *NTE)
{
    InterlockedIncrement(&NTE->RefCnt);
}
//* ReleaseNTE
//
// Drops a reference on an NTE; the final release frees it.
// Only an orphaned (unlinked) NTE may reach a refcount of zero.
//
void
ReleaseNTE(NodeTableEntry *NTE)
{
    if (InterlockedDecrement(&NTE->RefCnt) != 0)
        return;

    VRRASSERT(VRR_NTE_STATE_ORPHAN == NTE->State);
    ExFreePool(NTE);
}
//
// Context passed to PurgeQueuesWorker through IoQueueWorkItem;
// allocated by PurgeMsgAndPktQueues and freed by the worker.
//
typedef struct PurgeQueuesContext {
    PIO_WORKITEM Item;      // IoWorkQueueItem to be freed when done.
    MiniportAdapter *VA;    // VA to which queues are attached.
} PurgeQueuesContext;
//* PurgeQueuesWorker
//
// IoWorkQueueItem routine. Ref PurgeQueues.
//
// Runs on a system worker thread: drains the message (PCache) queue
// and the packet (MaintBuf) queues, completes every queued packet,
// then frees the work item and its context.
//
void
PurgeQueuesWorker(
    DEVICE_OBJECT *DeviceObject,    // Unused; required by the work-item signature.
    void *Context)                  // PurgeQueuesContext from PurgeMsgAndPktQueues.
{
    PurgeQueuesContext *PQC;
    PIO_WORKITEM Item;
    MiniportAdapter *VA;
    KIRQL OldIrql;
    MaintBuf *MB;
    MaintBufNode *MBN;
    MaintBufPacket *MBP;

    PQC = (PurgeQueuesContext *)Context;
    Item = PQC->Item;
    VA = PQC->VA;
    MB = VA->MaintBuf;
    //
    // Purge message (PCache) queue.
    //
    KeAcquireSpinLock(&VA->PCache.Lock, &OldIrql);
    PbackCleanup(VA);
    VA->PCache.List = NULL;
    KeReleaseSpinLock(&VA->PCache.Lock, OldIrql);
    //
    // Purge packet (MB) queues: detach each node, complete every
    // packet it holds, and free both packets and nodes.
    //
    KeAcquireSpinLock(&VA->MaintBuf->Lock, &OldIrql);
    while ((MBN = MB->MBN) != NULL) {
        MB->MBN = MBN->Next;
        while ((MBP = MBN->MBP) != NULL) {
            SRPacket *SRP = MBP->srp;
            MBN->MBP = MBP->Next;
            //
            // NOTE(review): purged packets are completed with
            // NDIS_STATUS_SUCCESS even though they were never sent,
            // and the completion handler runs while MaintBuf->Lock is
            // still held -- confirm the handlers tolerate both.
            //
            (*SRP->TransmitComplete)(VA, SRP, NDIS_STATUS_SUCCESS);
            ExFreePool(MBP);
        }
        ExFreePool(MBN);
    }
    VA->MaintBuf->MBN = NULL;   // Redundant: the loop above already left MBN NULL.
    KeReleaseSpinLock(&VA->MaintBuf->Lock, OldIrql);

    VrrKdPrint("PurgeQueuesWorker: called",NULL,NULL);

    IoFreeWorkItem(Item);
    ExFreePool(PQC);
}
//* PurgeMsgAndPktQueues
//
// Schedules a work item that empties the message (PCache) and packet
// (MB) queues after we exit the ring, preventing stale messages from
// being injected into the ring if we eventually rejoin.
//
// Deferring to a work item avoids taking the queue locks here: this
// path is typically reached from ReleaseNTE while the caller holds
// NT->Lock, and no lock ordering between NT->Lock and the two queue
// locks should be created.
//
// Caller must not hold PCache->Lock or MB->Lock.
//
// On allocation failure the purge is silently skipped.
//
void
PurgeMsgAndPktQueues(
    MiniportAdapter *VA)
{
    PurgeQueuesContext *Context;
    PIO_WORKITEM WorkItem;

    Context = ExAllocatePool(NonPagedPool, sizeof *Context);
    if (Context == NULL)
        return;

    WorkItem = IoAllocateWorkItem(VA->DeviceObject);
    if (WorkItem == NULL) {
        ExFreePool(Context);
        return;
    }

    Context->Item = WorkItem;
    Context->VA = VA;
    IoQueueWorkItem(WorkItem, PurgeQueuesWorker, DelayedWorkQueue, Context);
}
//* RemoveNTE
//
// Remove an NTE from list.
//
// Unlinks NTE from the node table's list, marks it ORPHAN, and drops
// the two references the list links held on it. The final ReleaseNTE
// may free the NTE if no other references remain.
//
// Caller must hold the NT lock for the virtual adapter.
//
void RemoveNTE(
    MiniportAdapter *VA,
    NodeTableEntry *NTE)
{
    NodeTable *NT = &VA->NT;
    //
    // Both links must agree that NTE is (not) linked in VSet.
    // If NTE is not linked to VSet, we are done.
    //
    VRRASSERT(!(NTE->Next == NULL && NTE->Prev != NULL));
    VRRASSERT(!(NTE->Prev == NULL && NTE->Next != NULL));
    if (NTE->Next == NULL)
        return;
    //
    // Sanity checks: never remove the sentinel (presumably the table
    // itself, per SentinelNTE -- confirm), and the list must still
    // hold its two references, so the first ReleaseNTE below cannot
    // free NTE while we are still relinking around it.
    //
    VRRASSERT(NTE != (NodeTableEntry *)NT);
    VRRASSERT(NTE->RefCnt >= 2); // Refs 2 used in NT links. There may be others.
    VRRASSERT(VRR_NTE_STATE_ORPHAN != NTE->State);
    //
    // Adjust pointers and refs.
    //
    InterlockedDecrement(&NT->Count);
    NTE->State = VRR_NTE_STATE_ORPHAN;  // ReleaseNTE asserts ORPHAN on final free.
    NTE->Next->Prev = NTE->Prev;
    ReleaseNTE(NTE);                    // Drops one of the two list references.
    NTE->Prev->Next = NTE->Next;
    NTE->Next = NULL;
    NTE->Prev = NULL;
    ReleaseNTE(NTE);                    // Drops the other; may free NTE.
}
//* NodeTableCleanup
//
// Tears down a Node cache: unlinks every entry, orphans and releases
// the Self entry, and frees both vset address lists.
//
void
NodeTableCleanup(MiniportAdapter *VA)
{
    NodeTable *NT = &VA->NT;
    NodeTableEntry *Entry;
    KIRQL OldIrql;

    VrrTrace(VA,2,"NT:NULL=NodeTableCleanup",NULL,NULL,NULL,NULL,0,NULL,0);

    KeAcquireSpinLock(&NT->Lock, &OldIrql);

    //
    // Unlink entries until only the sentinel remains.
    //
    for (;;) {
        Entry = NT->FirstNTE;
        if (Entry == SentinelNTE(NT))
            break;
        RemoveNTE(VA, Entry);
    }

    //
    // Drop the table's reference on the Self entry.
    //
    VRRASSERT(NT->Self != NULL);
    NT->Self->State = VRR_NTE_STATE_ORPHAN;
    ReleaseNTE(NT->Self);
    NT->Self = NULL;

    AddressListFree(NT->VSetLeft);
    NT->VSetLeft = NULL;
    AddressListFree(NT->VSetRight);
    NT->VSetRight = NULL;

    KeReleaseSpinLock(&NT->Lock, OldIrql);
}
//* CreateNTE
//
// Allocates a new NTE from non-paged pool, zero-initializes it, and
// stamps it with the given virtual address.
//
// Returns NULL if the pool allocation fails.
//
NodeTableEntry *
CreateNTE(
    const VirtualAddress Address)
{
    NodeTableEntry *Entry = ExAllocatePool(NonPagedPool, sizeof *Entry);

    if (Entry == NULL)
        return NULL;

    RtlZeroMemory(Entry, sizeof *Entry);
    RtlCopyMemory(Entry->Address, Address, sizeof(VirtualAddress));
    return Entry;
}
//* NodeInitSelf
//
// Creates the Node Table entry for this node's own address and links
// it into the (previously empty) table during miniport (VA)
// initialization. In addition to the two list-link references, a
// reference is taken on behalf of NT->Self.
//
// Returns the new entry, or NULL on allocation failure.
//
NodeTableEntry *
NodeInitSelf(
    MiniportAdapter *VA)
{
    NodeTable *NT = &VA->NT;
    NodeTableEntry *Self;

    VRRASSERT(NULL == NT->Self);
    VRRASSERT(0 == NT->Count);

    Self = CreateNTE(VA->Address);
    if (Self == NULL)
        return NULL;

    Self->Flags |= VRR_NTE_FLAG_SELF;

    //
    // Link as the table's only entry; each list link (Prev and Next,
    // both through the sentinel) holds its own reference.
    //
    NT->FirstNTE = NT->LastNTE = Self;
    Self->Prev = SentinelNTE(NT);
    AddRefNTE(Self);
    Self->Next = SentinelNTE(NT);
    AddRefNTE(Self);
    InterlockedIncrement(&NT->Count);

    //
    // Reference held on behalf of NT->Self.
    //
    AddRefNTE(Self);
    return Self;
}
//* FindNTE
//
// Looks up the entry carrying the given virtual address in the node
// table. Returns NULL when no entry matches.
//
// Caller must hold the node table lock.
//
NodeTableEntry *
FindNTE(
    NodeTable *NT,
    const VirtualAddress Address)
{
    NodeTableEntry *Entry = NT->FirstNTE;

    while (Entry != SentinelNTE(NT)) {
        if (VirtualAddressEqual(Entry->Address, Address))
            return Entry;
        Entry = Entry->Next;
    }
    return NULL;
}
//* FailAllNeigboursAndChangeState
//
// Used as a final resort if we consistently fail to join an existing ring.
// Fails all routes, physical neighbours and virtual neighbors, before
// launching ourself as an ACTIVE member of singleton ring. It is up to the
// partition repair protocol to sort things out from there.
//
// Caller must not hold RT->Lock.
// Caller must not hold NC->Lock.
// Caller must not hold NT->Lock.
void
FailAllNeigboursAndChangeState(
MiniportAdapter *VA,
uint GoActive)
{
NodeTable *NT = &VA->NT;
NodeTableEntry *NTE;
ProbeListEntry *PLE;
KIRQL OldIrql;
InterlockedIncrement((PLONG)&VA->CountExitRing);
VrrTrace(VA,1,"NT:**=FailAllNeigboursAndChangeState",NULL,NULL,NULL,"GoActive",GoActive,"CountExitRing",VA->CountExitRing);
//
// Release NCE and NTE refs from all RTE, then free the RTE themselves.
//
KeAcquireSpinLock(&VA->RT.Lock, &OldIrql);
RouteTableCleanup(&VA->RT);
//
// Fail all NCE. Fail rather than free in case of race, so that
// links cannot be reinstated too fast for neighbours to fail them.
//
FailAllNCE(&VA->NC, KeQueryInterruptTime());
    // NOTE(review): the source was truncated at this point by the web
    // scrape that produced this file; the remainder of
    // FailAllNeigboursAndChangeState and any following code is missing
    // and must be recovered from the original source.