📄 ndbcntrmain.cpp
{
  jamEntry();
  const ReadNodesConf * readNodes = (ReadNodesConf *)&signal->theData[0];

  cmasterNodeId = readNodes->masterNodeId;
  cdynamicNodeId = readNodes->ndynamicId;

  /**
   * All defined nodes...
   */
  c_allDefinedNodes.assign(NdbNodeBitmask::Size, readNodes->allNodes);
  c_clusterNodes.assign(NdbNodeBitmask::Size, readNodes->clusterNodes);

  Uint32 to_1 = 30000;
  Uint32 to_2 = 0;
  Uint32 to_3 = 0;

  const ndb_mgm_configuration_iterator * p =
    theConfiguration.getOwnConfigIterator();
  ndbrequire(p != 0);
  ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTIAL_TIMEOUT, &to_1);
  ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTITION_TIMEOUT, &to_2);
  ndb_mgm_get_int_parameter(p, CFG_DB_START_FAILURE_TIMEOUT, &to_3);

  c_start.m_startTime = NdbTick_CurrentMillisecond();
  c_start.m_startPartialTimeout = setTimeout(c_start.m_startTime, to_1);
  c_start.m_startPartitionedTimeout = setTimeout(c_start.m_startTime, to_2);
  c_start.m_startFailureTimeout = setTimeout(c_start.m_startTime, to_3);

  UpgradeStartup::sendCmAppChg(* this, signal, 0); // ADD

  sendCntrStartReq(signal);

  signal->theData[0] = ZSTARTUP;
  sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1);

  return;
}

void
Ndbcntr::execCM_ADD_REP(Signal* signal){
  jamEntry();
  c_clusterNodes.set(signal->theData[0]);
}

void
Ndbcntr::sendCntrStartReq(Signal * signal){
  jamEntry();
  CntrStartReq * req = (CntrStartReq*)signal->getDataPtrSend();
  req->startType = ctypeOfStart;
  req->lastGci = c_start.m_lastGci;
  req->nodeId = getOwnNodeId();
  sendSignal(calcNdbCntrBlockRef(cmasterNodeId), GSN_CNTR_START_REQ,
             signal, CntrStartReq::SignalLength, JBB);
}

void
Ndbcntr::execCNTR_START_REF(Signal * signal){
  jamEntry();
  const CntrStartRef * ref = (CntrStartRef*)signal->getDataPtr();
  switch(ref->errorCode){
  case CntrStartRef::NotMaster:
    jam();
    cmasterNodeId = ref->masterNodeId;
    sendCntrStartReq(signal);
    return;
  case CntrStartRef::StopInProgress:
    jam();
    progError(__LINE__, NDBD_EXIT_RESTART_DURING_SHUTDOWN);
  }
  ndbrequire(false);
}

void
Ndbcntr::StartRecord::reset(){
  m_starting.clear();
  m_waiting.clear();
  m_withLog.clear();
  m_withoutLog.clear();
  m_lastGci = m_lastGciNodeId = 0;
  m_startPartialTimeout = ~0;
  m_startPartitionedTimeout = ~0;
  m_startFailureTimeout = ~0;
  m_logNodesCount = 0;
}

void
Ndbcntr::execCNTR_START_CONF(Signal * signal){
  jamEntry();
  const CntrStartConf * conf = (CntrStartConf*)signal->getDataPtr();

  cnoStartNodes = conf->noStartNodes;
  ctypeOfStart = (NodeState::StartType)conf->startType;
  c_start.m_lastGci = conf->startGci;
  cmasterNodeId = conf->masterNodeId;

  NdbNodeBitmask tmp;
  tmp.assign(NdbNodeBitmask::Size, conf->startedNodes);
  c_startedNodes.bitOR(tmp);
  c_start.m_starting.assign(NdbNodeBitmask::Size, conf->startingNodes);

  ph2GLab(signal);

  UpgradeStartup::sendCmAppChg(* this, signal, 2); //START
}

/**
 * Tried with parallell nr, but it crashed in DIH
 * so I turned it off, as I don't want to debug DIH now...
 * Jonas 19/11-03
 *
 * After trying for 2 hours, I gave up.
 * DIH is not designed to support it, and
 * it requires quite of lot of changes to
 * make it work
 * Jonas 5/12-03
 */
#define PARALLELL_NR 0

#if PARALLELL_NR
const bool parallellNR = true;
#else
const bool parallellNR = false;
#endif

void
Ndbcntr::execCNTR_START_REP(Signal* signal){
  jamEntry();
  Uint32 nodeId = signal->theData[0];

  c_startedNodes.set(nodeId);
  c_start.m_starting.clear(nodeId);

  if(!c_start.m_starting.isclear()){
    jam();
    return;
  }

  if(cmasterNodeId != getOwnNodeId()){
    jam();
    c_start.reset();
    return;
  }

  if(c_start.m_waiting.isclear()){
    jam();
    c_start.reset();
    return;
  }

  startWaitingNodes(signal);
}

void
Ndbcntr::execCNTR_START_REQ(Signal * signal){
  jamEntry();
  const CntrStartReq * req = (CntrStartReq*)signal->getDataPtr();

  const Uint32 nodeId = req->nodeId;
  const Uint32 lastGci = req->lastGci;
  const NodeState::StartType st = (NodeState::StartType)req->startType;

  if(cmasterNodeId == 0){
    jam();
    // Has not completed READNODES yet
    sendSignalWithDelay(reference(), GSN_CNTR_START_REQ, signal, 100,
                        signal->getLength());
    return;
  }

  if(cmasterNodeId != getOwnNodeId()){
    jam();
    sendCntrStartRef(signal, nodeId, CntrStartRef::NotMaster);
    return;
  }

  const NodeState & nodeState = getNodeState();
  switch(nodeState.startLevel){
  case NodeState::SL_NOTHING:
  case NodeState::SL_CMVMI:
    jam();
    ndbrequire(false);
  case NodeState::SL_STARTING:
  case NodeState::SL_STARTED:
    jam();
    break;

  case NodeState::SL_STOPPING_1:
  case NodeState::SL_STOPPING_2:
  case NodeState::SL_STOPPING_3:
  case NodeState::SL_STOPPING_4:
    jam();
    sendCntrStartRef(signal, nodeId, CntrStartRef::StopInProgress);
    return;
  }

  /**
   * Am I starting (or started)
   */
  const bool starting = (nodeState.startLevel != NodeState::SL_STARTED);

  c_start.m_waiting.set(nodeId);
  switch(st){
  case NodeState::ST_INITIAL_START:
    jam();
    c_start.m_withoutLog.set(nodeId);
    break;
  case NodeState::ST_SYSTEM_RESTART:
    jam();
    c_start.m_withLog.set(nodeId);
    if(starting && lastGci > c_start.m_lastGci){
      jam();
      CntrStartRef * ref = (CntrStartRef*)signal->getDataPtrSend();
      ref->errorCode = CntrStartRef::NotMaster;
      ref->masterNodeId = nodeId;
      NodeReceiverGroup rg (NDBCNTR, c_start.m_waiting);
      sendSignal(rg, GSN_CNTR_START_REF, signal,
                 CntrStartRef::SignalLength, JBB);
      return;
    }
    if(starting){
      jam();
      Uint32 i = c_start.m_logNodesCount++;
      c_start.m_logNodes[i].m_nodeId = nodeId;
      c_start.m_logNodes[i].m_lastGci = req->lastGci;
    }
    break;
  case NodeState::ST_NODE_RESTART:
  case NodeState::ST_INITIAL_NODE_RESTART:
  case NodeState::ST_ILLEGAL_TYPE:
    ndbrequire(false);
  }

  const bool startInProgress = !c_start.m_starting.isclear();

  if((starting && startInProgress) || (startInProgress && !parallellNR)){
    jam();
    // We're already starting together with a bunch of nodes
    // Let this node wait...
    return;
  }

  if(starting){
    jam();
    trySystemRestart(signal);
  } else {
    jam();
    startWaitingNodes(signal);
  }
  return;
}

void
Ndbcntr::startWaitingNodes(Signal * signal){

#if ! PARALLELL_NR
  const Uint32 nodeId = c_start.m_waiting.find(0);
  const Uint32 Tref = calcNdbCntrBlockRef(nodeId);
  ndbrequire(nodeId != c_start.m_waiting.NotFound);

  NodeState::StartType nrType = NodeState::ST_NODE_RESTART;
  if(c_start.m_withoutLog.get(nodeId)){
    jam();
    nrType = NodeState::ST_INITIAL_NODE_RESTART;
  }

  /**
   * Let node perform restart
   */
  CntrStartConf * conf = (CntrStartConf*)signal->getDataPtrSend();
  conf->noStartNodes = 1;
  conf->startType = nrType;
  conf->startGci = ~0; // Not used
  conf->masterNodeId = getOwnNodeId();
  BitmaskImpl::clear(NdbNodeBitmask::Size, conf->startingNodes);
  BitmaskImpl::set(NdbNodeBitmask::Size, conf->startingNodes, nodeId);
  c_startedNodes.copyto(NdbNodeBitmask::Size, conf->startedNodes);
  sendSignal(Tref, GSN_CNTR_START_CONF, signal,
             CntrStartConf::SignalLength, JBB);

  c_start.m_waiting.clear(nodeId);
  c_start.m_withLog.clear(nodeId);
  c_start.m_withoutLog.clear(nodeId);
  c_start.m_starting.set(nodeId);
#else
  // Parallell nr

  c_start.m_starting = c_start.m_waiting;
  c_start.m_waiting.clear();

  CntrStartConf * conf = (CntrStartConf*)signal->getDataPtrSend();
  conf->noStartNodes = 1;
  conf->startGci = ~0; // Not used
  conf->masterNodeId = getOwnNodeId();
  c_start.m_starting.copyto(NdbNodeBitmask::Size, conf->startingNodes);
  c_startedNodes.copyto(NdbNodeBitmask::Size, conf->startedNodes);

  char buf[100];
  if(!c_start.m_withLog.isclear()){
    jam();
    ndbout_c("Starting nodes w/ log: %s", c_start.m_withLog.getText(buf));

    NodeReceiverGroup rg(NDBCNTR, c_start.m_withLog);
    conf->startType = NodeState::ST_NODE_RESTART;

    sendSignal(rg, GSN_CNTR_START_CONF, signal,
               CntrStartConf::SignalLength, JBB);
  }

  if(!c_start.m_withoutLog.isclear()){
    jam();
    ndbout_c("Starting nodes wo/ log: %s", c_start.m_withoutLog.getText(buf));
    NodeReceiverGroup rg(NDBCNTR, c_start.m_withoutLog);
    conf->startType = NodeState::ST_INITIAL_NODE_RESTART;

    sendSignal(rg, GSN_CNTR_START_CONF, signal,
               CntrStartConf::SignalLength, JBB);
  }

  c_start.m_waiting.clear();
  c_start.m_withLog.clear();
  c_start.m_withoutLog.clear();
#endif
}

void
Ndbcntr::sendCntrStartRef(Signal * signal,
                          Uint32 nodeId, CntrStartRef::ErrorCode code){
  CntrStartRef * ref = (CntrStartRef*)signal->getDataPtrSend();
  ref->errorCode = code;
  ref->masterNodeId = cmasterNodeId;
  sendSignal(calcNdbCntrBlockRef(nodeId), GSN_CNTR_START_REF, signal,
             CntrStartRef::SignalLength, JBB);
}

CheckNodeGroups::Output
Ndbcntr::checkNodeGroups(Signal* signal, const NdbNodeBitmask & mask){
  CheckNodeGroups* sd = (CheckNodeGroups*)&signal->theData[0];
  sd->blockRef = reference();
  sd->requestType = CheckNodeGroups::Direct | CheckNodeGroups::ArbitCheck;
  sd->mask = mask;
  EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal,
                 CheckNodeGroups::SignalLength);
  jamEntry();
  return (CheckNodeGroups::Output)sd->output;
}

bool
Ndbcntr::trySystemRestart(Signal* signal){
  /**
   * System restart something
   */
  const bool allNodes = c_start.m_waiting.equal(c_allDefinedNodes);
  const bool allClusterNodes = c_start.m_waiting.equal(c_clusterNodes);
  const Uint64 now = NdbTick_CurrentMillisecond();

  if(!allClusterNodes){
    jam();
    return false;
  }

  NodeState::StartType srType = NodeState::ST_SYSTEM_RESTART;
  if(c_start.m_waiting.equal(c_start.m_withoutLog))
  {
    jam();
    srType = NodeState::ST_INITIAL_START;
    c_start.m_starting = c_start.m_withoutLog; // Used for starting...
    c_start.m_withoutLog.clear();
  } else {

    CheckNodeGroups::Output wLog = checkNodeGroups(signal, c_start.m_withLog);

    switch (wLog) {
    case CheckNodeGroups::Win:
      jam();
      break;
    case CheckNodeGroups::Lose:
      jam();
      // If we lose with all nodes, then we're in trouble
      ndbrequire(!allNodes);
      return false;
    case CheckNodeGroups::Partitioning:
      jam();
      bool allowPartition = (c_start.m_startPartitionedTimeout != (Uint64)~0);

      if(allNodes){
        if(allowPartition){
          jam();
          break;
        }
        ndbrequire(false); // All nodes -> partitioning, which is not allowed
      }

      break;
    }

    // For now only with the "logged"-ones.
    // Let the others do node restart afterwards...
    c_start.m_starting = c_start.m_withLog;
    c_start.m_withLog.clear();
  }

  /**
   * Okidoki, we try to start
   */
  CntrStartConf * conf = (CntrStartConf*)signal->getDataPtr();
  conf->noStartNodes = c_start.m_starting.count();
  conf->startType = srType;
  conf->startGci = c_start.m_lastGci;
  conf->masterNodeId = c_start.m_lastGciNodeId;
  c_start.m_starting.copyto(NdbNodeBitmask::Size, conf->startingNodes);
  c_startedNodes.copyto(NdbNodeBitmask::Size, conf->startedNodes);

  ndbrequire(c_start.m_lastGciNodeId == getOwnNodeId());

  NodeReceiverGroup rg(NDBCNTR, c_start.m_starting);
  sendSignal(rg, GSN_CNTR_START_CONF, signal,
             CntrStartConf::SignalLength, JBB);

  c_start.m_waiting.bitANDC(c_start.m_starting);

  return true;
}

void Ndbcntr::ph2GLab(Signal* signal)
{
  if (cndbBlocksCount < ZNO_NDB_BLOCKS) {
    jam();
    sendNdbSttor(signal);
    return;
  }//if
  sendSttorry(signal);
  return;
}//Ndbcntr::ph2GLab()

/*4.4 START PHASE 3 */
/*###########################################################################*/
// SEND SIGNAL NDBSTTOR TO ALL BLOCKS, ACC, DICT, DIH, LQH, TC AND TUP
// WHEN ALL BLOCKS HAVE RETURNED THEIR NDB_STTORRY ALL BLOCK HAVE FINISHED
// THEIR LOCAL CONNECTIONs SUCESSFULLY
// AND THEN WE CAN SEND APPL_STARTREG TO INFORM QMGR THAT WE ARE READY TO
// SET UP DISTRIBUTED CONNECTIONS.
/*--------------------------------------------------------------*/
// THIS IS NDB START PHASE 3.
/*--------------------------------------------------------------*/
/*******************************/
/*  STTOR                      */
/*******************************/
void Ndbcntr::startPhase3Lab(Signal* signal)
{
  ph3ALab(signal);
  return;
}//Ndbcntr::startPhase3Lab()

/*******************************/
/*  NDB_STTORRY                */
/*******************************/
void Ndbcntr::ph3ALab(Signal* signal)
{