📄 dbtuxscan.cpp
}

/*
 * Find start position for single range scan. If it exists, sets state
 * to Next and links the scan to the node. The first entry is returned
 * by scanNext.
 */
void
Dbtux::scanFirst(ScanOpPtr scanPtr)
{
  ScanOp& scan = *scanPtr.p;
  Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
  TreeHead& tree = frag.m_tree;
  // set up index keys for this operation
  setKeyAttrs(frag);
  // scan direction 0, 1
  const unsigned idir = scan.m_descending;
  // unpack start key into c_dataBuffer
  const ScanBound& bound = *scan.m_bound[idir];
  ScanBoundIterator iter;
  bound.first(iter);
  for (unsigned j = 0; j < bound.getSize(); j++) {
    jam();
    c_dataBuffer[j] = *iter.data;
    bound.next(iter);
  }
  TreePos treePos;
  searchToScan(frag, c_dataBuffer, scan.m_boundCnt[idir], scan.m_descending, treePos);
  if (treePos.m_loc == NullTupLoc) {
    // empty result set
    jam();
    scan.m_state = ScanOp::Last;
    return;
  }
  // set position and state
  scan.m_scanPos = treePos;
  scan.m_state = ScanOp::Next;
  // link the scan to node found
  NodeHandle node(frag);
  selectNode(node, treePos.m_loc);
  linkScan(node, scanPtr);
}

/*
 * Move to next entry. The scan is already linked to some node. When
 * we leave, if an entry was found, it will be linked to a possibly
 * different node. The scan has a position, and a direction which tells
 * from where we came to this position. This is one of (all comments
 * are in terms of ascending scan):
 *
 * 0 - up from left child (scan this node next)
 * 1 - up from right child (proceed to parent)
 * 2 - up from root (the scan ends)
 * 3 - left to right within node (at end proceed to right child)
 * 4 - down from parent (proceed to left child)
 *
 * If an entry was found, scan direction is 3. Therefore tree
 * re-organizations need not worry about scan direction.
 */
void
Dbtux::scanNext(ScanOpPtr scanPtr, bool fromMaintReq)
{
  ScanOp& scan = *scanPtr.p;
  Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
#ifdef VM_TRACE
  if (debugFlags & DebugScan) {
    debugOut << "Next in scan " << scanPtr.i << " " << scan << endl;
  }
#endif
  // cannot be moved away from tuple we have locked
  ndbrequire(scan.m_state != ScanOp::Locked);
  // set up index keys for this operation
  setKeyAttrs(frag);
  // scan direction
  const unsigned idir = scan.m_descending; // 0, 1
  const int jdir = 1 - 2 * (int)idir;      // 1, -1
  // unpack end key into c_dataBuffer
  const ScanBound& bound = *scan.m_bound[1 - idir];
  ScanBoundIterator iter;
  bound.first(iter);
  for (unsigned j = 0; j < bound.getSize(); j++) {
    jam();
    c_dataBuffer[j] = *iter.data;
    bound.next(iter);
  }
  // use copy of position
  TreePos pos = scan.m_scanPos;
  // get and remember original node
  NodeHandle origNode(frag);
  selectNode(origNode, pos.m_loc);
  ndbrequire(islinkScan(origNode, scanPtr));
  // current node in loop
  NodeHandle node = origNode;
  // copy of entry found
  TreeEnt ent;
  while (true) {
    jam();
#ifdef VM_TRACE
    if (debugFlags & DebugScan) {
      debugOut << "Scan next pos " << pos << " " << node << endl;
    }
#endif
    if (pos.m_dir == 2) {
      // coming up from root ends the scan
      jam();
      pos.m_loc = NullTupLoc;
      scan.m_state = ScanOp::Last;
      break;
    }
    if (node.m_loc != pos.m_loc) {
      jam();
      selectNode(node, pos.m_loc);
    }
    if (pos.m_dir == 4) {
      // coming down from parent proceed to left child
      jam();
      TupLoc loc = node.getLink(idir);
      if (loc != NullTupLoc) {
        jam();
        pos.m_loc = loc;
        pos.m_dir = 4; // unchanged
        continue;
      }
      // pretend we came from left child
      pos.m_dir = idir;
    }
    const unsigned occup = node.getOccup();
    if (occup == 0) {
      jam();
      ndbrequire(fromMaintReq);
      // move back to parent - see comment in treeRemoveInner
      pos.m_loc = node.getLink(2);
      pos.m_dir = node.getSide();
      continue;
    }
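    // Note (added comment): the node at pos.m_loc now has at least one
    // entry. The cases below enter it from the idir side (directions
    // 0/1), step through its entries (direction 3), then leave towards
    // the other child or back up to the parent, as listed in the
    // comment above scanNext.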
    if (pos.m_dir == idir) {
      // coming up from left child scan current node
      jam();
      pos.m_pos = idir == 0 ? 0 : occup - 1;
      pos.m_match = false;
      pos.m_dir = 3;
    }
    if (pos.m_dir == 3) {
      // within node
      jam();
      // advance position
      if (! pos.m_match)
        pos.m_match = true;
      else
        // becomes ZNIL (which is > occup) if 0 and scan descending
        pos.m_pos += jdir;
      if (pos.m_pos < occup) {
        jam();
        ent = node.getEnt(pos.m_pos);
        pos.m_dir = 3; // unchanged
        // read and compare all attributes
        readKeyAttrs(frag, ent, 0, c_entryKey);
        int ret = cmpScanBound(frag, 1 - idir, c_dataBuffer, scan.m_boundCnt[1 - idir], c_entryKey);
        ndbrequire(ret != NdbSqlUtil::CmpUnknown);
        if (jdir * ret < 0) {
          jam();
          // hit upper bound of single range scan
          pos.m_loc = NullTupLoc;
          scan.m_state = ScanOp::Last;
          break;
        }
        // can we see it
        if (! scanVisible(scanPtr, ent)) {
          jam();
          continue;
        }
        // found entry
        scan.m_state = ScanOp::Current;
        break;
      }
      // after node proceed to right child
      TupLoc loc = node.getLink(1 - idir);
      if (loc != NullTupLoc) {
        jam();
        pos.m_loc = loc;
        pos.m_dir = 4;
        continue;
      }
      // pretend we came from right child
      pos.m_dir = 1 - idir;
    }
    if (pos.m_dir == 1 - idir) {
      // coming up from right child proceed to parent
      jam();
      pos.m_loc = node.getLink(2);
      pos.m_dir = node.getSide();
      continue;
    }
    ndbrequire(false);
  }
  // copy back position
  scan.m_scanPos = pos;
  // relink
  if (scan.m_state == ScanOp::Current) {
    ndbrequire(pos.m_match == true && pos.m_dir == 3);
    ndbrequire(pos.m_loc == node.m_loc);
    if (origNode.m_loc != node.m_loc) {
      jam();
      unlinkScan(origNode, scanPtr);
      linkScan(node, scanPtr);
    }
    // copy found entry
    scan.m_scanEnt = ent;
  } else if (scan.m_state == ScanOp::Last) {
    jam();
    ndbrequire(pos.m_loc == NullTupLoc);
    unlinkScan(origNode, scanPtr);
  } else {
    ndbrequire(false);
  }
#ifdef VM_TRACE
  if (debugFlags & DebugScan) {
    debugOut << "Next out scan " << scanPtr.i << " " << scan << endl;
  }
#endif
}
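/*
 * Illustration (not part of the original file): scanNext above is an
 * iterative in-order successor walk over a tree with parent links,
 * generalized to multi-entry nodes, bounds and both directions. A
 * minimal sketch of the same walk on single-entry nodes, with
 * hypothetical names (DemoNode, demoSuccessor), compiled out by
 * default behind a hypothetical DBTUX_SCAN_DEMO guard:
 */
#ifdef DBTUX_SCAN_DEMO
struct DemoNode {
  DemoNode* child[2]; // child[0] = left, child[1] = right
  DemoNode* parent;   // 0 at root, like getLink(2) returning NullTupLoc
  unsigned side;      // which child of parent we are, like getSide()
};
// Ascending successor: descend to the leftmost entry of the right
// subtree (direction 4), else ascend until we arrive from a left
// child (directions 1 then 0); arriving up from the root (direction
// 2) means the walk is over and 0 is returned.
static DemoNode*
demoSuccessor(DemoNode* node)
{
  if (node->child[1] != 0) {
    node = node->child[1];
    while (node->child[0] != 0)
      node = node->child[0];
    return node;
  }
  while (node->parent != 0 && node->side == 1)
    node = node->parent;
  return node->parent;
}
#endif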
/*
 * Check if an entry is visible to the scan.
 *
 * There is a special check to never accept same tuple twice in a row.
 * This is faster than asking TUP. It also fixes some special cases
 * which are not analyzed or handled yet.
 */
bool
Dbtux::scanVisible(ScanOpPtr scanPtr, TreeEnt ent)
{
  const ScanOp& scan = *scanPtr.p;
  const Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
  Uint32 fragBit = ent.m_fragBit;
  Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[fragBit];
  Uint32 fragId = frag.m_fragId | fragBit;
  Uint32 tupAddr = getTupAddr(frag, ent);
  Uint32 tupVersion = ent.m_tupVersion;
  // check for same tuple twice in row
  if (scan.m_scanEnt.m_tupLoc == ent.m_tupLoc &&
      scan.m_scanEnt.m_fragBit == fragBit) {
    jam();
    return false;
  }
  Uint32 transId1 = scan.m_transId1;
  Uint32 transId2 = scan.m_transId2;
  Uint32 savePointId = scan.m_savePointId;
  bool ret = c_tup->tuxQueryTh(tableFragPtrI, tupAddr, tupVersion, transId1, transId2, savePointId);
  jamEntry();
  return ret;
}

/*
 * Finish closing of scan and send conf. Any lock wait has been done
 * already.
 */
void
Dbtux::scanClose(Signal* signal, ScanOpPtr scanPtr)
{
  ScanOp& scan = *scanPtr.p;
  ndbrequire(! scan.m_lockwait && scan.m_accLockOp == RNIL);
  // unlock all not unlocked by LQH
  for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
    if (scan.m_accLockOps[i] != RNIL) {
      jam();
      AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
      lockReq->returnCode = RNIL;
      lockReq->requestInfo = AccLockReq::Abort;
      lockReq->accOpPtr = scan.m_accLockOps[i];
      EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
      jamEntry();
      ndbrequire(lockReq->returnCode == AccLockReq::Success);
      scan.m_accLockOps[i] = RNIL;
    }
  }
  // send conf
  NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
  conf->scanPtr = scanPtr.p->m_userPtr;
  conf->accOperationPtr = RNIL;
  conf->fragId = RNIL;
  unsigned signalLength = 3;
  sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF, signal, signalLength, JBB);
  releaseScanOp(scanPtr);
}

void
Dbtux::addAccLockOp(ScanOp& scan, Uint32 accLockOp)
{
  ndbrequire(accLockOp != RNIL);
  Uint32* list = scan.m_accLockOps;
  bool ok = false;
  for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
    ndbrequire(list[i] != accLockOp);
    if (! ok && list[i] == RNIL) {
      list[i] = accLockOp;
      ok = true;
      // continue check for duplicates
    }
  }
  if (! ok) {
    unsigned i = scan.m_maxAccLockOps;
    if (i < MaxAccLockOps) {
      list[i] = accLockOp;
      ok = true;
      scan.m_maxAccLockOps = i + 1;
    }
  }
  ndbrequire(ok);
}

void
Dbtux::removeAccLockOp(ScanOp& scan, Uint32 accLockOp)
{
  ndbrequire(accLockOp != RNIL);
  Uint32* list = scan.m_accLockOps;
  bool ok = false;
  for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
    if (list[i] == accLockOp) {
      list[i] = RNIL;
      ok = true;
      break;
    }
  }
  ndbrequire(ok);
}

/*
 * Release allocated records.
 */
void
Dbtux::releaseScanOp(ScanOpPtr& scanPtr)
{
#ifdef VM_TRACE
  if (debugFlags & DebugScan) {
    debugOut << "Release scan " << scanPtr.i << " " << *scanPtr.p << endl;
  }
#endif
  Frag& frag = *c_fragPool.getPtr(scanPtr.p->m_fragPtrI);
  scanPtr.p->m_boundMin.release();
  scanPtr.p->m_boundMax.release();
  // unlink from per-fragment list and release from pool
  frag.m_scanList.release(scanPtr);
}
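/*
 * Illustration (not part of the original file): addAccLockOp and
 * removeAccLockOp above keep lock operations in a small fixed array
 * where RNIL marks a free slot; the high-water mark m_maxAccLockOps
 * grows only when no freed slot can be reused. A minimal sketch of
 * that pattern with hypothetical names (DemoSlots), compiled out by
 * default behind the same hypothetical DBTUX_SCAN_DEMO guard:
 */
#ifdef DBTUX_SCAN_DEMO
struct DemoSlots {
  static const unsigned Max = 4;   // stands in for MaxAccLockOps
  static const unsigned Nil = ~0u; // stands in for RNIL
  unsigned list[Max];
  unsigned used;                   // stands in for m_maxAccLockOps
  DemoSlots() : used(0) {}
  bool add(unsigned op) {
    for (unsigned i = 0; i < used; i++) {
      if (list[i] == Nil) {        // reuse a freed slot first
        list[i] = op;
        return true;
      }
    }
    if (used < Max) {              // else extend the high-water mark
      list[used++] = op;
      return true;
    }
    return false;                  // no room left
  }
  bool remove(unsigned op) {
    for (unsigned i = 0; i < used; i++) {
      if (list[i] == op) {
        list[i] = Nil;             // free the slot, keep the mark
        return true;
      }
    }
    return false;                  // not found
  }
};
#endif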