dbtuxscan.cpp
来自「MySQL源码文件5.X系列, 可自已编译到服务器」· C++ 代码 · 共 1,120 行 · 第 1/3 页
CPP
1,120 行
ScanOp& scan = *scanPtr.p; Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);#ifdef VM_TRACE if (debugFlags & DebugScan) { debugOut << "Enter find scan " << scanPtr.i << " " << scan << endl; }#endif ndbrequire(scan.m_state == ScanOp::Current || scan.m_state == ScanOp::Next); while (1) { jam(); if (scan.m_state == ScanOp::Next) scanNext(scanPtr, false); if (scan.m_state == ScanOp::Current) { jam(); const TreePos pos = scan.m_scanPos; NodeHandle node(frag); selectNode(node, pos.m_loc); const TreeEnt ent = node.getEnt(pos.m_pos); if (scanVisible(scanPtr, ent)) { jam(); scan.m_state = ScanOp::Found; scan.m_scanEnt = ent; break; } } else { jam(); break; } scan.m_state = ScanOp::Next; }#ifdef VM_TRACE if (debugFlags & DebugScan) { debugOut << "Leave find scan " << scanPtr.i << " " << scan << endl; }#endif}/* * Move to next entry. The scan is already linked to some node. When * we leave, if an entry was found, it will be linked to a possibly * different node. The scan has a position, and a direction which tells * from where we came to this position. This is one of (all comments * are in terms of ascending scan): * * 0 - up from left child (scan this node next) * 1 - up from right child (proceed to parent) * 2 - up from root (the scan ends) * 3 - left to right within node (at end proceed to right child) * 4 - down from parent (proceed to left child) * * If an entry was found, scan direction is 3. Therefore tree * re-organizations need not worry about scan direction. * * This method is also used to move a scan when its entry is removed * (see moveScanList). If the scan is Blocked, we check if it remains * Blocked on a different version of the tuple. Otherwise the tuple is * lost and state becomes Current. 
*/
void
Dbtux::scanNext(ScanOpPtr scanPtr, bool fromMaintReq)
{
  ScanOp& scan = *scanPtr.p;
  Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
#ifdef VM_TRACE
  if (debugFlags & (DebugMaint | DebugScan)) {
    debugOut << "Enter next scan " << scanPtr.i << " " << scan << endl;
  }
#endif
  // cannot be moved away from tuple we have locked
  ndbrequire(scan.m_state != ScanOp::Locked);
  // set up index keys for this operation
  setKeyAttrs(frag);
  // scan direction: idir selects which child link is "left" for this
  // scan, jdir is the step applied to the in-node position
  const unsigned idir = scan.m_descending; // 0, 1
  const int jdir = 1 - 2 * (int)idir;      // 1, -1
  // use copy of position (written back only at the end)
  TreePos pos = scan.m_scanPos;
  // get and remember original node - the scan must currently be linked
  // to it; we relink only if we end up on a different node
  NodeHandle origNode(frag);
  selectNode(origNode, pos.m_loc);
  ndbrequire(islinkScan(origNode, scanPtr));
  // current node in loop
  NodeHandle node = origNode;
  // copy of entry found
  TreeEnt ent;
  while (true) {
    jam();
#ifdef VM_TRACE
    if (debugFlags & (DebugMaint | DebugScan)) {
      debugOut << "Current scan " << scanPtr.i << " pos " << pos << " node " << node << endl;
    }
#endif
    if (pos.m_dir == 2) {
      // coming up from root ends the scan
      jam();
      pos.m_loc = NullTupLoc;
      break;
    }
    // load the node the position points at, if we are not already on it
    if (node.m_loc != pos.m_loc) {
      jam();
      selectNode(node, pos.m_loc);
    }
    if (pos.m_dir == 4) {
      // coming down from parent proceed to left child
      jam();
      TupLoc loc = node.getLink(idir);
      if (loc != NullTupLoc) {
        jam();
        pos.m_loc = loc;
        pos.m_dir = 4; // unchanged
        continue;
      }
      // pretend we came from left child
      pos.m_dir = idir;
    }
    const unsigned occup = node.getOccup();
    if (occup == 0) {
      // empty node can only occur mid-maintenance
      jam();
      ndbrequire(fromMaintReq);
      // move back to parent - see comment in treeRemoveInner
      pos.m_loc = node.getLink(2);
      pos.m_dir = node.getSide();
      continue;
    }
    if (pos.m_dir == idir) {
      // coming up from left child scan current node
      jam();
      // start one slot "before" the node: (Uint16)-1 wraps to 0 on the
      // first ascending step; occup steps down to occup-1 descending
      pos.m_pos = idir == 0 ? (Uint16)-1 : occup;
      pos.m_dir = 3;
    }
    if (pos.m_dir == 3) {
      // before or within node
      jam();
      // advance position - becomes ZNIL (> occup) if 0 and descending
      pos.m_pos += jdir;
      if (pos.m_pos < occup) {
        jam();
        pos.m_dir = 3; // unchanged
        ent = node.getEnt(pos.m_pos);
        // if entry is past the end bound, terminate the scan instead
        if (! scanCheck(scanPtr, ent)) {
          jam();
          pos.m_loc = NullTupLoc;
        }
        break;
      }
      // after node proceed to right child
      TupLoc loc = node.getLink(1 - idir);
      if (loc != NullTupLoc) {
        jam();
        pos.m_loc = loc;
        pos.m_dir = 4;
        continue;
      }
      // pretend we came from right child
      pos.m_dir = 1 - idir;
    }
    if (pos.m_dir == 1 - idir) {
      // coming up from right child proceed to parent
      jam();
      pos.m_loc = node.getLink(2);
      pos.m_dir = node.getSide();
      continue;
    }
    // all direction codes handled above
    ndbrequire(false);
  }
  // copy back position
  scan.m_scanPos = pos;
  // relink the scan to the node where it stopped
  if (pos.m_loc != NullTupLoc) {
    ndbrequire(pos.m_dir == 3);
    ndbrequire(pos.m_loc == node.m_loc);
    if (origNode.m_loc != node.m_loc) {
      jam();
      unlinkScan(origNode, scanPtr);
      linkScan(node, scanPtr);
    }
    if (scan.m_state != ScanOp::Blocked) {
      scan.m_state = ScanOp::Current;
    } else {
      // scan was Blocked on a locked tuple whose entry was just moved
      // (only maintenance can do this) - see moveScanList comment above
      jam();
      ndbrequire(fromMaintReq);
      TreeEnt& scanEnt = scan.m_scanEnt;
      ndbrequire(scanEnt.m_tupLoc != NullTupLoc);
      if (scanEnt.eqtuple(ent)) {
        // remains blocked on another version
        scanEnt = ent;
      } else {
        // the locked tuple is gone; scan resumes as Current
        jam();
        scanEnt.m_tupLoc = NullTupLoc;
        scan.m_state = ScanOp::Current;
      }
    }
  } else {
    // no more entries in range - scan is done
    jam();
    unlinkScan(origNode, scanPtr);
    scan.m_state = ScanOp::Last;
  }
#ifdef VM_TRACE
  if (debugFlags & (DebugMaint | DebugScan)) {
    debugOut << "Leave next scan " << scanPtr.i << " " << scan << endl;
  }
#endif
}

/*
 * Check end key. Return true if scan is still within range.
*/boolDbtux::scanCheck(ScanOpPtr scanPtr, TreeEnt ent){ ScanOp& scan = *scanPtr.p; Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); const unsigned idir = scan.m_descending; const int jdir = 1 - 2 * (int)idir; unpackBound(*scan.m_bound[1 - idir], c_dataBuffer); unsigned boundCnt = scan.m_boundCnt[1 - idir]; readKeyAttrs(frag, ent, 0, c_entryKey); int ret = cmpScanBound(frag, 1 - idir, c_dataBuffer, boundCnt, c_entryKey); ndbrequire(ret != NdbSqlUtil::CmpUnknown); if (jdir * ret > 0) return true; // hit upper bound of single range scan return false;}/* * Check if an entry is visible to the scan. * * There is a special check to never accept same tuple twice in a row. * This is faster than asking TUP. It also fixes some special cases * which are not analyzed or handled yet. */boolDbtux::scanVisible(ScanOpPtr scanPtr, TreeEnt ent){ const ScanOp& scan = *scanPtr.p; const Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); Uint32 fragBit = ent.m_fragBit; Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[fragBit]; Uint32 fragId = frag.m_fragId | fragBit; Uint32 tupAddr = getTupAddr(frag, ent); Uint32 tupVersion = ent.m_tupVersion; // check for same tuple twice in row if (scan.m_scanEnt.m_tupLoc == ent.m_tupLoc && scan.m_scanEnt.m_fragBit == fragBit) { jam(); return false; } Uint32 transId1 = scan.m_transId1; Uint32 transId2 = scan.m_transId2; Uint32 savePointId = scan.m_savePointId; bool ret = c_tup->tuxQueryTh(tableFragPtrI, tupAddr, tupVersion, transId1, transId2, savePointId); jamEntry(); return ret;}/* * Finish closing of scan and send conf. Any lock wait has been done * already. */voidDbtux::scanClose(Signal* signal, ScanOpPtr scanPtr){ ScanOp& scan = *scanPtr.p; ndbrequire(! scan.m_lockwait && scan.m_accLockOp == RNIL); // unlock all not unlocked by LQH if (! 
scan.m_accLockOps.isEmpty()) { jam(); abortAccLockOps(signal, scanPtr); } // send conf NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); conf->scanPtr = scanPtr.p->m_userPtr; conf->accOperationPtr = RNIL; conf->fragId = RNIL; unsigned signalLength = 3; sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF, signal, signalLength, JBB); releaseScanOp(scanPtr);}voidDbtux::abortAccLockOps(Signal* signal, ScanOpPtr scanPtr){ ScanOp& scan = *scanPtr.p;#ifdef VM_TRACE if (debugFlags & (DebugScan | DebugLock)) { debugOut << "Abort locks in scan " << scanPtr.i << " " << scan << endl; }#endif LocalDLFifoList<ScanLock> list(c_scanLockPool, scan.m_accLockOps); ScanLockPtr lockPtr; while (list.first(lockPtr)) { jam(); AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); lockReq->returnCode = RNIL; lockReq->requestInfo = AccLockReq::Abort; lockReq->accOpPtr = lockPtr.p->m_accLockOp; EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength); jamEntry(); ndbrequire(lockReq->returnCode == AccLockReq::Success); list.release(lockPtr); }}voidDbtux::addAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp){ ScanOp& scan = *scanPtr.p;#ifdef VM_TRACE if (debugFlags & (DebugScan | DebugLock)) { debugOut << "Add lock " << hex << accLockOp << dec << " to scan " << scanPtr.i << " " << scan << endl; }#endif LocalDLFifoList<ScanLock> list(c_scanLockPool, scan.m_accLockOps); ScanLockPtr lockPtr;#ifdef VM_TRACE list.first(lockPtr); while (lockPtr.i != RNIL) { ndbrequire(lockPtr.p->m_accLockOp != accLockOp); list.next(lockPtr); }#endif bool ok = list.seize(lockPtr); ndbrequire(ok); ndbrequire(accLockOp != RNIL); lockPtr.p->m_accLockOp = accLockOp;}voidDbtux::removeAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp){ ScanOp& scan = *scanPtr.p;#ifdef VM_TRACE if (debugFlags & (DebugScan | DebugLock)) { debugOut << "Remove lock " << hex << accLockOp << dec << " from scan " << scanPtr.i << " " << scan << endl; }#endif LocalDLFifoList<ScanLock> list(c_scanLockPool, 
scan.m_accLockOps); ScanLockPtr lockPtr; list.first(lockPtr); while (lockPtr.i != RNIL) { if (lockPtr.p->m_accLockOp == accLockOp) { jam(); break; } list.next(lockPtr); } ndbrequire(lockPtr.i != RNIL); list.release(lockPtr);}/* * Release allocated records. */voidDbtux::releaseScanOp(ScanOpPtr& scanPtr){#ifdef VM_TRACE if (debugFlags & DebugScan) { debugOut << "Release scan " << scanPtr.i << " " << *scanPtr.p << endl; }#endif Frag& frag = *c_fragPool.getPtr(scanPtr.p->m_fragPtrI); scanPtr.p->m_boundMin.release(); scanPtr.p->m_boundMax.release(); // unlink from per-fragment list and release from pool frag.m_scanList.release(scanPtr);}
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?