📄 dbtuxmeta.cpp
字号:
/* Copyright (C) 2003 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */

#define DBTUX_META_CPP
#include "Dbtux.hpp"
#include <my_sys.h>

/*
 * Create index.
 *
 * For historical reasons it looks like we are adding random fragments
 * and attributes to existing index. In fact all fragments must be
 * created at one time and they have identical attributes.
 *
 * Handles one TUXFRAGREQ per fragment: seizes a FragOp (the "add
 * fragment" operation record) and a Frag record, links the fragment
 * into the index, and on fragment 0 moves the index into Defining
 * state and allocates its attribute-descriptor storage.  Replies with
 * TUXFRAGCONF on success, TUXFRAGREF on error.  A request whose first
 * signal word is (Uint32)-1 aborts the pending add-fragment operation.
 */
void
Dbtux::execTUXFRAGREQ(Signal* signal)
{
  jamEntry();
  // special abort request from DICT: no TuxFragReq payload follows
  if (signal->theData[0] == (Uint32)-1) {
    jam();
    abortAddFragOp(signal);
    return;
  }
  // copy the request out of the signal buffer: getDataPtrSend() below
  // reuses the same buffer for the CONF/REF reply
  const TuxFragReq reqCopy = *(const TuxFragReq*)signal->getDataPtr();
  const TuxFragReq* const req = &reqCopy;
  IndexPtr indexPtr;
  indexPtr.i = RNIL;
  FragOpPtr fragOpPtr;
  fragOpPtr.i = RNIL;
  TuxFragRef::ErrorCode errorCode = TuxFragRef::NoError;
  do {
    // get the index record
    if (req->tableId >= c_indexPool.getSize()) {
      jam();
      errorCode = TuxFragRef::InvalidRequest;
      break;
    }
    c_indexPool.getPtr(indexPtr, req->tableId);
    if (indexPtr.p->m_state != Index::NotDefined &&
        indexPtr.p->m_state != Index::Defining) {
      jam();
      errorCode = TuxFragRef::InvalidRequest;
      indexPtr.i = RNIL; // leave alone
      break;
    }
    // get new operation record
    c_fragOpPool.seize(fragOpPtr);
    ndbrequire(fragOpPtr.i != RNIL);
    new (fragOpPtr.p) FragOp();
    fragOpPtr.p->m_userPtr = req->userPtr;
    fragOpPtr.p->m_userRef = req->userRef;
    fragOpPtr.p->m_indexId = req->tableId;
    fragOpPtr.p->m_fragId = req->fragId;
    // fragment number = position in the index's fragment list
    fragOpPtr.p->m_fragNo = indexPtr.p->m_numFrags;
    fragOpPtr.p->m_numAttrsRecvd = 0;
#ifdef VM_TRACE
    if (debugFlags & DebugMeta) {
      debugOut << "Seize frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
    }
#endif
    // check if index has place for more fragments
    ndbrequire(indexPtr.p->m_numFrags < MaxIndexFragments);
    // seize new fragment record
    FragPtr fragPtr;
    c_fragPool.seize(fragPtr);
    if (fragPtr.i == RNIL) {
      jam();
      errorCode = TuxFragRef::NoFreeFragment;
      break;
    }
    new (fragPtr.p) Frag(c_scanOpPool);
    fragPtr.p->m_tableId = req->primaryTableId;
    fragPtr.p->m_indexId = req->tableId;
    fragPtr.p->m_fragId = req->fragId;
    fragPtr.p->m_numAttrs = req->noOfAttr;
    fragPtr.p->m_storeNullKey = true;  // not yet configurable
    // cache TUP/ACC fragment addresses given in the request
    fragPtr.p->m_tupIndexFragPtrI = req->tupIndexFragPtrI;
    fragPtr.p->m_tupTableFragPtrI[0] = req->tupTableFragPtrI[0];
    fragPtr.p->m_tupTableFragPtrI[1] = req->tupTableFragPtrI[1];
    fragPtr.p->m_accTableFragPtrI[0] = req->accTableFragPtrI[0];
    fragPtr.p->m_accTableFragPtrI[1] = req->accTableFragPtrI[1];
    // add the fragment to the index
    indexPtr.p->m_fragId[indexPtr.p->m_numFrags] = req->fragId;
    indexPtr.p->m_fragPtrI[indexPtr.p->m_numFrags] = fragPtr.i;
    indexPtr.p->m_numFrags++;
    // save under operation
    fragOpPtr.p->m_fragPtrI = fragPtr.i;
    // prepare to receive attributes
    if (fragOpPtr.p->m_fragNo == 0) {
      jam();
      // receiving first fragment: this request also defines the index
      ndbrequire(
          indexPtr.p->m_state == Index::NotDefined &&
          DictTabInfo::isOrderedIndex(req->tableType) &&
          req->noOfAttr > 0 &&
          req->noOfAttr <= MaxIndexAttributes &&
          indexPtr.p->m_descPage == RNIL);
      indexPtr.p->m_state = Index::Defining;
      indexPtr.p->m_tableType = (DictTabInfo::TableType)req->tableType;
      indexPtr.p->m_tableId = req->primaryTableId;
      indexPtr.p->m_numAttrs = req->noOfAttr;
      indexPtr.p->m_storeNullKey = true;  // not yet configurable
      // allocate attribute descriptors
      if (! allocDescEnt(indexPtr)) {
        jam();
        errorCode = TuxFragRef::NoFreeAttributes;
        break;
      }
    } else {
      // receiving subsequent fragment: must agree with first fragment
      jam();
      ndbrequire(
          indexPtr.p->m_state == Index::Defining &&
          indexPtr.p->m_tableType == (DictTabInfo::TableType)req->tableType &&
          indexPtr.p->m_tableId == req->primaryTableId &&
          indexPtr.p->m_numAttrs == req->noOfAttr);
    }
    // copy metadata address to each fragment
    fragPtr.p->m_descPage = indexPtr.p->m_descPage;
    fragPtr.p->m_descOff = indexPtr.p->m_descOff;
#ifdef VM_TRACE
    if (debugFlags & DebugMeta) {
      debugOut << "Add frag " << fragPtr.i << " " << *fragPtr.p << endl;
    }
#endif
    // error inserts: 12001 fails fragment 0, 12002 fails fragment 1
    // (note && binds tighter than ||, so no parentheses are needed)
    if (ERROR_INSERTED(12001) && fragOpPtr.p->m_fragNo == 0 ||
        ERROR_INSERTED(12002) && fragOpPtr.p->m_fragNo == 1) {
      jam();
      errorCode = (TuxFragRef::ErrorCode)1;
      CLEAR_ERROR_INSERT_VALUE;
      break;
    }
    // success: tuxConnectPtr identifies the FragOp for TUX_ADD_ATTRREQ
    TuxFragConf* const conf = (TuxFragConf*)signal->getDataPtrSend();
    conf->userPtr = req->userPtr;
    conf->tuxConnectPtr = fragOpPtr.i;
    conf->fragPtr = fragPtr.i;
    conf->fragId = fragPtr.p->m_fragId;
    sendSignal(req->userRef, GSN_TUXFRAGCONF,
               signal, TuxFragConf::SignalLength, JBB);
    return;
  } while (0);
  // error
  TuxFragRef* const ref = (TuxFragRef*)signal->getDataPtrSend();
  ref->userPtr = req->userPtr;
  ref->errorCode = errorCode;
  sendSignal(req->userRef, GSN_TUXFRAGREF,
             signal, TuxFragRef::SignalLength, JBB);
  // release the operation record if it was seized before the failure;
  // the Frag record (if any) is not released here — see note below
  if (fragOpPtr.i != RNIL) {
#ifdef VM_TRACE
    if (debugFlags & DebugMeta) {
      debugOut << "Release on frag error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
    }
#endif
    c_fragOpPool.release(fragOpPtr);
  }
  if (indexPtr.i != RNIL) {
    jam();
    // let DICT drop the unfinished index
  }
}

/*
 * Add one index attribute.
 *
 * DICT sends one TUX_ADD_ATTRREQ per attribute, addressed via the
 * tuxConnectPtr (FragOp) returned in TUXFRAGCONF.  Attributes must
 * arrive in ascending attrId order (enforced by ndbrequire).  Each
 * attribute's descriptor, primary attribute id, type and charset are
 * stored in the index's DescEnt.  When the last attribute arrives the
 * fragment's T-tree header is initialized and node-size invariants
 * are checked.
 */
void
Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
{
  jamEntry();
  // copy request out of the signal buffer (reply reuses the buffer)
  const TuxAddAttrReq reqCopy = *(const TuxAddAttrReq*)signal->getDataPtr();
  const TuxAddAttrReq* const req = &reqCopy;
  // get the records
  FragOpPtr fragOpPtr;
  IndexPtr indexPtr;
  FragPtr fragPtr;
  c_fragOpPool.getPtr(fragOpPtr, req->tuxConnectPtr);
  c_indexPool.getPtr(indexPtr, fragOpPtr.p->m_indexId);
  c_fragPool.getPtr(fragPtr, fragOpPtr.p->m_fragPtrI);
  TuxAddAttrRef::ErrorCode errorCode = TuxAddAttrRef::NoError;
  do {
    // expected attribute id: must match the count received so far
    const unsigned attrId = fragOpPtr.p->m_numAttrsRecvd++;
    ndbrequire(
        indexPtr.p->m_state == Index::Defining &&
        attrId < indexPtr.p->m_numAttrs &&
        attrId == req->attrId);
    // define the attribute
    DescEnt& descEnt = getDescEnt(indexPtr.p->m_descPage, indexPtr.p->m_descOff);
    DescAttr& descAttr = descEnt.m_descAttr[attrId];
    descAttr.m_attrDesc = req->attrDescriptor;
    descAttr.m_primaryAttrId = req->primaryAttrId;
    descAttr.m_typeId = AttributeDescriptor::getType(req->attrDescriptor);
    // charset number is carried in the high 16 bits of extTypeInfo
    descAttr.m_charset = (req->extTypeInfo >> 16);
#ifdef VM_TRACE
    if (debugFlags & DebugMeta) {
      debugOut << "Add frag " << fragPtr.i << " attr " << attrId << " " << descAttr << endl;
    }
#endif
    // check that type is valid and has a binary comparison method
    const NdbSqlUtil::Type& type = NdbSqlUtil::getTypeBinary(descAttr.m_typeId);
    if (type.m_typeId == NdbSqlUtil::Type::Undefined ||
        type.m_cmp == 0) {
      jam();
      errorCode = TuxAddAttrRef::InvalidAttributeType;
      break;
    }
    // non-zero charset: verify the type/charset combination is usable
    // in an ordered index
    if (descAttr.m_charset != 0) {
      uint err;
      CHARSET_INFO *cs = all_charsets[descAttr.m_charset];
      ndbrequire(cs != 0);
      if ((err = NdbSqlUtil::check_column_for_ordered_index(descAttr.m_typeId, cs))) {
        jam();
        errorCode = (TuxAddAttrRef::ErrorCode) err;
        break;
      }
    }
    const bool lastAttr = (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd);
    // error inserts 12003-12006: fail on first/last attribute of
    // fragment 0/1 (&& binds tighter than ||)
    if (ERROR_INSERTED(12003) && fragOpPtr.p->m_fragNo == 0 && attrId == 0 ||
        ERROR_INSERTED(12004) && fragOpPtr.p->m_fragNo == 0 && lastAttr ||
        ERROR_INSERTED(12005) && fragOpPtr.p->m_fragNo == 1 && attrId == 0 ||
        ERROR_INSERTED(12006) && fragOpPtr.p->m_fragNo == 1 && lastAttr) {
      errorCode = (TuxAddAttrRef::ErrorCode)1;
      CLEAR_ERROR_INSERT_VALUE;
      break;
    }
    if (lastAttr) {
      jam();
      // initialize tree header
      TreeHead& tree = fragPtr.p->m_tree;
      new (&tree) TreeHead();
      // make these configurable later
      tree.m_nodeSize = MAX_TTREE_NODE_SIZE;
      tree.m_prefSize = MAX_TTREE_PREF_SIZE;
      const unsigned maxSlack = MAX_TTREE_NODE_SLACK;
      // size up to and including first 2 entries
      const unsigned pref = tree.getSize(AccPref);
      if (! (pref <= tree.m_nodeSize)) {
        jam();
        errorCode = TuxAddAttrRef::InvalidNodeSize;
        break;
      }
      const unsigned slots = (tree.m_nodeSize - pref) / TreeEntSize;
      // leave out work space entry
      tree.m_maxOccup = 2 + slots - 1;
      // min occupancy of interior node must be at least 2
      if (! (2 + maxSlack <= tree.m_maxOccup)) {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -