⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 testIndex.cpp

📁 mysql-5.0.22.tar.gz源码包
💻 CPP
📖 第 1 页 / 共 4 页
字号:
  return NDBT_OK;}intrunTransactions1(NDBT_Context* ctx, NDBT_Step* step){  // Verify that data in index match   // table data  Ndb* pNdb = GETNDB(step);  HugoTransactions hugoTrans(*ctx->getTab());  const int batchSize = ctx->getProperty("BatchSize", 50);  int rows = ctx->getNumRecords();  while (ctx->isTestStopped() == false) {    if (hugoTrans.pkUpdateRecords(pNdb, rows, batchSize) != 0){      g_err << "Updated table failed" << endl;      return NDBT_FAILED;    }        ctx->sync_down("PauseThreads");    if(ctx->isTestStopped())      break;        if (hugoTrans.scanUpdateRecords(pNdb, rows, batchSize) != 0){      g_err << "Updated table failed" << endl;      return NDBT_FAILED;    }            ctx->sync_down("PauseThreads");  }  return NDBT_OK;}intrunTransactions2(NDBT_Context* ctx, NDBT_Step* step){  // Verify that data in index match   // table data  Ndb* pNdb = GETNDB(step);  HugoTransactions hugoTrans(*ctx->getTab());  const int batchSize = ctx->getProperty("BatchSize", 50);  int rows = ctx->getNumRecords();  while (ctx->isTestStopped() == false) {#if 1    if (hugoTrans.indexReadRecords(pNdb, pkIdxName, rows, batchSize) != 0){      g_err << "Index read failed" << endl;      return NDBT_FAILED;    }#endif    ctx->sync_down("PauseThreads");    if(ctx->isTestStopped())      break;#if 1    if (hugoTrans.indexUpdateRecords(pNdb, pkIdxName, rows, batchSize) != 0){      g_err << "Index update failed" << endl;      return NDBT_FAILED;    }#endif    ctx->sync_down("PauseThreads");  }  return NDBT_OK;}intrunTransactions3(NDBT_Context* ctx, NDBT_Step* step){  // Verify that data in index match   // table data  Ndb* pNdb = GETNDB(step);  HugoTransactions hugoTrans(*ctx->getTab());  UtilTransactions utilTrans(*ctx->getTab());  const int batchSize = ctx->getProperty("BatchSize", 32);  const int parallel = batchSize > 240 ? 
240 : batchSize;  int rows = ctx->getNumRecords();  while (ctx->isTestStopped() == false) {    if(hugoTrans.loadTable(pNdb, rows, batchSize, false) != 0){      g_err << "Load table failed" << endl;      return NDBT_FAILED;    }    ctx->sync_down("PauseThreads");    if(ctx->isTestStopped())      break;    if (hugoTrans.pkUpdateRecords(pNdb, rows, batchSize) != 0){      g_err << "Updated table failed" << endl;      return NDBT_FAILED;    }        ctx->sync_down("PauseThreads");    if(ctx->isTestStopped())      break;        if (hugoTrans.indexReadRecords(pNdb, pkIdxName, rows, batchSize) != 0){      g_err << "Index read failed" << endl;      return NDBT_FAILED;    }        ctx->sync_down("PauseThreads");    if(ctx->isTestStopped())      break;        if (hugoTrans.indexUpdateRecords(pNdb, pkIdxName, rows, batchSize) != 0){      g_err << "Index update failed" << endl;      return NDBT_FAILED;    }        ctx->sync_down("PauseThreads");    if(ctx->isTestStopped())      break;    if (hugoTrans.scanUpdateRecords(pNdb, rows, 5, parallel) != 0){      g_err << "Scan updated table failed" << endl;      return NDBT_FAILED;    }    ctx->sync_down("PauseThreads");    if(ctx->isTestStopped())      break;    if(utilTrans.clearTable(pNdb, rows, parallel) != 0){      g_err << "Clear table failed" << endl;      return NDBT_FAILED;    }    ctx->sync_down("PauseThreads");    if(ctx->isTestStopped())      break;        int count = -1;    if(utilTrans.selectCount(pNdb, 64, &count) != 0 || count != 0)      return NDBT_FAILED;    ctx->sync_down("PauseThreads");  }  return NDBT_OK;}int runRestarts(NDBT_Context* ctx, NDBT_Step* step){  int result = NDBT_OK;  int loops = ctx->getNumLoops();  NDBT_TestCase* pCase = ctx->getCase();  NdbRestarts restarts;  int i = 0;  int timeout = 240;  int sync_threads = ctx->getProperty("Threads", (unsigned)0);  while(i<loops && result != NDBT_FAILED && !ctx->isTestStopped()){    if(restarts.executeRestart("RestartRandomNodeAbort", timeout) != 0){      g_err 
<< "Failed to executeRestart(" <<pCase->getName() <<")" << endl;      result = NDBT_FAILED;      break;    }        ctx->sync_up_and_wait("PauseThreads", sync_threads);    i++;  }  ctx->stopTest();  return result;}int runCreateLoadDropIndex(NDBT_Context* ctx, NDBT_Step* step){  int loops = ctx->getNumLoops();  int records = ctx->getNumRecords();  int l = 0;  const NdbDictionary::Table* pTab = ctx->getTab();  Ndb* pNdb = GETNDB(step);  int result = NDBT_OK;  int batchSize = ctx->getProperty("BatchSize", 1);  int parallelism = batchSize > 240? 240: batchSize;  ndbout << "batchSize="<<batchSize<<endl;  bool logged = ctx->getProperty("LoggedIndexes", 1);  HugoTransactions hugoTrans(*pTab);  UtilTransactions utilTrans(*pTab);  AttribList attrList;  attrList.buildAttribList(pTab);  for (unsigned int i = 0; i < attrList.attriblist.size(); i++){            while (l < loops && result == NDBT_OK){      if ((l % 2) == 0){	// Create index first and then load		// Try to create index	if (create_index(ctx, i, pTab, pNdb, attrList.attriblist[i], logged) == NDBT_FAILED){	  result = NDBT_FAILED;      	}		// Load the table with data	ndbout << "Loading data after" << endl;	CHECK(hugoTrans.loadTable(pNdb, records, batchSize) == 0);		      } else {	// Load table then create index		// Load the table with data	ndbout << "Loading data before" << endl;	CHECK(hugoTrans.loadTable(pNdb, records, batchSize) == 0);		// Try to create index	if (create_index(ctx, i, pTab, pNdb, attrList.attriblist[i], logged) == NDBT_FAILED)	  result = NDBT_FAILED;      	      }            // Verify that data in index match       // table data      CHECK(utilTrans.verifyIndex(pNdb, idxName, parallelism) == 0);            // Do it all...      
ndbout <<"Doing it all"<<endl;      int count;      ndbout << "  pkUpdateRecords" << endl;      CHECK(hugoTrans.pkUpdateRecords(pNdb, records, batchSize) == 0);      CHECK(utilTrans.verifyIndex(pNdb, idxName, parallelism) == 0);      CHECK(hugoTrans.pkUpdateRecords(pNdb, records, batchSize) == 0);      CHECK(utilTrans.verifyIndex(pNdb, idxName, parallelism) == 0);      ndbout << "  pkDelRecords half" << endl;      CHECK(hugoTrans.pkDelRecords(pNdb, records/2, batchSize) == 0);      CHECK(utilTrans.verifyIndex(pNdb, idxName, parallelism) == 0);      ndbout << "  scanUpdateRecords" << endl;      CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2, parallelism) == 0);      CHECK(utilTrans.verifyIndex(pNdb, idxName, parallelism) == 0);      ndbout << "  clearTable" << endl;      CHECK(utilTrans.clearTable(pNdb, records/2, parallelism) == 0);      CHECK(utilTrans.verifyIndex(pNdb, idxName, parallelism) == 0);      CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);      CHECK(count == 0);      ndbout << "  loadTable" << endl;      CHECK(hugoTrans.loadTable(pNdb, records, batchSize) == 0);      CHECK(utilTrans.verifyIndex(pNdb, idxName, parallelism) == 0);      ndbout << "  loadTable again" << endl;      CHECK(hugoTrans.loadTable(pNdb, records, batchSize) == 0);      CHECK(utilTrans.verifyIndex(pNdb, idxName, parallelism) == 0);      CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);      CHECK(count == records);      if ((l % 2) == 0){	// Drop index first and then clear		// Try to create index	if (drop_index(i, pNdb, pTab, attrList.attriblist[i]) != NDBT_OK){	  result = NDBT_FAILED;      	}		// Clear table	ndbout << "Clearing table after" << endl;	CHECK(hugoTrans.clearTable(pNdb, records, parallelism) == 0);		      } else {	// Clear table then drop index		//Clear table	ndbout << "Clearing table before" << endl;	CHECK(hugoTrans.clearTable(pNdb, records, parallelism) == 0);		// Try to drop index	if (drop_index(i, pNdb, pTab, attrList.attriblist[i]) != NDBT_OK)	  result = 
NDBT_FAILED;      	      }            ndbout << "  Done!" << endl;      l++;    }          // Make sure index is dropped    drop_index(i, pNdb, pTab, attrList.attriblist[i]);  }  return result;}int runInsertDelete(NDBT_Context* ctx, NDBT_Step* step){  int loops = ctx->getNumLoops();  int records = ctx->getNumRecords();  const NdbDictionary::Table* pTab = ctx->getTab();  Ndb* pNdb = GETNDB(step);  int result = NDBT_OK;  int batchSize = ctx->getProperty("BatchSize", 1);  int parallelism = batchSize > 240? 240: batchSize;  ndbout << "batchSize="<<batchSize<<endl;  bool logged = ctx->getProperty("LoggedIndexes", 1);  HugoTransactions hugoTrans(*pTab);  UtilTransactions utilTrans(*pTab);    AttribList attrList;  attrList.buildAttribList(pTab);  for (unsigned int i = 0; i < attrList.attriblist.size(); i++){        Attrib* attr = attrList.attriblist[i];     // Create index    if (create_index(ctx, i, pTab, pNdb, attr, logged) == NDBT_OK){      int l = 1;      while (l <= loops && result == NDBT_OK){  	CHECK(hugoTrans.loadTable(pNdb, records, batchSize) == 0);	CHECK(utilTrans.verifyIndex(pNdb, idxName, parallelism) == 0);	CHECK(utilTrans.clearTable(pNdb, records, parallelism) == 0);	CHECK(utilTrans.verifyIndex(pNdb, idxName, parallelism) == 0);	l++;	          }                        // Drop index      if (drop_index(i, pNdb, pTab, attr) != NDBT_OK)	result = NDBT_FAILED;      		         }      }    return result;}int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){  int records = ctx->getNumRecords();    HugoTransactions hugoTrans(*ctx->getTab());  int batchSize = ctx->getProperty("BatchSize", 1);  if(hugoTrans.loadTable(GETNDB(step), records, batchSize) != 0){    return NDBT_FAILED;  }  return NDBT_OK;}int runClearTable(NDBT_Context* ctx, NDBT_Step* step){  int records = ctx->getNumRecords();    UtilTransactions utilTrans(*ctx->getTab());  if (utilTrans.clearTable(GETNDB(step),  records) != 0){    return NDBT_FAILED;  }  return NDBT_OK;}int 
runSystemRestart1(NDBT_Context* ctx, NDBT_Step* step){  Ndb* pNdb = GETNDB(step);  int result = NDBT_OK;  int timeout = 300;  Uint32 loops = ctx->getNumLoops();  int records = ctx->getNumRecords();  int count;  NdbRestarter restarter;  Uint32 i = 1;  UtilTransactions utilTrans(*ctx->getTab());  HugoTransactions hugoTrans(*ctx->getTab());  const char * name = ctx->getTab()->getName();  while(i<=loops && result != NDBT_FAILED){    ndbout << "Loop " << i << "/"<< loops <<" started" << endl;    /*      1. Load data      2. Restart cluster and verify records      3. Update records      4. Restart cluster and verify records      5. Delete half of the records      6. Restart cluster and verify records      7. Delete all records      8. Restart cluster and verify records      9. Insert, update, delete records      10. Restart cluster and verify records      11. Insert, update, delete records      12. Restart cluster with error insert 5020 and verify records    */    ndbout << "Loading records..." << endl;    CHECK(hugoTrans.loadTable(pNdb, records, 1) == 0);    CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);        ndbout << "Restarting cluster" << endl;    CHECK(restarter.restartAll() == 0);    CHECK(restarter.waitClusterStarted(timeout) == 0);    CHECK(pNdb->waitUntilReady(timeout) == 0);    ndbout << "Verifying records..." << endl;    CHECK(hugoTrans.pkReadRecords(pNdb, records) == 0);    CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);    CHECK(count == records);    CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -