📄 createindexconstantaction.java
字号:
{ indexProperties = properties; } else { indexProperties = new Properties(); } // Tell it the conglomerate id of the base table indexProperties.put("baseConglomerateId", Long.toString(td.getHeapConglomerateId())); // All indexes are unique because they contain the RowLocation. // The number of uniqueness columns must include the RowLocation // if the user did not specify a unique index. indexProperties.put("nUniqueColumns", Integer.toString(unique ? baseColumnPositions.length : baseColumnPositions.length + 1) ); // By convention, the row location column is the last column indexProperties.put("rowLocationColumn", Integer.toString(baseColumnPositions.length)); // For now, all columns are key fields, including the RowLocation indexProperties.put("nKeyFields", Integer.toString(baseColumnPositions.length + 1)); // For now, assume that all index columns are ordered columns if (! duplicate) { indexRowGenerator = new IndexRowGenerator(indexType, unique, baseColumnPositions, isAscending, baseColumnPositions.length); } /* Now add the rows from the base table to the conglomerate. * We do this by scanning the base table and inserting the * rows into a sorter before inserting from the sorter * into the index. This gives us better performance * and a more compact index. */ rowSource = null; sortId = 0; boolean needToDropSort = false; // set to true once the sorter is created /* bulkFetchSIze will be 16 (for now) unless * we are creating the table in which case it * will be 1. Too hard to remove scan when * creating index on new table, so minimize * work where we can. */ int bulkFetchSize = (forCreateTable) ? 
1 : 16; int numColumns = td.getNumberOfColumns(); int approximateRowSize = 0; // Create the FormatableBitSet for mapping the partial to full base row FormatableBitSet bitSet = new FormatableBitSet(numColumns+1); for (int index = 0; index < baseColumnPositions.length; index++) { bitSet.set(baseColumnPositions[index]); } FormatableBitSet zeroBasedBitSet = RowUtil.shift(bitSet, 1); // Start by opening a full scan on the base table. scan = tc.openGroupFetchScan( td.getHeapConglomerateId(), false, // hold 0, // open base table read only TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE, zeroBasedBitSet, // all fields as objects (DataValueDescriptor[]) null, // startKeyValue 0, // not used when giving null start posn. null, // qualifier (DataValueDescriptor[]) null, // stopKeyValue 0); // not used when giving null stop posn. // Create an array to put base row template baseRows = new ExecRow[bulkFetchSize]; indexRows = new ExecIndexRow[bulkFetchSize]; compactBaseRows = new ExecRow[bulkFetchSize]; try { // Create the array of base row template for (int i = 0; i < bulkFetchSize; i++) { // create a base row template baseRows[i] = activation.getExecutionFactory().getValueRow(maxBaseColumnPosition); // create an index row template indexRows[i] = indexRowGenerator.getIndexRowTemplate(); // create a compact base row template compactBaseRows[i] = activation.getExecutionFactory().getValueRow( baseColumnPositions.length); } indexTemplateRow = indexRows[0]; // Fill the partial row with nulls of the correct type ColumnDescriptorList cdl = td.getColumnDescriptorList(); int cdlSize = cdl.size(); for (int index = 0, numSet = 0; index < cdlSize; index++) { if (! 
zeroBasedBitSet.get(index)) { continue; } numSet++; ColumnDescriptor cd = (ColumnDescriptor) cdl.elementAt(index); DataTypeDescriptor dts = cd.getType(); for (int i = 0; i < bulkFetchSize; i++) { // Put the column in both the compact and sparse base rows baseRows[i].setColumn(index + 1, dts.getNull()); compactBaseRows[i].setColumn(numSet, baseRows[i].getColumn(index + 1)); } // Calculate the approximate row size for the index row approximateRowSize += dts.getTypeId().getApproximateLengthInBytes(dts); } // Get an array of RowLocation template RowLocation rl[] = new RowLocation[bulkFetchSize]; for (int i = 0; i < bulkFetchSize; i++) { rl[i] = scan.newRowLocationTemplate(); // Get an index row based on the base row indexRowGenerator.getIndexRow(compactBaseRows[i], rl[i], indexRows[i], bitSet); } /* now that we got indexTemplateRow, done for duplicate index */ if (duplicate) return; /* For non-unique indexes, we order by all columns + the RID. * For unique indexes, we just order by the columns. * We create a unique index observer for unique indexes * so that we can catch duplicate key. * We create a basic sort observer for non-unique indexes * so that we can reuse the wrappers during an external * sort. 
*/ int numColumnOrderings; SortObserver sortObserver = null; if (unique) { numColumnOrderings = baseColumnPositions.length; // if the index is a constraint, use constraintname in possible error messagge String indexOrConstraintName = indexName; if (conglomerateUUID != null) { ConglomerateDescriptor cd = dd.getConglomerateDescriptor(conglomerateUUID); if ((isConstraint) && (cd != null && cd.getUUID() != null && td != null)) { ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID()); indexOrConstraintName = conDesc.getConstraintName(); } } sortObserver = new UniqueIndexSortObserver(true, isConstraint, indexOrConstraintName, indexTemplateRow, true, td.getName()); } else { numColumnOrderings = baseColumnPositions.length + 1; sortObserver = new BasicSortObserver(true, false, indexTemplateRow, true); } ColumnOrdering[] order = new ColumnOrdering[numColumnOrderings]; for (int i=0; i < numColumnOrderings; i++) { order[i] = new IndexColumnOrder(i, unique || i < numColumnOrderings - 1 ? isAscending[i] : true); } // create the sorter sortId = tc.createSort((Properties)null, indexTemplateRow.getRowArrayClone(), order, sortObserver, false, // not in order scan.getEstimatedRowCount(), approximateRowSize // est row size, -1 means no idea ); needToDropSort = true; // Populate sorter and get the output of the sorter into a row // source. The sorter has the indexed columns only and the columns // are in the correct order. 
rowSource = loadSorter(baseRows, indexRows, tc, scan, sortId, rl); conglomId = tc.createAndLoadConglomerate( indexType, indexTemplateRow.getRowArray(), // index row template order, //colums sort order indexProperties, TransactionController.IS_DEFAULT, // not temporary rowSource, (long[]) null); } finally { /* close the table scan */ if (scan != null) scan.close(); /* close the sorter row source before throwing exception */ if (rowSource != null) rowSource.closeRowSource(); /* ** drop the sort so that intermediate external sort run can be ** removed from disk */ if (needToDropSort) tc.dropSort(sortId); } ConglomerateController indexController = tc.openConglomerate( conglomId, false, 0, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE); // Check to make sure that the conglomerate can be used as an index if ( ! indexController.isKeyed()) { indexController.close(); throw StandardException.newException(SQLState.LANG_NON_KEYED_INDEX, indexName, indexType); } indexController.close(); // // Create a conglomerate descriptor with the conglomId filled in and // add it. // ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(conglomId, indexName, true, indexRowGenerator, isConstraint, conglomerateUUID, td.getUUID(), sd.getUUID() ); dd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc); // add newly added conglomerate to the list of conglomerate descriptors // in the td. 
ConglomerateDescriptorList cdl = td.getConglomerateDescriptorList(); cdl.add(cgd); CardinalityCounter cCount = (CardinalityCounter)rowSource; long numRows; if ((numRows = cCount.getRowCount()) > 0) { long[] c = cCount.getCardinality(); for (int i = 0; i < c.length; i++) { StatisticsDescriptor statDesc = new StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(), cgd.getUUID(), td.getUUID(), "I", new StatisticsImpl(numRows, c[i]), i + 1); dd.addDescriptor(statDesc, null, DataDictionary.SYSSTATISTICS_CATALOG_NUM, true, tc); } } } // CLASS METHODS /////////////////////////////////////////////////////////////////////// // // GETTERs called by CreateConstraint // /////////////////////////////////////////////////////////////////////// ExecRow getIndexTemplateRow() { return indexTemplateRow; } /** * Do necessary clean up (close down controllers, etc.) before throwing * a statement exception. * * @param scan ScanController for the heap * @param indexController ConglomerateController for the index * * @return Nothing. */ private void statementExceptionCleanup( ScanController scan, ConglomerateController indexController) throws StandardException { if (indexController != null) { indexController.close(); } if (scan != null) { scan.close(); } } /** * Scan the base conglomerate and insert the keys into a sorter, * returning a rowSource on the sorter. * * @return RowSource on the sorted index keys. 
 *
 * @param baseRows   base-row templates, one per bulk-fetch slot; their column
 *                   arrays are handed to the scan via {@code getRowArray()}
 * @param indexRows  index-row templates, one per slot; each shares its column
 *                   objects with the matching {@code baseRows} entry (wired up
 *                   by the caller through {@code getIndexRow}), so filling a
 *                   base row during the scan fills the index row too
 * @param tc         transaction controller used to open the sort and the
 *                   sorted row source
 * @param scan       open group-fetch scan over the base table's heap
 * @param sortId     id of the sort previously created by the caller
 * @param rl         row-location templates, one per bulk-fetch slot
 *
 * @exception StandardException thrown on error
 */
private RowLocationRetRowSource loadSorter(ExecRow[] baseRows,
                                           ExecIndexRow[] indexRows,
                                           TransactionController tc,
                                           GroupFetchScanController scan,
                                           long sortId,
                                           RowLocation rl[])
    throws StandardException
{
    SortController sorter;
    long rowCount = 0;

    sorter = tc.openSort(sortId);

    try
    {
        // Step through all the rows in the base table
        // prepare an array or rows for bulk fetch
        int bulkFetchSize = baseRows.length;

        // Sanity: the three parallel arrays must be the same length, since
        // slot i of each describes the same fetched row.
        if (SanityManager.DEBUG)
        {
            SanityManager.ASSERT(bulkFetchSize == indexRows.length,
                "number of base rows and index rows does not match");
            SanityManager.ASSERT(bulkFetchSize == rl.length,
                "number of base rows and row locations does not match");
        }

        DataValueDescriptor[][] baseRowArray =
            new DataValueDescriptor[bulkFetchSize][];

        for (int i = 0; i < bulkFetchSize; i++)
            baseRowArray[i] = baseRows[i].getRowArray();

        // rl[i] and baseRowArray[i] and indexRows[i] are all tied up
        // beneath the surface.  Fetching the base row and row location
        // from the table scan will automagically set up the indexRow
        // fetchNextGroup will return how many rows are actually fetched.
        int bulkFetched = 0;

        while ((bulkFetched = scan.fetchNextGroup(baseRowArray, rl)) > 0)
        {
            // Each fetched group is pushed into the sorter; the index row
            // already reflects the fetched base row (see aliasing note above).
            for (int i = 0; i < bulkFetched; i++)
            {
                sorter.insert(indexRows[i].getRowArray());
                rowCount++;
            }
        }

        /*
        ** We've just done a full scan on the heap, so set the number
        ** of rows so the optimizer will have an accurate count.
        */
        scan.setEstimatedRowCount(rowCount);
    }
    finally
    {
        // Always release the sorter, even if an insert failed above.
        sorter.close();
    }

    // Wrap the sorted output so the caller can also collect per-column
    // cardinality statistics while draining the row source.
    return new CardinalityCounter(tc.openSortRowSource(sortId));
}
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -