insertresultset.java
                                        indexDCOCIs[index]);

            // Get the properties on the old index
            indexCC.getInternalTablePropertySet(properties);

            /* Create the properties that language supplies when creating
             * the index.  (The store doesn't preserve these.)
             */
            int indexRowLength = indexRows[index].nColumns();
            properties.put("baseConglomerateId", Long.toString(newHeapConglom));
            if (cd.getIndexDescriptor().isUnique())
            {
                properties.put("nUniqueColumns",
                               Integer.toString(indexRowLength - 1));
            }
            else
            {
                properties.put("nUniqueColumns",
                               Integer.toString(indexRowLength));
            }
            properties.put("rowLocationColumn",
                           Integer.toString(indexRowLength - 1));
            properties.put("nKeyFields", Integer.toString(indexRowLength));

            indexCC.close();

            // We can finally drain the sorter and rebuild the index
            // RESOLVE - all indexes are btrees right now
            // Populate the index.
            sorters[index].close();
            sorters[index] = null;
            rowSources[index] =
                new CardinalityCounter(tc.openSortRowSource(sortIds[index]));

            newIndexCongloms[index] = tc.createAndLoadConglomerate(
                                        "BTREE",
                                        indexRows[index].getRowArray(),
                                        ordering[index],
                                        properties,
                                        TransactionController.IS_DEFAULT,
                                        rowSources[index],
                                        (long[]) null);
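            /* The CardinalityCounter wrapped around the sort row source counted
             * the rows (and the cardinality of the key columns) while the index
             * was being loaded above.  If any rows were loaded, record one
             * statistics row per cardinality value, together with the total row
             * count, in the SYSSTATISTICS system catalog.
             */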
            CardinalityCounter cCount = (CardinalityCounter) rowSources[index];
            long numRows;
            if ((numRows = cCount.getRowCount()) > 0)
            {
                long[] c = cCount.getCardinality();
                DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();

                for (int i = 0; i < c.length; i++)
                {
                    StatisticsDescriptor statDesc =
                        new StatisticsDescriptor(dd,
                                                 dd.getUUIDFactory().createUUID(),
                                                 cd.getUUID(), td.getUUID(), "I",
                                                 new StatisticsImpl(numRows, c[i]),
                                                 i + 1);
                    dd.addDescriptor(statDesc, null,
                                     DataDictionary.SYSSTATISTICS_CATALOG_NUM,
                                     true, tc);
                }
            }

            /* Update the DataDictionary
             * RESOLVE - this will change in 1.4 because we will get
             * back the same conglomerate number
             *
             * Update sys.sysconglomerates with the new conglomerate #; if the
             * conglomerate is shared by duplicate indexes, all the descriptors
             * for those indexes need to be updated with the new number.
             */
            dd.updateConglomerateDescriptor(
                td.getConglomerateDescriptors(constants.indexCIDS[index]),
                newIndexCongloms[index], tc);

            // Drop the old conglomerate
            tc.dropConglomerate(constants.indexCIDS[index]);

            indexConversionTable.put(new Long(constants.indexCIDS[index]),
                                     new Long(newIndexCongloms[index]));
        }
    }

    /**
     * @see ResultSet#cleanUp
     *
     * @exception StandardException        Thrown on error
     */
    public void cleanUp() throws StandardException
    {
        if (tableScan != null)
        {
            tableScan.close();
            tableScan = null;
        }

        if (triggerActivator != null)
        {
            triggerActivator.cleanup();
            // triggerActivator is reused across executions
        }

        /* Close down the source ResultSet tree */
        if (sourceResultSet != null)
        {
            sourceResultSet.close();
            // sourceResultSet is reused across executions
        }
        numOpens = 0;

        if (rowChanger != null)
        {
            rowChanger.close();
        }

        if (rowHolder != null)
        {
            rowHolder.close();
        }

        if (fkChecker != null)
        {
            fkChecker.close();
            // fkChecker is reused across executions
        }

        if (bulkHeapCC != null)
        {
            bulkHeapCC.close();
            bulkHeapCC = null;
        }

        if (bulkHeapSC != null)
        {
            bulkHeapSC.close();
            bulkHeapSC = null;
        }

        // Close each sorter
        if (sorters != null)
        {
            for (int index = 0; index < constants.irgs.length; index++)
            {
                if (sorters[index] != null)
                {
                    sorters[index].close();
                }
                sorters[index] = null;
            }
        }

        if (needToDropSort != null)
        {
            for (int index = 0; index < needToDropSort.length; index++)
            {
                if (needToDropSort[index])
                {
                    tc.dropSort(sortIds[index]);
                    needToDropSort[index] = false;
                }
            }
        }

        if (rowSources != null)
        {
            for (int index = 0; index < rowSources.length; index++)
            {
                if (rowSources[index] != null)
                {
                    rowSources[index].closeRowSource();
                    rowSources[index] = null;
                }
            }
        }
        super.close();
    }

    // Class implementation

    /**
     * Verify that bulkInsert is allowed on this table.
     * The execution time check to see if bulkInsert is allowed
     * simply consists of checking that this is not a deferred
     * mode insert and that the table is empty if this is not replace.
     *
     * A side effect of calling this method is to get an exclusive
     * table lock on the table.
     *
     * @return Whether or not bulkInsert is allowed on this table.
     *
     * @exception StandardException        Thrown on error
     */
    protected boolean verifyBulkInsert() throws StandardException
    {
        // bulk insert is disabled for deferred mode inserts
        if (constants.deferred)
        {
            /* bulk insert replace should be disallowed for
             * deferred mode inserts.
             */
            if (SanityManager.DEBUG)
            {
                SanityManager.ASSERT(! bulkInsertReplace,
                    "bulkInsertReplace expected to be false for deferred mode inserts");
            }
            return false;
        }

        return getExclusiveTableLock();
    }

    /**
     * Get an exclusive table lock on the target table
     * (and check to see if the table is populated if
     * this is not a bulk insert replace).
     *
     * @return Whether or not bulkInsert is allowed on this table.
     *
     * @exception StandardException        Thrown on error
     */
    private boolean getExclusiveTableLock() throws StandardException
    {
        boolean rowFound = false;

        bulkHeapSC = tc.openCompiledScan(
                        false,
                        TransactionController.OPENMODE_FORUPDATE,
                        TransactionController.MODE_TABLE,
                        TransactionController.ISOLATION_SERIALIZABLE,
                        (FormatableBitSet) null,
                        (DataValueDescriptor[]) null,
                        0,
                        (Qualifier[][]) null,
                        (DataValueDescriptor[]) null,
                        0,
                        constants.heapSCOCI,
                        heapDCOCI);

        /* No need to do next if bulk insert replace,
         * but we do need to get a row location for the
         * case where the replace leaves an empty table.
         */
        if (! bulkInsertReplace)
        {
            rowFound = bulkHeapSC.next();
        }
        else
        {
            rl = bulkHeapSC.newRowLocationTemplate();
        }

        bulkHeapSC.close();
        bulkHeapSC = null;

        return ! rowFound;
    }
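    /* Note on getExclusiveTableLock() above: opening the heap with a compiled
     * scan in MODE_TABLE / OPENMODE_FORUPDATE at ISOLATION_SERIALIZABLE is how
     * this method takes the exclusive table lock.  The single next() call doubles
     * as the emptiness check, so a return value of true means either the table
     * is empty or this is a bulk insert replace (where the scan is skipped).
     */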
    /**
     * Set the estimated row count for this table.
     *
     * @param heapConglom    Conglomerate number for the heap
     *
     * @return Nothing
     *
     * @exception StandardException        Thrown on failure
     */
    private void setEstimatedRowCount(long heapConglom) throws StandardException
    {
        bulkHeapSC = tc.openCompiledScan(
                        false,
                        TransactionController.OPENMODE_FORUPDATE,
                        TransactionController.MODE_TABLE,
                        TransactionController.ISOLATION_SERIALIZABLE,
                        (FormatableBitSet) null,
                        (DataValueDescriptor[]) null,
                        0,
                        (Qualifier[][]) null,
                        (DataValueDescriptor[]) null,
                        0,
                        constants.heapSCOCI,
                        heapDCOCI);

        bulkHeapSC.setEstimatedRowCount(rowCount);

        bulkHeapSC.close();
        bulkHeapSC = null;
    }

    /**
     * Empty the indexes after doing a bulk insert replace
     * where the table has 0 rows after the replace.
     * RESOLVE: This method is ugly!  Prior to 2.0, we simply
     * scanned back across the table to build the indexes.  We
     * changed this in 2.0 to populate the sorters via a call back
     * as we populated the table.  Doing a 0 row replace into a
     * table with indexes is a degenerate case, hence we allow
     * ugly and unoptimized code.
     *
     * @return Nothing.
     *
     * @exception StandardException        Thrown on failure
     */
    private void emptyIndexes(long newHeapConglom,
                              InsertConstantAction constants,
                              TableDescriptor td,
                              DataDictionary dd,
                              ExecRow fullTemplate)
        throws StandardException
    {
        int                 numIndexes = constants.irgs.length;
        ExecIndexRow[]      indexRows = new ExecIndexRow[numIndexes];
        ExecRow             baseRows = null;
        ColumnOrdering[][]  ordering = new ColumnOrdering[numIndexes][];
        int                 numColumns = td.getNumberOfColumns();

        // Create the BitSet for mapping the partial row to the full row
        FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
        // Need to check each index for referenced columns
        int numReferencedColumns = 0;
        for (int index = 0; index < numIndexes; index++)
        {
            int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
            for (int bcp = 0; bcp < baseColumnPositions.length; bcp++)
            {
                if (! bitSet.get(baseColumnPositions[bcp]))
                {
                    bitSet.set(baseColumnPositions[bcp]);
                    numReferencedColumns++;
                }
            }
        }

        // We can finally create the partial base row
        baseRows =
            activation.getExecutionFactory().getValueRow(numReferencedColumns);

        // Fill in each base row with nulls of the correct data type
        int colNumber = 0;
        for (int index = 0; index < numColumns; index++)
        {
            if (bitSet.get(index + 1))
            {
                colNumber++;
                // NOTE: 1-based column numbers
                baseRows.setColumn(
                    colNumber,
                    fullTemplate.getColumn(index + 1).getClone());
            }
        }

        needToDropSort = new boolean[numIndexes];
        sortIds = new long[numIndexes];

        /* Do the initial set up before scanning the heap.
         * For each index, build a single index row and a sorter.
         */
        for (int index = 0; index < numIndexes; index++)
        {
            // create a single index row template for each index
            indexRows[index] = constants.irgs[index].getIndexRowTemplate();

            // Get an index row based on the base row
            // (This call is only necessary here because we need to
            // pass a template to the sorter.)
            constants.irgs[index].getIndexRow(baseRows,
                                              rl,
                                              indexRows[index],
                                              bitSet);

            /* For non-unique indexes, we order by all columns + the RID.
             * For unique indexes, we just order by the columns.
             * We create a unique index observer for unique indexes
             * so that we can catch duplicate keys.
             */
            ConglomerateDescriptor cd;
            // Get the ConglomerateDescriptor for the index
            cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);

            int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
            boolean[] isAscending = constants.irgs[index].isAscending();
            int numColumnOrderings;
            SortObserver sortObserver = null;

            if (cd.getIndexDescriptor().isUnique())
            {
                numColumnOrderings = baseColumnPositions.length;
                String[] columnNames = getColumnNames(baseColumnPositions);

                String indexOrConstraintName = cd.getConglomerateName();
                if (cd.isConstraint())  // so, the index is backing up a constraint
                {
                    ConstraintDescriptor conDesc =
                        dd.getConstraintDescriptor(td, cd.getUUID());
                    indexOrConstraintName = conDesc.getConstraintName();
                }
                sortObserver = new UniqueIndexSortObserver(
                                    false,  // don't clone rows
                                    cd.isConstraint(),
                                    indexOrConstraintName,
                                    indexRows[index],
                                    true,
                                    td.getName());
            }
            else
            {
                numColumnOrderings = baseColumnPositions.length + 1;
                sortObserver = new BasicSortObserver(false, false,
                                                     indexRows[index],
                                                     true);
            }

            ordering[index] = new ColumnOrdering[numColumnOrderings];
            for (int ii = 0; ii < isAscending.length; ii++)
            {
                ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
            }
            if (numColumnOrderings > isAscending.length)
            {
                ordering[index][isAscending.length] =
                    new IndexColumnOrder(isAscending.length);
            }

            // create the sorters
            sortIds[index] = tc.createSort(
                                (Properties) null,
                                indexRows[index].getRowArrayClone(),
                                ordering[index],
                                sortObserver,
                                false,     // not in order
                                rowCount,  // est rows
                                -1         // est row size, -1 means no idea
                                );
            needToDropSort[index] = true;
        }

        // Populate sorters and get the output of each sorter into a row
        // source.  The sorters have the indexed columns only and the columns
        // are in the correct order.
        rowSources = new RowLocationRetRowSource[numIndexes];
        // Fill in the RowSources
        SortController[] sorters = new SortController[numIndexes];
        for (int index = 0; index < num