📄 InsertResultSet.java
字号:
throws StandardException { FKInfo fkInfo; /* ** If there are no foreign keys, then nothing to worry ** about. ** With bulk insert replace, we still need to verify ** all non-self referencing foreign keys when ** there are no rows inserted into the table. */ if ((indexRows == null && !bulkInsertReplace) || fkInfoArray == null) { return; } for (int i = 0; i < fkInfoArray.length; i++) { fkInfo = fkInfoArray[i]; /* With regular bulk insert, we only need to check the * foreign keys in the table we inserted into. We need * to get the new conglomerate #s for the foreign keys. * * With bulk insert replace, we need to check both the * foreign keys in the table as well as any foreign keys * on other tables referencing the table we inserted into. * If the foreign key is self-referencing then we need to * get the new conglomerate #, otherwise the conglomerate * # is the same as the compile time conglomerate #. * If the foreign key is self-referencing then we need to * get the new conglomerate # for the primary key as it * has changed. However, if the foreign key is not self-referencing * then we only need to get the new conglomerate # for * the primary key if the primary key is on the table being * inserted into. */ if (bulkInsertReplace) { for (int index = 0; index < fkInfo.fkConglomNumbers.length; index++) { /* No need to check foreign key if it is self referencing * and there were no rows inserted on the replace, as both * indexes will be empty. */ if (fkInfo.fkIsSelfReferencing[index] && indexRows == null) { continue; } long pkConglom; long fkConglom; if (fkInfo.fkIsSelfReferencing[index]) { /* Self-referencing foreign key. Both conglomerate * #s have changed. */ pkConglom = ((Long)indexConversionTable.get( new Long(fkInfo.refConglomNumber))).longValue(); fkConglom = ((Long)indexConversionTable.get( new Long(fkInfo.fkConglomNumbers[index]))).longValue(); } else { /* Non-self referencing foreign key. 
At this point we * don't know if the primary key or the foreign key is * on this table. So, for each one, we look to see * if the old conglomerate # is in the conversion table. * If so, then we get the new conglomerate #, otherwise * we use the compile time conglomerate #. This * is very simple, though not very elegant. */ Long pkConglomLong = (Long)indexConversionTable.get( new Long(fkInfo.refConglomNumber)); Long fkConglomLong = (Long)indexConversionTable.get( new Long(fkInfo.fkConglomNumbers[index])); if (pkConglomLong == null) { pkConglom = fkInfo.refConglomNumber; } else { pkConglom = pkConglomLong.longValue(); } if (fkConglomLong == null) { fkConglom = fkInfo.fkConglomNumbers[index]; } else { fkConglom = fkConglomLong.longValue(); } } bulkValidateForeignKeysCore( tc, cm, fkInfoArray[i], fkConglom, pkConglom, fkInfo.fkConstraintNames[index]); } } else { /* ** We have a FKInfo for each foreign key we are ** checking. Note that there are no primary key ** checks on insert, so we can always reference ** element[0] in the current FKInfo structure. */ if (SanityManager.DEBUG) { SanityManager.ASSERT(fkInfo.type == FKInfo.FOREIGN_KEY, "error, expected to only check foreign keys on insert"); } Long fkConglom = (Long)indexConversionTable.get( new Long(fkInfo.fkConglomNumbers[0])); bulkValidateForeignKeysCore( tc, cm, fkInfoArray[i], fkConglom.longValue(), fkInfo.refConglomNumber, fkInfo.fkConstraintNames[0]); } } } private void bulkValidateForeignKeysCore( TransactionController tc, ContextManager cm, FKInfo fkInfo, long fkConglom, long pkConglom, String fkConstraintName) throws StandardException { ExecRow template; GroupFetchScanController refScan = null; GroupFetchScanController fkScan = null; try { template = makeIndexTemplate(fkInfo, fullTemplate, cm); /* ** The indexes have been dropped and recreated, so ** we need to get the new index conglomerate number. 
*/ fkScan = tc.openGroupFetchScan( fkConglom, false, // hold 0, // read only tc.MODE_TABLE, // doesn't matter, // already locked tc.ISOLATION_READ_COMMITTED, // doesn't matter, // already locked (FormatableBitSet)null, // retrieve all fields (DataValueDescriptor[])null, // startKeyValue ScanController.GE, // startSearchOp null, // qualifier (DataValueDescriptor[])null, // stopKeyValue ScanController.GT // stopSearchOp ); if (SanityManager.DEBUG) { /* ** Bulk insert replace calls this method regardless ** of whether or not any rows were inserted because ** it has to check any referencing foreign keys ** after the replace. Otherwise, we ** make sure that we actually have a row in the fk. ** If not, we have an error because we thought that ** since indexRows != null, we must have gotten some ** rows. */ if (! bulkInsertReplace) { SanityManager.ASSERT(fkScan.next(), "No rows in fk index, even though indexRows != null"); /* ** Crank up the scan again. */ fkScan.reopenScan( (DataValueDescriptor[])null, // startKeyValue ScanController.GE, // startSearchOp null, // qualifier (DataValueDescriptor[])null, // stopKeyValue ScanController.GT // stopSearchOp ); } } /* ** Open the referenced key scan. Use row locking on ** the referenced table unless it is self-referencing ** (in which case we don't need locks) */ refScan = tc.openGroupFetchScan( pkConglom, false, // hold 0, // read only (fkConglom == pkConglom) ? tc.MODE_TABLE : tc.MODE_RECORD, tc.ISOLATION_READ_COMMITTED, // read committed is // good enough (FormatableBitSet)null, // retrieve all fields (DataValueDescriptor[])null, // startKeyValue ScanController.GE, // startSearchOp null, // qualifier (DataValueDescriptor[])null, // stopKeyValue ScanController.GT // stopSearchOp ); /* ** Give the scans to the bulk checker to do its ** magic. It will do a merge on the two indexes. 
*/ ExecRow firstFailedRow = template.getClone(); RIBulkChecker riChecker = new RIBulkChecker(refScan, fkScan, template, true, // fail on 1st failure (ConglomerateController)null, firstFailedRow); int numFailures = riChecker.doCheck(); if (numFailures > 0) { StandardException se = StandardException.newException(SQLState.LANG_FK_VIOLATION, fkConstraintName, fkInfo.tableName, StatementUtil.typeName(fkInfo.stmtType), RowUtil.toString(firstFailedRow, 0, fkInfo.colArray.length - 1)); throw se; } } finally { if (fkScan != null) { fkScan.close(); fkScan = null; } if (refScan != null) { refScan.close(); refScan = null; } } } /** * Make a template row with the correct columns. */ private ExecRow makeIndexTemplate(FKInfo fkInfo, ExecRow fullTemplate, ContextManager cm) throws StandardException { ExecRow newRow = RowUtil.getEmptyIndexRow(fkInfo.colArray.length+1, cm); DataValueDescriptor[] templateColArray = fullTemplate.getRowArray(); DataValueDescriptor[] newRowColArray = newRow.getRowArray(); int i; for (i = 0; i < fkInfo.colArray.length; i++) { newRowColArray[i] = (templateColArray[fkInfo.colArray[i] - 1]).getClone(); } newRowColArray[i] = (DataValueDescriptor) fkInfo.rowLocation.cloneObject(); return newRow; } /** * Set up to update all of the indexes on a table when doing a bulk insert * on an empty table. * * @exception StandardException thrown on error */ private void setUpAllSorts(ExecRow sourceRow, RowLocation rl) throws StandardException { int numIndexes = constants.irgs.length; int numColumns = td.getNumberOfColumns(); ordering = new ColumnOrdering[numIndexes][]; needToDropSort = new boolean[numIndexes]; sortIds = new long[numIndexes]; rowSources = new RowLocationRetRowSource[numIndexes]; // indexedCols is 1-based indexedCols = new FormatableBitSet(numColumns + 1); /* For each index, build a single index row and a sorter. 
*/ for (int index = 0; index < numIndexes; index++) { // Update the bit map of indexed columns int[] keyColumns = constants.irgs[index].baseColumnPositions(); for (int i2 = 0; i2 < keyColumns.length; i2++) { // indexedCols is 1-based indexedCols.set(keyColumns[i2]); } // create a single index row template for each index indexRows[index] = constants.irgs[index].getIndexRowTemplate(); // Get an index row based on the base row // (This call is only necessary here because we need to pass a template to the sorter.) constants.irgs[index].getIndexRow(sourceRow, rl, indexRows[index], (FormatableBitSet) null); /* For non-unique indexes, we order by all columns + the RID. * For unique indexes, we just order by the columns. * We create a unique index observer for unique indexes * so that we can catch duplicate key */ ConglomerateDescriptor cd; // Get the ConglomerateDescriptor for the index cd = td.getConglomerateDescriptor(constants.indexCIDS[index]); int[] baseColumnPositions = constants.irgs[index].baseColumnPositions(); boolean[] isAscending = constants.irgs[index].isAscending(); int numColumnOrderings; SortObserver sortObserver = null; /* We can only reuse the wrappers when doing an * external sort if there is only 1 index. Otherwise, * we could get in a situation where 1 sort reuses a * wrapper that is still in use in another sort. 
*/ boolean reuseWrappers = (numIndexes == 1); if (cd.getIndexDescriptor().isUnique()) { numColumnOrderings = baseColumnPositions.length; String[] columnNames = getColumnNames(baseColumnPositions); String indexOrConstraintName = cd.getConglomerateName(); if (cd.isConstraint()) // so, the index is backing up a constraint { ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID()); indexOrConstraintName = conDesc.getConstraintName(); } sortObserver = new UniqueIndexSortObserver( false, // don't clone rows cd.isConstraint(), indexOrConstraintName, indexRows[index], reuseWrappers, td.getName()); } else { numColumnOrderings = baseColumnPositions.length + 1; sortObserver = new BasicSortObserver(false, false, indexRows[index], reuseWrappers); } ordering[index] = new ColumnOrdering[numColumnOrderings]; for (int ii =0; ii < isAscending.length; ii++) { ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]); } if (numColumnOrderings > isAscending.length) ordering[index][isAscending.length] = new IndexColumnOrder(isAscending.length); // create the sorters sortIds[index] = tc.createSort( (Properties)null, indexRows[index].getRowArrayClone(), ordering[index], sortObserver, false, // not in order (int) sourceResultSet.getEstimatedRowCount(), // est rows -1 // est row size, -1 means no idea ); needToDropSort[index] = true; } sorters = new SortController[numIndexes]; // Open the sorts for (int index = 0; index < numIndexes; index++) { sorters[index] = tc.openSort(sortIds[index]); needToDropSort[index] = true; } } /** * Update all of the indexes on a table when doing a bulk insert * on an empty table. 
* * @exception StandardException thrown on error */ private void updateAllIndexes(long newHeapConglom, InsertConstantAction constants, TableDescriptor td, DataDictionary dd, ExecRow fullTemplate) throws StandardException { int numIndexes = constants.irgs.length; /* ** If we didn't actually read in any rows, then ** we don't need to do anything, unless we were ** doing a replace. */ if (indexRows == null) { if (bulkInsertReplace) { emptyIndexes(newHeapConglom, constants, td, dd, fullTemplate); } return; } dd.dropStatisticsDescriptors(td.getUUID(), null, tc); long[] newIndexCongloms = new long[numIndexes]; indexConversionTable = new Hashtable(numIndexes); // Populate each index for (int index = 0; index < numIndexes; index++) { ConglomerateController indexCC; Properties properties = new Properties(); ConglomerateDescriptor cd; // Get the ConglomerateDescriptor for the index cd = td.getConglomerateDescriptor(constants.indexCIDS[index]); // Build the properties list for the new conglomerate indexCC = tc.openCompiledConglomerate( false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE, constants.indexSCOCIs[index],
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -