📄 UpdateResultSet.java
            /* Allocate the temporary rows and get result description
             * if this is the 1st time that we are executing.
             */
            if (firstOpen)
            {
                deferredTempRow = RowUtil.getEmptyValueRow(numberOfBaseColumns+1, lcc);
                oldDeletedRow = RowUtil.getEmptyValueRow(numberOfBaseColumns, lcc);
                triggerResultDescription = (resultDescription != null) ?
                        resultDescription.truncateColumns(numberOfBaseColumns+1) :
                        null;
            }

            Properties properties = new Properties();

            // Get the properties on the heap
            rowChanger.getHeapConglomerateController().getInternalTablePropertySet(properties);

            if (beforeUpdateCopyRequired)
            {
                deletedRowHolder = new TemporaryRowHolderImpl(tc, properties,
                                                              triggerResultDescription);
            }
            insertedRowHolder = new TemporaryRowHolderImpl(tc, properties,
                                                           triggerResultDescription);

            rowChanger.setRowHolder(insertedRowHolder);
        }
    }

    /* Following 2 methods are for checking and make sure we don't have one un-objectified stream
     * to be inserted into 2 temp table rows for deferred update. Otherwise it would cause problem
     * when writing to disk using the stream a second time. In other cases we don't want to
     * unnecessarily objectify the stream. beetle 4896.
     */
    private FormatableBitSet checkStreamCols()
    {
        DataValueDescriptor[] cols = row.getRowArray();
        FormatableBitSet streamCols = null;

        for (int i = 0; i < numberOfBaseColumns; i++)
        {
            if (cols[i+numberOfBaseColumns] instanceof StreamStorable)  //check new values
            {
                if (streamCols == null)
                    streamCols = new FormatableBitSet(numberOfBaseColumns);
                streamCols.set(i);
            }
        }
        return streamCols;
    }

    private void objectifyStream(ExecRow tempRow, FormatableBitSet streamCols) throws StandardException
    {
        DataValueDescriptor[] cols = tempRow.getRowArray();

        for (int i = 0; i < numberOfBaseColumns; i++)
        {
            if (cols[i] != null && streamCols.get(i))
                ((StreamStorable) cols[i]).loadStream();
        }
    }

    public boolean collectAffectedRows() throws StandardException
    {
        boolean rowsFound = false;

        row = getNextRowCore(source);
        if (row != null)
            rowsFound = true;
        else
        {
            activation.addWarning(
                        StandardException.newWarning(
                            SQLState.LANG_NO_ROW_FOUND));
        }

        //beetle 3865, update cursor use index.
        TableScanResultSet tableScan = (TableScanResultSet) activation.getForUpdateIndexScan();
        boolean notifyCursor = ((tableScan != null) && ! tableScan.sourceDrained);
        boolean checkStream = (deferred && rowsFound && ! constants.singleRowSource);
        FormatableBitSet streamCols = (checkStream ? checkStreamCols() : null);
        checkStream = (streamCols != null);

        while ( row != null )
        {
            /* By convention, the last column in the result set for an
             * update contains a SQLRef containing the RowLocation of
             * the row to be updated.
             */

            /*
            ** If we're doing deferred update, write the new row and row
            ** location to the temporary conglomerate.  If we're not doing
            ** deferred update, update the permanent conglomerates now
            ** using the RowChanger.
            */
            if (deferred)
            {
                /*
                ** If we have a before trigger, we must evaluate the
                ** check constraint after we have executed the trigger.
                ** Note that we have compiled checkGM accordingly (to
                ** handle the different row shape if we are evaluating
                ** against the input result set or a temporary row holder
                ** result set).
                */
                if (triggerInfo == null)
                {
                    evaluateCheckConstraints( checkGM, activation );
                }

                /*
                ** We are going to only save off the updated
                ** columns and the RID.  For a trigger, all columns
                ** were marked as needed so we'll copy them all.
                */
                RowUtil.copyRefColumns(deferredTempRow,
                                        row,
                                        numberOfBaseColumns,
                                        numberOfBaseColumns + 1);
                if (checkStream)
                    objectifyStream(deferredTempRow, streamCols);

                insertedRowHolder.insert(deferredTempRow);

                /*
                ** Grab a copy of the row to delete.  We are
                ** going to use this for deferred RI checks.
                */
                if (beforeUpdateCopyRequired)
                {
                    RowUtil.copyRefColumns(oldDeletedRow,
                                            row,
                                            numberOfBaseColumns);
                    deletedRowHolder.insert(oldDeletedRow);
                }

                /*
                ** If we haven't already, lets get a template to
                ** use as a template for our rescan of the base table.
                ** Do this now while we have a real row to use
                ** as a copy.
                **
                ** There is one less column in the base row than
                ** there is in source row, because the base row
                ** doesn't contain the row location.
                */
                if (deferredBaseRow == null)
                {
                    deferredBaseRow = RowUtil.getEmptyValueRow(numberOfBaseColumns, lcc);

                    RowUtil.copyCloneColumns(deferredBaseRow, row, numberOfBaseColumns);

                    /*
                    ** While we're here, let's also create a sparse row for
                    ** fetching from the store.
                    */
                    deferredSparseRow = makeDeferredSparseRow(deferredBaseRow,
                                                              baseRowReadList,
                                                              lcc);
                }
            }
            else
            {
                evaluateCheckConstraints( checkGM, activation );

                /* Get the RowLocation to update
                 * NOTE - Column #s in the Row are 1 based.
                 */
                RowLocation baseRowLocation = (RowLocation)
                    (row.getColumn(resultWidth)).getObject();

                RowUtil.copyRefColumns(newBaseRow,
                                        row,
                                        numberOfBaseColumns,
                                        numberOfBaseColumns);

                if (riChecker != null)
                {
                    /*
                    ** Make sure all foreign keys in the new row
                    ** are maintained.  Note that we don't bother
                    ** checking primary/unique keys that are referenced
                    ** here.  The reason is that if we are updating
                    ** a referenced key, we'll be updating in deferred
                    ** mode, so we wont get here.
                    */
                    riChecker.doFKCheck(newBaseRow);
                }

                rowChanger.updateRow(row, newBaseRow, baseRowLocation);

                //beetle 3865, update cursor use index.
                if (notifyCursor)
                    notifyForUpdateCursor(row.getRowArray(),
                                          newBaseRow.getRowArray(),
                                          baseRowLocation,
                                          tableScan);
            }

            rowCount++;

            // No need to do a next on a single row source
            if (constants.singleRowSource)
            {
                row = null;
            }
            else
            {
                row = getNextRowCore(source);
            }
        }

        return rowsFound;
    }

    /* beetle 3865, updateable cursor use index. If the row we are updating has new value that
     * falls into the direction of the index scan of the cursor, we save this rid into a hash table
     * (for fast search), so that when the cursor hits it again, it knows to skip it. When we get
     * to a point that the hash table is full, we scan forward the cursor until one of two things
     * happen: (1) we hit a record whose rid is in the hash table (we went through it already, so
     * skip it), we remove it from hash table, so that we can continue to use hash table. OR, (2) the scan
     * forward hit the end. If (2) happens, we can de-reference the hash table to make it available
     * for garbage collection. We save the future row id's in a virtual mem heap. In any case,
     * next read will use a row id that we saved.
     */
    private void notifyForUpdateCursor(DataValueDescriptor[] row, DataValueDescriptor[] newBaseRow,
                                       RowLocation rl, TableScanResultSet tableScan)
        throws StandardException
    {
        int[] indexCols = tableScan.indexCols;
        int[] changedCols = constants.changedColumnIds;
        boolean placedForward = false, ascending, decided = false, overlap = false;
        int basePos, k;

        /* first of all, we see if there's overlap between changed column ids and index key
         * columns.  If so, we see if the new update value falls into the future range of the
         * index scan, if so, we need to save it in hash table.
         */
        for (int i = 0; i < indexCols.length; i++)
        {
            basePos = indexCols[i];
            if (basePos > 0)
                ascending = true;
            else
            {
                ascending = false;
                basePos = -basePos;
            }
            for (int j = 0; j < changedCols.length; j++)
            {
                if (basePos == changedCols[j])
                {
                    decided = true;     //we pretty much decided if new row falls in front
                                        //of the cursor or behind

                    /* the row and newBaseRow we get are compact base row that only have
                     * referenced columns.  Our "basePos" is index in sparse heap row, so
                     * we need the BaseRowReadMap to map into the compact row.
                     */
                    int[] map = constants.getBaseRowReadMap();
                    if (map == null)
                        k = basePos - 1;
                    else
                        k = map[basePos - 1];

                    DataValueDescriptor key;
                    /* We need to compare with saved most-forward cursor scan key if we
                     * are reading records from the saved RowLocation temp table (instead
                     * of the old column value) because we only care if new update value
                     * jumps forward the most-forward scan key.
                     */
                    if (tableScan.compareToLastKey)
                        key = tableScan.lastCursorKey.getColumn(i + 1);
                    else
                        key = row[k];

                    /* Starting from the first index key column forward, we see if the direction
                     * of the update change is consistent with the direction of index scan.
                     * If so, we save it in hash table.
                     */
                    if ((ascending && key.greaterThan(newBaseRow[k], key).equals(true)) ||
                        (!ascending && key.lessThan(newBaseRow[k], key).equals(true)))
                        placedForward = true;
                    else if (key.equals(newBaseRow[k], key).equals(true))
                    {
                        decided = false;
                        overlap = true;
                    }
                    break;
                }
            }
            if (decided)    // already decided if new row falls in front or behind
                break;
        }

        /* If index row gets updated but key value didn't actually change, we still
         * put it in hash table because it can either fall in front or behind.  This
         * can happen if the update explicitly sets a value, but same as old.
         */
        if (overlap && !decided)
            placedForward = true;

        if (placedForward)      // add it to hash table
        {
            /* determining initial capacity of hash table from a few factors:
             * (1) user specified MAX_MEMORY_PER_TABLE property, (2) min value 100
             * (3) optimizer estimated row count.  We want to avoid re-hashing if
             * possible, for performance reason, yet don't waste space.  If initial
             * capacity is greater than max size divided by load factor, no rehash
             * is ever needed.
             */
            int maxCapacity = lcc.getOptimizerFactory().getMaxMemoryPerTable() / 16;
            if (maxCapacity < 100)
                maxCapacity = 100;

            if (tableScan.past2FutureTbl == null)
            {
                double rowCount = tableScan.getEstimatedRowCount();
                int initCapacity = 32 * 1024;
                if (rowCount > 0.0)
                {
                    rowCount = rowCount / 0.75 + 1.0;   // load factor
                    if (rowCount < initCapacity)
                        initCapacity = (int) rowCount;
                }
                if (maxCapacity < initCapacity)
                    initCapacity = maxCapacity;

                tableScan.past2FutureTbl = new Hashtable(initCapacity);
            }

            Hashtable past2FutureTbl = tableScan.past2FutureTbl;

            /* If hash table is not full, we add it in.  The key of the hash entry
             * is the string value of the RowLocation.  If the hash table is full,
             * as the comments above this function say, we scan forward.
             *
             * Need to save a clone because when we get cached currentRow, "rl" shares the
             * same reference, so is changed at the same time.
             */
            RowLocation updatedRL = (RowLocation) rl.getClone();

            if (past2FutureTbl.size() < maxCapacity)
                past2FutureTbl.put(updatedRL, updatedRL);
            else
            {
                tableScan.skipFutureRowHolder = true;
                ExecRow rlRow = new ValueRow(1);

                for (;;)
                {
                    ExecRow aRow = tableScan.getNextRowCore();
                    if (aRow == null)
                    {
                        tableScan.sourceDrained = true;
                        tableScan.past2FutureTbl = null;    // de-reference for garbage coll.
                        break;
                    }
                    RowLocation rowLoc = (RowLocation) aRow.getColumn(aRow.nColumns());

                    if (updatedRL.equals(rowLoc))   //this row we are updating jumped forward
                    {
                        saveLastCusorKey(tableScan, aRow);
                        break;  // don't need to worry about adding this row to hash any more
                    }

                    if (tableScan.futureForUpdateRows == null)
                    {
                        // virtual memory heap.  In-memory part size 100.  With the co-operation
                        // of hash table and in-memory part of heap (hash table shrinks while
                        // in-memory heap grows), hopefully we never spill temp table to disk.
                        tableScan.futureForUpdateRows = new TemporaryRowHolderImpl
                            (tc, null, null, 100, false, true);
                    }

                    rlRow.setColumn(1, rowLoc);
                    tableScan.futureForUpdateRows.insert(rlRow);

                    if (past2FutureTbl.size() < maxCapacity)    //we got space in the hash table now, stop!
                    {
                        past2FutureTbl.put(updatedRL, updatedRL);
                        saveLastCusorKey(tableScan, aRow);
                        break;
                    }
                }
                tableScan.skipFutureRowHolder = false;
            }
        }
    }

    private void saveLastCusorKey(TableScanResultSet tableScan, ExecRow aRow) throws StandardException
    {
        /* We save the most-forward cursor scan key where we are stopping, so
         * that next time when we decide if we need to put an updated row id into
         * hash table, we can compare with this key.  This is an optimization on
         * memory usage of the hash table, otherwise it may be "leaking".
         */
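Note on the beetle 4896 comment above: it rests on the fact that a stream-backed column value can only be consumed once, which is why objectifyStream() calls StreamStorable.loadStream() before the same value is written into a second temporary row. A minimal, Derby-free illustration of that underlying constraint (hypothetical class name, plain java.io, not code from this file):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class SingleReadDemo {
    public static void main(String[] args) throws IOException {
        InputStream stream = new ByteArrayInputStream("column value".getBytes());

        // First consumer drains the stream...
        byte[] first = stream.readAllBytes();

        // ...so a second consumer sees nothing. This is why an un-objectified
        // stream must not be inserted into two temp table rows: the second
        // write would read an already-exhausted stream.
        byte[] second = stream.readAllBytes();

        System.out.println(first.length + " bytes, then " + second.length + " bytes");
        // Materializing the value up front (what loadStream() does in the
        // listing above) sidesteps the problem.
    }
}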
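The consumer half of the beetle 3865 protocol, where the index scan checks past2FutureTbl and skips rows it has already returned, lives in TableScanResultSet and is not part of this listing. As a rough mental model only (plain collections, hypothetical names, not Derby's actual implementation), the interaction between the update side and the scan side looks roughly like this:

import java.util.HashSet;
import java.util.Set;

/**
 * Toy model of the past-to-future skip protocol described in the beetle 3865
 * comment above. "rowId" stands in for Derby's RowLocation; the real skip
 * logic is in TableScanResultSet and is not shown in this file.
 */
class ForUpdateScanModel {
    private final Set<String> past2Future = new HashSet<>();

    /** Producer side: an update moved an already-returned row forward in scan order. */
    void rememberMovedRow(String rowId) {
        past2Future.add(rowId);
    }

    /** Consumer side: should the index scan return this row, or skip it? */
    boolean shouldReturn(String rowId) {
        // A hit means the cursor already processed this row before the update
        // moved it ahead; skip it, and removing it frees a slot so the table
        // can stay within its capacity bound.
        return !past2Future.remove(rowId);
    }
}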