📄 CachedPage.java
    /**
     * See comment on class header on meaning of isDirty and preDirty bits.
     * <p>
     **/
    protected void setDirty()
    {
        synchronized (this)
        {
            isDirty  = true;
            preDirty = false;
        }
    }

    /**
     * Exclusive latch on the page is being released.
     * <p>
     * The only work done in CachedPage is to update the row count on the
     * container if it is too far out of sync.
     **/
    protected void releaseExclusive()
    {
        // Look at the dirty bit without latching; updating the row count is
        // just an optimization, so it does not need the latch.
        //
        // Only consider updating the row count if this page actually holds
        // more than 1/8 of the rows of the entire container and the count
        // has changed.
        //
        // No need to special case allocation pages: they have a recordCount
        // of zero, so the if clause is never true for an allocation page.
        if (isDirty && !isOverflowPage() &&
            (containerRowCount / 8) < recordCount())
        {
            int currentRowCount = internalNonDeletedRecordCount();
            int delta           = currentRowCount - initialRowCount;
            int posDelta        = delta > 0 ? delta : (-delta);

            if ((containerRowCount / 8) < posDelta)
            {
                // This page's delta row count represents a significant change
                // with respect to the current container row count, so update
                // the container row count.
                FileContainer myContainer = null;

                try
                {
                    myContainer = (FileContainer)
                        containerCache.find(identity.getContainerId());

                    if (myContainer != null)
                    {
                        myContainer.updateEstimatedRowCount(delta);

                        setContainerRowCount(
                            myContainer.getEstimatedRowCount(0));

                        initialRowCount = currentRowCount;

                        // Since we already have the container, might as well
                        // update the unfilled information.
                        myContainer.trackUnfilledPage(
                            identity.getPageNumber(), unfilled());
                    }
                }
                catch (StandardException se)
                {
                    // Do nothing; this update is just an optimization, so
                    // there is no need to throw an error.
                }
                finally
                {
                    if (myContainer != null)
                        containerCache.release(myContainer);
                }
            }
        }

        super.releaseExclusive();
    }
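    // Worked example of the thresholds above (numbers are hypothetical):
    // with containerRowCount = 8000 the threshold is 8000 / 8 = 1000, so the
    // estimate is only touched if this page holds more than 1000 records and
    // its non-deleted row count has drifted by more than 1000 rows since the
    // page came into the cache; e.g. going from initialRowCount = 200 to
    // currentRowCount = 1450 gives posDelta = 1250 and triggers
    // updateEstimatedRowCount(1250).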
    /**
     * Write the page to disk.
     * <p>
     * MP - In a simple world we would just not allow clean() until it held
     * the latch on the page.  But in order to fit into the cache system, we
     * don't have enough state around to make clean() latch the page while
     * doing the I/O - yet we still need some way to ensure that no changes
     * happen to the page while the I/O is taking place.  Also, someday it
     * would be nice to allow reads of this page while the I/O is taking
     * place.
     *
     * @exception StandardException Error writing the page.
     *
     * @see Cacheable#clean
     **/
    public void clean(boolean remove) throws StandardException
    {
        // must wait for the page to be unlatched
        synchronized (this)
        {
            if (!isDirty())
                return;

            // is someone else cleaning it?
            while (inClean)
            {
                try
                {
                    wait();
                }
                catch (InterruptedException ie)
                {
                    throw StandardException.interrupt(ie);
                }
            }

            // page is not "inClean" by another thread at this point
            if (!isDirty())
                return;

            inClean = true;

            // If the page is in LATCHED state (as opposed to UNLATCHED or
            // PRELATCH), wait for the page to move to UNLATCHED state.  See
            // comments in Generic/BasePage.java describing the interaction
            // of inClean, (owner != null), and preLatch.
            while ((owner != null) && !preLatch)
            {
                try
                {
                    wait();
                }
                catch (InterruptedException ie)
                {
                    inClean = false;
                    throw StandardException.interrupt(ie);
                }
            }

            // The page is now effectively latched by the cleaner.

            // We only want to clean the page if it is actually dirtied,
            // not when it is just pre-dirtied.
            if (!isActuallyDirty())
            {
                // the thread that latched it gave up the latch without
                // really dirtying the page
                preDirty = false;
                inClean  = false;
                notifyAll();
                return;
            }
        }

        try
        {
            writePage(getPageId(), false);
        }
        catch (StandardException se)
        {
            // If we get an error while trying to write a page, the current
            // recovery system requires that the entire DB be shut down.
            // When the system is rebooted we will run redo recovery, which,
            // if it does not encounter disk errors, is guaranteed to recover
            // to a transaction-consistent state.  If this write failed
            // because of a persistent device problem, redo recovery will
            // likely fail attempting the same I/O.  markCorrupt will stop
            // all further writes of data and log by the system.
            throw dataFactory.markCorrupt(se);
        }
        finally
        {
            // If something went wrong writing out the page, do not leave it
            // in the inClean state or it will block the next cleaner forever.
            synchronized (this)
            {
                inClean = false;
                notifyAll();
            }
        }
    }

    public void clearIdentity()
    {
        alreadyReadPage = false;
        super.clearIdentity();
    }

    /**
     * Read the page from disk into this CachedPage object.
     * <p>
     * A page is read in from disk into the pageData array of this object,
     * and then put in the cache.
     * <p>
     *
     * @param myContainer the container to read the page from.
     * @param newIdentity identity (i.e. page number) of the page to read.
     *
     * @exception StandardException Standard exception policy.
     **/
    private void readPage(
    FileContainer   myContainer,
    PageKey         newIdentity)
        throws StandardException
    {
        int pagesize = myContainer.getPageSize();

        // We will reuse the existing page array if it is the same size; the
        // cache does support caching various sized pages.
        setPageArray(pagesize);

        for (int io_retry_count = 0;;)
        {
            try
            {
                myContainer.readPage(newIdentity.getPageNumber(), pageData);
                break;
            }
            catch (IOException ioe)
            {
                io_retry_count++;

                // Retrying read I/O's has been found to sometimes complete
                // the read without having to fail the calling query, and in
                // some cases avoids a complete db shutdown.  Some situations
                // are:
                //     spurious interrupts being sent to the thread by clients.
                //     unreliable hardware like a network mounted file system.
                //
                // The only option other than retrying is to fail the I/O
                // immediately and throw an error, so the performance cost of
                // retrying is not really a consideration.
                //
                // The retry max of 4 is arbitrary, but has been enough that
                // not many read I/O errors have been reported.
                if (io_retry_count > 4)
                {
                    // page cannot be physically read
                    StandardException se =
                        StandardException.newException(
                            SQLState.FILE_READ_PAGE_EXCEPTION,
                            ioe, newIdentity, new Integer(pagesize));

                    if (dataFactory.getLogFactory().inRFR())
                    {
                        // If in rollforward recovery, it is possible that
                        // this page does not actually exist on disk yet
                        // because the log record we are processing now is
                        // the one creating the page; we will recreate the
                        // page during rollforward recovery, so just throw
                        // the exception.
                        throw se;
                    }
                    else
                    {
                        if (SanityManager.DEBUG)
                        {
                            // By shutting down the system in debug mode,
                            // maybe we can catch the root cause of the
                            // interrupt.
                            throw dataFactory.markCorrupt(se);
                        }
                        else
                        {
                            // No need to shut down the runtime database on a
                            // read error in a delivered system; throwing the
                            // exception should be enough.  The thrown
                            // exception has a nested IOException which is
                            // the root cause of the error.
                            throw se;
                        }
                    }
                }
            }
        }
    }
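    // Example of the retry policy above (the failure scenario is
    // hypothetical): if the first read fails because the thread received a
    // spurious interrupt, io_retry_count becomes 1 and the read is simply
    // attempted again; only after five consecutive failures
    // (io_retry_count > 4) is FILE_READ_PAGE_EXCEPTION raised, and outside
    // rollforward recovery a SanityManager.DEBUG build additionally marks
    // the database corrupt.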
    /**
     * Write the page from this CachedPage object to disk.
     * <p>
     *
     * @param identity identity (i.e. page number) of the page to write.
     * @param syncMe   does the write of this single page have to be sync'd?
     *
     * @exception StandardException Standard exception policy.
     **/
    private void writePage(
    PageKey identity,
    boolean syncMe)
        throws StandardException
    {
        // make subclass write the page format
        writeFormatId(identity);

        // let subclass have a chance to write any cached data to the page
        // data array
        writePage(identity);

        // Force WAL - and check to see if the database is corrupt or frozen.
        // The last log instant may be null if the page is being forced to
        // disk on a createPage (which actually violates the WAL protocol).
        // See FileContainer.newPage.
        LogInstant flushLogTo = getLastLogInstant();
        dataFactory.flush(flushLogTo);

        if (flushLogTo != null)
        {
            clearLastLogInstant();
        }

        // find the container and file access object
        FileContainer myContainer =
            (FileContainer) containerCache.find(identity.getContainerId());

        if (myContainer != null)
        {
            try
            {
                myContainer.writePage(
                    identity.getPageNumber(), pageData, syncMe);

                //
                // Do some in-memory, unlogged bookkeeping tasks while we
                // have the container.
                //

                if (!isOverflowPage() && isDirty())
                {
                    // let the container know whether this page is an
                    // unfilled, non-overflow page
                    myContainer.trackUnfilledPage(
                        identity.getPageNumber(), unfilled());

                    // Since this is not an overflow page, see if the page's
                    // row count has changed since it came into the cache.
                    //
                    // If the page is not valid, the row count is 0;
                    // otherwise, count the non-deleted records on the page.
                    //
                    // Cannot call nonDeletedRecordCount because the page is
                    // unlatched now, even though nobody is changing it.
                    int currentRowCount = internalNonDeletedRecordCount();

                    if (currentRowCount != initialRowCount)
                    {
                        myContainer.updateEstimatedRowCount(
                            currentRowCount - initialRowCount);

                        setContainerRowCount(
                            myContainer.getEstimatedRowCount(0));

                        initialRowCount = currentRowCount;
                    }
                }
            }
            catch (IOException ioe)
            {
                // page cannot be written
                throw StandardException.newException(
                    SQLState.FILE_WRITE_PAGE_EXCEPTION,
                    ioe, identity, new Integer(myContainer.getPageSize()));
            }
            finally
            {
                containerCache.release(myContainer);
                myContainer = null;
            }
        }
        else
        {
            StandardException nested =
                StandardException.newException(
                    SQLState.DATA_CONTAINER_VANISHED,
                    identity.getContainerId());

            // myContainer is null on this branch, so report the size of our
            // own page buffer rather than calling myContainer.getPageSize(),
            // which would throw a NullPointerException before the intended
            // exception could be raised.
            throw dataFactory.markCorrupt(
                StandardException.newException(
                    SQLState.FILE_WRITE_PAGE_EXCEPTION, nested,
                    identity, new Integer(pageData.length)));
        }

        synchronized (this)
        {
            // change the page state to not dirty after the successful write
            isDirty  = false;
            preDirty = false;
        }
    }
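    // The log flush / page write ordering in writePage() above is the
    // write-ahead-log invariant: the log is forced up to the instant of the
    // last change to this page before the page image itself is written.
    // For example (with a hypothetical instant X), if the page was last
    // changed by the log record at instant X, flush(X) must complete before
    // myContainer.writePage(...) is called; flushLogTo may be null (e.g.
    // when a page is forced during createPage), as noted in the comment
    // there.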
    public void setContainerRowCount(long rowCount)
    {
        containerRowCount = rowCount;
    }

    /*
    ** If the page size is different from the page buffer, then make a
    ** new page buffer and make the subclass use the new page buffer.
    */
    protected void setPageArray(int pageSize)
        throws StandardException
    {
        if ((pageData == null) || (pageData.length != pageSize))
        {
            pageData = new byte[pageSize];

            if (pageData == null || pageData.length != pageSize)
            {
                throw StandardException.newException(
                    SQLState.DATA_OBJECT_ALLOCATION_FAILED, "PAGE");
            }

            usePageBuffer(pageData);
        }
    }

    /* methods for subclasses of CachedPage */

    // Use a new pageData buffer and initialize the in-memory structures that
    // depend on the pageData's size.  The actual disk data may not have been
    // read in yet, so don't look at the content of the buffer.
    protected abstract void usePageBuffer(byte[] buffer);

    // initialize the in-memory structures using the buffer read into pageData
    protected abstract void initFromData(FileContainer container, PageKey id)
        throws StandardException;

    // create the page
    protected abstract void createPage(PageKey id, int[] args)
        throws StandardException;

    // the page is about to be written; write everything to the pageData array
    protected abstract void writePage(PageKey id)
        throws StandardException;

    // write out the formatId to the pageData
    protected abstract void writeFormatId(PageKey identity)
        throws StandardException;
}
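clean() above relies on a wait/notify handshake between the cleaner and latch holders, carried by the inClean, owner, and preLatch fields. The following standalone sketch is not part of CachedPage.java and uses hypothetical class and field names; it only shows the shape of that handshake in isolation: the cleaner refuses to write while the page is latched, and both sides wake waiters with notifyAll().

// Minimal standalone sketch (not Derby code) of the cleaner/latch handshake.
class PageCleanHandshakeSketch
{
    private boolean dirty   = true;   // page has unwritten changes
    private boolean inClean = false;  // a cleaner is currently writing the page
    private boolean latched = false;  // stand-in for (owner != null) in CachedPage

    // Called by the cache manager to write the page out, mirroring clean().
    public synchronized void clean() throws InterruptedException
    {
        while (inClean)               // another cleaner is already at work
            wait();

        if (!dirty)
            return;

        inClean = true;
        try
        {
            while (latched)           // wait for the exclusive latch holder
                wait();

            // The page is now effectively latched by the cleaner; the real
            // code would call writePage(...) here.
            dirty = false;
        }
        finally
        {
            inClean = false;          // never leave the flag set on error
            notifyAll();              // wake latch waiters and other cleaners
        }
    }

    // Called when a thread releases its exclusive latch.
    public synchronized void releaseLatch()
    {
        latched = false;
        notifyAll();                  // let a waiting cleaner proceed
    }

    // Called by a thread that wants to modify the page.
    public synchronized void acquireLatch() throws InterruptedException
    {
        while (latched || inClean)
            wait();

        latched = true;
        dirty   = true;               // assume the latch holder dirties the page
    }
}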