LogToFile.java
/** If not null then something is corrupt in the raw store and this represents the original error. */
protected volatile StandardException corrupt;

/** If frozen, don't allow anything on disk to change. */
private boolean isFrozen;

/** Product Version information. Invariant after boot. */
ProductVersionHolder jbmsVersion;

/**
 * On disk database version information. When running in soft upgrade
 * this version may be different to jbmsVersion.
 */
private int onDiskMajorVersion;
private int onDiskMinorVersion;
private boolean onDiskBeta;

private CRC32 checksum = new CRC32(); // holder for the checksum

/**
 * Note: Why does the logging system support both file sync and write sync?
 * The reason is that there was no support for write sync until jdk1.4, and
 * jdk1.4.1 had a write sync JVM bug; only from jdk1.4.2 can the write sync
 * ("rws" mode) mechanism be used correctly.
 * The default in JVMs >= jdk1.4.2 is write sync (see the boot method for the JVM checks).
 *
 * Write sync support was added for performance reasons. On commit, the
 * logging system has to make sure the log for the committed transaction is
 * on disk. Without write sync, the log is written to the file and then
 * fsync() is used on commit to make sure it reaches the disk. On most
 * operating systems, fsync() calls are expensive; on heavily
 * commit-oriented systems, file sync makes the system run slow.
 * This problem is solved by using write sync on a preallocated log file:
 * write sync is much faster than doing a write followed by a file sync.
 * The file should be preallocated for write sync to perform better than
 * the file sync method. Whenever a new log file is created,
 * logSwitchInterval bytes are preallocated by writing zeros to the file
 * after the header.
 */

/*
 * If set to true, write sync is used to write the log; otherwise
 * file-level sync is used.
 */
private boolean isWriteSynced = false;

/** MT - not needed for constructor */
public LogToFile()
{
    keepAllLogs = PropertyUtil.getSystemBoolean(RawStoreFactory.KEEP_TRANSACTION_LOG);

    if (Performance.MEASURE)
        mon_LogSyncStatistics = PropertyUtil.getSystemBoolean(LOG_SYNC_STATISTICS);
}

/*
** Methods of Corruptable
*/

/**
 * Once the log factory is marked as corrupt, the raw store is shut down
 * as much as possible without further damaging it.
 */
public StandardException markCorrupt(StandardException originalError)
{
    boolean firsttime = false;

    synchronized (this)
    {
        if (corrupt == null && originalError != null)
        {
            corrupt = originalError;
            firsttime = true;
        }
    }

    // only print the first error
    if (corrupt == originalError)
        logErrMsg(corrupt);

    // this is the first time someone detects the error, shut down the
    // system as much as possible without further damaging it
    if (firsttime)
    {
        synchronized (this)
        {
            stopped = true;

            if (logOut != null)
            {
                try
                {
                    logOut.corrupt(); // get rid of open file descriptor
                }
                catch (IOException ioe)
                {
                    // don't worry about it, just trying to clean up
                }
            }

            // NullPointerException is preferred over corrupting the database
            logOut = null;
        }

        if (dataFactory != null)
            dataFactory.markCorrupt(null);
    }

    return originalError;
}

private void checkCorrupt() throws StandardException
{
    synchronized (this)
    {
        if (corrupt != null)
        {
            throw StandardException.newException(SQLState.LOG_STORE_CORRUPT, corrupt);
        }
    }
}

/*
** Methods of LogFactory
*/

/** MT - not needed */
public Logger getLogger()
{
    if (ReadOnlyDB)
        return null;
    else
        return new FileLogger(this);
}
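/*
 * Illustrative sketch only, not part of the original LogToFile source: it
 * shows the two durability strategies discussed in the isWriteSynced
 * comment above.  Opening the file in "rws" mode makes each write
 * synchronous (write sync), while the "rw" + getFD().sync() path is the
 * classic write-then-file-sync approach that pays an extra fsync() per
 * commit.  The method name and parameters here are hypothetical.
 */
private static void appendLogRecordSketch(
    java.io.File logFile, byte[] logRecord, boolean useWriteSync)
    throws java.io.IOException
{
    String mode = useWriteSync ? "rws" : "rw";

    java.io.RandomAccessFile raf = new java.io.RandomAccessFile(logFile, mode);
    try
    {
        raf.seek(raf.length());
        raf.write(logRecord);

        if (!useWriteSync)
        {
            // file sync path: the data may still sit in the OS cache, so an
            // explicit sync is needed before the commit can be acknowledged
            raf.getFD().sync();
        }
        // write sync path: nothing more to do - "rws" mode guarantees the
        // record reached the device before write() returned
    }
    finally
    {
        raf.close();
    }
}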
/**
    Recover the rawStore to a consistent state using the log.

    <P>
    In this implementation, the log is a stream of log records stored in
    one or more flat files. Recovery is done in 2 passes: redo and undo.
    <BR> <B>Redo pass</B>
    <BR> In the redo pass, reconstruct the state of the rawstore by
    repeating exactly what happened before as recorded in the log.
    <BR><B>Undo pass</B>
    <BR> In the undo pass, all incomplete transactions are rolled back in
    the order from the most recently started to the oldest.

    <P>MT - synchronization provided by caller - RawStore boot.
    This method is guaranteed to be the only method being called and can
    assume single thread access on all fields.

    @see Loggable#needsRedo
    @see FileLogger#redo

    @exception StandardException Standard Cloudscape error policy
*/
public void recover(
    RawStoreFactory rsf,
    DataFactory df,
    TransactionFactory tf)
    throws StandardException
{
    if (SanityManager.DEBUG)
    {
        SanityManager.ASSERT(rsf != null, "raw store factory == null");
        SanityManager.ASSERT(df != null, "data factory == null");
    }

    checkCorrupt();

    rawStoreFactory = rsf;
    dataFactory = df;

    // initialize the log writer only after the rawstorefactory is available;
    // the log writer requires encryption block size info from the rawstore
    // factory to encrypt checksum log records.
    if (firstLog != null)
        logOut = new LogAccessFile(this, firstLog, logBufferSize);

    // we don't want to set ReadOnlyDB before recovery has a chance to look
    // at the latest checkpoint and determine that the database was shut down
    // cleanly. If the medium is read only but there are logs that need
    // to be redone or in-flight transactions, we are hosed. The logs that
    // are redone will leave dirty pages in the cache.

    if (recoveryNeeded)
    {
        try
        {
            /////////////////////////////////////////////////////////////
            //
            // During boot time, the log control file is accessed and
            // logFileNumber is determined. LogOut is not set up.
            // LogFileNumber is the log file the latest checkpoint lives in,
            // or 1. It may not be the latest log file (the system may have
            // crashed between the time a new log was generated and the
            // checkpoint log written); that can only be determined at the
            // end of recovery redo.
            //
            /////////////////////////////////////////////////////////////

            FileLogger logger = (FileLogger)getLogger();

            /////////////////////////////////////////////////////////////
            //
            // try to find the most recent checkpoint
            //
            /////////////////////////////////////////////////////////////
            if (checkpointInstant != LogCounter.INVALID_LOG_INSTANT)
            {
                currentCheckpoint = findCheckpoint(checkpointInstant, logger);
            }

            // if we are only interested in dumping the log, start from the
            // beginning of the first log file
            if (SanityManager.DEBUG)
            {
                if (SanityManager.DEBUG_ON(DUMP_LOG_ONLY))
                {
                    currentCheckpoint = null;

                    System.out.println("Dump log only");

                    // unless otherwise specified, 1st log file starts at 1
                    String beginLogFileNumber = PropertyUtil.getSystemProperty(DUMP_LOG_FROM_LOG_FILE);
                    if (beginLogFileNumber != null)
                    {
                        logFileNumber = Long.valueOf(beginLogFileNumber).longValue();
                    }
                    else
                    {
                        logFileNumber = 1;
                    }
                }
            }

            if (SanityManager.DEBUG)
            {
                if (SanityManager.DEBUG_ON("setCheckpoint"))
                {
                    currentCheckpoint = null;

                    System.out.println("Set Checkpoint.");

                    // unless otherwise specified, 1st log file starts at 1
                    String checkpointStartLogStr = PropertyUtil.getSystemProperty("derby.storage.checkpointStartLog");
                    String checkpointStartOffsetStr = PropertyUtil.getSystemProperty("derby.storage.checkpointStartOffset");

                    if ((checkpointStartLogStr != null) && (checkpointStartOffsetStr != null))
                    {
                        checkpointInstant = LogCounter.makeLogInstantAsLong(
                            Long.valueOf(checkpointStartLogStr).longValue(),
                            Long.valueOf(checkpointStartOffsetStr).longValue());
                    }
                    else
                    {
                        SanityManager.THROWASSERT(
                            "must set derby.storage.checkpointStartLog and derby.storage.checkpointStartOffset, if setting setCheckpoint.");
                    }

                    currentCheckpoint = findCheckpoint(checkpointInstant, logger);
                }
            }

            long redoLWM = LogCounter.INVALID_LOG_INSTANT;
            long undoLWM = LogCounter.INVALID_LOG_INSTANT;
            long ttabInstant = LogCounter.INVALID_LOG_INSTANT;

            StreamLogScan redoScan = null;

            if (currentCheckpoint != null)
            {
                Formatable transactionTable = null;

                // RESOLVE: sku
                // currentCheckpoint.getTransactionTable();

                // need to set the transaction table before the undo
                tf.useTransactionTable(transactionTable);

                redoLWM = currentCheckpoint.redoLWM();
                undoLWM = currentCheckpoint.undoLWM();

                if (transactionTable != null)
                    ttabInstant = checkpointInstant;

                if (SanityManager.DEBUG)
                {
                    if (SanityManager.DEBUG_ON(DBG_FLAG))
                    {
                        SanityManager.DEBUG(DBG_FLAG,
                            "Found checkpoint at " +
                            LogCounter.toDebugString(checkpointInstant) +
                            " " + currentCheckpoint.toString());
                    }
                }

                firstLogFileNumber = LogCounter.getLogFileNumber(redoLWM);

                // figure out where the first interesting log file is.
                if (LogCounter.getLogFileNumber(undoLWM) < firstLogFileNumber)
                {
                    firstLogFileNumber = LogCounter.getLogFileNumber(undoLWM);
                }

                // if the checkpoint record doesn't have a transaction
                // table, we need to rebuild it by scanning the log from
                // the undoLWM.  If it does have a transaction table, we
                // only need to scan the log from the redoLWM.
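                // redoLWM is the "redo low water mark": the earliest instant
                // from which the redo pass must replay the log. undoLWM marks
                // the first log record of the oldest transaction still active
                // when the checkpoint was taken, so a scan starting at undoLWM
                // also sees every record needed to rebuild the transaction
                // table when the checkpoint record does not carry one.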
                redoScan = (StreamLogScan)
                    openForwardsScan(undoLWM, (LogInstant)null);
            }
            else
            {
                // no checkpoint
                tf.useTransactionTable((Formatable)null);

                long start = LogCounter.makeLogInstantAsLong(
                    logFileNumber, LOG_FILE_HEADER_SIZE);

                // no checkpoint, start redo from the beginning of the
                // file - assume this is the first log file
                firstLogFileNumber = logFileNumber;

                redoScan = (StreamLogScan)
                    openForwardsScan(start, (LogInstant)null);
            }

            // open a transaction that is used for redo and rollback
            RawTransaction recoveryTransaction = tf.startTransaction(
                rsf,
                ContextService.getFactory().getCurrentContextManager(),
                AccessFactoryGlobals.USER_TRANS_NAME);

            // make this transaction aware that it is a recovery transaction
            // and don't spew forth post commit work while replaying the log
            recoveryTransaction.recoveryTransaction();

            /////////////////////////////////////////////////////////////
            //
            // Redo loop - in FileLogger
            //
            /////////////////////////////////////////////////////////////

            //
            // set the log factory state to inRedo so that if redo causes any
            // dirty page to be written from the cache, it won't flush the
            // log, since the end of the log has not been determined and we
            // know the log record that caused the page to change has
            // already been written to the log. We need the page write to
            // go thru the log factory because if the redo has a problem,
            // the log factory is corrupt and the only way we know not to
            // write out the page in a checkpoint is if it checks with the
            // log factory, and that is done via a flush - we use the WAL
            // protocol to stop corrupt pages from writing to the disk.
            //
            inRedo = true;

            long logEnd = logger.redo(
                recoveryTransaction, tf, redoScan, redoLWM, ttabInstant);

            inRedo = false;

            // if we are only interested in dumping the log, don't alter
            // the database and prevent anyone from using the log
            if (SanityManager.DEBUG)
            {
                if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY))
                {
                    Monitor.logMessage("_____________________________________________________");
                    Monitor.logMessage("\n\t\t Log dump finished");
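/*
 * The recover() method above works throughout with "log instants" such as
 * redoLWM and undoLWM, built with LogCounter.makeLogInstantAsLong(file, offset)
 * and taken apart with LogCounter.getLogFileNumber(instant).  The sketch below
 * illustrates the idea only: a log file number and a byte offset packed into a
 * single long.  The 32-bit split is an assumption for illustration, not taken
 * from the Derby LogCounter source.  With this reading, the comparison
 * LogCounter.getLogFileNumber(undoLWM) < firstLogFileNumber in the checkpoint
 * branch simply compares file numbers extracted from two packed instants.
 */
final class LogInstantSketch
{
    static long makeInstant(long fileNumber, long offset)
    {
        // high bits: log file number, low bits: offset within that file
        return (fileNumber << 32) | (offset & 0xFFFFFFFFL);
    }

    static long fileNumberOf(long instant)
    {
        return instant >>> 32;
    }

    static long offsetOf(long instant)
    {
        return instant & 0xFFFFFFFFL;
    }
}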