📄 FileLogger.java
/*

   Derby - Class org.apache.derby.impl.store.raw.log.FileLogger

   Copyright 1997, 2004 The Apache Software Foundation or its licensors, as applicable.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */

package org.apache.derby.impl.store.raw.log;

import org.apache.derby.iapi.reference.SQLState;
import org.apache.derby.iapi.reference.MessageId;

import org.apache.derby.impl.store.raw.log.LogCounter;
import org.apache.derby.impl.store.raw.log.LogRecord;
import org.apache.derby.impl.store.raw.log.StreamLogScan;

import org.apache.derby.iapi.store.access.TransactionController;
import org.apache.derby.iapi.store.raw.RawStoreFactory;
import org.apache.derby.iapi.store.raw.log.LogInstant;
import org.apache.derby.iapi.store.raw.log.Logger;
import org.apache.derby.iapi.store.raw.log.LogScan;
import org.apache.derby.iapi.store.raw.xact.RawTransaction;
import org.apache.derby.iapi.store.raw.xact.TransactionFactory;
import org.apache.derby.iapi.store.raw.xact.TransactionId;
import org.apache.derby.iapi.store.raw.Compensation;
import org.apache.derby.iapi.store.raw.ContainerHandle;
import org.apache.derby.iapi.store.raw.LockingPolicy;
import org.apache.derby.iapi.store.raw.Loggable;
import org.apache.derby.iapi.store.raw.Page;
import org.apache.derby.iapi.store.raw.RePreparable;
import org.apache.derby.iapi.store.raw.Undoable;

import org.apache.derby.iapi.services.io.FormatIdOutputStream;
import org.apache.derby.iapi.services.sanity.SanityManager;
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.services.i18n.MessageService;
import org.apache.derby.iapi.services.io.ArrayInputStream;
import org.apache.derby.iapi.services.io.ArrayOutputStream;
import org.apache.derby.iapi.util.ByteArray;
import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;
import org.apache.derby.iapi.services.io.LimitObjectInput;

import java.io.IOException;

import org.apache.derby.impl.store.raw.data.InitPageOperation;

/**
    Write log records to a log file as a stream (i.e. log records are added to
    the end of the file; there is no concept of pages).
    <P>
    The format of a log record that is not a compensation operation is
    <PRE>
    @format_id      no formatId, format is implied by the log file format and
                    the log record content.
    @purpose        the log record and optional data
    @upgrade
    @disk_layout
        Log Record (see org.apache.derby.impl.store.raw.log.LogRecord)
        length(int)                 length of optional data
        optionalData(byte[length])  optional data written by the log record
    @end_format
    </PRE>
    <HR WIDTH="100%">
    <P>
    The form of a log record that is a compensation operation is
    <PRE>
    @format_id      no formatId, format is implied by the log file format and
                    the log record content.
    @purpose        undo a previous log record
    @upgrade
    @disk_layout
        Log Record that contains the compensation operation
            (see org.apache.derby.impl.store.raw.log.LogRecord)
        undoInstant(long)           the log instant of the operation that is
                                    to be rolled back

        The undo instant is logically part of the LogRecord but is written by
        the logger because it is used and controlled by the rollback code but
        not by the log operation.  There is no optional data in a compensation
        operation; all data necessary for the rollback must be stored in the
        operation being undone.
    @end_format
    </PRE>
    <BR>
    <P>
    Multithreading considerations:<BR>
    Logger must be MT-safe.  Each RawTransaction has its own private
    FileLogger object.  Each logger has a logOutputBuffer and a log input
    buffer which are used to read and write the log.  Since multiple threads
    can be in the same transaction, FileLogger must be synchronized.

    @see LogRecord
*/
public class FileLogger implements Logger {

    private LogRecord logRecord;

    protected byte[] encryptionBuffer;

    private DynamicByteArrayOutputStream logOutputBuffer;
    private FormatIdOutputStream logicalOut;

    private ArrayInputStream logIn;

    private LogToFile logFactory; // actually writes the log records.

    /**
        Make a new Logger with its own log record buffers.
        MT - not needed for constructor
    */
    public FileLogger(LogToFile logFactory) {

        this.logFactory = logFactory;

        logOutputBuffer = new DynamicByteArrayOutputStream(1024); // init size 1K
        logicalOut = new FormatIdOutputStream(logOutputBuffer);

        // logIn and logOutputBuffer must share the same buffer because they
        // combine to form an IO stream to access the same log record.
        //
        // Before each use of logIn, you must reset logIn's data to the
        // byte array you want to read from.
        //
        // To log a record, set logIn's data to point to logOutputBuffer's
        // byte array when you know you have everything you need in the output
        // buffer, then set the limit on logIn and send it to the log
        // operation's doMe.
        //
        // Keep in mind the dynamic nature of the logOutputBuffer, which means
        // it could switch buffers underneath the logOutputBuffer on every
        // write.
        logIn = new ArrayInputStream();

        logRecord = new LogRecord();
    }

    /**
        Close the logger.
        MT - caller provides synchronization
        (RESOLVE: not called by anyone ??)
    */
    public void close() throws IOException {

        if (logOutputBuffer != null) {
            logOutputBuffer.close();
            logOutputBuffer = null;
        }

        logIn = null;
        logFactory = null;
        logicalOut = null;
        logRecord = null;
    }

    /*
    ** Methods of Logger
    */

    /**
        Writes out a log record to the log stream and calls its doMe method to
        apply the change to the rawStore.
        <BR>Any optional data the doMe method needs is first written to the
        log stream using operation.writeOptionalData, then whatever is written
        to the log stream is passed back to the operation for the doMe method.

        <P>MT - there could be multiple threads running in the same raw
        transaction and they can be calling the same logger to log different
        log operations.  This whole method is synchronized to make sure log
        records are logged one at a time.

        @param xact         the transaction logging the change
        @param operation    the log operation
        @return the instant in the log that can be used to identify the log
                record

        @exception StandardException Cloudscape Standard error policy
    */
    public synchronized LogInstant logAndDo(RawTransaction xact, Loggable operation)
        throws StandardException
    {
        boolean isLogPrepared = false;

        boolean inUserCode = false;
        byte[] preparedLog;

        try {
            logOutputBuffer.reset();

            // always use the short Id; only the BeginXact log record contains
            // the XactId (long form)
            TransactionId transactionId = xact.getId();

            // write out the log header with the operation embedded.
            // This is by definition not a compensation log record;
            // those are called thru the logAndUndo interface.
            logRecord.setValue(transactionId, operation);

            inUserCode = true;
            logicalOut.writeObject(logRecord);
            inUserCode = false;

            int optionalDataLength = 0;
            int optionalDataOffset = 0;
            int completeLength = 0;

            ByteArray preparedLogArray = operation.getPreparedLog();
            if (preparedLogArray != null) {

                preparedLog = preparedLogArray.getArray();
                optionalDataLength = preparedLogArray.getLength();
                optionalDataOffset = preparedLogArray.getOffset();

                // There is a race condition if the operation is a begin tran,
                // in that between the time the beginXact log record is written
                // to disk and the time the transaction object is updated in the
                // beginXact.doMe method, other log records may be written.
                // This will render the transaction table in an inconsistent
                // state since it may think a later transaction is the earliest
                // transaction, or it may think that there are no active
                // transactions when there are a bunch of them sitting on the
                // log.
                //
                // Similarly, there is a race condition for endXact, i.e.,
                // 1) endXact is written to the log,
                // 2) checkpoint gets that (committed) transaction as the
                //    firstUpdateTransaction,
                // 3) the transaction calls postComplete, nulling out itself,
                // 4) checkpoint tries to access a closed transaction object.
                //
                // The solution is to sync between the time a begin tran or end
                // tran log record is sent to the log stream and its doMe method
                // is called to update the transaction table and in-memory
                // state.
                //
                // We only need to serialize the begin and end Xact log records
                // because once a transaction has been started and is in the
                // transaction table, its order and transaction state do not
                // change.
                //
                // Use the logFactory as the sync object so that a checkpoint
                // can take its snapshot of the undoLWM before or after a
                // transaction is started, but not in the middle.
                // (see LogToFile.checkpoint)

                // now set the input limit to be the optional data.
                // This limits the amount of data available to logIn that doMe
                // can use.
                logIn.setData(preparedLog);
                logIn.setPosition(optionalDataOffset);
                logIn.setLimit(optionalDataLength);

                if (SanityManager.DEBUG) {
                    if ((optionalDataLength) != logIn.available())
                        SanityManager.THROWASSERT(
                            " stream not set correctly " +
                            optionalDataLength + " != " +
                            logIn.available());
                }

            } else {
                preparedLog = null;
                optionalDataLength = 0;
            }

            logicalOut.writeInt(optionalDataLength);
            completeLength = logOutputBuffer.getPosition() + optionalDataLength;

            LogInstant logInstant = null;
            int encryptedLength = 0; // in case of encryption, we need to pad

            try {
                if (logFactory.databaseEncrypted()) {
                    // we must pad the encrypted data to be a multiple of the
                    // block size, which is logFactory.getEncryptionBlockSize()
                    encryptedLength = completeLength;
                    if ((encryptedLength % logFactory.getEncryptionBlockSize()) != 0)
                        encryptedLength = encryptedLength +
                            logFactory.getEncryptionBlockSize() -
                            (encryptedLength % logFactory.getEncryptionBlockSize());

                    if (encryptionBuffer == null ||
                        encryptionBuffer.length < encryptedLength)
                        encryptionBuffer = new byte[encryptedLength];

                    System.arraycopy(logOutputBuffer.getByteArray(), 0,
                        encryptionBuffer, 0, completeLength - optionalDataLength);

                    if (optionalDataLength > 0)
                        System.arraycopy(preparedLog, optionalDataOffset,
                            encryptionBuffer, completeLength - optionalDataLength,
                            optionalDataLength);

                    // do not bother to clear out the padding area
                    int len = logFactory.encrypt(encryptionBuffer, 0,
                        encryptedLength, encryptionBuffer, 0);

                    if (SanityManager.DEBUG)
                        SanityManager.ASSERT(len == encryptedLength,
                            "encrypted log buffer length != log buffer len");
                }

                if ((operation.group() & (Loggable.FIRST | Loggable.LAST)) != 0) {
                    synchronized (logFactory) {
                        long instant = 0;

                        if (logFactory.databaseEncrypted()) {
                            // encryption has completely drained both the
                            // logOutputBuffer array and the preparedLog array
                            instant = logFactory.appendLogRecord(
                                encryptionBuffer, 0, encryptedLength,
                                null, -1, 0);
                        } else {
                            instant = logFactory.appendLogRecord(
                                logOutputBuffer.getByteArray(), 0, completeLength,
                                preparedLog, optionalDataOffset,
                                optionalDataLength);
                        }

                        logInstant = new LogCounter(instant);
                        operation.doMe(xact, logInstant, logIn);
                    }
                } else {
                    long instant = 0;

                    if (logFactory.databaseEncrypted()) {
                        // encryption has completely drained both the
                        // logOutputBuffer array and the preparedLog array
                        instant = logFactory.appendLogRecord(
                            encryptionBuffer, 0, encryptedLength,
                            null, -1, 0);
                    } else {
                        instant = logFactory.appendLogRecord(
                            logOutputBuffer.getByteArray(), 0, completeLength,
                            preparedLog, optionalDataOffset, optionalDataLength);
                    }

                    logInstant = new LogCounter(instant);
                    operation.doMe(xact, logInstant, logIn);
                }

            } catch (StandardException se) {
                throw logFactory.markCorrupt(
                    StandardException.newException(
                        SQLState.LOG_DO_ME_FAIL, se, operation));
            } catch (IOException ioe) {
                throw logFactory.markCorrupt(
                    StandardException.newException(
                        SQLState.LOG_DO_ME_FAIL, ioe, operation));
            } finally {
                logIn.clearLimit();
            }

            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
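Note on the layout and padding logic above: the class comment describes each non-compensation record on disk as the serialized LogRecord, followed by a 4-byte length, followed by the optional data bytes, and logAndDo rounds the complete record length up to a multiple of the encryption block size before encrypting. The following standalone sketch illustrates both details using only plain java.io; the class and method names are hypothetical and are not part of Derby's API.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

/**
 * Standalone sketch (not Derby code): shows the length-prefixed optional-data
 * layout and the block-size padding arithmetic used by logAndDo.
 */
public class LogRecordLayoutSketch {

    /** Round length up to the next multiple of blockSize (same arithmetic as logAndDo). */
    static int padToBlockSize(int length, int blockSize) {
        if (length % blockSize != 0) {
            length = length + blockSize - (length % blockSize);
        }
        return length;
    }

    /** Lay out a stand-in record body followed by length-prefixed optional data. */
    static byte[] layOutRecord(byte[] recordBody, byte[] optionalData) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.write(recordBody);              // stands in for the serialized LogRecord
        out.writeInt(optionalData.length);  // length(int)
        out.write(optionalData);            // optionalData(byte[length])
        out.flush();
        return bytes.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        byte[] record = layOutRecord(new byte[] {1, 2, 3}, new byte[] {9, 9});
        // 3 body bytes + 4-byte length + 2 optional bytes = 9 bytes total
        System.out.println("record length = " + record.length);
        // 9 padded up to an 8-byte block size becomes 16
        System.out.println("padded length = " + padToBlockSize(record.length, 8));
    }
}

Running main prints a record length of 9 and a padded length of 16 for an 8-byte block size, mirroring how the real code sizes encryptionBuffer before calling logFactory.encrypt.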