📄 disktable.java
字号:
/* * $Id: DiskTable.java,v 1.30 2003/07/08 06:55:39 rwald Exp $ * ======================================================================= * Copyright (c) 2002-2003 Axion Development Team. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. The names "Tigris", "Axion", nor the names of its contributors may * not be used to endorse or promote products derived from this * software without specific prior written permission. * * 4. Products derived from this software may not be called "Axion", nor * may "Tigris" or "Axion" appear in their names without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ======================================================================= */package org.axiondb.engine;import java.io.BufferedInputStream;import java.io.ByteArrayOutputStream;import java.io.DataOutputStream;import java.io.File;import java.io.FileInputStream;import java.io.IOException;import java.io.InputStream;import java.io.RandomAccessFile;import java.util.Iterator;import java.util.NoSuchElementException;import org.apache.commons.collections.LRUMap;import org.apache.commons.collections.primitives.ArrayIntList;import org.apache.commons.collections.primitives.ArrayLongList;import org.apache.commons.collections.primitives.ArrayUnsignedIntList;import org.apache.commons.collections.primitives.IntIterator;import org.apache.commons.collections.primitives.IntList;import org.apache.commons.collections.primitives.LongList;import org.apache.commons.logging.Log;import org.apache.commons.logging.LogFactory;import org.axiondb.AxionException;import org.axiondb.Column;import org.axiondb.Index;import org.axiondb.IndexLoader;import org.axiondb.Row;import org.axiondb.RowIterator;import org.axiondb.Table;import org.axiondb.engine.rowiterators.BaseRowIterator;import org.axiondb.event.RowInsertedEvent;import org.axiondb.types.FileLobLocator;import org.axiondb.types.FileOffsetLobLocator;import org.axiondb.types.FileOffsetLobLocatorFactory;import org.axiondb.types.LOBType;import org.axiondb.types.LobLocator;/** * A disk-resident {@link Table}. 
 *
 * @version $Revision: 1.30 $ $Date: 2003/07/08 06:55:39 $
 * @author Chuck Burdick
 * @author Rodney Waldhoff
 */
public final class DiskTable extends BaseDiskTable implements Table {

    //------------------------------------------------------------- Constructors

    /**
     * Creates (or re-opens) a disk table named <i>name</i> under
     * <i>parentdir</i>.  The table lives in its own subdirectory
     * <code>parentdir/name</code>, which is created on demand along with a
     * <code>.type</code> marker file, the pidx/free-id/meta files, an
     * <code>indices</code> subdirectory, the <code>.data</code> file and the
     * row cache.
     *
     * @param name      the table name (also used as the directory name)
     * @param parentdir the database directory this table's directory lives under
     * @throws AxionException if the table directory cannot be created
     */
    public DiskTable(String name, File parentdir) throws AxionException {
        super(name);
        if(_log.isDebugEnabled()) {
            _log.debug("Constructing DiskTable " + name + " in " + parentdir.toString());
        }
        _dir = new File(parentdir,name);
        if(!_dir.exists()) {
            _log.debug("Directory \"" + _dir.toString() + "\" doesn't exist, creating it.");
            if(!_dir.mkdirs()) {
                throw new AxionException(
                    "Unable to create directory \"" + _dir.toString() + "\" for DiskTable \"" + name + "\".");
            }
        }
        // create the type file if it doesn't already exist
        {
            File typefile = new File(_dir,name + ".type");
            if(!typefile.exists()) {
                writeNameToFile(typefile,new DiskTableFactory());
            }
        }
        // _pidx appears to map row id -> byte offset of the row within the
        // .data file (see getRowByOffset usage below); INVALID_OFFSET marks
        // deleted/unused slots. _freeIds holds row ids available for reuse.
        _pidx = new ArrayUnsignedIntList();
        _freeIds = new ArrayIntList();
        _lobDir = new File(_dir,"lobs");
        createOrLoadMetaFile();
        createOrLoadPidxFile();
        createOrLoadFreeIdsFile();
        initializeRowCount();
        // indices - directory containing index files
        {
            _indexDir = new File(_dir,"indices");
            if(_indexDir.exists()) {
                loadIndices(_indexDir);
            } else {
                // NOTE(review): mkdirs() return value is ignored here (unlike
                // the table-dir creation above) — a failure would surface
                // later as index-save errors.
                _indexDir.mkdirs();
            }
        }
        createOrLoadDataFile();
        createRowCache();
        _log.debug("done loading table");
    }

    //------------------------------------------------------------------ Public

    /**
     * Reserves and returns the next available row id, preferring to recycle a
     * previously freed id over growing the pidx list.  The returned id's pidx
     * slot is initialized to {@code INVALID_OFFSET} (no data yet).
     *
     * @return the reserved row id
     */
    public synchronized int getNextRowId() {
        _modCount++;
        int id = -1;
        if(_freeIds.isEmpty()) {
            // no recycled ids: append a new slot
            id = _pidx.size();
            _pidx.add(INVALID_OFFSET);
        } else {
            // reuse the oldest freed id and clear its old offset
            id = _freeIds.removeElementAt(0);
            _pidx.set(id,INVALID_OFFSET);
        }
        return id;
    }

    /**
     * Releases a row id back to the free pool, marking its pidx slot invalid
     * so the id can be handed out again by {@link #getNextRowId}.
     *
     * @param id the row id to release
     */
    public synchronized void freeRowId(int id) {
        _modCount++;
        _pidx.set(id,INVALID_OFFSET);
        _freeIds.add(id);
    }

    /** @return the current number of live rows in this table */
    public int getRowCount() {
        return _rowCount;
    }

    /**
     * Applies a batch of deletions: each row id from <i>iter</i> has its pidx
     * slot invalidated, is returned to the free-id pool, and is evicted from
     * the row cache.  The pidx and free-id files are rewritten once at the
     * end (only if the iterator was non-empty).
     *
     * @param iter row ids to delete
     * @throws AxionException if persisting the pidx/free-id files fails
     */
    public void applyDeletes(IntIterator iter) throws AxionException {
        if(iter.hasNext()) {
            _modCount++;
            for(int rowid;iter.hasNext();) {
                rowid = iter.next();
                _pidx.set(rowid,INVALID_OFFSET);
                _freeIds.add(rowid);
                uncacheRow(rowid);
                _rowCount--;
            }
            writePidxFile();
            writeFridFile();
        }
    }

    /**
     * Applies a batch of row updates.  New row images are serialized into an
     * in-memory buffer (recording, per row, the offset it will occupy at the
     * end of the .data file), then the whole buffer is appended to the .data
     * file in one write and the pidx file is updated.  Old row data is not
     * reclaimed here — the file only grows (compare {@code defrag()}).
     *
     * @param rows an {@link Iterator} of {@link Row}s to write
     * @throws AxionException if serializing or writing the rows fails
     */
    public void applyUpdates(Iterator rows) throws AxionException {
        _modCount++;
        // write all the rows to a buffer, keeping track of the offsets
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        long initoffset = getDataFile().length();
        LongList offsets = new ArrayLongList();
        {
            DataOutputStream out = new DataOutputStream(buffer);
            while(rows.hasNext()) {
                Row row = (Row)(rows.next());
                // offset this row will land at once the buffer is appended
                long curoffset = initoffset + (long)(buffer.size());
                offsets.add(curoffset);
                _pidx.set(row.getIdentifier(),curoffset);
                // serialize each column via its DataType
                for(int i = 0; i < getColumnCount(); i++) {
                    try {
                        getColumn(i).getDataType().write(row.get(i),out);
                    } catch(IOException e) {
                        throw new AxionException("Error buffering column " + i + " data.",e);
                    }
                }
                // NOTE(review): this _pidx.set is a duplicate of the one a few
                // lines above (same id, same offset) — harmless but redundant.
                _pidx.set(row.getIdentifier(),curoffset); // update the slot in the pidx file to point to the new data
                cacheRow(row.getIdentifier(),row); // do we still want this?
            }
            try {
                out.flush();
            } catch(IOException e) {
                throw new AxionException("Error flushing buffer.",e);
            }
        }
        // now write out the buffered rows
        RandomAccessFile out = null;
        try {
            out = getWriteFile();
            out.seek(initoffset);
            out.write(buffer.toByteArray());
            appendToPidxFile(offsets);
            writePidxFile();
        } catch(IOException e) {
            throw new AxionException("Error writing buffer.",e);
        } finally {
            // best-effort fsync of the data file; NOTE(review): the
            // NullPointerException catch covers the case where getWriteFile()
            // threw and out is still null — an explicit null check would be
            // clearer than swallowing NPE.
            try {
                out.getFD().sync();
            } catch(IOException e) {
                // ignored
            } catch(NullPointerException e) {
                // ignored
            }
        }
    }

    /**
     * Persists the indices if anything has changed since the last
     * checkpoint, using the modification counter to skip redundant saves.
     *
     * @throws AxionException if saving the indices fails
     */
    public void checkpoint() throws AxionException {
        if(_savedAtModCount != _modCount) {
            saveIndices();
            _savedAtModCount = _modCount;
        }
    }

    /**
     * Populates <i>index</i> by replaying every live row (every pidx slot not
     * equal to {@code INVALID_OFFSET}) as a {@link RowInsertedEvent}, then
     * saves the index into its own subdirectory of the indices directory
     * along with a <code>.type</code> marker naming its {@link IndexLoader}.
     *
     * @param index the index to populate and save
     * @throws AxionException if reading rows or saving the index fails
     */
    public void populateIndex(Index index) throws AxionException {
        for(int i=0,I=_pidx.size();i<I;i++) {
            long ptr = _pidx.get(i);
            if(ptr != INVALID_OFFSET) {
                // null "old row": this is an insert replay, not an update
                index.rowInserted(
                    new RowInsertedEvent(this,null,getRowByOffset(i,ptr)));
            }
        }
        File dataDir = new File(_indexDir,index.getName());
        if(!dataDir.exists()) {
            dataDir.mkdirs();
        }
        File typefile = new File(dataDir,index.getName() + ".type");
        IndexLoader loader = index.getIndexLoader();
        writeNameToFile(typefile,loader);
        index.save(dataDir);
    }

    /**
     * Returns a bidirectional {@link RowIterator} over the live rows, walking
     * the pidx list in row-id order and transparently skipping slots marked
     * {@code INVALID_OFFSET}.  {@code nextIndex()}/{@code currentIndex()}
     * count live rows (dense), while the internal {@code _nextId}/
     * {@code _currentId} track raw pidx slots (sparse).
     *
     * @throws AxionException declared for subclass/contract compatibility
     */
    protected RowIterator getRowIterator() throws AxionException {
        return new BaseRowIterator() {
            Row _current = null;       // row returned by the last next()/previous()
            int _nextIndex = 0;        // dense index of the next live row
            int _currentIndex = -1;    // dense index of _current, -1 if none
            int _nextId = 0;           // raw pidx slot to examine next
            int _currentId = -1;       // raw pidx slot of _current

            public void reset() {
                _current = null;
                _nextIndex = 0;
                _currentIndex = -1;
                _nextId = 0;
            }

            public Row current() {
                if(!hasCurrent()) {
                    throw new NoSuchElementException("No current row.");
                } else {
                    return _current;
                }
            }

            public boolean hasCurrent() {
                return (null != _current);
            }

            public int currentIndex() {
                return _currentIndex;
            }

            public int nextIndex() {
                return _nextIndex;
            }

            public int previousIndex() {
                return _nextIndex - 1;
            }

            public boolean hasNext() {
                return nextIndex() < getRowCount();
            }

            public boolean hasPrevious() {
                return nextIndex() > 0;
            }

            public Row next() throws AxionException {
                if(!hasNext()) {
                    throw new NoSuchElementException("No next row");
                } else {
                    // advance over deleted slots until a live row is found;
                    // hasNext() guarantees one exists, so the loop terminates
                    do {
                        _currentId = _nextId++;
                        long offset = _pidx.get(_currentId);
                        if(offset == INVALID_OFFSET) {
                            _current = null;
                        } else {
                            _current = getRowByOffset(_currentId,offset);
                        }
                    } while(null == _current);
                    _currentIndex = _nextIndex;
                    _nextIndex++;
                    return _current;
                }
            }

            public Row previous() throws AxionException {
                if(!hasPrevious()) {
                    throw new NoSuchElementException("No previous row");
                } else {
                    // walk backwards over deleted slots to the previous live row
                    do {
                        _currentId = (--_nextId);
                        long offset = _pidx.get(_currentId);
                        if(offset == INVALID_OFFSET) {
                            _current = null;
                        } else {
                            _current = getRowByOffset(_currentId,offset);
                        }
                    } while(null == _current);
                    _nextIndex--;
                    _currentIndex = _nextIndex;
                    return _current;
                }
            }

            public void remove() throws AxionException {
                if(-1 == _currentIndex) {
                    throw new IllegalStateException("No current row.");
                } else {
                    deleteRow(_current);
                    // shift the cursor back so iteration doesn't skip a row
                    _nextIndex--;
                    _currentIndex = -1;
                }
            }

            public void set(Row row) throws AxionException {
                if(-1 == _currentIndex) {
                    throw new IllegalStateException("No current row.");
                } else {
                    updateRow(_current,row);
                }
            }
        };
    }

    /**
     * Fetches a row by id, consulting the row cache first and falling back to
     * reading from the .data file at the offset recorded in the pidx list
     * (caching the result on the way out).
     *
     * @param id the row id
     * @return the row (never cached as null here; behavior for an
     *         INVALID_OFFSET slot depends on getRowByOffset — not visible
     *         from this block)
     * @throws AxionException if reading the row fails
     */
    public Row getRow(int id) throws AxionException {
        Row cached = getCachedRow(id);
        if(null != cached) {
            return cached;
        } else {
            long ptr = _pidx.get(id);
            Row row = getRowByOffset(id,ptr);
            cacheRow(id,row);
            return row;
        }
    }

    /**
     * Re-points this table at a new directory, closing and re-opening its
     * backing files.  Refuses to remount with unsaved changes unless the
     * {@code TransactionManagerImpl.NEVER_APPLY} escape hatch is set.
     *
     * @param parentdir     the new parent directory
     * @param datafilesonly whether to remount only the data files
     * @throws AxionException if changes are pending or re-initialization fails
     */
    public void remount(File newdir, boolean datafilesonly) throws AxionException {
        //TODO: hack - prevent error when remounting while transactions
        //are pending if NEVER_APPLY is true
        if(_savedAtModCount != _modCount && TransactionManagerImpl.NEVER_APPLY == false) {
            throw new AxionException("Can't remount without a checkpoint first.");
        } else {
            closeFiles();
            initFiles(newdir,datafilesonly);
            super.remount(newdir,datafilesonly);
        }
    }

    /**
     * Compacts the .data file by copying only live rows into a fresh
     * <code>.data.defrag</code> file, rebuilding the pidx list as it goes.
     * (Counterpart to the append-only behavior of {@code applyUpdates}.)
     *
     * @throws Exception on any I/O failure
     */
    public void defrag() throws Exception {
        // the new pidx list; sized at 2/3 of the old on the assumption that
        // roughly a third of the slots are dead — TODO confirm heuristic
        LongList pidx2 = new ArrayUnsignedIntList((_pidx.size()*2)/3);
        // the new .data file
        File df2 = new File(getRootDir(),getName() + ".data.defrag");
        RandomAccessFile data2 = new RandomAccessFile(df2,"rw");
        // for each row
        for(int i=0;i<_pidx.size();i++) {
            long offset = _pidx.get(i);
            if(INVALID_OFFSET == offset) {
                // skip the invalid ones
            } else {
                // for valid ones, read the old row
                Row row = getRowByOffset(i,offset);
                // NOTE(review): source is truncated at this point in the
                // provided file — the remainder of defrag() (and the class
                // closing brace) is not visible here.
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -