/* Sesame - Storage and Querying architecture for RDF and RDF Schema
 * Copyright (C) 2001-2005 Aduna
 *
 * Contact:
 *  Aduna
 *  Prinses Julianaplein 14 b
 *  3817 CS Amersfoort
 *  The Netherlands
 *  tel. +31 (0)33 465 99 87
 *  fax. +31 (0)33 465 99 87
 *
 *  http://aduna.biz/
 *  http://www.openrdf.org/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
package org.openrdf.sesame.sailimpl.nativerdf.datastore;

import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

/**
 * Class supplying access to a hash file. This class is a wrapper class; the
 * actual hash file functionality is implemented in the inner class HashFile0.
 *
 * @author Arjohn Kampman
 * @version $Revision: 1.10 $
 */
public class HashFile {

    /*-------------+
    | Constants    |
    +-------------*/

    // The size of an item (32-bit hash + 64-bit offset), in bytes
    private static final int ITEM_SIZE = 12;

    // The size of the file header, in bytes
    private static final long HEADER_LENGTH = 12L;

    private static final int INIT_BUCKET_COUNT = 64;

    private static final int INIT_BUCKET_SIZE = 8;
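
    /*
     * Note added for clarity (not in the original source): together, the
     * constants above imply the following on-disk layout. The 12-byte header
     * holds three ints: bucket count, bucket size and item count. Each bucket
     * record occupies ITEM_SIZE * bucketSize + 4 bytes: bucketSize items of
     * 12 bytes each (a 32-bit hash followed by a 64-bit data offset), plus a
     * trailing 4-byte overflow bucket ID, where 0 means "no overflow bucket".
     *
     *   header: [bucketCount:int][bucketSize:int][itemCount:int]
     *   bucket: [hash:int][dataOffset:long] x bucketSize, [overflowID:int]
     */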

    /*-------------+
    | Variables    |
    +-------------*/

    private File _file;

    private HashFile0 _hashFile;

    private File _txnFile;

    private HashFile0 _txnHashFile;

    /**
     * Flag indicating whether the current transaction is/should be an
     * isolated one.
     */
    private boolean _isolatedTransaction;

    /*---------------+
    | Constructors   |
    +---------------*/

    public HashFile(File file) throws IOException {
        _file = file;

        // Make sure the file exists
        _file.createNewFile();

        // Create a hash file
        _hashFile = new HashFile0(_file);
    }

    /*----------+
    | Methods   |
    +----------*/

    public void startTransaction(boolean isolateTransaction) throws IOException {
        _isolatedTransaction = isolateTransaction;

        if (isolateTransaction) {
            // Create a working copy of the hash file
            _txnFile = new File(_file.getParentFile(), "txn_" + _file.getName());
            RandomAccessFile txnRaf = _createEmptyFile(_txnFile);
            FileChannel txnChannel = txnRaf.getChannel();

            // Copy the hash file data to the working copy
            _hashFile.sync();
            FileChannel channel = _hashFile.getFileChannel();
            TransferUtil.transferTo(channel, 0L, channel.size(), txnChannel);

            _txnHashFile = new HashFile0(_txnFile, txnRaf);
        }
        else {
            _txnHashFile = _hashFile;
        }
    }

    public void commitTransaction() throws IOException {
        if (_isolatedTransaction) {
            // Close both hash files; all file channels need to
            // be closed before the txn file can be renamed
            _hashFile.close();
            _hashFile = null;

            _txnHashFile.sync();
            _txnHashFile.close();
            _txnHashFile = null;

            // Delete the data file
            _file.delete();

            // Rename the txn file to the data file
            boolean success = _txnFile.renameTo(_file);
            if (!success) {
                throw new IOException("Unable to rename file '" + _txnFile + "' to '" + _file + "'");
            }

            // Recreate the hash file
            _hashFile = new HashFile0(_file);
            _txnFile = null;
        }
        else {
            // Transaction wasn't isolated, so all changes have already been committed
            _txnHashFile = null;
            _hashFile.sync();
        }

        //_hashFile.dumpContents(System.out);
    }

    public void rollbackTransaction() throws IOException {
        if (_isolatedTransaction) {
            // Discard the working copy
            _txnHashFile.close();
            _txnHashFile = null;

            _txnFile.delete();
            _txnFile = null;
        }
        else {
            throw new IOException("Unisolated transactions cannot be rolled back");
        }
    }

    public void storeOffset(int hash, long dataOffset) throws IOException {
        _txnHashFile.storeOffset(hash, dataOffset);
    }

    public void clear() throws IOException {
        // Clear the working copy; it will overwrite the existing data on commit
        _txnHashFile.clear();
    }

    public OffsetIterator getOffsetIterator(int hash, boolean dirtyReads) throws IOException {
        HashFile0 hashFile = dirtyReads ? _txnHashFile : _hashFile;
        return new OffsetIterator(hashFile, hash);
    }
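
    /*
     * Summary of the transaction semantics above (comment added for clarity,
     * not in the original source):
     *
     *   startTransaction(true):  all writes go to a working copy named
     *                            "txn_<name>". commitTransaction() closes both
     *                            files and renames the copy over the data file;
     *                            rollbackTransaction() simply deletes the copy.
     *   startTransaction(false): writes go directly to the data file.
     *                            commitTransaction() merely syncs it, and
     *                            rollbackTransaction() throws an IOException.
     */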

    public void close() throws IOException {
        if (_txnHashFile != null && _isolatedTransaction) {
            rollbackTransaction();
        }

        _hashFile.close();
    }

    private RandomAccessFile _createEmptyFile(File file) throws IOException {
        // Make sure the file exists
        if (!file.exists()) {
            file.createNewFile();
        }

        // Open the file in read-write mode and make sure the file is empty
        RandomAccessFile raf = new RandomAccessFile(file, "rw");
        raf.setLength(0L);

        return raf;
    }

    /*-----------------------------------------------------+
    | Inner class HashFile0, the actual hash file wrapper  |
    +-----------------------------------------------------*/

    class HashFile0 {

        /*-------------+
        | Variables    |
        +-------------*/

        private File _file;

        private RandomAccessFile _raf;

        private FileChannel _fileChannel;

        // The number of (non-overflow) buckets in the hash file
        private int _bucketCount;

        // The number of items that can be stored in a bucket
        private int _bucketSize;

        // The number of items in the hash file
        private int _itemCount;

        // Load factor (fixed, for now)
        private float _loadFactor = 0.75f;

        // _recordSize = ITEM_SIZE * _bucketSize + 4
        private int _recordSize;

        private ByteBuffer _txnBucket;

        /*---------------+
        | Constructors   |
        +---------------*/

        public HashFile0(File file) throws IOException {
            this(file, new RandomAccessFile(file, "rw"));
        }

        public HashFile0(File file, RandomAccessFile raf) throws IOException {
            _file = file;
            _raf = raf;
            _fileChannel = raf.getChannel();

            if (_fileChannel.size() == 0L) {
                // Empty file; insert bucket count, bucket size
                // and item count at the start of the file
                _bucketCount = INIT_BUCKET_COUNT;
                _bucketSize = INIT_BUCKET_SIZE;
                _itemCount = 0;
                _recordSize = ITEM_SIZE * _bucketSize + 4;
                _writeFileHeader();

                // Initialize the file by writing <_bucketCount> empty buckets
                _writeEmptyBuckets(HEADER_LENGTH, _bucketCount);
            }
            else {
                // Read bucket count, bucket size and item count from the file
                _readFileHeader();

                _recordSize = ITEM_SIZE * _bucketSize + 4;
            }

            _txnBucket = ByteBuffer.allocate(_recordSize);
        }

        /*----------+
        | Methods   |
        +----------*/

        public FileChannel getFileChannel() {
            return _fileChannel;
        }

        public int getBucketCount() {
            return _bucketCount;
        }

        public int getBucketSize() {
            return _bucketSize;
        }

        public int getItemCount() {
            return _itemCount;
        }

        public int getRecordSize() {
            return _recordSize;
        }
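
        // _getBucketOffset() and the other private helpers used below fall
        // outside this excerpt. For reference, a sketch of _getBucketOffset()
        // that is consistent with the header and record layout; this is an
        // assumption, not necessarily the original implementation:
        //
        //   private long _getBucketOffset(int hash) {
        //       int bucketNo = hash % _bucketCount;
        //       if (bucketNo < 0) {
        //           bucketNo += _bucketCount; // Java's % takes the dividend's sign
        //       }
        //       return HEADER_LENGTH + (long)bucketNo * _recordSize;
        //   }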

        /**
         * Stores the offset of a new data entry with the specified hash.
         */
        public void storeOffset(int hash, long dataOffset) throws IOException {
            // Calculate the bucket offset for the initial bucket
            long bucketOffset = _getBucketOffset(hash);

            _storeOffset(bucketOffset, hash, dataOffset);

            _itemCount++;

            if (_itemCount >= _loadFactor * _bucketCount * _bucketSize) {
                _increaseHashTable();
            }
        }

        private void _storeOffset(long bucketOffset, int hash, long dataOffset) throws IOException {
            boolean offsetStored = false;

            while (!offsetStored) {
                _txnBucket.clear();
                _fileChannel.read(_txnBucket, bucketOffset);

                // Find the first empty slot in the bucket
                int slotID = _findEmptySlotInBucket(_txnBucket);

                if (slotID >= 0) {
                    // Empty slot found, store dataOffset in it
                    _txnBucket.putInt(ITEM_SIZE * slotID, hash);
                    _txnBucket.putLong(ITEM_SIZE * slotID + 4, dataOffset);
                    _txnBucket.rewind();
                    _fileChannel.write(_txnBucket, bucketOffset);
                    offsetStored = true;
                }
                else {
                    // No empty slot found, check if the bucket has an overflow bucket
                    int overflowID = _txnBucket.getInt(ITEM_SIZE * _bucketSize);

                    if (overflowID == 0) {
                        // No overflow bucket yet, create one
                        overflowID = _createOverflowBucket();

                        // Link the overflow bucket to the current bucket
                        _txnBucket.putInt(ITEM_SIZE * _bucketSize, overflowID);
                        _txnBucket.rewind();
                        _fileChannel.write(_txnBucket, bucketOffset);
                    }

                    // Continue searching for an empty slot in the overflow bucket
                    bucketOffset = _getOverflowBucketOffset(overflowID);
                }
            }
        }

        public void clear() throws IOException {
            // Truncate the file to remove any overflow buckets
            _fileChannel.truncate(HEADER_LENGTH + (long)_bucketCount * _recordSize);

            // Overwrite the normal buckets with empty ones
            _writeEmptyBuckets(HEADER_LENGTH, _bucketCount);

            _itemCount = 0;
        }
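
    /*
     * Usage sketch (illustrative, not part of the original source; the file
     * name and the stored values are arbitrary examples):
     *
     *   HashFile hashFile = new HashFile(new File("values.hash"));
     *
     *   hashFile.startTransaction(true);              // isolated: work on a copy
     *   hashFile.storeOffset("foo".hashCode(), 128L); // map hash -> data offset
     *   hashFile.commitTransaction();                 // copy replaces the data file
     *
     *   // Look up candidate offsets for a hash; since different values can
     *   // share a hash, callers must verify each returned offset
     *   OffsetIterator iter = hashFile.getOffsetIterator("foo".hashCode(), false);
     *   ...
     *
     *   hashFile.close();
     */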