// DiskFile.java
/*
* Copyright 2004-2008 H2 Group. Licensed under the H2 License, Version 1.0
* (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.sql.SQLException;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import org.h2.api.DatabaseEventListener;
import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.log.LogSystem;
import org.h2.log.RedoLogRecord;
import org.h2.message.Message;
import org.h2.message.Trace;
import org.h2.util.BitField;
import org.h2.util.Cache;
import org.h2.util.Cache2Q;
import org.h2.util.CacheLRU;
import org.h2.util.CacheObject;
import org.h2.util.CacheWriter;
import org.h2.util.FileUtils;
import org.h2.util.IntArray;
import org.h2.util.MathUtils;
import org.h2.util.ObjectArray;
import org.h2.util.ObjectUtils;
/**
* This class represents a file that is usually written to disk. The two main
* files are .data.db and .index.db. For each such file, a number of
* {@link Storage} objects exists. The disk file is responsible for caching;
* each object contains a {@link Cache} object. Changes in the file are logged
* in a {@link LogSystem} object. Reading and writing to the file is delegated
* to the {@link FileStore} class.
* <p>
* There are 'blocks' of 128 bytes (DiskFile.BLOCK_SIZE). Each objects own one
* or more pages; each page size is 64 blocks (DiskFile.BLOCKS_PER_PAGE). That
* is 8 KB page size. However pages are not read or written as one unit; only
* individual objects (multiple blocks at a time) are read or written.
* <p>
* Currently there are no in-place updates. Each row occupies one or multiple
* blocks. Row can occupy multiple pages. Rows are always contiguous (except
* LOBs, they are stored in their own files).
*/
public class DiskFile implements CacheWriter {

    /**
     * The size of a block in bytes.
     * A block is the minimum row size.
     */
    public static final int BLOCK_SIZE = 128;

    /**
     * The size of a page in blocks.
     * Each page contains blocks from the same storage.
     */
    static final int BLOCK_PAGE_PAGE_SHIFT = 6;
    public static final int BLOCKS_PER_PAGE = 1 << BLOCK_PAGE_PAGE_SHIFT;

    // data blocks start after the fixed file header
    public static final int OFFSET = FileStore.HEADER_LENGTH;

    // marker in pageOwners for a page not assigned to any storage
    static final int FREE_PAGE = -1;

    // TODO storage: header should probably be 4 KB or so
    // (to match block size of operating system)

    private Database database;
    // name of the underlying file on disk
    private String fileName;
    private FileStore file;
    // one bit per block: the block is currently in use
    private BitField used;
    // one bit per block: deletion flag (usage not visible in this chunk)
    private BitField deleted;
    // pages that may have become completely unused
    private HashSet potentiallyFreePages;
    // number of blocks in the file, derived from the file length
    private int fileBlockCount;
    // maps page index -> owning storage id, or FREE_PAGE
    private IntArray pageOwners;
    private Cache cache;
    private LogSystem log;
    private DataPage rowBuff;
    // a pre-built block of filler bytes with a valid checksum
    private DataPage freeBlock;
    // true for the data file, false for the index file (see class javadoc)
    private boolean dataFile;
    // if false, changes to this file are not written to the log
    private boolean logChanges;
    // per-record storage overhead in bytes (see constructor)
    private int recordOverhead;
    // init: the summary was applied successfully;
    // initAlreadyTried: guards against repeated summary initialization
    private boolean init, initAlreadyTried;
    private ObjectArray redoBuffer;
    private int redoBufferSize;
    private int readCount, writeCount;
    // file open mode passed to FileStore (e.g. read-write)
    private String mode;
    private int nextDeleteId = 1;
/**
 * Create or open a disk file.
 * If the file already exists it is opened and the block count is derived
 * from its length; otherwise a new file with an empty header is created.
 *
 * @param database the database this file belongs to
 * @param fileName the name of the file on disk
 * @param mode the file open mode (passed to Database.openFile)
 * @param dataFile true if this is the data file, false for the index file
 * @param logChanges whether changes to this file are written to the log
 * @param cacheSize the cache size (passed to the cache implementation)
 * @throws SQLException if opening or creating the file fails
 */
public DiskFile(Database database, String fileName, String mode, boolean dataFile, boolean logChanges, int cacheSize) throws SQLException {
    reset();
    this.database = database;
    this.log = database.getLog();
    this.fileName = fileName;
    this.mode = mode;
    this.dataFile = dataFile;
    this.logChanges = logChanges;
    // the cache implementation is selected by the database configuration
    String cacheType = database.getCacheType();
    if (Cache2Q.TYPE_NAME.equals(cacheType)) {
        this.cache = new Cache2Q(this, cacheSize);
    } else {
        this.cache = new CacheLRU(this, cacheSize);
    }
    rowBuff = DataPage.create(database, BLOCK_SIZE);
    // TODO: the overhead is larger in the log file, so this value is too high :-(
    recordOverhead = 4 * rowBuff.getIntLen() + 1 + rowBuff.getFillerLength();
    // pre-build one block of filler bytes with a valid checksum
    freeBlock = DataPage.create(database, BLOCK_SIZE);
    freeBlock.fill(BLOCK_SIZE);
    freeBlock.updateChecksum();
    try {
        if (FileUtils.exists(fileName)) {
            // open the existing file; block count = (length - header) / block size
            file = database.openFile(fileName, mode, true);
            long length = file.length();
            database.notifyFileSize(length);
            int blocks = (int) ((length - OFFSET) / BLOCK_SIZE);
            setBlockCount(blocks);
        } else {
            create();
        }
    } catch (SQLException e) {
        // release the file handle if initialization fails, then re-throw
        close();
        throw e;
    }
}
/**
 * Re-initialize the in-memory bookkeeping structures (block usage bits,
 * deletion bits, page ownership, redo buffer, potentially-free pages).
 * The underlying file and the current block count are left untouched.
 */
private void reset() {
    potentiallyFreePages = new HashSet();
    redoBuffer = new ObjectArray();
    used = new BitField();
    deleted = new BitField();
    pageOwners = new IntArray();
    // re-populate pageOwners for the current block count
    setBlockCount(fileBlockCount);
}
/**
 * Set the number of blocks in this file and grow the page-owner list so
 * that every page up to and including the page of the given block has an
 * entry (newly added pages are marked as free).
 *
 * @param count the new block count
 */
private void setBlockCount(int count) {
    fileBlockCount = count;
    int lastPage = getPage(count);
    for (int i = pageOwners.size(); i <= lastPage; i++) {
        pageOwners.add(FREE_PAGE);
    }
}
/**
 * Get the number of blocks in this file.
 *
 * @return the block count
 */
int getBlockCount() {
    return fileBlockCount;
}
/**
 * Create a new, empty file: open it and write a filler header block
 * (with a valid checksum) at the start of the file.
 *
 * @throws SQLException if the file cannot be created or written
 */
private void create() throws SQLException {
    file = database.openFile(fileName, mode, false);
    // build the header: OFFSET filler bytes plus checksum
    DataPage page = DataPage.create(database, OFFSET);
    page.fill(OFFSET);
    page.updateChecksum();
    // write it directly after the fixed file header
    file.seek(FileStore.HEADER_LENGTH);
    file.write(page.getBytes(), 0, OFFSET);
}
/**
 * Scan all pages and release the ownership of every page that is owned
 * by a storage but no longer contains any used blocks.
 *
 * @throws SQLException if releasing a page fails
 */
private void freeUnusedPages() throws SQLException {
    int pageCount = pageOwners.size();
    for (int page = 0; page < pageCount; page++) {
        if (pageOwners.get(page) == FREE_PAGE) {
            // already free - nothing to do
            continue;
        }
        if (isPageFree(page)) {
            setPageOwner(page, FREE_PAGE);
        }
    }
}
/**
 * Serialize a summary of this file's state into a byte array.
 * The layout (all via DataOutputStream, big-endian) is:
 * <ul>
 * <li>int: block count (derived from the current file length)</li>
 * <li>blockCount/8 bytes: the in-use bitmap, 8 blocks per byte,
 *     lowest bit first</li>
 * <li>int: number of pageOwners entries, followed by one int per page
 *     (the owning storage id, or FREE_PAGE)</li>
 * <li>a list of (int storageId, int recordCount) pairs for each storage
 *     that owns at least one page, terminated by -1</li>
 * </ul>
 * This is the inverse of {@code initFromSummary}.
 *
 * @return the serialized summary, or null on an (unexpected) IOException
 * @throws SQLException if reading storage metadata fails
 */
public byte[] getSummary() throws SQLException {
    synchronized (database) {
        try {
            ByteArrayOutputStream buff = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(buff);
            int blocks = (int) ((file.length() - OFFSET) / BLOCK_SIZE);
            out.writeInt(blocks);
            // write the in-use bitmap, packing 8 block flags per byte
            for (int i = 0, x = 0; i < blocks / 8; i++) {
                int mask = 0;
                for (int j = 0; j < 8; j++) {
                    if (used.get(x)) {
                        mask |= 1 << j;
                    }
                    x++;
                }
                out.write(mask);
            }
            // write the page ownership table; collect each referenced
            // storage once so its record count can be written below
            out.writeInt(pageOwners.size());
            ObjectArray storages = new ObjectArray();
            for (int i = 0; i < pageOwners.size(); i++) {
                int s = pageOwners.get(i);
                out.writeInt(s);
                if (s >= 0 && (s >= storages.size() || storages.get(s) == null)) {
                    Storage storage = database.getStorage(s, this);
                    while (storages.size() <= s) {
                        storages.add(null);
                    }
                    storages.set(s, storage);
                }
            }
            // write (storageId, recordCount) pairs; -1 terminates the list
            for (int i = 0; i < storages.size(); i++) {
                Storage storage = (Storage) storages.get(i);
                if (storage != null) {
                    out.writeInt(i);
                    out.writeInt(storage.getRecordCount());
                }
            }
            out.writeInt(-1);
            out.close();
            byte[] b2 = buff.toByteArray();
            return b2;
        } catch (IOException e) {
            // will probably never happen, because only in-memory structures are
            // used
            return null;
        }
    }
}
/**
 * Check if none of the blocks of the given page are in use.
 *
 * @param page the page index to check
 * @return true if all blocks of the page are unused
 */
boolean isPageFree(int page) {
    int block = page * BLOCKS_PER_PAGE;
    for (int j = 0; j < BLOCKS_PER_PAGE; j++, block++) {
        if (used.get(block)) {
            return false;
        }
    }
    return true;
}
/**
 * Initialize the in-memory state of this file from a previously written
 * summary (see {@code getSummary} for the serialized layout).
 * If the summary is null or empty, all storages belonging to this file
 * are dropped and the state is reset so a full scan will be required.
 * If the state was already initialized ({@code init == true}), the
 * summary is instead verified against the current state, and any
 * mismatch raises an internal error (caught below).
 * On any exception the error is logged and {@code init} stays false.
 *
 * @param summary the serialized summary, or null/empty to reset
 */
public void initFromSummary(byte[] summary) {
    synchronized (database) {
        if (summary == null || summary.length == 0) {
            // no usable summary: drop this file's storages and reset,
            // forcing a regular init() scan later
            ObjectArray list = database.getAllStorages();
            for (int i = 0; i < list.size(); i++) {
                Storage s = (Storage) list.get(i);
                if (s != null && s.getDiskFile() == this) {
                    database.removeStorage(s.getId(), this);
                }
            }
            reset();
            initAlreadyTried = false;
            init = false;
            return;
        }
        // skip when recovering, or when a previous attempt was already
        // made (unless this is the data file and checking is enabled)
        if (database.getRecovery() || (initAlreadyTried && (!dataFile || !SysProperties.CHECK))) {
            return;
        }
        initAlreadyTried = true;
        // 'stage' tracks how far parsing got, for the error log below
        int stage = 0;
        try {
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(summary));
            int b2 = in.readInt();
            if (b2 > fileBlockCount) {
                // summary describes more blocks than the file has: stale
                database.getTrace(Trace.DATABASE).info(
                        "unexpected size " + b2 + " when initializing summary for " + fileName + " expected:"
                                + fileBlockCount);
                return;
            }
            stage++;
            // stage 1: read the in-use bitmap (8 blocks per byte);
            // verify against 'used' when already initialized, else apply
            for (int i = 0, x = 0; i < b2 / 8; i++) {
                int mask = in.read();
                if (init) {
                    for (int j = 0; j < 8; j++, x++) {
                        if (used.get(x) != ((mask & (1 << j)) != 0)) {
                            throw Message.getInternalError("Redo failure, block: " + x + " expected in-use bit: " + used.get(x));
                        }
                    }
                } else {
                    for (int j = 0; j < 8; j++, x++) {
                        if ((mask & (1 << j)) != 0) {
                            used.set(x);
                        }
                    }
                }
            }
            stage++;
            // stage 2: read the page ownership table; verify or apply
            int len = in.readInt();
            ObjectArray storages = new ObjectArray();
            for (int i = 0; i < len; i++) {
                int s = in.readInt();
                while (storages.size() <= s) {
                    storages.add(null);
                }
                if (init) {
                    int old = getPageOwner(i);
                    if (old != -1 && old != s) {
                        throw Message.getInternalError("Redo failure, expected page owner: " + old + " got: " + s);
                    }
                } else {
                    if (s >= 0) {
                        Storage storage = database.getStorage(s, this);
                        storages.set(s, storage);
                        storage.addPage(i);
                    }
                    setPageOwner(i, s);
                }
            }
            stage++;
            // stage 3: read (storageId, recordCount) pairs until -1;
            // verify or apply the record counts
            while (true) {
                int s = in.readInt();
                if (s < 0) {
                    break;
                }
                int recordCount = in.readInt();
                Storage storage = (Storage) storages.get(s);
                if (init) {
                    if (storage != null) {
                        int current = storage.getRecordCount();
                        if (current != recordCount) {
                            throw Message.getInternalError("Redo failure, expected row count: " + current + " got: " + recordCount);
                        }
                    }
                } else {
                    // NOTE(review): 'storage' could be null here if a record-count
                    // entry refers to a storage that owns no pages; the resulting
                    // NPE is caught below and treated as a failed init — confirm
                    // whether that can occur in practice
                    storage.setRecordCount(recordCount);
                }
            }
            stage++;
            // stage 4: release pages whose blocks are all unused
            freeUnusedPages();
            init = true;
        } catch (Exception e) {
            database.getTrace(Trace.DATABASE).error(
                    "error initializing summary for " + fileName + " size:" + summary.length + " stage:" + stage, e);
            // ignore - init is still false in this case
        }
    }
}
public void init() throws SQLException {
synchronized (database) {
if (init) {
return;
}
ObjectArray storages = database.getAllStorages();
for (int i = 0; i < storages.size(); i++) {
Storage s = (Storage) storages.get(i);
if (s != null && s.getDiskFile() == this) {
s.setRecordCount(0);
}
}
int blockHeaderLen = Math.max(Constants.FILE_BLOCK_SIZE, 2 * rowBuff.getIntLen());
byte[] buff = new byte[blockHeaderLen];
DataPage s = DataPage.create(database, buff);
long time = 0;
for (int i = 0; i < fileBlockCount;) {
long t2 = System.currentTimeMillis();
if (t2 > time + 10) {
time = t2;
database.setProgress(DatabaseEventListener.STATE_SCAN_FILE, this.fileName, i, fileBlockCount);
}
go(i);
file.readFully(buff, 0, blockHeaderLen);
s.reset();
// (web-page footer removed here: the remainder of init() and of this
// class is not included in this chunk)