⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 linearhashindex.java

📁 非常棒的java数据库
💻 JAVA
📖 第 1 页 / 共 2 页
字号:
/*
 * Copyright 2004-2008 H2 Group. Licensed under the H2 License, Version 1.0
 * (http://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.index;

import java.sql.SQLException;

import org.h2.constant.ErrorCode;
import org.h2.constant.SysProperties;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.message.Message;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.store.DataPage;
import org.h2.store.DiskFile;
import org.h2.store.Record;
import org.h2.store.RecordReader;
import org.h2.store.Storage;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableData;
import org.h2.util.ObjectArray;
import org.h2.value.Value;
import org.h2.value.ValueArray;

/**
 * A linear hash index implementation.
 * Theoretically, this index type should scale better than a b-tree index.
 * At this time, this index is not fully tested.
 */
public class LinearHashIndex extends BaseIndex implements RecordReader {

    // TODO index / linear hash: tune page size
    // private static final int MAX_PAGE_SIZE = 256;
    private static final int RECORDS_PER_BUCKET = 10;
    private static final int UTILIZATION_FOR_SPLIT = 70;
    private static final int UTILIZATION_FOR_MERGE = 60;
    private int readCount;
    // private static final boolean TRACE = false;
    private DiskFile diskFile;
    private Storage storage;
    private TableData tableData;
    private int bucketSize;
    private int blocksPerBucket;
    private int firstBucketBlock;
    private LinearHashHead head;
    private boolean needRebuild;
    // private ObjectArray buckets = new ObjectArray();

    /**
     * Create a linear hash index for the given table, backed by its own
     * disk file ({@code <database>.<id>` + SUFFIX_HASH_FILE}).
     * If the backing storage is empty, the index is truncated and flagged
     * for rebuild from the table data.
     *
     * @param session the session creating the index
     * @param table the table this index belongs to
     * @param id the object id (also used as the storage id and file name part)
     * @param indexName the name of the index
     * @param columns the indexed columns
     * @param indexType the index type
     * @throws SQLException if the disk file can not be created or read
     */
    public LinearHashIndex(Session session, TableData table, int id, String indexName, IndexColumn[] columns, IndexType indexType)
            throws SQLException {
        super(table, id, indexName, columns, indexType);
        this.tableData = table;
        // TODO linear hash: currently, changes are not logged
        String name = database.getName()+"."+id+Constants.SUFFIX_HASH_FILE;
        diskFile = new DiskFile(database, name, "rw", false, false, Constants.DEFAULT_CACHE_SIZE_LINEAR_INDEX);
        diskFile.init();
        // each bucket occupies 4 blocks, minus the per-record overhead
        bucketSize = 4 * DiskFile.BLOCK_SIZE - diskFile.getRecordOverhead();
        blocksPerBucket = 4;
        // bucket 0 starts after the head record, which occupies the first blocks
        firstBucketBlock = 4;
        storage = database.getStorage(id, diskFile);
        // register this index as the reader so stored records can be deserialized
        storage.setReader(this);
        rowCount = table.getRowCount(session);
        int pos = storage.getNext(null);
        if (pos == -1) {
            // empty storage: initialize a fresh head and rebuild from the table
            truncate(session);
            needRebuild = true;
        } else {
            // first record in the storage is the persisted head (index metadata)
            head = (LinearHashHead) storage.getRecord(session, pos);
        }
    }

    /**
     * Persist the index metadata (bucket count, base size, next-to-split
     * pointer) by updating the head record in the storage.
     *
     * @param session the session
     * @throws SQLException if the record can not be written
     */
    void writeHeader(Session session) throws SQLException {
        storage.updateRecord(session, head);
    }

//    public String getString() throws Exception {
//        // TODO index / linear hash: debug code here
//        StringBuffer buff = new StringBuffer();
//        buff.append("buckets " + bucketCount);
//        int records = 0;
//        int chained = 0;
//        int foreign = 0;
//        int access = 0;
//        for (int i = 0; i < bucketCount; i++) {
//            LinearHashBucket bucket = getBucket(i);
//            if (bucket == null) {
//                throw Message.internal("bucket=" + i + " is empty");
//            }
//            if (bucket.getRecordSize() > RECORDS_PER_BUCKET) {
//                throw Message.internal(
//                    "bucket=" + i + " records=" + bucket.getRecordSize());
//            }
//            records += bucket.getRecordSize();
//            if (bucket.getNextBucket() != -1) {
//                chained++;
//            }
//            for (int j = 0; j < bucket.getRecordSize(); j++) {
//                LinearHashEntry record = bucket.getRecord(j);
//                if (record.home != i) {
//                    foreign++;
//                }
//                int oldReadCount = readCount;
//                get(record.key);
//                access += (readCount - oldReadCount);
//            }
//        }
//        buff.append(" records " + records);
//        buff.append(" utilization " + getUtilization());
//        buff.append(" access " + ((0.0 + access) / records));
//        buff.append(" chained " + chained);
//        buff.append(" foreign " + foreign);
//        if (TRACE) {
//            for (int i = 0; i < bucketCount; i++) {
//                LinearHashBucket bucket = getBucket(i);
//                int f = getForeignHome(i);
//                if (f >= 0) {
//                    buff.append(" from " + f);
//                }
//                buff.append(i);
//                buff.append(" next ");
//                buff.append(bucket.getNextBucket());
//                buff.append("{");
//                for (int j = 0; j < bucket.getRecordSize(); j++) {
//                    if (j > 0) {
//                        buff.append(", ");
//                    }
//                    LinearHashEntry r = bucket.getRecord(j);
//                    buff.append(r.key.toString());
//                    if (r.home != i && r.home != f) {
//                        throw new Exception(
//                            "MULTIPLE LINKS TO! " + buff.toString());
//                    }
//                }
//                buff.append("} ");
//            }
//        }
//        return buff.toString();
//
//    }

    /**
     * Add a key/value pair to the index. If the utilization is above the
     * split threshold, a bucket is split first. If the home bucket is full,
     * the record is placed in (or chained to) an overflow bucket; foreign
     * records already occupying the home bucket are moved out first.
     *
     * @param session the session
     * @param key the hash key (combined indexed column values)
     * @param value the data position of the row
     * @throws SQLException if a bucket can not be read or written
     */
    private void add(Session session, Value key, int value) throws SQLException {
        // trace.debug("add "+key.toString() + " " + value);
        if (getUtilization() >= UTILIZATION_FOR_SPLIT) {
            split(session);
        }
        int hash = key.hashCode();
        int home = getPos(hash);
        // index walks the overflow chain; home stays at the natural bucket
        int index = home;
        LinearHashEntry record = new LinearHashEntry();
        record.hash = hash;
        record.key = key;
        record.home = home;
        record.value = value;
        // candidate overflow bucket, excluding the home bucket itself
        int free = getNextFree(session, home);
        while (true) {

            LinearHashBucket bucket = getBucket(session, index);
            if (bucket.getRecordSize() < RECORDS_PER_BUCKET) {
                // found room in the current bucket of the chain
                addRecord(session, bucket, record);
                break;
            }
            // this bucket is full
            int foreign = getForeignHome(session, index);
            if (foreign >= 0 && foreign != home) {
                // move out foreign records - add this record - add foreign
                // records again
                ObjectArray old = new ObjectArray();
                moveOut(session, foreign, old);
                addRecord(session, bucket, record);
                addAll(session, old);
                break;
            }
            // there is already a link to next
            if (bucket.getNextBucket() > 0) {
                index = bucket.getNextBucket();
                continue;
            }

            int nextFree = getNextFree(session, free);
            if (nextFree < 0) {
                // no free bucket left to chain to: grow the table and retry
                // trace.debug("split because no chain " + head.bucketCount);
                split(session);
                add(session, key, value);
                break;
            }

            // it's possible that the bucket was removed from 
            // the cache (if searching for a bucket with space 
            // scanned many buckets)
            bucket = getBucket(session, index);

            // link the full bucket to the free one, then continue there
            bucket.setNext(session, free);
            free = nextFree;
            if (getForeignHome(session, free) >= 0) {
                throw Message.getInternalError("next already linked");
            }
            index = bucket.getNextBucket();
        }
    }

    /**
     * Find a bucket that still has room, holds no foreign records, and is
     * not the excluded bucket. Buckets are scanned from the highest id down.
     *
     * @param session the session
     * @param excluding the bucket id that must not be returned
     * @return the bucket id, or -1 if no suitable bucket exists
     * @throws SQLException if a bucket can not be read
     */
    private int getNextFree(Session session, int excluding) throws SQLException {
        int id = head.bucketCount - 1;
        while (id >= 0) {
            LinearHashBucket candidate = getBucket(session, id);
            if (candidate.getRecordSize() < RECORDS_PER_BUCKET
                    && getForeignHome(session, id) < 0 && id != excluding) {
                return id;
            }
            id--;
        }
        return -1;
    }

    /**
     * Check whether the given bucket contains records that belong to another
     * (home) bucket, i.e. this bucket is used as an overflow bucket.
     *
     * @param session the session
     * @param bucketId the bucket to inspect
     * @return the home bucket id of the first foreign record, or -1 if all
     *         records are home records
     * @throws SQLException if the bucket can not be read
     */
    private int getForeignHome(Session session, int bucketId) throws SQLException {
        LinearHashBucket bucket = getBucket(session, bucketId);
        int size = bucket.getRecordSize();
        for (int idx = 0; idx < size; idx++) {
            int home = bucket.getRecord(idx).home;
            if (home != bucketId) {
                return home;
            }
        }
        return -1;
    }

    /**
     * Map a key hash code to a bucket position using the linear hashing
     * scheme: positions below the current bucket count use the doubled
     * table size, others fall back to the base size.
     *
     * Fix: Math.abs(Integer.MIN_VALUE) is still Integer.MIN_VALUE (two's
     * complement), so the scrambled hash could remain negative and the
     * modulo would then yield a negative bucket position. Clamp that single
     * pathological value to 0; all other inputs map exactly as before, so
     * existing on-disk indexes stay valid.
     *
     * @param hash the hash code of the key
     * @return the bucket position (0 &lt;= position &lt; bucketCount)
     */
    public int getPos(int hash) {
        // supplemental scrambling to spread poor hash codes
        hash = Math.abs((hash << 7) - hash + (hash >>> 9) + (hash >>> 17));
        if (hash < 0) {
            // only possible for Integer.MIN_VALUE
            hash = 0;
        }
        int pos = hash % (head.baseSize + head.baseSize);
        int len = head.bucketCount;
        return pos < len ? pos : (pos % head.baseSize);
    }

    /**
     * Split the bucket pointed to by nextToSplit: its records (including
     * chained ones) are taken out, a new bucket is appended, and the records
     * are re-inserted so they redistribute over the grown table. When every
     * bucket of the current round has been split, the base size doubles and
     * the split pointer wraps back to 0.
     *
     * @param session the session
     * @throws SQLException if a bucket can not be read or written
     */
    private void split(Session session) throws SQLException {
        // trace.debug("split " + head.nextToSplit);
        ObjectArray moved = new ObjectArray();
        moveOut(session, head.nextToSplit, moved);
        // advance the split pointer; wrap when a full doubling completes
        head.nextToSplit++;
        if (head.nextToSplit >= head.baseSize) {
            head.baseSize += head.baseSize;
            head.nextToSplit = 0;
        }
        addBucket(session);
        addAll(session, moved);
    }

    /**
     * Re-insert all entries from the given array into the index.
     *
     * @param session the session
     * @param records the entries to insert (LinearHashEntry elements)
     * @throws SQLException if an entry can not be added
     */
    private void addAll(Session session, ObjectArray records) throws SQLException {
        int count = records.size();
        for (int idx = 0; idx < count; idx++) {
            LinearHashEntry entry = (LinearHashEntry) records.get(idx);
            add(session, entry.key, entry.value);
        }
    }

    // moves all records of a bucket to the array (including chained)
    private void moveOut(Session session, int home, ObjectArray storeIn) throws SQLException {
        LinearHashBucket bucket = getBucket(session, home);
        int foreign = getForeignHome(session, home);
        while (true) {
            for (int i = 0; i < bucket.getRecordSize(); i++) {
                LinearHashEntry r = bucket.getRecord(i);
                if (r.home == home) {
                    storeIn.add(r);
                    removeRecord(session, bucket, i);
                    i--;
                }
            }
            if (foreign >= 0) {
                // this block contains foreign records
                // and therefore all home records have been found
                // (and it would be an error to set next to -1)
                moveOut(session, foreign, storeIn);
                if (SysProperties.CHECK && getBucket(session, foreign).getNextBucket() != -1) {
                    throw Message.getInternalError("moveOut " + foreign);
                }

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -