⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 indexmanager.java

📁 Java的面向对象数据库系统的源代码
💻 JAVA
📖 第 1 页 / 共 2 页
字号:
// You can redistribute this software and/or modify it under the terms of// the Ozone Core License version 1 published by ozone-db.org.//// Copyright (C) 2003-@year@, Leo Mekenkamp. All rights reserved.//// $Id: IndexManager.java,v 1.8 2004/04/06 18:39:39 leomekenkamp Exp $package org.ozoneDB.core.storage.gammaStore;import java.io.File;import java.io.FileInputStream;import java.io.FileOutputStream;import java.io.IOException;import java.io.ObjectInputStream;import java.io.ObjectOutputStream;import java.util.Collection;import java.util.Iterator;import java.util.LinkedList;import java.util.Properties;import java.util.logging.Level;import java.util.logging.Logger;import org.ozoneDB.OzoneInternalException;import org.ozoneDB.core.ConfigurationException;import org.ozoneDB.core.ServerComponent;import org.ozoneDB.core.storage.Cache;import org.ozoneDB.core.storage.PropertyConfigurable;import org.ozoneDB.core.storage.PropertyConfigurableFactory;import org.ozoneDB.core.storage.PropertyInfo;import org.ozoneDB.core.storage.TrimmingCache;import org.ozoneDB.core.storage.WeakReferenceCache;/** * Takes care of storing and caching all object locations. Because of the nature * of generated object ids (a newly generated id is always one bigger than the * id generated before) this class is very fast when adding new ids, but a bit * slow when inserting ids in random order. Random order insertion will only * take place during a crash recovery. * * @author <a href="mailto:leoATmekenkampD0Tcom">Leo Mekenkamp (mind the anti sp@m)</a> * @version $Id: IndexManager.java,v 1.8 2004/04/06 18:39:39 leomekenkamp Exp $ */public final class IndexManager implements PropertyConfigurable {        private static Logger log = Logger.getLogger(IndexManager.class.getName());    public static final int MINMAXBRANCHNODESIZE = 3;        public static final int MINMAXLEAFNODESIZE = 3;    public static final PropertyInfo INDEXSTREAMFACTORY = new PropertyInfo(        ".indexStreamFactory",        "String (classname)",        null,        "factory to use to insert extra streams while (de)serializing index nodes",        new String[] {            "org.ozoneDB.core.gammaStore.ZipStreamFactory",            "org.ozoneDB.core.gammaStore.GZIPStreamFactory",        }    );    public static final PropertyInfo DIRTYINDEXNODECACHE = new PropertyInfo(        ".dirtyIndexNodeCache",        "String (classname, must implement org.ozoneDB.core.storage.TrimmingCache)",        null,        "cache for caching index nodes that have to be written to disk",        new String[] {"org.ozoneDB.core.gammaStore.FixedSizeDelayCache"}    );    public static final PropertyInfo GENERALINDEXNODECACHE = new PropertyInfo(        ".generalIndexNodeCache",        "String (classname, must implement org.ozoneDB.core.storage.TrimmingCache)",        null,        "cache for caching index nodes, both changed and unchanched",        new String[] {"org.ozoneDB.core.gammaStore.FixedSizeCache"}    );    public static final PropertyInfo MAXBRANCHNODESIZE = new PropertyInfo(        ".maxBranchNodeSize",        "int",        "474",        "Maximum number of index nodes in a branch node. Raw size of a branch node " +        "(serialized, without compression) is x + 537 + n * 16 with x = 0 for " +        "n <= 32, x = 4 for 32 < n <= 64, x = 8 for 64 < n <= 128, etc., " +        "where n is the maximum number of index nodes.",        new String[] {            "93 (files just under 2K)",            "226 (files just under 4K)",            "474 (files just under 8K)",            "982 (files just under 16K)",            "1998 (files just under 32K)",        }    );    public static final PropertyInfo BRANCHNODEMERGEREACH = new PropertyInfo(        ".branchNodeMergeReach",        "int",        "2",        "When a branch node becomes too small and wants to merge with other " +        "branches, this value determines how 'far' a node should look for other " +        "nodes to merge with. Say we have branch nodes 1 to 10 and 5 wants to " +        "merge and this value is 2, then it will try to merge with 3, 4, 6 and 7.",        new String[] { "1", "2" }    );    public static final PropertyInfo BRANCHNODEMERGESIZE = new PropertyInfo(        ".branchNodeMergeSize",        "int",        "300",        "When a child node is removed from a branch and the size of this branch " +        "is below this value, then the branch tries to merge with other branches." +        "Must be smaller than " + MAXBRANCHNODESIZE.getKey() + ". See also " +        BRANCHNODEMERGEREACH.getKey() + ".",         new String[] { "1", "100", "250" }    );    public static final PropertyInfo MAXLEAFNODESIZE = new PropertyInfo(        ".maxLeafNodeSize",        "int",        "100",        "Maximum number of locations in a leaf node. Raw size of a leaf node " +        "(serialized, without compression) is x + 598 + n * 20 with x = 0 for " +        "n <= 32, x = 4 for 32 < n <= 64, x = 8 for 64 < n <= 128, etc., " +        "where n is the maximum number of locations.",         new String[] {            "72 (files just under 2K)",            "179 (files just under 4K)",            "377 (files just under 8K)",            "784 (files just under 16K)",            "1598 (files just under 32K)",        }    );    public static final PropertyInfo LEAFNODEMERGEREACH = new PropertyInfo(        ".leafNodeMergeReach",        "int",        "2",        "When a leaf node becomes too small and wants to merge with other " +        "leaves, this value determines how 'far' a node should look for other " +        "nodes to merge with. Say we have leaf nodes 1 to 10 and 5 wants to " +        "merge and this value is 2, then it will try to merge with 3, 4, 6 and 7.",        new String[] { "1", "2" }    );    public static final PropertyInfo LEAFNODEMERGESIZE = new PropertyInfo(        ".leafNodeMergeSize",        "int",        "250",        "When a container location is removed from a leaf and the size of this leaf " +        "is below this value, then the leaf tries to merge with other leaves." +        "Must be smaller than " + MAXLEAFNODESIZE.getKey() + ". See also " +        LEAFNODEMERGEREACH.getKey() + ".",         new String[] { "10", "100" }    );    public static final PropertyInfo INDEXNODESTORAGEFACTORY = new PropertyInfo(        ".indexNodeStorageFactory",        "String (classname)",        null,        "factory to use to create Storage instances for reading/writing index nodes",        new String[] {"org.ozoneDB.code.gammaStore.FileStreamStorageFactory"}    );    public static final PropertyInfo INDEXNODESTREAMFACTORY = new PropertyInfo(        ".indexNodeStreamFactory",        "String (classname)",        "",        "factory to use to create java.io.[In|Out]putStream instances that are " +        "\"plugged in\" during the (de)serializing of index nodes",        new String[] {"org.ozoneDB.code.gammaStore.ZipStreamStorageFactory"}    );    /**     * (file)name to store / read configuration at shutdown / startup     */    private static final String CONFIGNAME = "indexmanager";        /**     * provides fast access for when new objects are created     */    private IndexLeafNode newestLeafNode;        /**     * entry point for the tree containing all index nodes     */    private IndexBranchNode rootNode;        /**     * for generating unique ids; -1 so the first returned id will be 0     */    private long nodeIdCounter = -1;        /**     * factory needed for swapping index nodes in and out     */    private StorageFactory storageFactory;    /**     * factory needed for extra streams during (de)serialization     */    private StreamFactory streamFactory;    /**     * holds all dirty nodes ("You dirty little nodes, you...")     */    private TrimmingCache dirtyNodeCache;        /**     * holds most index nodes     */    private TrimmingCache generalNodeCache;        /**     * makes sure we have access to nodes that have been thrown out of the other     * caches but are still strongly referenced somwhere     */    private WeakReferenceCache backupNodeCache;        private int maxLeafNodeSize;        private int leafNodeMergeSize;        private int leafNodeMergeReach;        private int maxBranchNodeSize;        private int branchNodeMergeSize;        private int branchNodeMergeReach;        private Serializer nodeSerializer;        /**     * takes care of deleting index nodes that have become empty and have     * been removed from the indexmanager     */    private Deleter deleter;        private transient long nodeLoaded;        private transient long nodeLoadedDirect;        private transient long nodeLoadedSerializer;    private transient long nodeLoadedCache;        private transient long nodeLoadedDisk;        private String dbDirectory;        private long size;        private String prefix;        /**     * initialize if <code>true</code> preform a full (re)initialization,     * if <code>false</code>, tries to read config.     * @param properties     * @param prefix     * @param initialize     * TODO: refactor with the new java.util.concurrent package in the 1.5 JDK     * to make fully reentrant     */    public IndexManager(Properties properties, String prefix, boolean initialize) throws IOException {                this.prefix = prefix;        backupNodeCache = new WeakReferenceCache();                setStorageFactory((StorageFactory) PropertyConfigurableFactory.create(StorageFactory.class, properties, getPrefix() + INDEXNODESTORAGEFACTORY.getKey()));        String streamFactoryClassname = properties.getProperty(getPrefix() + INDEXNODESTREAMFACTORY.getKey(), INDEXNODESTREAMFACTORY.getDefaultValue());        if (streamFactoryClassname.length() > 0) {            setStreamFactory((StreamFactory) PropertyConfigurableFactory.create(StreamFactory.class, properties, getPrefix() + INDEXNODESTREAMFACTORY.getKey()));        }        setNodeSerializer(new Serializer(getStorageFactory(), getStreamFactory(), "NodeSerializer"));                deleter = new Deleter("node storage deleter");        setDirtyNodeCache((TrimmingCache) PropertyConfigurableFactory.create(Cache.class, properties, getPrefix() + DIRTYINDEXNODECACHE.getKey()));        getDirtyNodeCache().setSynchronizer(this);        getDirtyNodeCache().setTrimHandler(new TrimmingCache.TrimHandler() {            public void trimming(Object key, Object value) {                IndexNode indexNode = (IndexNode) value;                if (log.isLoggable(Level.FINE)) log.fine("indexnode " + indexNode.getNodeId() + " is trimmed from dirty cache, going to serializer");                getGeneralNodeCache().put(new Long(indexNode.getNodeId()), indexNode);                serialize(indexNode);                if (log.isLoggable(Level.FINER)) log.finer("put in serializer: " + indexNode.getNodeId());            }        });                setGeneralNodeCache((TrimmingCache) PropertyConfigurableFactory.create(Cache.class, properties, getPrefix() + GENERALINDEXNODECACHE.getKey()));        getGeneralNodeCache().setSynchronizer(this);        getGeneralNodeCache().setTrimHandler(new TrimmingCache.TrimHandler() {            public void trimming(Object key, Object value) {                IndexNode indexNode = (IndexNode) value;if (indexNode.isDirty() && getDirtyNodeCache().get(new Long(indexNode.getNodeId())) == null) {    IndexNode serializing = (IndexNode) getNodeSerializer().remove(new Long(indexNode.getNodeId()));    if (serializing == null) {        log.severe("WTF? not in dirty and not serializing? " + indexNode.getNodeId());    }}                getBackupNodeCache().put(new Long(indexNode.getNodeId()), indexNode);                if (log.isLoggable(Level.FINE)) log.fine("indexnode " + indexNode.getNodeId() + " is trimmed from general cache, going to backup cache");            }        });                try {            String num = properties.getProperty(MAXBRANCHNODESIZE.getKey(), MAXBRANCHNODESIZE.getDefaultValue());            setMaxBranchNodeSize(Integer.parseInt(num));            num = properties.getProperty(MAXLEAFNODESIZE.getKey(), MAXLEAFNODESIZE.getDefaultValue());            setMaxLeafNodeSize(Integer.parseInt(num));        } catch (NumberFormatException e) {            throw new ConfigurationException(e);        }        dbDirectory = properties.getProperty(GammaStore.DIRECTORY.getKey());        if (initialize) {            log.info("deleting all files in index directory");            getStorageFactory().deleteAll();                        // always start with one branch node and one leaf node; that way we            // can assume that rootNode as well as newestLeafNode are both never            // null.            IndexBranchNode branchNode = new IndexBranchNode(this);            setRootNode(branchNode);            IndexLeafNode leafNode = new IndexLeafNode(this);            setNewestLeafNode(leafNode);            branchNode.putChildNode(leafNode);        } else {            ObjectInputStream config = new ObjectInputStream(new FileInputStream(new File(dbDirectory + File.separator + CONFIGNAME)));            setSize(config.readLong());            setNodeIdCounter(config.readLong());            if (log.isLoggable(Level.FINER)) log.finest("read nodeIdCounter: " + getNodeIdCounter());            long nodeId = config.readLong();            if (log.isLoggable(Level.FINER)) log.finest("read root nodeId: " + nodeId);            setRootNode((IndexBranchNode) loadNode(nodeId));            nodeId = config.readLong();            if (log.isLoggable(Level.FINER)) log.finest("read newest leaf nodeId: " + nodeId);            setNewestLeafNode((IndexLeafNode) loadNode(nodeId));        }    }        /**     * Shuts the indexmanager down; writes all data needed for the constructor     * when the <code>initialize</code> parameter is <code>false</code>     */    public void shutdown() throws IOException {        if (log.isLoggable(Level.INFO)) log.info("IndexManager shutting down");        if (log.isLoggable(Level.INFO)) log.info("nodes loaded: " + nodeLoaded + "; direct: " + nodeLoadedDirect + ", from serializer: " + nodeLoadedSerializer + ", from cache: " + nodeLoadedCache + ", from disk: " + nodeLoadedDisk);        ObjectOutputStream config = new ObjectOutputStream(new FileOutputStream(new File(dbDirectory + File.separator + CONFIGNAME)));//        Storage config = getStorageFactory().createStorage(CONFIGNAME);        config.writeLong(getSize());        config.writeLong(getNodeIdCounter());        config.writeLong(getRootNode().getNodeId());        config.writeLong(getNewestLeafNode().getNodeId());        config.close();        if (log.isLoggable(Level.INFO)) log.info("IndexManager has " + getGeneralNodeCache().size() + " cached index nodes, " + getDirtyNodeCache().size() + " are dirty");        for(Iterator i = getDirtyNodeCache().copyToMap().values().iterator(); i.hasNext(); ) {            IndexNode indexNode = (IndexNode) i.next();            serialize(indexNode);        }        IndexNode n = getRootNode();        if (n.isDirty()) {            serialize(n);        }        n.endInvoke();        n = getNewestLeafNode();        if (n.isDirty()) {            serialize(n);        }        n.endInvoke();        if(log.isLoggable(Level.INFO)) log.info("serializer has " + getNodeSerializer().size() + " nodes to serialize");        getNodeSerializer().stopWhenReady();        if (log.isLoggable(Level.INFO)) log.info("IndexManager has shut down");    }                /**      * Returns the specified index node from the cache, or from disk. The node

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -