⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 wizardstore.java

📁 Java的面向对象数据库系统的源代码
💻 JAVA
📖 第 1 页 / 共 3 页
字号:
// You can redistribute this software and/or modify it under the terms of// the Ozone Core License version 1 published by ozone-db.org.//// The original code and portions created by SMB are// Copyright (C) 1997-@year@ by SMB GmbH. All rights reserved.//// $Id: WizardStore.java,v 1.6 2004/01/26 00:57:30 wieslawf Exp $package org.ozoneDB.core.storage.wizardStore;import java.io.*;import org.ozoneDB.DxLib.*;import org.ozoneDB.*;import org.ozoneDB.io.stream.ResolvingObjectInputStream;import org.ozoneDB.core.*;import org.ozoneDB.core.storage.wizardStore.*;import org.ozoneDB.core.storage.wizardStore.WizardCluster;import org.ozoneDB.core.storage.wizardStore.ClusterStore;import org.ozoneDB.core.storage.wizardStore.IDTable;import org.ozoneDB.core.storage.wizardStore.IDTableChange;import org.ozoneDB.core.storage.wizardStore.NameTableChange;import org.ozoneDB.core.storage.wizardStore.WizardObjectContainer;import org.ozoneDB.core.storage.ClusterID;import org.ozoneDB.core.storage.StorageObjectContainer;import org.ozoneDB.util.LogWriter;/** * @author <a href="http://www.softwarebuero.de/">SMB</a> * @author <a href="http://www.medium.net/">Medium.net</a> * @version $Revision: 1.6 $Date: 2004/01/26 00:57:30 $ */public final class WizardStore        extends ServerComponent        implements StoreManager {    protected final static String ID_TABLE_NAME = "idTable.wizard";    protected final static String NAME_TABLE_NAME = "nameTable.wizard";    protected final static String COMMIT_FLAG_NAME = "commitflag.wizard";    /**     * Maps ObjectIDs to ClusterIDs     */    protected DxMap idTable;    /**     * Maps names to ObjectIDs     */    protected DxMap nameTable;    protected ClusterStore clusterStore;    /**     The garbage collector. 
* It should be notified in the event
     * <UL>
     * <LI>that a formerly unnamed object receives a name.</LI>
     * <LI>that an object is freshly created</LI>
     * </UL>
     */
    protected GarbageCollector garbageCollector;

    public WizardStore(Env env) {
        super(env);
    }

    /**
     * (Re-)initializes the store: creates the disk-backed ID table, the
     * in-memory name table and the cluster store.
     *
     * @param _env server environment; supplies the configuration, the
     *             database directory and the garbage collector.
     */
    public synchronized void init(Env _env) {
        env = _env;
        // Table geometry comes from the server configuration; -1 means "use the built-in default".
        int idTableBufferSize = env.config.intProperty(Setup.WS_TABLE_BUFF_SIZE, -1);
        int idTableCacheSize = env.config.intProperty(Setup.WS_TABLE_CACHE_SIZE, -1);
        int[] idTableSubtableSize = env.config.intArrayProperty(Setup.WS_TABLE_SUBTABLE_SIZE, new int[]{-1});
        idTable = new IDTable(env.getDatabaseDir() + "ostab" + File.separator + "tab", idTableBufferSize, idTableCacheSize, idTableSubtableSize);
        // idTable = new DxHashMap (10000);
        nameTable = new DxHashMap(100);
        clusterStore = new ClusterStore(_env);
        this.garbageCollector = env.getGarbageCollector();
    }

    /**
     * Starts the store. After a clean shutdown the name and ID tables are
     * restored from their serialized snapshots; otherwise (or if reading a
     * snapshot fails) they are rebuilt from the clusters via recover().
     *
     * @throws Exception if the cluster store cannot be started or recovery fails.
     */
    public synchronized void startup() throws Exception {
        env.logWriter.newEntry(this, "startup...", LogWriter.INFO);
        clusterStore.startup();
        env.logWriter.newEntry(this, "checking for pending shadow clusters...", LogWriter.INFO);
        boolean isCleanShutdown = isCleanShutdown() && clusterStore.isCleanShutdown();
        boolean isSuccessfullyStarted = false;
        if (isCleanShutdown) {
            ObjectInputStream nameTableIn = null;
            ObjectInputStream idTableIn = null;
            try {
                // restore nameTable: snapshot layout is count, then (object, key) pairs
                nameTableIn = new ResolvingObjectInputStream(new FileInputStream(env.getDatabaseDir() + NAME_TABLE_NAME));
                int count = nameTableIn.readInt();
                for (int i = 0; i < count; i++) {
                    nameTable.addForKey(nameTableIn.readObject(), nameTableIn.readObject());
                }
                nameTableIn.close();
                // restore idTable
                if (!(idTable instanceof DxDiskHashMap)) {
// (continuation of startup(): restore the idTable snapshot written by commitIDTable())
                    idTableIn = new ResolvingObjectInputStream(new FileInputStream(env.getDatabaseDir() + ID_TABLE_NAME));
                    count = idTableIn.readInt();
                    for (int i = 0; i < count; i++) {
                        idTable.addForKey(idTableIn.readObject(), idTableIn.readObject());
                    }
                    idTableIn.close();
                } else {
                    // Disk-backed table: re-attach to its own files instead of reading a snapshot.
                    ((DxDiskHashMap) idTable).re_use();
                    ((DxDiskHashMap) idTable).setReusable(true);
                }
                isSuccessfullyStarted = true;
            } catch (FileNotFoundException fe) {
                // Missing snapshot is expected (e.g. first start) - fall through to recover().
                env.logWriter.newEntry(this, "    " + fe.toString(), LogWriter.INFO);
            } catch (Exception e) {
                env.logWriter.newEntry(this, "    error while starting up... ", LogWriter.INFO);
                env.logWriter.newEntry(this, "    exception: ", e, LogWriter.DEBUG);
            } finally {
                if (nameTableIn != null) {
                    nameTableIn.close();
                }
                if (idTableIn != null) {
                    idTableIn.close();
                }
            }
        }
        if (!isCleanShutdown || !isSuccessfullyStarted) {
            env.logWriter.newEntry(this, "    recovering...", LogWriter.INFO);
            recover();
        }
        env.logWriter.newEntry(this, "    " + idTable.count() + " IDs, " + nameTable.count() + " name(s))",
                               LogWriter.INFO);
    }

    /**
     * Shuts the store down: closes the cluster store and writes the name and
     * ID table snapshots so the next startup() can skip recovery.
     */
    public synchronized void shutdown() throws Exception {
        env.logWriter.newEntry(this, "shutdown...", LogWriter.INFO);
        clusterStore.shutdown();
        commitNameTable();
        commitIDTable();
        if (idTable instanceof DxDiskHashMap) {
            ((DxDiskHashMap) idTable).printStatistics();
            ((DxDiskHashMap) idTable).close();
        }
    }

    /** Intentionally a no-op; this store persists only on shutdown/commit. */
    public void save() throws Exception {
    }

    /** @return an iterator over all ObjectIDs known to this store. */
    public DxIterator objectIDIterator() {
        return idTable.iterator();
    }

    /**
     * Serializes the name table to NAME_TABLE_NAME; on failure the partial
     * file is deleted so the next startup() falls back to recovery.
     */
    protected void
commitNameTable() throws IOException {
        env.logWriter.newEntry(this, "commitNameTable...", LogWriter.DEBUG3);
        String filename = env.getDatabaseDir() + NAME_TABLE_NAME;
        ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(filename));
        try {
            out.writeInt(nameTable.count());
            DxIterator it = nameTable.iterator();
            while (it.next() != null) {
                // (object, key) pair order matches the reader loop in startup().
                out.writeObject(it.object());
                out.writeObject(it.key());
            }
        } catch (Exception e) {
            // NOTE(review): the exception is swallowed; deleting the half-written
            // snapshot forces the next startup() into recover() instead.
            new File(filename).delete();
        } finally {
            out.close();
        }
    }

    /**
     * Persists the ID table. A plain in-memory map is serialized to
     * ID_TABLE_NAME (same count-then-pairs layout as commitNameTable());
     * the disk-backed IDTable instead flushes its dirty sub-tables to its
     * own files.
     */
    protected void commitIDTable() throws IOException {
        env.logWriter.newEntry(this, "commitIDTable...", LogWriter.DEBUG3);
        if (!(idTable instanceof DxDiskHashMap)) {
            String filename = env.getDatabaseDir() + ID_TABLE_NAME;
            ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(filename));
            try {
                out.writeInt(idTable.count());
                DxIterator it = idTable.iterator();
                while (it.next() != null) {
                    out.writeObject(it.object());
                    out.writeObject(it.key());
                }
            } catch (Exception e) {
                // NOTE(review): swallowed like in commitNameTable(); partial file removed.
                new File(filename).delete();
            } finally {
                out.close();
            }
        } else {
            // NOTE(review): setReusable(false/true) brackets the flush - the exact
            // semantics live in IDTable, which is not visible in this chunk.
            ((IDTable) idTable).setReusable(false);
            ((IDTable) idTable).writeDirtyTables();
            ((IDTable) idTable).setReusable(true);
        }
    }

    /**
     * Fill idTable and nameTable from the information that are stored in the
     * clusters directly.
     *
     * @throws java.lang.Exception If a cluster cannot be read.
*/
    protected synchronized void recover() throws Exception {
        env.logWriter.newEntry(this, "    cleaning ID table...", LogWriter.INFO);
        // NOTE(review): this cast assumes idTable is the disk-backed IDTable created
        // in init(); the commented-out DxHashMap alternative there would fail here.
        ((DxDiskHashMap) idTable).cleanFiles();
        (idTable).clear();
        env.logWriter.newEntry(this, "    cleaning name table...", LogWriter.INFO);
        nameTable.clear();
        DxSet cids = clusterStore.recoverClusterIDs();
        int totalNumClusters = cids.count();
        int processedNumClusters = 0;
        DxIterator it = cids.iterator();
        while (it.next() != null) {
            ClusterID cid = (ClusterID) it.object();
            org.ozoneDB.core.storage.Cluster cluster = null;
            boolean exceptionWhileLoading = false;
            env.logWriter.newEntry(this, "    cluster: " + cid + " (" + ++processedNumClusters + " of " + totalNumClusters + ")", LogWriter.INFO);
            try {
                cluster = clusterStore.restoreCluster(cid);
                env.logWriter.newEntry(this, "        " + cluster.containers().count() + " containers", LogWriter.INFO);
            } catch (Exception e) {
                env.logWriter.newEntry(this, "exception while loading cluster: " + cid + " (" + e + ")", LogWriter.WARN);
                env.logWriter.newEntry(this, "", e, LogWriter.DEBUG);
                exceptionWhileLoading = true;
            }
            // Short-circuit keeps this null-safe: containers() is only reached
            // when restoreCluster() succeeded, i.e. cluster != null.
            if (exceptionWhileLoading || cluster.containers().isEmpty()) {
                if (exceptionWhileLoading) {
                    env.logWriter.newEntry(this, "        cluster is unable to read - should be deleted!", LogWriter.INFO);
                } else {
                    env.logWriter.newEntry(this, "        cluster is empty          - should be deleted!", LogWriter.INFO);
                }
                if (cluster != null) {
                    env.logWriter.newEntry(this, "        try to delete cluster...", LogWriter.INFO);
                    cluster.delete();
                }
            } else {
                // fill in idTable and nameTable from the containers of this cluster
                DxIterator it2 = cluster.containers().iterator();
                StorageObjectContainer container;
                while ((container = (StorageObjectContainer) it2.next()) != null) {
                    if (env.logWriter.hasTarget(LogWriter.DEBUG)) {
                        if (container.id().value()==1) {
                            env.logWriter.newEntry(this, "Adding container "+container+".", LogWriter.DEBUG);
                        }
                    }
                    // A container ID already registered means two clusters claim the same object.
                    if (idTable.addForKey(cluster.clusterID(), container.id()) == false) {
                        throw new IllegalStateException("Unable to add container "+container+" to ID table because cluster ID "+idTable.elementForKey(container.id())+" is already registered for container ID "+container.id()+".");
                    }
                    if (container.name() != null) {
                        env.logWriter.newEntry(this, "        adding name: " + container.name(), LogWriter.INFO);
                        if (nameTable.addForKey(container.id(), container.name()) == false) {
                            throw new IllegalStateException("Unable to add name to name table: " + container.name());
                        }
                    }
                }
            }
            clusterStore.unloadCluster(cid, false);
        }
        // Persist the rebuilt tables immediately.
        commitIDTable();
        commitNameTable();

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -