
📄 ClusterStore.java

📁 Source code of an object-oriented database system written in Java
💻 JAVA
📖 Page 1 of 3
// You can redistribute this software and/or modify it under the terms of
// the Ozone Core License version 1 published by ozone-db.org.
//
// The original code and portions created by SMB are
// Copyright (C) 1997-@year@ by SMB GmbH. All rights reserved.
//
// $Id: ClusterStore.java,v 1.11 2004/03/10 14:59:48 leomekenkamp Exp $

package org.ozoneDB.core.storage.magicStore;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Set;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import org.ozoneDB.Setup;
import org.ozoneDB.DxLib.DxHashMap;
import org.ozoneDB.DxLib.DxHashSet;
import org.ozoneDB.DxLib.DxIterator;
import org.ozoneDB.DxLib.DxMap;
import org.ozoneDB.core.Env;
import org.ozoneDB.core.Lock;
import org.ozoneDB.core.MROWLock;
import org.ozoneDB.core.Permissions;
import org.ozoneDB.core.Transaction;
import org.ozoneDB.core.TransactionID;
import org.ozoneDB.core.storage.AbstractClusterStore;
import org.ozoneDB.core.storage.Cache;
import org.ozoneDB.core.storage.Cluster;
import org.ozoneDB.core.storage.ClusterID;
import org.ozoneDB.core.storage.StorageObjectContainer;
import org.ozoneDB.core.storage.SoftReferenceCache;
import org.ozoneDB.io.stream.ResolvingObjectInputStream;
import org.ozoneDB.util.LogWriter;

/**
 * The ClusterStore is the back-end store of the magicStore. It maintains the
 * cluster cache, activation/passivation and the actual persistent commits.
 *
 * @author <a href="http://www.softwarebuero.de/">SMB</a>
 * @author <a href="http://www.medium.net/">Medium.net</a>
 * @author Leo Mekenkamp
 * @author Per Nyfelt
 * @version $Revision: 1.11 $Date: 2004/03/10 14:59:48 $
 */
public final class ClusterStore extends AbstractClusterStore {

    // note: change splitClusterName() when you change POSTFIX_SEPARATOR
    final static String POSTFIX_NEW = POSTFIX_SEPARATOR + "new";

    private final static String POSTFIX_OLD = POSTFIX_SEPARATOR + "old";

    protected final static int compressionFactor = 3;

//    protected DxMap cachedClusters;
    private transient Cache clusterCache;

    protected int maxClusterSize = 64 * 1024;

    /**
     * Table that maps Permissions to ClusterIDs.
     */
    protected DxMap growingClusterIDs;

    private boolean compressClusters;

    private MagicStore magicStore;


    ClusterStore(Env _env) {
        super(_env);
        maxClusterSize = env.config.intProperty(Setup.WS_CLUSTER_SIZE, -1);
//        // TODO: pass properties from config to ctor
        clusterCache = new SoftReferenceCache();
        compressClusters = env.config.booleanProperty(Setup.WS_COMPRESS_CLUSTERS, true);
    }


    MagicStore getMagicStore() {
        return magicStore;
    }


    void setMagicStore(MagicStore _magicStore) {
        this.magicStore = _magicStore;
    }


    public void startup() throws Exception {
        growingClusterIDs = new DxHashMap(32);
    }


    public void shutdown() {
    }


    /**
     * Check if the ClusterStore was cleanly shut down.
     */
    public boolean isCleanShutdown() {
        File file = new File(env.getDatabaseDir() + Env.DATA_DIR);
        String[] fileList = file.list();
        for (int i = 0; i < fileList.length; i++) {
            if (fileList[i].endsWith(POSTFIX_NEW) || fileList[i].endsWith(POSTFIX_OLD)) {
                return false;
            }
        }
        return true;
    }


    /**
     * Search the DATA dir and recover all ClusterIDs.
     */
    public Set recoverClusterIDs() {
        File file = new File(env.getDatabaseDir() + Env.DATA_DIR);
        String[] fileList = file.list();
        Set result = new HashSet();
        for (int i = 0; i < fileList.length; i++) {
            if (fileList[i].endsWith(POSTFIX_CLUSTER) || fileList[i].endsWith(POSTFIX_NEW)
                    || fileList[i].endsWith(POSTFIX_OLD)) {
                String cidString = fileList[i].substring(0, fileList[i].indexOf(POSTFIX_SEPARATOR));
                long cid = Long.parseLong(cidString);
                result.add(new ClusterID(cid));
            }
        }
        return result;
    }


    /**
     * Returns a set containing the IDs of all tx-es that for one reason or
     * another have not committed.
     */
    Set uncommittedTaIDs() {
        File file = new File(env.getDatabaseDir() + Env.DATA_DIR);
        File[] fileList = file.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String name) {
                return name.endsWith(POSTFIX_NEW);
            }
        });
        Set result = new HashSet();
        for (int i = 0; i < fileList.length; i++) {
            String[] parts = splitClusterName(fileList[i].getName());
            long taID = Long.parseLong(parts[parts.length - 2]);
            result.add(new TransactionID(taID));
        }
        return result;
    }


    private static String[] splitClusterName(String clusterName) {
        // need to escape the regexp escape char; need escape char
        // because '.' is a regexp special char
        return clusterName.split("\\" + POSTFIX_SEPARATOR);
    }


    public long currentCacheSize() {
        return clusterCache.size();
    }


    public int currentBytesPerContainer() {
        int result = env.config.intProperty(Setup.WS_CLUSTER_SIZE_RATIO, 256);
//      env.logWriter.newEntry( this, "currentBytesPerContainer(): setup:" + result, LogWriter.DEBUG );
        return result;
    }


    /**
     * @param perms Permissions of the cluster to search.
     * @return MagicCluster with the specified permissions that is good to store a
     * new container in it.
     */
    protected synchronized Cluster growingCluster(Permissions perms, MagicTransaction ta) throws Exception {
        if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this, "growingCluster() ", LogWriter.DEBUG3);
        }

        Cluster cluster = null;
        ClusterID cid = (ClusterID) growingClusterIDs.elementForKey(perms);

        // load the current growing cluster and check space
        if (cid != null) {
            cluster = (Cluster) clusterCache.get(cid);
            if (cluster == null) {
                cluster = loadCluster(cid, ta);
            }

            // check cluster size and if it was deactivated by the trimCache();
            // use this cluster only if it isn't used by another ta
            if (cluster.lock() == null || cluster.size() >= maxClusterSize ||
                    cluster.lock().level(null) > Lock.LEVEL_NONE && !cluster.lock().isAcquiredBy(env.transactionManager.currentTA())) {
                if (env.logWriter.hasTarget(LogWriter.DEBUG1)) {
                    env.logWriter.newEntry(this,
                            "growingCluster(): growing cluster not usable: cid=" + cluster.clusterID() + " size=" + cluster.size() + " lockLevel=" +
                            (cluster.lock() != null ? String.valueOf(cluster.lock().level(null)) : "null"),
                            LogWriter.DEBUG1);
                }
                growingClusterIDs.removeForKey(perms);
                cluster = null;
            }
        }

        // search all currently loaded clusters
        if (cluster == null) {
            for (Iterator i = clusterCache.copyToMap().values().iterator(); i.hasNext(); ) {
                Cluster cursor = (Cluster) i.next();
                // System.out.println (cursor.size());
                if (cursor.size() < maxClusterSize && cursor.permissions().equals(perms)) {
                    cluster = cursor;

                    // check if the cluster was deactivated by the ensureCacheSpace
                    if (cluster.lock() == null) {
                        env.logWriter.newEntry(this,
                                "growingCluster(): loaded cluster was deactivated: " + cluster.clusterID(),
                                LogWriter.DEBUG);
                        cluster = null;
                    } else if (cluster.lock().level(null) > Lock.LEVEL_NONE && !cluster.lock().isAcquiredBy(
                            env.transactionManager.currentTA())) {
                        // use this cluster only if it isn't used by another ta
                        if (env.logWriter.hasTarget(LogWriter.DEBUG1)) {
                            env.logWriter.newEntry(this,
                                    "growingCluster(): loaded cluster is locked by another transaction: "
                                    + cluster.clusterID(), LogWriter.DEBUG1);
                        }
                        cluster = null;
                    } else {
                        growingClusterIDs.addForKey(cluster.clusterID(), perms);
                        if (env.logWriter.hasTarget(LogWriter.DEBUG1)) {
                            env.logWriter.newEntry(this,
                                    "growingCluster(): loaded cluster is now growing cluster: " + cluster.clusterID()
                                    + " size:" + cluster.size(), LogWriter.DEBUG1);
                        }
                        break;
                    }
                }
            }
        }

        // write a new, empty cluster and load it just after, to ensure
        // that the new cluster is "regularly" loaded
        if (cluster == null) {
            cluster = createANewEmptyAndUsableCluster(perms);
        }

        return cluster;
    }


    /**
     Creates a cluster which is
     <UL>
     <LI>new</LI>
     <LI>empty</LI>
     <LI>usable and</LI>
     <LI>not locked</LI>
     </UL>
     */
    protected synchronized Cluster createANewEmptyAndUsableCluster(Permissions perms) throws IOException, ClassNotFoundException {
//      env.logWriter.newEntry( this, "growingCluster(): creating new cluster...", LogWriter.DEBUG );
        Cluster cluster = new MagicCluster(new ClusterID(env.keyGenerator.nextID()), perms, (MROWLock) env.transactionManager.newLock(), 256);
        activateCluster(cluster, 100);
        clusterCache.put(cluster.clusterID(), cluster);
        growingClusterIDs.addForKey(cluster.clusterID(), perms);
//      env.logWriter.newEntry( this, "growingCluster(): new cluster created: " + cluster.clusterID(), LogWriter.DEBUG );
        return cluster;
    }


    /**
     Returns or creates a cluster which is not locked so that locking it will succeed.
     The returned cluster is only guaranteed to be not locked by any other thread as long as this
     method is called during synchronization to this ClusterStore.
     */
    protected Cluster giveMeAnUnlockedCluster(Permissions perms) throws IOException, ClassNotFoundException {
        return createANewEmptyAndUsableCluster(perms);
    }
