📄 ClusterStore.java
// You can redistribute this software and/or modify it under the terms of
// the Ozone Core License version 1 published by ozone-db.org.
//
// The original code and portions created by SMB are
// Copyright (C) 1997-2000 by SMB GmbH. All rights reserved.
//
// $Id: ClusterStore.java,v 1.37 2000/11/14 10:20:51 daniela Exp $

package org.ozoneDB.core.wizardStore;

import java.io.*;
import java.util.zip.*;
import org.ozoneDB.DxLib.*;
import org.ozoneDB.*;
import org.ozoneDB.core.*;
import org.ozoneDB.util.*;


/**
 * The ClusterStore is the back-end store of the wizardStore. It maintains the
 * cluster cache, activation/passivation and the actual persistent commits.
 *
 * @author <a href="http://www.softwarebuero.de/">SMB</a>
 * @version $Revision: 1.37 $Date: 2000/11/14 10:20:51 $
 */
public final class ClusterStore {

    public final static String POSTFIX_CLUSTER = ".cl";
    public final static String POSTFIX_LOCK = ".lk";
    public final static String POSTFIX_TEMP = ".tm";
    public final static String POSTFIX_SHADOW = ".sh";

    protected final static int compressionFactor = 3;

    protected transient Env env;

    protected transient long touchCount;

    protected DxMap cachedClusters;

    protected int maxClusterSize = 64 * 1024;

    /**
     * Table that maps Permissions to ClusterIDs.
     */
    protected DxMap growingClusterIDs;

    private boolean compressClusters;


    ClusterStore( Env _env ) {
        env = _env;
        maxClusterSize = env.config.intProperty( Setup.WS_CLUSTER_SIZE, -1 );
        cachedClusters = new DxHashMap( 64 );
        compressClusters = env.config.booleanProperty( Setup.WS_COMPRESS_CLUSTERS, true );
    }


    public void startup() throws Exception {
        growingClusterIDs = new DxHashMap( 32 );
    }


    public void shutdown() {
    }


    /**
     * Search the DATA dir and recover all ClusterIDs.
     */
    public DxBag recoverClusterIDs() {
        File file = new File( env.dir + Env.DATA_DIR );
        String[] fileList = file.list();

        DxBag result = new DxArrayBag();
        for (int i = 0; i < fileList.length; i++) {
            if (fileList[i].endsWith( POSTFIX_CLUSTER ) || fileList[i].endsWith( POSTFIX_SHADOW )) {
                String cidString = fileList[i].substring( 0, fileList[i].indexOf( '.' ) );
                long cid = Long.parseLong( cidString );
                result.add( new ClusterID( cid ) );
            }
        }
        return result;
    }


    public long currentCacheSize() {
        long result = 0;
        DxIterator it = cachedClusters.iterator();
        Cluster cluster;
        while ((cluster = (Cluster)it.next()) != null) {
            result += cluster.size();
        }
        return result;
    }


    protected int currentBytesPerContainer() {
        int result = env.config.intProperty( Setup.WS_CLUSTER_SIZE_RATIO, 256 );
        env.logWriter.newEntry( this, "currentBytesPerContainer(): setup:" + result, LogWriter.DEBUG );
        return result;

        // if (cachedClusters.count() < 3) {
        //     int result = env.config.intProperty (Setup.WS_CLUSTER_SIZE_RATIO, 256);
        //     env.logWriter.newEntry (this, "currentBytesPerContainer(): config:" + result, LogWriter.DEBUG);
        //     return result;
        // }
        // else {
        //     int bpc = 0;
        //     int count = 0;
        //     DxIterator it = cachedClusters.iterator();
        //     Cluster cluster;
        //     while ((cluster=(Cluster)it.next()) != null) {
        //         count ++;
        //         bpc += cluster.bytesPerContainer;
        //     }
        //     int result = bpc / count;
        //     env.logWriter.newEntry (this, "currentBytesPerContainer(): new:" + result, LogWriter.DEBUG);
        //     return result;
        // }
    }
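
    // ------------------------------------------------------------------
    // Illustration only (not part of the original source): a minimal
    // sketch of how a cache trimmer might use currentCacheSize(). The
    // real trimCache() is not shown in this listing; the eviction policy
    // and the maxCacheSize parameter below are assumptions.
    //
    //     private void trimCacheSketch( long maxCacheSize ) throws Exception {
    //         DxIterator it = cachedClusters.iterator();
    //         Cluster c;
    //         while (currentCacheSize() > maxCacheSize && (c = (Cluster)it.next()) != null) {
    //             // only passivate clusters that no transaction holds a lock on
    //             if (c.lock.level( null ) == Lock.LEVEL_NONE) {
    //                 storeData( c, basename( c.clusterID() ) + POSTFIX_CLUSTER );
    //                 cachedClusters.removeForKey( c.clusterID() );  // assumed safe during iteration
    //             }
    //         }
    //     }
    // ------------------------------------------------------------------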

    // public Cluster lruCluster() {
    //     // search the LRU cluster to speed things up; since this is not
    //     // synchronized, checking and accessing currentCluster must be done in
    //     // one line to prevent another thread from changing the variable in between
    //     container = (currentCluster != null && currentCluster.lock != null)
    //             ? currentCluster.containerForID (id) : null;
    //     if (container != null) {
    //         // System.out.print ("+");
    //         return container.isDeleted() ? null : container;
    //     }


    /**
     * @param perms Permissions of the cluster to search.
     * @return Cluster with the specified permissions that is suitable for
     * storing a new container.
     */
    protected synchronized Cluster growingCluster( Permissions perms ) throws Exception {
        if (env.logWriter.hasTarget( LogWriter.DEBUG3 )) {
            env.logWriter.newEntry( this, "growingCluster() ", LogWriter.DEBUG3 );
        }

        Cluster cluster = null;
        ClusterID cid = (ClusterID)growingClusterIDs.elementForKey( perms );

        // load the current growing cluster and check space
        if (cid != null) {
            cluster = (Cluster)cachedClusters.elementForKey( cid );
            if (cluster == null) {
                cluster = loadCluster( cid );
            }

            // check the cluster size and whether it was deactivated by trimCache();
            // use this cluster only if it isn't used by another ta
            if (cluster.lock == null || cluster.size() >= maxClusterSize
                    || cluster.lock.level( null ) > Lock.LEVEL_NONE
                    && !cluster.lock.isAcquiredBy( env.transactionManager.currentTA() )) {
                if (env.logWriter.hasTarget( LogWriter.DEBUG )) {
                    env.logWriter.newEntry( this, "growingCluster(): growing cluster not usable: cid="
                            + cluster.clusterID() + " size=" + cluster.size() + " lockLevel="
                            + (cluster.lock != null ? String.valueOf( cluster.lock.level( null ) ) : "null"),
                            LogWriter.DEBUG );
                }
                growingClusterIDs.removeForKey( perms );
                cluster = null;
            }
        }

        // search all currently loaded clusters
        if (cluster == null) {
            DxIterator it = cachedClusters.iterator();
            Cluster cursor;
            while ((cursor = (Cluster)it.next()) != null) {
                // System.out.println (cursor.size());
                if (cursor.size() < maxClusterSize && cursor.permissions.equals( perms )) {
                    cluster = cursor;

                    // make sure that there is enough space for the clusters to be
                    // able to grow to the max size
                    // ensureCacheSpace (maxClusterSize - cluster.size());
                    trimCache();

                    // check if the cluster was deactivated by the ensureCacheSpace
                    if (cluster.lock == null) {
                        env.logWriter.newEntry( this, "growingCluster(): loaded cluster was deactivated: "
                                + cluster.clusterID(), LogWriter.DEBUG );
                        cluster = null;
                    } else if (cluster.lock.level( null ) > Lock.LEVEL_NONE
                            && !cluster.lock.isAcquiredBy( env.transactionManager.currentTA() )) {
                        // use this cluster only if it isn't used by another ta
                        env.logWriter.newEntry( this, "growingCluster(): loaded cluster is locked by another transaction: "
                                + cluster.clusterID(), LogWriter.DEBUG );
                        cluster = null;
                    } else {
                        growingClusterIDs.addForKey( cluster.clusterID(), perms );
                        env.logWriter.newEntry( this, "growingCluster(): loaded cluster is now growing cluster: "
                                + cluster.clusterID() + " size:" + cluster.size(), LogWriter.DEBUG );
                        break;
                    }
                }
            }
        }

        // write a new, empty cluster and load it just after, to ensure that
        // the new cluster is "regularly" loaded
        if (cluster == null) {
            env.logWriter.newEntry( this, "growingCluster(): creating new cluster...", LogWriter.DEBUG );
            cluster = new Cluster( new ClusterID( env.nextID() ), perms, env.transactionManager.newLock(), 256 );

            // the new cluster has to be written to disk in order to make
            // saveShadow() and things work;
            storeData( cluster, basename( cluster.clusterID() ) + POSTFIX_CLUSTER );

            // since we don't check the cache size after registering a container,
            // we have to make sure that there is enough space for this cluster
            // to grow to the max size
            // ensureCacheSpace (maxClusterSize);
            trimCache();

            cluster = loadCluster( cluster.clusterID() );
            growingClusterIDs.addForKey( cluster.clusterID(), perms );
            env.logWriter.newEntry( this, "growingCluster(): new cluster created: " + cluster.clusterID(),
                    LogWriter.DEBUG );
        }
        return cluster;
    }
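
    // ------------------------------------------------------------------
    // Illustration only (not part of the original source): the on-disk
    // naming scheme implied by the POSTFIX_* constants, recoverClusterIDs()
    // and restoreCluster(). basename() is assumed to map a ClusterID to a
    // path prefix inside the DATA dir; the concrete path is an assumption.
    //
    //     ClusterID cid = new ClusterID( 1234 );
    //     String base = basename( cid );                // e.g. ".../data/1234"
    //     String clusterFile = base + POSTFIX_CLUSTER;  // "1234.cl"  committed cluster
    //     String shadowFile  = base + POSTFIX_SHADOW;   // "1234.sh"  pre-commit shadow copy
    //     String lockFile    = base + POSTFIX_LOCK;     // "1234.lk"  persisted lock state
    //     String tempFile    = base + POSTFIX_TEMP;     // "1234.tm"  temporary file
    // ------------------------------------------------------------------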

    /**
     * Associates the specified container with a cluster.
     * @param container Container to be registered with one cluster.
     */
    public void registerContainer( WizardObjectContainer container, Permissions perms ) throws Exception {
        if (env.logWriter.hasTarget( LogWriter.DEBUG3 )) {
            env.logWriter.newEntry( this, "registerContainer()", LogWriter.DEBUG3 );
        }

        Cluster cluster = growingCluster( perms );
        cluster.registerContainer( container );

        if (env.logWriter.hasTarget( LogWriter.DEBUG3 )) {
            env.logWriter.newEntry( this, " cluster: " + cluster.clusterID(), LogWriter.DEBUG3 );
        }
    }


    public synchronized void invalidateContainer( WizardObjectContainer container ) throws Exception {
        container.cluster.removeContainer( container );
        container.cluster = null;
    }


    protected Cluster restoreCluster( ClusterID cid ) throws Exception {
        String basename = basename( cid );

        Cluster cluster;
        File shadowFile = new File( basename + POSTFIX_SHADOW );
        if (shadowFile.exists()) {
            cluster = (Cluster)loadData( basename + POSTFIX_SHADOW );
            activateCluster( cluster, 0 );
            cluster.restoreShadow();
        } else {
            cluster = (Cluster)loadData( basename + POSTFIX_CLUSTER );
            activateCluster( cluster, 0 );
        }

        new File( basename + POSTFIX_LOCK ).delete();
        new File( basename + POSTFIX_TEMP ).delete();
        return cluster;
    }
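
    // ------------------------------------------------------------------
    // Illustration only (not part of the original source): storeData()
    // and loadData() are not shown in this listing. Given the
    // java.util.zip import and the compressClusters flag, a plausible
    // sketch of storeData() looks like the following; this is an
    // assumption, not the original implementation. loadData() would
    // mirror it with GZIPInputStream/ObjectInputStream.
    //
    //     private void storeData( Object data, String fileName ) throws Exception {
    //         OutputStream out = new FileOutputStream( fileName );
    //         if (compressClusters) {
    //             out = new GZIPOutputStream( out );   // optional on-disk compression
    //         }
    //         ObjectOutputStream objOut = new ObjectOutputStream( out );
    //         objOut.writeObject( data );
    //         objOut.close();
    //     }
    // ------------------------------------------------------------------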

    /**
     * Make sure the corresponding cluster is in the cache. While loading
     * clusters, we may have to throw away (and maybe store) some currently
     * cached clusters.
     *
     * @param cid ClusterID of the cluster to load.
     */
    public Cluster loadCluster( ClusterID cid ) throws Exception {
        Cluster cluster = (Cluster)cachedClusters.elementForKey( cid );
        if (cluster == null) {
            if (env.logWriter.hasTarget( LogWriter.DEBUG )) {
                env.logWriter.newEntry( this, "loadCluster(): load cluster from disk: " + cid.toString(),
                        LogWriter.DEBUG );
            }

            String basename = basename( cid );
            String clusterName = basename + POSTFIX_CLUSTER;
            String lockName = basename + POSTFIX_LOCK;

            int clusterByteSize = (int)new File( clusterName ).length();
            if (compressClusters) {
                clusterByteSize *= compressionFactor;
            }

            // make sure that many different threads don't load too much data
            // before the currently synchronized thread can trim the cache
            trimCache();

            cluster = (Cluster)loadData( clusterName );

            synchronized (this) {
                // now we have to check the cachedClusters table inside the
                // synchronized block to see if someone registered this
                // cluster while we loaded it
                Cluster interimCluster = (Cluster)cachedClusters.elementForKey( cid );
                if (interimCluster != null) {
                    env.logWriter.newEntry( this,
                            "loadCluster(): cluster was loaded by another thread too; dropping my copy",
                            LogWriter.WARN );
                    cluster = interimCluster;
                } else {
                    // we are going to mess with the cluster; it seems that the cluster
                    // is not visible to other threads until it is added to cachedClusters;
                    // however, the IBM JDK throws an exception in cluster.updateLockLevel, which
                    // seems to be related to the initialization in the following block
                    synchronized (cluster) {
                        // locks are only there if the lock level is >= READ
                        try {
                            cluster.lock = (Lock)loadData( lockName );
                            new File( lockName ).delete();
                        } catch (Exception e) {
                            if (env.logWriter.hasTarget( LogWriter.DEBUG3 )) {
                                env.logWriter.newEntry( this,
                                        " Unable to load lock from disk - creating a new lock.",
                                        LogWriter.DEBUG3 );
                            }
                            cluster.lock = env.transactionManager.newLock();
                        }
                        activateCluster( cluster, clusterByteSize );
                    }

                    if (clusterByteSize > maxClusterSize * 2) {
                        splitCluster( cluster );
                        // [listing truncated here in the original]
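
    // ------------------------------------------------------------------
    // Illustration only (not part of the original source): a worked
    // example of the size estimate above. With the default
    // compressionFactor of 3 and maxClusterSize = 64 * 1024, a 64 KB
    // compressed .cl file yields an estimated in-memory size of
    // 3 * 64 KB = 192 KB. That exceeds 2 * maxClusterSize = 128 KB, so
    // such a cluster would be handed to splitCluster() after loading.
    // ------------------------------------------------------------------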