📄 wizardstore.java
字号:
// You can redistribute this software and/or modify it under the terms of// the Ozone Core License version 1 published by ozone-db.org.//// The original code and portions created by SMB are// Copyright (C) 1997-2000 by SMB GmbH. All rights reserved.//// $Id: WizardStore.java,v 1.40 2000/11/10 17:05:04 daniela Exp $package org.ozoneDB.core.wizardStore;import java.io.*;import org.ozoneDB.DxLib.*;import org.ozoneDB.*;import org.ozoneDB.core.*;import org.ozoneDB.util.*;/** * @author <a href="http://www.softwarebuero.de/">SMB</a> * @version $Revision: 1.40 $Date: 2000/11/10 17:05:04 $ */public final class WizardStore implements Store { protected final static String ID_TABLE_NAME = "idTable.wizard"; protected final static String NAME_TABLE_NAME = "nameTable.wizard"; protected final static String COMMIT_FLAG_NAME = "commitflag.wizard"; protected transient Env env; /** * Maps ObjectIDs to ClusterIDs */ protected DxMap idTable; /** * Maps names to ObjectIDs */ protected DxMap nameTable; protected ClusterStore clusterStore; public WizardStore() { } public synchronized void init( Env _env ) { env = _env; int idTableBufferSize = env.config.intProperty( Setup.WS_TABLE_BUFF_SIZE, -1 ); int idTableCacheSize = env.config.intProperty( Setup.WS_TABLE_CACHE_SIZE, -1 ); int idTableSubtableSize = env.config.intProperty( Setup.WS_TABLE_SUBTABLE_SIZE, -1 ); idTable = new IDTable( env.dir + "ostab" + File.separator + "tab", idTableBufferSize, idTableCacheSize, idTableSubtableSize ); // idTable = new DxHashMap (10000); nameTable = new DxHashMap( 100 ); clusterStore = new ClusterStore( _env ); } public synchronized void startup() throws Exception { env.logWriter.newEntry( this, "startup...", LogWriter.INFO ); clusterStore.startup(); boolean isCleanShutdown = new File( COMMIT_FLAG_NAME ).exists() ? false : true; boolean isSuccessfullyStarted = false; if (isCleanShutdown) { ObjectInputStream nameTableIn = null; ObjectInputStream idTableIn = null; try { // restore nameTable nameTableIn = new ObjectInputStream( new FileInputStream( env.dir + NAME_TABLE_NAME ) ); int count = nameTableIn.readInt(); for (int i = 0; i < count; i++) { nameTable.addForKey( nameTableIn.readObject(), nameTableIn.readObject() ); } nameTableIn.close(); // restore idTable if (!(idTable instanceof DxDiskHashMap)) { idTableIn = new ObjectInputStream( new FileInputStream( env.dir + ID_TABLE_NAME ) ); count = idTableIn.readInt(); for (int i = 0; i < count; i++) { idTable.addForKey( idTableIn.readObject(), idTableIn.readObject() ); } idTableIn.close(); } else { ((DxDiskHashMap)idTable).re_use(); ((DxDiskHashMap)idTable).setReusable( true ); } isSuccessfullyStarted = true; } catch (Exception e) { env.logWriter.newEntry( this, " error while starting up... ", LogWriter.INFO ); env.logWriter.newEntry( this, " exception: ", e, LogWriter.DEBUG ); } finally { if (nameTableIn != null) { nameTableIn.close(); } if (idTableIn != null) { idTableIn.close(); } } } if (!isCleanShutdown || !isSuccessfullyStarted) { env.logWriter.newEntry( this, " recovering...", LogWriter.INFO ); recover(); } env.logWriter.newEntry( this, " " + idTable.count() + " IDs, " + nameTable.count() + " name(s))", LogWriter.INFO ); } public synchronized void shutdown() throws Exception { env.logWriter.newEntry( this, "shutdown...", LogWriter.INFO ); clusterStore.shutdown(); commitNameTable(); commitIDTable(); if (!(idTable instanceof DxDiskHashMap)) { ((DxDiskHashMap)idTable).printStatistics(); ((DxDiskHashMap)idTable).close(); } } protected void commitNameTable() throws Exception { env.logWriter.newEntry( this, "commitNameTable...", LogWriter.DEBUG ); String filename = env.dir + NAME_TABLE_NAME; ObjectOutputStream out = new ObjectOutputStream( new FileOutputStream( filename ) ); try { out.writeInt( nameTable.count() ); DxIterator it = nameTable.iterator(); while (it.next() != null) { out.writeObject( it.object() ); out.writeObject( it.key() ); } } catch( Exception e) { new File( filename ).delete(); } finally { out.close(); } } protected void commitIDTable() throws Exception { env.logWriter.newEntry( this, "commitIDTable...", LogWriter.DEBUG ); if (!(idTable instanceof DxDiskHashMap)) { String filename = env.dir + ID_TABLE_NAME; ObjectOutputStream out = new ObjectOutputStream( new FileOutputStream( filename ) ); try { out.writeInt( idTable.count() ); DxIterator it = idTable.iterator(); while (it.next() != null) { out.writeObject( it.object() ); out.writeObject( it.key() ); } } catch( Exception e) { new File( filename ).delete(); } finally { out.close(); } } else { ((IDTable)idTable).setReusable( false ); ((IDTable)idTable).writeDirtyTables(); ((IDTable)idTable).setReusable( true ); } } /** * Fill idTable and nameTable from the information that are stored in the * clusters directly. * * @throws Exception If a cluster cannot be read. */ protected synchronized void recover() throws Exception { DxBag cids = clusterStore.recoverClusterIDs(); DxIterator it = cids.iterator(); while (it.next() != null) { ClusterID cid = (ClusterID)it.object(); Cluster cluster = null; boolean exceptionWhileLoading = false; env.logWriter.newEntry( this, " cluster: " + cid, LogWriter.INFO ); try { cluster = (Cluster)clusterStore.restoreCluster( cid ); env.logWriter.newEntry( this, " " + cluster.containers.count() + " containers", LogWriter.INFO ); } catch (Exception e) { env.logWriter.newEntry( this, "exception while loading: " + cid, LogWriter.WARN ); env.logWriter.newEntry( this, "", e, LogWriter.DEBUG ); exceptionWhileLoading = true; } if (exceptionWhileLoading || cluster.containers.isEmpty()) { env.logWriter.newEntry( this, " cluster is empty or unable to read - deleted", LogWriter.INFO ); if (cluster != null) { cluster.delete(); } } else { // fill in idTable and nameTable DxIterator it2 = cluster.containers.iterator(); WizardObjectContainer container; while ((container = (WizardObjectContainer)it2.next()) != null) { idTable.addForKey( cluster.clusterID(), container.id() ); if (container.name() != null) { nameTable.addForKey( container.id(), container.name() ); } } } clusterStore.unloadCluster( cid, false ); } commitIDTable(); commitNameTable(); } public Object newTransactionData() { return new TransactionData(); } public synchronized ObjectContainer newContainer( Transaction ta, OzoneCompatible target, ObjectID objID, Permissions permissions ) throws Exception { if (env.logWriter.hasTarget( LogWriter.DEBUG3 )) { env.logWriter.newEntry( this, "newContainer()", LogWriter.DEBUG3 ); } WizardObjectContainer container = new WizardObjectContainer( objID ); if (target != null) { container.setTarget( target ); } clusterStore.registerContainer( container, permissions ); TransactionData taData = (TransactionData)ta.data; ClusterID cid = container.cluster.clusterID(); ObjectID oid = container.id(); taData.idTable.addForKey( cid, oid ); taData.idTableChanges_push( new IDTableChange( oid, cid, IDTableChange.STATE_ADDED ) ); return container; } // public synchronized void deleteContainer (Transaction ta, ObjectContainer _container) // throws Exception { // if (env.logWriter.hasTarget (LogWriter.DEBUG3)) // env.logWriter.newEntry (this, "deleteContainer()", LogWriter.DEBUG3); // // WizardObjectContainer container = (WizardObjectContainer)_container; // taData.idTableChanges.push (new IDTableChange (oid, cid, IDTableChange.STATE_ADDED)); public void updateLockLevel( Transaction _ta, ObjectContainer _container ) throws Exception { if (env.logWriter.hasTarget( LogWriter.DEBUG3 )) { env.logWriter.newEntry( this, "updateLockLevel()", LogWriter.DEBUG3 ); } WizardObjectContainer container = (WizardObjectContainer)_container; container.cluster.updateLockLevel( _ta ); TransactionData taData = (TransactionData)_ta.data; taData.idTable.addForKey( container.cluster.clusterID(), container.id() ); } /** * Returns the ObjectContainer for the given ObjectID or null if there is * no such container.<p> * * * Impl. Note: For performance reasons this is the only method of this Store * that is not synchronized. This will not cause problems because the only * field that is updated inside the method (currentContainer) does not need * to be stable while this method is running. */ public ObjectContainer containerForID( Transaction ta, ObjectID id ) throws Exception { WizardObjectContainer container = null; // search the LRU cluster to speed things up; since this is not // synchronized, checking and accessing currentCluster must be done in // one line to avoid other thread to change the variable in between // container = (currentCluster != null && currentCluster.lock != null) ? currentCluster.containerForID (id) : null; // if (container != null) { // // System.out.print ("+"); // return container.isDeleted() ? null : container; // } ClusterID cid = null; // search members of current ta first if (ta != null) { TransactionData taData = (TransactionData)ta.data; cid = (ClusterID)taData.idTable.elementForKey( id ); if (cid == null && taData.lrucid != null) { Cluster lru = clusterStore.loadCluster( taData.lrucid ); container = lru != null && lru.lock != null ? lru.containerForID( id ) : null; if (container != null) { // System.out.print ("+"); return container.isDeleted() ? null : container; } } } // search global table ONLY if ta doesn't contain the container if (cid == null) { cid = (ClusterID)idTable.elementForKey( id ); } if (cid == null) { return null; } else { // System.out.println ("-"); Cluster cluster = clusterStore.loadCluster( cid ); if (cluster == null) { throw new ObjectNotFoundExc( "No object registered for ID: " + id );
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -