📄 backingstorehashtable.java
字号:
Vector row_vec; // accumulates all rows sharing this key

// The key already maps to something: we inserted a duplicate.
if ((duplicate_value instanceof Vector))
{
    // Already a duplicate list for this key; just account for the new row.
    doSpaceAccounting( row, false);
    row_vec = (Vector) duplicate_value;
}
else
{
    // First duplicate for this key: allocate vector to hold duplicates.
    row_vec = new Vector(2);

    // insert original row into vector
    row_vec.addElement(duplicate_value);

    // firstDuplicate == true also charges the Vector overhead itself.
    doSpaceAccounting( row, true);
}

// insert new row into vector
row_vec.addElement(row);

// store vector of rows back into hash table,
// overwriting the duplicate key that was
// inserted.
hash_table.put(key, row_vec);
}
}

row = null;
}

/**
 * Account for one more row held in memory.
 * <p>
 * Increments the in-memory row count. When the limit is expressed as a
 * byte budget rather than a row count (max_inmemory_rowcnt <= 0), the
 * estimated size of the row is subtracted from the remaining budget;
 * the first duplicate for a key additionally pays for the Vector that
 * is allocated to hold the duplicate chain.
 *
 * @param row            The row just inserted into the in-memory table.
 * @param firstDuplicate true if this insert created the duplicate Vector.
 */
private void doSpaceAccounting( Object[] row, boolean firstDuplicate)
{
    inmemory_rowcnt++;
    if( max_inmemory_rowcnt <= 0)
    {
        max_inmemory_size -= getEstimatedMemUsage(row);
        if( firstDuplicate)
            max_inmemory_size -= vectorSize;
    }
} // end of doSpaceAccounting

/**
 * Determine whether a new row should be spilled to disk and, if so, do it.
 *
 * @param hash_table The in-memory hash table
 * @param key        The row's key
 * @param row        The row to insert (spilled only if it is a
 *                   DataValueDescriptor[]).
 *
 * @return true if the row was spilled to disk, false if not
 *
 * @exception StandardException Standard exception policy.
 */
private boolean spillToDisk( Hashtable hash_table,
                             Object key,
                             Object[] row)
    throws StandardException
{
    // Once we have started spilling all new rows will go to disk, even if we have freed up some
    // memory by moving duplicates to disk. This simplifies handling of duplicates and accounting.
    if( diskHashtable == null)
    {
        // Not spilling yet: decide whether the in-memory limit is reached.
        if( max_inmemory_rowcnt > 0)
        {
            if( inmemory_rowcnt < max_inmemory_rowcnt)
                return false; // Do not spill
        }
        else if( max_inmemory_size > 0)
            return false; // byte budget not yet exhausted

        // Want to start spilling
        if( ! (row instanceof DataValueDescriptor[]))
        {
            if( SanityManager.DEBUG)
                SanityManager.THROWASSERT(
                    "BackingStoreHashtable row is not DataValueDescriptor[]");
            // Do not know how to put it on disk
            return false;
        }
        // Lazily create the on-disk overflow table, using this row as the template.
        diskHashtable =
            new DiskHashtable( tc,
                               (DataValueDescriptor[]) row,
                               key_column_numbers,
                               remove_duplicates,
                               keepAfterCommit);
    }

    Object duplicateValue = hash_table.get( key);
    if( duplicateValue != null)
    {
        if( remove_duplicates)
            return true; // a degenerate case of spilling
        // If we are keeping duplicates then move all the duplicates from memory to disk
        // This simplifies finding duplicates: they are either all in memory or all on disk.
        if( duplicateValue instanceof Vector)
        {
            Vector duplicateVec = (Vector) duplicateValue;
            for( int i = duplicateVec.size() - 1; i >= 0; i--)
            {
                Object[] dupRow = (Object[]) duplicateVec.elementAt(i);
                diskHashtable.put( key, dupRow);
            }
        }
        else
            diskHashtable.put( key, (Object []) duplicateValue);
        hash_table.remove( key);
    }
    diskHashtable.put( key, row);
    return true;
} // end of spillToDisk

/**
 * Take a row and return an estimate as to how much memory that
 * row will consume.
 *
 * @param row The row for which we want to know the memory usage.
 * @return A guess as to how much memory the current row will
 *         use.
 */
private long getEstimatedMemUsage(Object [] row)
{
    long rowMem = 0;
    for( int i = 0; i < row.length; i++)
    {
        // Only DataValueDescriptor columns can report their own size;
        // other column types contribute just their reference slot.
        if (row[i] instanceof DataValueDescriptor)
            rowMem += ((DataValueDescriptor) row[i]).estimateMemoryUsage();
        rowMem += ClassSize.refSize;
    }

    // One more reference for the row array itself.
    rowMem += ClassSize.refSize;
    return rowMem;
}

/**************************************************************************
 * Public Methods of This class:
 **************************************************************************
 */

/**
 * Close the BackingStoreHashtable.
 * <p>
 * Perform any necessary cleanup after finishing with the hashtable. Will
 * deallocate/dereference objects as necessary. If the table has gone
 * to disk this will drop any on disk files used to support the hash table.
* <p> * * @exception StandardException Standard exception policy. **/ public void close() throws StandardException { hash_table = null; if( diskHashtable != null) { diskHashtable.close(); diskHashtable = null; } return; } /** * Return an Enumeration that can be used to scan entire table. * <p> * RESOLVE - is it worth it to support this routine when we have a * disk overflow hash table? * * @return The Enumeration. * * @exception StandardException Standard exception policy. **/ public Enumeration elements() throws StandardException { if( diskHashtable == null) return(hash_table.elements()); return new BackingStoreHashtableEnumeration(); } /** * get data associated with given key. * <p> * There are 2 different types of objects returned from this routine. * <p> * In both cases, the key value is either the object stored in * row[key_column_numbers[0]], if key_column_numbers.length is 1, * otherwise it is a KeyHasher containing * the objects stored in row[key_column_numbers[0, 1, ...]]. * For every qualifying unique row value an entry is placed into the * Hashtable. * <p> * For row values with duplicates, the value of the data is a Vector of * rows. * <p> * The caller will have to call "instanceof" on the data value * object if duplicates are expected, to determine if the data value * of the Hashtable entry is a row or is a Vector of rows. * <p> * The BackingStoreHashtable "owns" the objects returned from the get() * routine. They remain valid until the next access to the * BackingStoreHashtable. If the client needs to keep references to these * objects, it should clone copies of the objects. A valid * BackingStoreHashtable can place all rows into a disk based conglomerate, * declare a row buffer and then reuse that row buffer for every get() * call. * * @return The value to which the key is mapped in this hashtable; * null if the key is not mapped to any value in this hashtable. * * @param key The key to hash on. 
* * @exception StandardException Standard exception policy. **/ public Object get(Object key) throws StandardException { Object obj = hash_table.get(key); if( diskHashtable == null || obj != null) return obj; return diskHashtable.get( key); } /** * Return runtime stats to caller by adding them to prop. * <p> * * @param prop The set of properties to append to. * * @exception StandardException Standard exception policy. **/ public void getAllRuntimeStats(Properties prop) throws StandardException { if (auxillary_runtimestats != null) org.apache.derby.iapi.util.PropertyUtil.copyProperties(auxillary_runtimestats, prop); } /** * remove a row from the hash table. * <p> * a remove of a duplicate removes the entire duplicate list. * * @param key The key of the row to remove. * * @exception StandardException Standard exception policy. **/ public Object remove( Object key) throws StandardException { Object obj = hash_table.remove(key); if( obj != null || diskHashtable == null) return obj; return diskHashtable.remove(key); } /** * Set the auxillary runtime stats. * <p> * getRuntimeStats() will return both the auxillary stats and any * BackingStoreHashtable() specific stats. Note that each call to * setAuxillaryRuntimeStats() overwrites the Property set that was * set previously. * * @param prop The set of properties to append from. * * @exception StandardException Standard exception policy. **/ public void setAuxillaryRuntimeStats(Properties prop) throws StandardException { auxillary_runtimestats = prop; } /** * Put a row into the hash table. * <p> * The in memory hash table will need to keep a reference to the row * after the put call has returned. If "needsToClone" is true then the * hash table will make a copy of the row and put that, else if * "needsToClone" is false then the hash table will keep a reference to * the row passed in and no copy will be made. 
* <p> * If rouine returns false, then no reference is kept to the duplicate * row which was rejected (thus allowing caller to reuse the object). * * @param needsToClone does this routine have to make a copy of the row, * in order to keep a reference to it after return? * @param row The row to insert into the table. * * @return true if row was inserted into the hash table. Returns * false if the BackingStoreHashtable is eliminating * duplicates, and the row being inserted is a duplicate, * or if we are skipping rows with 1 or more null key columns * and we find a null key column. * * @exception StandardException Standard exception policy. **/ public boolean put( boolean needsToClone, Object[] row) throws StandardException { // Are any key columns null? if (skipNullKeyColumns) { int index = 0; for ( ; index < key_column_numbers.length; index++) { if (SanityManager.DEBUG) { if (! (row[key_column_numbers[index]] instanceof Storable)) { SanityManager.THROWASSERT( "row[key_column_numbers[index]] expected to be Storable, not " + row[key_column_numbers[index]].getClass().getName()); } } Storable storable = (Storable) row[key_column_numbers[index]]; if (storable.isNull()) { return false; } } } if (needsToClone) { row = cloneRow(row); } Object key = KeyHasher.buildHashKey(row, key_column_numbers); if ((remove_duplicates) && (get(key) != null)) { return(false); } else { add_row_to_hash_table(hash_table, key, row); return(true); } } /** * Return number of unique rows in the hash table. * <p> * * @return The number of unique rows in the hash table. * * @exception StandardException Standard exception policy. 
**/
public int size() throws StandardException
{
    // Unique keys live either in memory or on disk, never in both,
    // so the counts simply add.
    if( diskHashtable == null)
        return(hash_table.size());
    return hash_table.size() + diskHashtable.size();
}

/**
 * Enumeration over every row in the table: first the in-memory rows,
 * then (if the table spilled) the rows in the on-disk overflow table.
 */
private class BackingStoreHashtableEnumeration implements Enumeration
{
    // Set to null once exhausted, so we know to move on to disk.
    private Enumeration memoryEnumeration;
    // null when there is no disk table, or its scan could not be opened.
    private Enumeration diskEnumeration;

    BackingStoreHashtableEnumeration()
    {
        memoryEnumeration = hash_table.elements();
        if( diskHashtable != null)
        {
            try
            {
                diskEnumeration = diskHashtable.elements();
            }
            catch( StandardException se)
            {
                // Enumeration methods cannot throw a checked exception, so
                // the disk scan is dropped. NOTE(review): callers will then
                // see only the in-memory rows — confirm this silent
                // degradation is intended.
                diskEnumeration = null;
            }
        }
    }

    public boolean hasMoreElements()
    {
        if( memoryEnumeration != null)
        {
            if( memoryEnumeration.hasMoreElements())
                return true;
            memoryEnumeration = null;
        }
        if( diskEnumeration == null)
            return false;
        return diskEnumeration.hasMoreElements();
    }

    public Object nextElement() throws NoSuchElementException
    {
        if( memoryEnumeration != null)
        {
            if( memoryEnumeration.hasMoreElements())
                return memoryEnumeration.nextElement();
            memoryEnumeration = null;
        }
        // Bug fix: previously a null diskEnumeration caused a
        // NullPointerException here; the Enumeration contract requires
        // NoSuchElementException when no more elements exist.
        if( diskEnumeration == null)
            throw new NoSuchElementException();
        return diskEnumeration.nextElement();
    }
} // end of class BackingStoreHashtableEnumeration
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -