// AbstractConcurrentReadCache.java
}
}
/**
* Creates or Updates a cache group using the persistence listener.
* @param groupName The name of the group to update
* @param group The entries for the group
*/
protected void persistStoreGroup(String groupName, Set group) {
    if (log.isDebugEnabled()) {
        log.debug("persistStoreGroup called (groupName=" + groupName + ")");
    }

    if (persistenceListener != null) {
        try {
            if ((group == null) || group.isEmpty()) {
                persistenceListener.removeGroup(groupName);
            } else {
                persistenceListener.storeGroup(groupName, group);
            }
        } catch (CachePersistenceException e) {
            log.error("[oscache] Exception persisting group " + groupName, e);
        }
    }
}
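/*
   Illustration (not part of the original file). persistStoreGroup treats a
   null or empty set as "delete the group", and otherwise hands the listener
   the complete entry set for whole-set replacement. A minimal in-memory
   stand-in for that contract might look as follows; it covers only the group
   calls made in this listing and does not claim to implement OSCache's full
   CachePersistenceListener interface.

   import java.util.HashMap;
   import java.util.HashSet;
   import java.util.Map;
   import java.util.Set;

   public class InMemoryGroupStore {
       private final Map groups = new HashMap(); // group name -> Set of keys

       public void storeGroup(String groupName, Set group) {
           // Whole-set replacement: the caller always passes the complete
           // entry set, so no merging is needed here.
           groups.put(groupName, new HashSet(group));
       }

       public void removeGroup(String groupName) {
           groups.remove(groupName);
       }

       public void clear() {
           groups.clear();
       }
   }

   Because persistStoreGroup maps empty groups to removeGroup, a listener
   never has to represent an empty group in its backing store.
*/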
/**
* Removes the entire cache from persistent storage.
*/
protected void persistClear() {
    if (log.isDebugEnabled()) {
        log.debug("persistClear called");
    }

    if (persistenceListener != null) {
        try {
            persistenceListener.clear();
        } catch (CachePersistenceException e) {
            log.error("[oscache] Exception clearing persistent cache", e);
        }
    }
}
/**
* Notify the underlying implementation that an item was put in the cache.
*
* @param key The cache key of the item that was put.
*/
protected abstract void itemPut(Object key);
/**
* Notify any underlying algorithm that an item has been retrieved from the cache.
*
* @param key The cache key of the item that was retrieved.
*/
protected abstract void itemRetrieved(Object key);
/**
* Notify the underlying implementation that an item was removed from the cache.
*
* @param key The cache key of the item that was removed.
*/
protected abstract void itemRemoved(Object key);
/**
* The cache has reached its capacity and an item needs to be removed
* (typically according to an algorithm such as LRU or FIFO).
*
* @return The key of whichever item was removed.
*/
protected abstract Object removeItem();
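/*
   Illustration (not part of the original file). The four hooks above are the
   entire eviction contract a subclass must satisfy. As a sketch only (this is
   not OSCache's shipped LRUCache; the class name and field are hypothetical),
   an LRU policy can be expressed with an access-ordered LinkedHashMap:

   import java.util.Collections;
   import java.util.LinkedHashMap;
   import java.util.Map;

   public class LruSketchCache extends AbstractConcurrentReadCache {
       // Access-ordered map used as an LRU queue; the values are unused.
       // Synchronization here is deliberately simplified for the sketch.
       private final Map lru = Collections.synchronizedMap(new LinkedHashMap(16, 0.75f, true));

       protected void itemPut(Object key) {
           lru.put(key, Boolean.TRUE); // new entries go to the tail
       }

       protected void itemRetrieved(Object key) {
           lru.get(key); // the access-ordered map moves the key to the tail
       }

       protected void itemRemoved(Object key) {
           lru.remove(key);
       }

       protected Object removeItem() {
           // Evict the head of the queue: the least recently used key.
           Object oldest = lru.keySet().iterator().next();
           lru.remove(oldest);
           return oldest;
       }
   }
*/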
/**
* Reconstitute the <tt>AbstractConcurrentReadCache</tt>
* instance from a stream (i.e., deserialize it).
*/
private synchronized void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
    // Read in the threshold, loadfactor, and any hidden stuff
    s.defaultReadObject();

    // Read in number of buckets and allocate the bucket array
    int numBuckets = s.readInt();
    table = new Entry[numBuckets];

    // Read in size (number of mappings)
    int size = s.readInt();

    // Read the keys and values, and put the mappings in the table
    for (int i = 0; i < size; i++) {
        Object key = s.readObject();
        Object value = s.readObject();
        put(key, value);
    }
}
/**
* Rehashes the contents of this map into a new table with a larger capacity.
* This method is called automatically when the
* number of keys in this map exceeds its capacity and load factor.
*/
protected void rehash() {
    Entry[] oldMap = table;
    int oldCapacity = oldMap.length;

    if (oldCapacity >= MAXIMUM_CAPACITY) {
        return;
    }

    int newCapacity = oldCapacity << 1;
    Entry[] newMap = new Entry[newCapacity];
    threshold = (int) (newCapacity * loadFactor);

    /*
       We need to guarantee that any existing reads of oldMap can
       proceed. So we cannot yet null out each oldMap bin.

       Because we are using power-of-two expansion, the elements
       from each bin must either stay at same index, or move
       to oldCapacity+index. We also minimize new node creation by
       catching cases where old nodes can be reused because their
       .next fields won't change. (This is checked only for sequences
       of one and two. It is not worth checking longer ones.)
     */
    for (int i = 0; i < oldCapacity; ++i) {
        Entry l = null;
        Entry h = null;
        Entry e = oldMap[i];

        while (e != null) {
            int hash = e.hash;
            Entry next = e.next;

            if ((hash & oldCapacity) == 0) {
                // stays at newMap[i]
                if (l == null) {
                    // try to reuse node
                    if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) == 0))) {
                        l = e;
                        break;
                    }
                }

                l = new Entry(hash, e.key, e.value, l);
            } else {
                // moves to newMap[oldCapacity+i]
                if (h == null) {
                    if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) != 0))) {
                        h = e;
                        break;
                    }
                }

                h = new Entry(hash, e.key, e.value, h);
            }

            e = next;
        }

        newMap[i] = l;
        newMap[oldCapacity + i] = h;
    }

    table = newMap;
    recordModification(newMap);
}
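/*
   Illustration (not part of the original file). The comment above relies on a
   property of power-of-two tables: doubling the capacity adds exactly one bit
   to the index mask, so every entry either keeps its index or moves to
   oldCapacity + index, selected by (hash & oldCapacity). A standalone check:

   public class RehashSplitDemo {
       public static void main(String[] args) {
           int oldCapacity = 16;
           int newCapacity = oldCapacity << 1;

           for (int hash = 0; hash < 64; hash++) {
               int oldIndex = hash & (oldCapacity - 1);
               int newIndex = hash & (newCapacity - 1);

               // One extra hash bit decides between the two target bins.
               int expected = ((hash & oldCapacity) == 0) ? oldIndex : (oldCapacity + oldIndex);

               if (newIndex != expected) {
                   throw new AssertionError("invariant violated at hash=" + hash);
               }
           }

           System.out.println("every hash stayed at index or moved to oldCapacity + index");
       }
   }
*/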
/**
* Continuation of put(), called only when synch lock is
* held and interference has been detected.
**/
/** OpenSymphony BEGIN */
/* Previous code
protected Object sput(Object key, Object value, int hash) {*/
protected Object sput(Object key, Object value, int hash, boolean persist) {
    /** OpenSymphony END */
    Entry[] tab = table;
    int index = hash & (tab.length - 1);
    Entry first = tab[index];
    Entry e = first;

    for (;;) {
        if (e == null) {
            /** OpenSymphony BEGIN */

            // Previous code
            // Entry newEntry = new Entry(hash, key, value, first);
            Entry newEntry;

            if (memoryCaching) {
                newEntry = new Entry(hash, key, value, first);
            } else {
                newEntry = new Entry(hash, key, NULL, first);
            }

            itemPut(key);

            // Persist if required
            if (persist && !overflowPersistence) {
                persistStore(key, value);
            }

            // If we have a CacheEntry, update the group lookups
            if (value instanceof CacheEntry) {
                updateGroups(null, (CacheEntry) value, persist);
            }

            /** OpenSymphony END */
            tab[index] = newEntry;

            if (++count >= threshold) {
                rehash();
            } else {
                recordModification(newEntry);
            }

            return null;
        } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
            Object oldValue = e.value;

            /** OpenSymphony BEGIN */
            /* Previous code
            e.value = value; */
            if (memoryCaching) {
                e.value = value;
            }

            // Persist if required
            if (persist && overflowPersistence) {
                persistRemove(key);
            } else if (persist) {
                persistStore(key, value);
            }

            updateGroups(oldValue, value, persist);
            itemPut(key);

            /** OpenSymphony END */
            return oldValue;
        } else {
            e = e.next;
        }
    }
}
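/*
   Illustration (not part of the original file). When memoryCaching is off,
   sput stores the shared NULL marker in the table and the real value only
   in the persistent store; sremove later recognizes the marker by identity
   and reloads from disk. The pattern in isolation (all names hypothetical,
   with a plain Map standing in for the persistence listener):

   import java.util.HashMap;
   import java.util.Map;

   public class SentinelDemo {
       private static final Object NULL = new Object(); // shared marker

       private final Map memory = new HashMap();
       private final Map disk = new HashMap(); // stand-in for the listener

       public void put(Object key, Object value, boolean memoryCaching) {
           memory.put(key, memoryCaching ? value : NULL);
           disk.put(key, value);
       }

       public Object get(Object key) {
           Object v = memory.get(key);

           // Identity comparison: the marker is one shared instance,
           // so == is both safe and cheap here.
           return (v == NULL) ? disk.get(key) : v;
       }
   }
*/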
/**
* Continuation of remove(), called only when synch lock is
* held and interference has been detected.
**/
/** OpenSymphony BEGIN */
/* Previous code
protected Object sremove(Object key, int hash) { */
protected Object sremove(Object key, int hash, boolean invokeAlgorithm) {
    /** OpenSymphony END */
    Entry[] tab = table;
    int index = hash & (tab.length - 1);
    Entry first = tab[index];
    Entry e = first;

    for (;;) {
        if (e == null) {
            return null;
        } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
            Object oldValue = e.value;

            if (persistenceListener != null && (oldValue == NULL)) {
                oldValue = persistRetrieve(key);
            }

            e.value = null;
            count--;

            /** OpenSymphony BEGIN */
            if (!unlimitedDiskCache && !overflowPersistence) {
                persistRemove(e.key);

                // If we have a CacheEntry, update the groups
                if (oldValue instanceof CacheEntry) {
                    CacheEntry oldEntry = (CacheEntry) oldValue;
                    removeGroupMappings(oldEntry.getKey(), oldEntry.getGroups(), true);
                }
            } else {
                // only remove from memory groups
                if (oldValue instanceof CacheEntry) {
                    CacheEntry oldEntry = (CacheEntry) oldValue;
                    removeGroupMappings(oldEntry.getKey(), oldEntry.getGroups(), false);
                }
            }

            if (overflowPersistence && ((size() + 1) >= maxEntries)) {
                persistStore(key, oldValue);

                // add key to persistent groups but NOT to the memory groups
                if (oldValue instanceof CacheEntry) {
                    CacheEntry oldEntry = (CacheEntry) oldValue;
                    addGroupMappings(oldEntry.getKey(), oldEntry.getGroups(), true, false);
                }
            }

            if (invokeAlgorithm) {
                itemRemoved(key);
            }

            /** OpenSymphony END */
            Entry head = e.next;

            for (Entry p = first; p != e; p = p.next) {
                head = new Entry(p.hash, p.key, p.value, head);
            }

            tab[index] = head;
            recordModification(head);

            return oldValue;
        } else {
            e = e.next;
        }
    }
}
/**
* Save the state of the <tt>AbstractConcurrentReadCache</tt> instance to a stream
* (i.e., serialize it).
*
* @serialData The <i>capacity</i> of the
* AbstractConcurrentReadCache (the length of the
* bucket array) is emitted (int), followed by the
* <i>size</i> of the AbstractConcurrentReadCache (the number of key-value
* mappings), followed by the key (Object) and value (Object)
* for each key-value mapping represented by the AbstractConcurrentReadCache.
* The key-value mappings are emitted in no particular order.
*/
private synchronized void writeObject(java.io.ObjectOutputStream s) throws IOException {
    // Write out the threshold, loadfactor, and any hidden stuff
    s.defaultWriteObject();

    // Write out number of buckets
    s.writeInt(table.length);

    // Write out size (number of mappings)
    s.writeInt(count);

    // Write out keys and values (alternating)
    for (int index = table.length - 1; index >= 0; index--) {
        Entry entry = table[index];

        while (entry != null) {
            s.writeObject(entry.key);
            s.writeObject(entry.value);
            entry = entry.next;
        }
    }
}
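/*
   Illustration (not part of the original file). Given any concrete subclass
   (for example the hypothetical LruSketchCache sketched earlier), the serial
   form described by writeObject/readObject can be exercised with a plain
   round trip; this sketch assumes the subclass has a no-arg constructor:

   import java.io.ByteArrayInputStream;
   import java.io.ByteArrayOutputStream;
   import java.io.ObjectInputStream;
   import java.io.ObjectOutputStream;

   public class SerialFormDemo {
       public static void main(String[] args) throws Exception {
           AbstractConcurrentReadCache cache = new LruSketchCache();
           cache.put("key", "value");

           // writeObject emits the capacity, then the size, then the
           // key/value pairs in no particular order.
           ByteArrayOutputStream bytes = new ByteArrayOutputStream();
           new ObjectOutputStream(bytes).writeObject(cache);

           // readObject allocates the bucket array and re-puts each mapping.
           ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()));
           AbstractConcurrentReadCache copy = (AbstractConcurrentReadCache) in.readObject();

           System.out.println("round-tripped value: " + copy.get("key"));
       }
   }
*/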
/**
* Return hash code for Object x.
* Since we are using power-of-two
* tables, it is worth the effort to improve hashcode via
* the same multiplicative scheme as used in IdentityHashMap.
*/
private static int hash(Object x) {
    int h = x.hashCode();

    // Multiply by 127 (quickly, via shifts), and mix in some high
    // bits to help guard against bunching of codes that are
    // consecutive or equally spaced.
    return ((h << 7) - h + (h >>> 9) + (h >>> 17));
}
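/*
   Illustration (not part of the original file). The spreader matters because
   the table index is just (hash & (length - 1)): hashCodes that differ only
   in high bits would otherwise all land in bucket 0. This standalone sketch
   mirrors the arithmetic of hash() above on codes spaced 1024 apart:

   public class HashSpreadDemo {
       private static int spread(int h) {
           // Same scheme as hash(): multiply by 127 via shifts, then mix
           // some high bits down into the low bits.
           return ((h << 7) - h + (h >>> 9) + (h >>> 17));
       }

       public static void main(String[] args) {
           int mask = 15; // a 16-bucket table

           for (int k = 0; k < 8; k++) {
               int h = k << 10; // hashCodes 0, 1024, 2048, ...
               System.out.println("raw bucket=" + (h & mask) + "  spread bucket=" + (spread(h) & mask));
           }
       }
   }
*/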