📄 abstractconcurrentreadcache.java
字号:
return ((h << 7) - h + (h >>> 9) + (h >>> 17));
}
/**
 * Adds the given cache key to each of the specified groups.
 * <p>
 * The memory and disk group mappings are maintained separately so that each
 * stays valid for its own cache layer (e.g. if memory is limited to 100
 * entries while disk is unlimited, the two sets of mappings will diverge).
 *
 * @param key The cache key being added to the groups.
 * @param newGroups the set of groups this cache entry should be added to.
 * @param persist A flag to indicate whether the key should be added to
 * the persistent (disk) group mappings.
 * @param memory A flag to indicate whether the key should be added to
 * the in-memory group mappings (important for overflow-to-disk).
 */
private void addGroupMappings(String key, Set newGroups, boolean persist, boolean memory) {
    if (newGroups == null) {
        return;
    }

    // Register the key under every group it now belongs to.
    Iterator groupIterator = newGroups.iterator();

    while (groupIterator.hasNext()) {
        String group = (String) groupIterator.next();

        // Update the in-memory group mappings.
        if (memoryCaching && memory) {
            // Lazily create the group map and the per-group key set.
            if (groups == null) {
                groups = new HashMap();
            }

            Set keysInMemoryGroup = (Set) groups.get(group);

            if (keysInMemoryGroup == null) {
                keysInMemoryGroup = new HashSet();
                groups.put(group, keysInMemoryGroup);
            }

            keysInMemoryGroup.add(key);
        }

        // Update the persistent group mappings (read-modify-write).
        if (persist) {
            Set keysInPersistentGroup = persistRetrieveGroup(group);

            if (keysInPersistentGroup == null) {
                keysInPersistentGroup = new HashSet();
            }

            keysInPersistentGroup.add(key);
            persistStoreGroup(group, keysInPersistentGroup);
        }
    }
}
/** OpenSymphony END (pretty long!) */
/**
 * Returns the appropriate table capacity (a power of two) for the
 * specified initial capacity argument.
 */
private int p2capacity(int initialCapacity) {
    int requested = initialCapacity;

    // Negative or oversized requests are clamped straight to the maximum.
    if ((requested < 0) || (requested > MAXIMUM_CAPACITY)) {
        return MAXIMUM_CAPACITY;
    }

    // Otherwise grow from the minimum capacity by doubling until the
    // request is satisfied.
    int capacity = MINIMUM_CAPACITY;

    while (capacity < requested) {
        capacity <<= 1;
    }

    return capacity;
}
/* Previous code
public Object put(Object key, Object value)*/
// Inserts or replaces the mapping for key, returning the previous value if
// one existed (or, per CACHE-255, the value evicted to make room), else null.
// `persist` controls whether the value is also written to the persistent
// (disk) layer.
private Object put(Object key, Object value, boolean persist) {
    /** OpenSymphony END */
    // Null values are rejected; null is used internally to mark removed entries.
    if (value == null) {
        throw new NullPointerException();
    }

    int hash = hash(key);
    Entry[] tab = table;
    int index = hash & (tab.length - 1);
    Entry first = tab[index];
    Entry e = first;

    // Optimistic, lock-free scan of the hash chain; we only synchronize once
    // we know whether this is an insert (end of chain) or a replace (key hit).
    for (;;) {
        if (e == null) {
            // Reached the end of the chain: the key is absent, so insert.
            synchronized (this) {
                tab = table;

                /** OpenSymphony BEGIN */

                // Previous code
                /* if (first == tab[index]) {
                // Add to front of list
                Entry newEntry = new Entry(hash, key, value, first);
                tab[index] = newEntry;
                if (++count >= threshold) rehash();
                else recordModification(newEntry);
                return null; */
                Object oldValue = null;

                // Remove an item if the cache is full
                if (size() >= maxEntries) {
                    // part of fix CACHE-255: method should return old value
                    oldValue = remove(removeItem(), false, false);
                }

                if (first == tab[index]) {
                    // Add to front of list.
                    // When memory caching is disabled the entry stores the
                    // NULL sentinel instead of the real value; the value then
                    // lives only in the persistent store.
                    Entry newEntry = null;

                    if (memoryCaching) {
                        newEntry = new Entry(hash, key, value, first);
                    } else {
                        newEntry = new Entry(hash, key, NULL, first);
                    }

                    tab[index] = newEntry;
                    itemPut(key);

                    // Persist if required. Under overflow-to-disk persistence
                    // values are written to disk only upon eviction (see
                    // remove()), so nothing is stored here in that mode.
                    if (persist && !overflowPersistence) {
                        persistStore(key, value);
                    }

                    // If we have a CacheEntry, update the group lookups
                    if (value instanceof CacheEntry) {
                        updateGroups(null, (CacheEntry) value, persist);
                    }

                    if (++count >= threshold) {
                        rehash();
                    } else {
                        recordModification(newEntry);
                    }

                    return oldValue;

                    /** OpenSymphony END */
                } else {
                    // wrong list -- retry
                    /** OpenSymphony BEGIN */

                    /* Previous code
                    return sput(key, value, hash);*/
                    return sput(key, value, hash, persist);

                    /** OpenSymphony END */
                }
            }
        } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
            // Found an existing entry for this key: replace its value.
            // synch to avoid race with remove and to
            // ensure proper serialization of multiple replaces
            synchronized (this) {
                tab = table;

                Object oldValue = e.value;

                // [CACHE-118] - get the old cache entry even if there's no memory cache
                if (persist && (oldValue == NULL)) {
                    oldValue = persistRetrieve(key);
                }

                if ((first == tab[index]) && (oldValue != null)) {
                    /** OpenSymphony BEGIN */

                    /* Previous code
                    e.value = value;
                    return oldValue; */

                    // Only update the in-memory value when memory caching is on;
                    // otherwise the entry keeps its NULL sentinel.
                    if (memoryCaching) {
                        e.value = value;
                    }

                    // Persist if required. With overflow persistence the value
                    // is back in memory, so the disk copy is removed; otherwise
                    // the disk copy is refreshed.
                    if (persist && overflowPersistence) {
                        persistRemove(key);
                    } else if (persist) {
                        persistStore(key, value);
                    }

                    updateGroups(oldValue, value, persist);

                    itemPut(key);

                    return oldValue;

                    /** OpenSymphony END */
                } else {
                    // retry if wrong list or lost race against concurrent remove
                    /** OpenSymphony BEGIN */

                    /* Previous code
                    return sput(key, value, hash);*/
                    return sput(key, value, hash, persist);

                    /** OpenSymphony END */
                }
            }
        } else {
            e = e.next;
        }
    }
}
// Removes the mapping for key and returns its previous value (unwrapped from
// a CacheEntry where applicable, per CACHE-255), or null if absent.
// invokeAlgorithm: if true, notify the eviction algorithm via itemRemoved().
// forcePersist: if true, always remove the key from the persistent store too.
private synchronized Object remove(Object key, boolean invokeAlgorithm, boolean forcePersist)
/* Previous code
public Object remove(Object key) */

/** OpenSymphony END */
 {
    /*
    Strategy:
    Find the entry, then
    1. Set value field to null, to force get() to retry
    2. Rebuild the list without this entry.
    All entries following removed node can stay in list, but
    all preceeding ones need to be cloned. Traversals rely
    on this strategy to ensure that elements will not be
    repeated during iteration.
    */

    /** OpenSymphony BEGIN */
    if (key == null) {
        return null;
    }

    /** OpenSymphony END */
    int hash = hash(key);
    Entry[] tab = table;
    int index = hash & (tab.length - 1);
    Entry first = tab[index];
    Entry e = first;

    for (;;) {
        if (e == null) {
            // Key not found in the chain we scanned; re-check against the
            // current table in case it changed while we were scanning.
            tab = getTableForReading();

            if (first == tab[index]) {
                return null;
            } else {
                // Wrong list -- must restart traversal at new first
                /** OpenSymphony BEGIN */

                /* Previous Code
                return sremove(key, hash); */
                return sremove(key, hash, invokeAlgorithm);

                /** OpenSymphony END */
            }
        } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
            synchronized (this) {
                tab = table;

                Object oldValue = e.value;

                // When the entry holds the NULL sentinel (no in-memory value),
                // fetch the real value from the persistent store so callers
                // and group bookkeeping still see it.
                if (persistenceListener != null && (oldValue == NULL)) {
                    oldValue = persistRetrieve(key);
                }

                // re-find under synch if wrong list
                if ((first != tab[index]) || (oldValue == null)) {
                    /** OpenSymphony BEGIN */

                    /* Previous Code
                    return sremove(key, hash); */
                    return sremove(key, hash, invokeAlgorithm);
                }

                /** OpenSymphony END */
                e.value = null;

                count--;

                /** OpenSymphony BEGIN */
                // The persistent copy is removed when forced, or when the disk
                // cache is bounded and not in overflow-only mode; otherwise
                // only the memory-side group mappings are cleaned up and the
                // disk copy is left in place.
                if (forcePersist || (!unlimitedDiskCache && !overflowPersistence)) {
                    persistRemove(e.key);

                    // If we have a CacheEntry, update the group lookups
                    if (oldValue instanceof CacheEntry) {
                        CacheEntry oldEntry = (CacheEntry) oldValue;
                        removeGroupMappings(oldEntry.getKey(),
                                oldEntry.getGroups(), true);
                    }
                } else {
                    // only remove from memory groups
                    if (oldValue instanceof CacheEntry) {
                        CacheEntry oldEntry = (CacheEntry) oldValue;
                        removeGroupMappings(oldEntry.getKey(), oldEntry
                                .getGroups(), false);
                    }
                }

                // Overflow-to-disk: a value evicted because the cache is full
                // is written out to the persistent store at this point.
                if (!forcePersist && overflowPersistence && ((size() + 1) >= maxEntries)) {
                    persistStore(key, oldValue);

                    // add key to persistent groups but NOT to the memory groups
                    if (oldValue instanceof CacheEntry) {
                        CacheEntry oldEntry = (CacheEntry) oldValue;
                        addGroupMappings(oldEntry.getKey(), oldEntry.getGroups(), true, false);
                    }
                }

                if (invokeAlgorithm) {
                    itemRemoved(key);
                }

                // introduced to fix bug CACHE-255: return the raw content,
                // not the CacheEntry wrapper.
                if (oldValue instanceof CacheEntry) {
                    CacheEntry oldEntry = (CacheEntry) oldValue;
                    oldValue = oldEntry.getContent();
                }

                /** OpenSymphony END */

                // Rebuild the chain: entries after e are reused; entries
                // before e are cloned so in-flight traversals stay consistent.
                Entry head = e.next;

                for (Entry p = first; p != e; p = p.next) {
                    head = new Entry(p.hash, p.key, p.value, head);
                }

                tab[index] = head;
                recordModification(head);

                return oldValue;
            }
        } else {
            e = e.next;
        }
    }
}
/**
* Remove this CacheEntry from the groups it no longer belongs to.
* We have to treat the memory and disk group mappings separately so they remain
* valid for their corresponding memory/disk caches. (eg if mem is limited
* to 100 entries and disk is unlimited, the group mappings will be
* different).
*
* @param key The cache key that we are removing from the groups.
* @param oldGroups the set of groups we want to remove the cache entry
* from.
* @param persist A flag to indicate whether the keys should be removed
* from the persistent cache layer.
*/
private void removeGroupMappings(String key, Set oldGroups, boolean persist) {
if (oldGroups == null) {
return;
}
for (Iterator it = oldGroups.iterator(); it.hasNext();) {
String groupName = (String) it.next();
// Update the in-memory groups
if (memoryCaching && (this.groups != null)) {
Set memoryGroup = (Set) groups.get(groupName);
if (memoryGroup != null) {
memoryGroup.remove(key);
if (memoryGroup.isEmpty()) {
groups.remove(groupNam
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -