/*
 * jsdhash.c — double hashing implementation (excerpt).
 * NOTE(review): recovered from a code-viewer scrape; viewer UI text removed
 * and original formatting reconstructed.
 */
/*
 * NOTE(review): this excerpt begins inside SearchTable's probe sequence; the
 * function's opening (signature, declarations, initial HASH1 probe) precedes
 * this chunk and is not visible here.
 */
    if (MATCH_ENTRY_KEYHASH(entry, keyHash) &&
        matchEntry(table, entry, key)) {
        METER(table->stats.hits++);
        return entry;
    }

    /* Collision: double hash. */
    sizeLog2 = JS_DHASH_BITS - table->hashShift;
    hash2 = HASH2(keyHash, sizeLog2, hashShift);
    sizeMask = JS_BITMASK(sizeLog2);

    /* Save the first removed entry pointer so JS_DHASH_ADD can recycle it. */
    if (ENTRY_IS_REMOVED(entry)) {
        firstRemoved = entry;
    } else {
        firstRemoved = NULL;
        /* Mark this live entry as lying on a collision chain, so a later
           remove keeps it "removed" rather than "free" (see RawRemove). */
        if (op == JS_DHASH_ADD)
            entry->keyHash |= COLLISION_FLAG;
    }

    for (;;) {
        METER(table->stats.steps++);
        /* Step backward by hash2, wrapping within the table via sizeMask. */
        hash1 -= hash2;
        hash1 &= sizeMask;
        entry = ADDRESS_ENTRY(table, hash1);
        if (JS_DHASH_ENTRY_IS_FREE(entry)) {
            METER(table->stats.misses++);
            /* For ADD, prefer recycling the earliest removed slot seen. */
            return (firstRemoved && op == JS_DHASH_ADD) ? firstRemoved : entry;
        }
        if (MATCH_ENTRY_KEYHASH(entry, keyHash) &&
            matchEntry(table, entry, key)) {
            METER(table->stats.hits++);
            return entry;
        }
        if (ENTRY_IS_REMOVED(entry)) {
            if (!firstRemoved)
                firstRemoved = entry;
        } else {
            if (op == JS_DHASH_ADD)
                entry->keyHash |= COLLISION_FLAG;
        }
    }

    /* NOTREACHED */
    return NULL;
}

/*
 * Resize table's entry store by a factor of 2^deltaLog2 (deltaLog2 may be
 * negative to shrink, or 0 to compress in place), rehashing all live entries
 * into the new store and dropping removed ones.  Returns JS_FALSE, leaving
 * the table untouched, if the new capacity would reach JS_DHASH_SIZE_LIMIT
 * or the new store cannot be allocated; JS_TRUE on success.
 */
static JSBool
ChangeTable(JSDHashTable *table, int deltaLog2)
{
    int oldLog2, newLog2;
    uint32 oldCapacity, newCapacity;
    char *newEntryStore, *oldEntryStore, *oldEntryAddr;
    uint32 entrySize, i, nbytes;
    JSDHashEntryHdr *oldEntry, *newEntry;
    JSDHashGetKey getKey;
    JSDHashMoveEntry moveEntry;
#ifdef DEBUG
    uint32 recursionLevel;
#endif

    /* Look, but don't touch, until we succeed in getting new entry store. */
    oldLog2 = JS_DHASH_BITS - table->hashShift;
    newLog2 = oldLog2 + deltaLog2;
    oldCapacity = JS_BIT(oldLog2);
    newCapacity = JS_BIT(newLog2);
    if (newCapacity >= JS_DHASH_SIZE_LIMIT)
        return JS_FALSE;
    entrySize = table->entrySize;
    nbytes = newCapacity * entrySize;

    newEntryStore = table->ops->allocTable(table, nbytes + ENTRY_STORE_EXTRA);
    if (!newEntryStore)
        return JS_FALSE;

    /* We can't fail from here on, so update table parameters. */
#ifdef DEBUG
    recursionLevel = RECURSION_LEVEL(table);
#endif
    table->hashShift = JS_DHASH_BITS - newLog2;
    table->removedCount = 0;
    table->generation++;

    /* Assign the new entry store to table. */
    memset(newEntryStore, 0, nbytes);
    oldEntryAddr = oldEntryStore = table->entryStore;
    table->entryStore = newEntryStore;
    getKey = table->ops->getKey;
    moveEntry = table->ops->moveEntry;
#ifdef DEBUG
    /* Preserve the recursion level across the entryStore swap above. */
    RECURSION_LEVEL(table) = recursionLevel;
#endif

    /* Copy only live entries, leaving removed ones behind. */
    for (i = 0; i < oldCapacity; i++) {
        oldEntry = (JSDHashEntryHdr *)oldEntryAddr;
        if (ENTRY_IS_LIVE(oldEntry)) {
            /* Clear the stale collision flag; SearchTable re-derives chains
               in the new store. */
            oldEntry->keyHash &= ~COLLISION_FLAG;
            newEntry = SearchTable(table, getKey(table, oldEntry),
                                   oldEntry->keyHash, JS_DHASH_ADD);
            JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(newEntry));
            moveEntry(table, oldEntry, newEntry);
            newEntry->keyHash = oldEntry->keyHash;
        }
        oldEntryAddr += entrySize;
    }

    table->ops->freeTable(table, oldEntryStore);
    return JS_TRUE;
}

/*
 * Single public entry point for lookup/add/remove.  Hashes key via
 * ops->hashKey, scrambles with the golden ratio, forces a live key hash, and
 * dispatches on op.  ADD may first grow or compress the table when loaded at
 * or beyond MAX_LOAD; REMOVE may shrink it afterward when at or below
 * MIN_LOAD.  Returns the entry for LOOKUP and ADD (NULL if ADD fails) and
 * always NULL for REMOVE.
 */
JS_PUBLIC_API(JSDHashEntryHdr *) JS_DHASH_FASTCALL
JS_DHashTableOperate(JSDHashTable *table, const void *key, JSDHashOperator op)
{
    JSDHashNumber keyHash;
    JSDHashEntryHdr *entry;
    uint32 size;
    int deltaLog2;

    /* Only LOOKUP is permitted while an Operate/Enumerate is in progress. */
    JS_ASSERT(op == JS_DHASH_LOOKUP || RECURSION_LEVEL(table) == 0);
    INCREMENT_RECURSION_LEVEL(table);

    keyHash = table->ops->hashKey(table, key);
    keyHash *= JS_DHASH_GOLDEN_RATIO;

    /* Avoid 0 and 1 hash codes, they indicate free and removed entries. */
    ENSURE_LIVE_KEYHASH(keyHash);
    keyHash &= ~COLLISION_FLAG;

    switch (op) {
      case JS_DHASH_LOOKUP:
        METER(table->stats.lookups++);
        entry = SearchTable(table, key, keyHash, op);
        break;

      case JS_DHASH_ADD:
        /*
         * If alpha is >= .75, grow or compress the table. If key is already
         * in the table, we may grow once more than necessary, but only if we
         * are on the edge of being overloaded.
         */
        size = JS_DHASH_TABLE_SIZE(table);
        if (table->entryCount + table->removedCount >= MAX_LOAD(table, size)) {
            /* Compress if a quarter or more of all entries are removed. */
            if (table->removedCount >= size >> 2) {
                METER(table->stats.compresses++);
                deltaLog2 = 0;
            } else {
                METER(table->stats.grows++);
                deltaLog2 = 1;
            }

            /*
             * Grow or compress table, returning null if ChangeTable fails and
             * falling through might claim the last free entry.
             */
            if (!ChangeTable(table, deltaLog2) &&
                table->entryCount + table->removedCount == size - 1) {
                METER(table->stats.addFailures++);
                entry = NULL;
                break;
            }
        }

        /*
         * Look for entry after possibly growing, so we don't have to add it,
         * then skip it while growing the table and re-add it after.
         */
        entry = SearchTable(table, key, keyHash, op);
        if (!ENTRY_IS_LIVE(entry)) {
            /* Initialize the entry, indicating that it's no longer free. */
            METER(table->stats.addMisses++);
            if (ENTRY_IS_REMOVED(entry)) {
                METER(table->stats.addOverRemoved++);
                table->removedCount--;
                /* Recycled slots sit mid-chain by construction. */
                keyHash |= COLLISION_FLAG;
            }
            if (table->ops->initEntry &&
                !table->ops->initEntry(table, entry, key)) {
                /* We haven't claimed entry yet; fail with null return. */
                memset(entry + 1, 0, table->entrySize - sizeof *entry);
                entry = NULL;
                break;
            }
            entry->keyHash = keyHash;
            table->entryCount++;
        }
        METER(else table->stats.addHits++);
        break;

      case JS_DHASH_REMOVE:
        entry = SearchTable(table, key, keyHash, op);
        if (ENTRY_IS_LIVE(entry)) {
            /* Clear this entry and mark it as "removed". */
            METER(table->stats.removeHits++);
            JS_DHashTableRawRemove(table, entry);

            /* Shrink if alpha is <= .25 and table isn't too small already. */
            size = JS_DHASH_TABLE_SIZE(table);
            if (size > JS_DHASH_MIN_SIZE &&
                table->entryCount <= MIN_LOAD(table, size)) {
                METER(table->stats.shrinks++);
                (void) ChangeTable(table, -1);
            }
        }
        METER(else table->stats.removeMisses++);
        entry = NULL;
        break;

      default:
        JS_ASSERT(0);
        entry = NULL;
    }

    DECREMENT_RECURSION_LEVEL(table);
    return entry;
}

/*
 * Clear entry via ops->clearEntry and mark its slot "removed" if it lies on
 * a collision chain (COLLISION_FLAG set), or "free" if not.  Never resizes
 * the table; callers wanting load-factor maintenance use JS_DHASH_REMOVE
 * through JS_DHashTableOperate instead.
 */
JS_PUBLIC_API(void)
JS_DHashTableRawRemove(JSDHashTable *table, JSDHashEntryHdr *entry)
{
    JSDHashNumber keyHash;      /* load first in case clearEntry goofs it */

    JS_ASSERT(JS_DHASH_ENTRY_IS_LIVE(entry));
    keyHash = entry->keyHash;
    table->ops->clearEntry(table, entry);
    if (keyHash & COLLISION_FLAG) {
        MARK_ENTRY_REMOVED(entry);
        table->removedCount++;
    } else {
        METER(table->stats.removeFrees++);
        MARK_ENTRY_FREE(entry);
    }
    table->entryCount--;
}

/*
 * Call etor on each live entry, in entry-store order, passing a running
 * index i.  etor may request removal (JS_DHASH_REMOVE) and/or early stop
 * (JS_DHASH_STOP) via its return value.  If anything was removed, the store
 * may be shrunk/compressed afterward.  Returns the number of entries
 * enumerated.
 */
JS_PUBLIC_API(uint32)
JS_DHashTableEnumerate(JSDHashTable *table, JSDHashEnumerator etor, void *arg)
{
    char *entryAddr, *entryLimit;
    uint32 i, capacity, entrySize, ceiling;
    JSBool didRemove;
    JSDHashEntryHdr *entry;
    JSDHashOperator op;

    INCREMENT_RECURSION_LEVEL(table);
    entryAddr = table->entryStore;
    entrySize = table->entrySize;
    capacity = JS_DHASH_TABLE_SIZE(table);
    entryLimit = entryAddr + capacity * entrySize;
    i = 0;
    didRemove = JS_FALSE;
    while (entryAddr < entryLimit) {
        entry = (JSDHashEntryHdr *)entryAddr;
        if (ENTRY_IS_LIVE(entry)) {
            op = etor(table, entry, i++, arg);
            if (op & JS_DHASH_REMOVE) {
                METER(table->stats.removeEnums++);
                JS_DHashTableRawRemove(table, entry);
                didRemove = JS_TRUE;
            }
            if (op & JS_DHASH_STOP)
                break;
        }
        entryAddr += entrySize;
    }

    /* Removal is only legal from a non-reentrant enumeration. */
    JS_ASSERT(!didRemove || RECURSION_LEVEL(table) == 1);

    /*
     * Shrink or compress if a quarter or more of all entries are removed, or
     * if the table is underloaded according to the configured minimum alpha,
     * and is not minimal-size already. Do this only if we removed above, so
     * non-removing enumerations can count on stable table->entryStore until
     * the next non-lookup-Operate or removing-Enumerate.
     */
    if (didRemove &&
        (table->removedCount >= capacity >> 2 ||
         (capacity > JS_DHASH_MIN_SIZE &&
          table->entryCount <= MIN_LOAD(table, capacity)))) {
        METER(table->stats.enumShrinks++);
        /* Target capacity = 1.5 * entryCount, clamped to the minimum size. */
        capacity = table->entryCount;
        capacity += capacity >> 1;
        if (capacity < JS_DHASH_MIN_SIZE)
            capacity = JS_DHASH_MIN_SIZE;
        /* deltaLog2 = ceil(log2(target)) - log2(current capacity). */
        JS_CEILING_LOG2(ceiling, capacity);
        ceiling -= JS_DHASH_BITS - table->hashShift;
        (void) ChangeTable(table, ceiling);
    }

    DECREMENT_RECURSION_LEVEL(table);
    return i;
}

#ifdef JS_DHASHMETER
#include <math.h>

/*
 * Dump probe-chain statistics (mean/sigma of chain lengths, hit/miss and
 * grow/shrink counters) to fp, then, if dump is non-null, walk and print the
 * longest observed chain.  Compiled only with JS_DHASHMETER.
 */
JS_PUBLIC_API(void)
JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp)
{
    char *entryAddr;
    uint32 entrySize, entryCount;
    int hashShift, sizeLog2;
    uint32 i, tableSize, sizeMask, chainLen, maxChainLen, chainCount;
    JSDHashNumber hash1, hash2, saveHash1, maxChainHash1, maxChainHash2;
    double sqsum, mean, variance, sigma;
    JSDHashEntryHdr *entry, *probe;

    entryAddr = table->entryStore;
    entrySize = table->entrySize;
    hashShift = table->hashShift;
    sizeLog2 = JS_DHASH_BITS - hashShift;
    tableSize = JS_DHASH_TABLE_SIZE(table);
    sizeMask = JS_BITMASK(sizeLog2);
    chainCount = maxChainLen = 0;
    hash2 = 0;
    sqsum = 0;

    for (i = 0; i < tableSize; i++) {
        entry = (JSDHashEntryHdr *)entryAddr;
        entryAddr += entrySize;
        if (!ENTRY_IS_LIVE(entry))
            continue;
        hash1 = HASH1(entry->keyHash & ~COLLISION_FLAG, hashShift);
        saveHash1 = hash1;
        probe = ADDRESS_ENTRY(table, hash1);
        chainLen = 1;
        if (probe == entry) {
            /* Start of a (possibly unit-length) chain. */
            chainCount++;
        } else {
            /* Replay the double-hash probes until we land on entry. */
            hash2 = HASH2(entry->keyHash & ~COLLISION_FLAG, sizeLog2,
                          hashShift);
            do {
                chainLen++;
                hash1 -= hash2;
                hash1 &= sizeMask;
                probe = ADDRESS_ENTRY(table, hash1);
            } while (probe != entry);
        }
        sqsum += chainLen * chainLen;
        if (chainLen > maxChainLen) {
            maxChainLen = chainLen;
            maxChainHash1 = saveHash1;
            maxChainHash2 = hash2;
        }
    }

    entryCount = table->entryCount;
    if (entryCount && chainCount) {
        mean = (double)entryCount / chainCount;
        variance = chainCount * sqsum - entryCount * entryCount;
        if (variance < 0 || chainCount == 1)
            variance = 0;
        else
            variance /= chainCount * (chainCount - 1);
        sigma = sqrt(variance);
    } else {
        mean = sigma = 0;
    }

    fprintf(fp, "Double hashing statistics:\n");
    fprintf(fp, " table size (in entries): %u\n", tableSize);
    fprintf(fp, " number of entries: %u\n", table->entryCount);
    fprintf(fp, " number of removed entries: %u\n", table->removedCount);
    fprintf(fp, " number of searches: %u\n", table->stats.searches);
    fprintf(fp, " number of hits: %u\n", table->stats.hits);
    fprintf(fp, " number of misses: %u\n", table->stats.misses);
    fprintf(fp, " mean steps per search: %g\n",
            table->stats.searches ?
            (double)table->stats.steps / table->stats.searches : 0.);
    fprintf(fp, " mean hash chain length: %g\n", mean);
    fprintf(fp, " standard deviation: %g\n", sigma);
    fprintf(fp, " maximum hash chain length: %u\n", maxChainLen);
    fprintf(fp, " number of lookups: %u\n", table->stats.lookups);
    fprintf(fp, " adds that made a new entry: %u\n", table->stats.addMisses);
    fprintf(fp, "adds that recycled removeds: %u\n",
            table->stats.addOverRemoved);
    fprintf(fp, " adds that found an entry: %u\n", table->stats.addHits);
    fprintf(fp, " add failures: %u\n", table->stats.addFailures);
    fprintf(fp, " useful removes: %u\n", table->stats.removeHits);
    fprintf(fp, " useless removes: %u\n", table->stats.removeMisses);
    fprintf(fp, "removes that freed an entry: %u\n", table->stats.removeFrees);
    fprintf(fp, " removes while enumerating: %u\n", table->stats.removeEnums);
    fprintf(fp, " number of grows: %u\n", table->stats.grows);
    fprintf(fp, " number of shrinks: %u\n", table->stats.shrinks);
    fprintf(fp, " number of compresses: %u\n", table->stats.compresses);
    fprintf(fp, "number of enumerate shrinks: %u\n",
            table->stats.enumShrinks);

    /* hash2 != 0 implies at least one multi-entry chain was walked above. */
    if (dump && maxChainLen && hash2) {
        fputs("Maximum hash chain:\n", fp);
        hash1 = maxChainHash1;
        hash2 = maxChainHash2;
        entry = ADDRESS_ENTRY(table, hash1);
        i = 0;
        do {
            if (dump(table, entry, i++, fp) != JS_DHASH_NEXT)
                break;
            hash1 -= hash2;
            hash1 &= sizeMask;
            entry = ADDRESS_ENTRY(table, hash1);
        } while (JS_DHASH_ENTRY_IS_BUSY(entry));
    }
}
#endif /* JS_DHASHMETER */
/* End of jsdhash.c excerpt (code-viewer UI text removed). */