/* store_digest.c -- cache digest rebuild/rewrite (chunk; file start not shown) */
/* NOTE(review): tail of a function that begins above this chunk (not visible
 * here); it appears to count a rejected-due-to-collision entry and close an
 * enclosing if/else arm plus the function body. Left untouched. */
sd_stats.rej_coll_count++;
    }
}

/*
 * rebuilds digest from scratch
 *
 * Entry point of the Rebuild state machine.  Takes the rebuild lock and, if
 * no Rewrite is in flight, resumes immediately; otherwise it returns and the
 * pending rebuild is resumed later by storeDigestRewriteFinish().
 */
static void
storeDigestRebuildStart(void *datanotused)
{
    assert(store_digest);
    /* prevent overlapping if rebuild schedule is too tight */
    if (sd_state.rebuild_lock) {
	debug(71, 1) ("storeDigestRebuildStart: overlap detected, consider increasing rebuild period\n");
	return;
    }
    sd_state.rebuild_lock = 1;
    debug(71, 2) ("storeDigestRebuildStart: rebuild #%d\n", sd_state.rebuild_count + 1);
    /* a Rewrite is swapping the digest out; Rebuild must wait its turn */
    if (sd_state.rewrite_lock) {
	debug(71, 2) ("storeDigestRebuildStart: waiting for Rewrite to finish.\n");
	return;
    }
    storeDigestRebuildResume();
}

/*
 * called by Rewrite to push Rebuild forward
 *
 * Resets the bucket cursor, resizes or clears the digest, zeroes the
 * per-rebuild stats, and schedules the first incremental step.
 */
static void
storeDigestRebuildResume(void)
{
    assert(sd_state.rebuild_lock);
    assert(!sd_state.rewrite_lock);
    sd_state.rebuild_offset = 0;
    /* resize or clear */
    if (!storeDigestResize())
	cacheDigestClear(store_digest);	/* not clean()! */
    memset(&sd_stats, 0, sizeof(sd_stats));
    eventAdd("storeDigestRebuildStep", storeDigestRebuildStep, NULL, 0.0, 1);
}

/*
 * finishes swap out sequence for the digest; schedules next rebuild
 *
 * Releases the rebuild lock, schedules the next periodic rebuild, and hands
 * control back to a Rewrite that was blocked waiting on this rebuild.
 */
static void
storeDigestRebuildFinish(void)
{
    assert(sd_state.rebuild_lock);
    sd_state.rebuild_lock = 0;
    sd_state.rebuild_count++;
    debug(71, 2) ("storeDigestRebuildFinish: done.\n");
    eventAdd("storeDigestRebuildStart", storeDigestRebuildStart, NULL, (double)
	StoreDigestRebuildPeriod, 1);
    /* resume pending Rewrite if any */
    if (sd_state.rewrite_lock)
	storeDigestRewriteResume();
}

/*
 * recalculate a few hash buckets per invocation; schedules next step
 *
 * Processes a chunk (StoreDigestRebuildChunkPercent of all buckets) of the
 * store hash table per event-loop callback so the rebuild does not block the
 * process, then re-queues itself until every bucket has been scanned.
 */
static void
storeDigestRebuildStep(void *datanotused)
{
    int bcount = (int) ceil(store_hash_buckets * StoreDigestRebuildChunkPercent);
    assert(sd_state.rebuild_lock);
    /* clamp the last chunk so we never run past the table end */
    if (sd_state.rebuild_offset + bcount > store_hash_buckets)
	bcount = store_hash_buckets - sd_state.rebuild_offset;
    debug(71, 3) ("storeDigestRebuildStep: buckets: %d offset: %d chunk: %d buckets\n",
	store_hash_buckets, sd_state.rebuild_offset, bcount);
    while (bcount--) {
	/* NOTE(review): relies on hash_link being the leading member of
	 * StoreEntry so the cast is valid -- confirm against store.h */
	hash_link *link_ptr = hash_get_bucket(store_table, sd_state.rebuild_offset);
	for (; link_ptr; link_ptr = link_ptr->next) {
	    storeDigestAdd((StoreEntry *) link_ptr);
	}
	sd_state.rebuild_offset++;
    }
    /* are we done ? */
    if (sd_state.rebuild_offset >= store_hash_buckets)
	storeDigestRebuildFinish();
    else
	eventAdd("storeDigestRebuildStep", storeDigestRebuildStep, NULL, 0.0, 1);
}

/*
 * starts swap out sequence for the digest
 *
 * Entry point of the Rewrite state machine: creates the StoreEntry that will
 * hold the digest object.  sd_state.rewrite_lock doubles as both the "rewrite
 * in progress" flag and the pointer to that entry.  If a Rebuild is running,
 * the swap-out is deferred until storeDigestRebuildFinish() resumes us.
 */
static void
storeDigestRewriteStart(void *datanotused)
{
    request_flags flags;
    char *url;
    StoreEntry *e;
    assert(store_digest);
    /* prevent overlapping if rewrite schedule is too tight */
    if (sd_state.rewrite_lock) {
	debug(71, 1) ("storeDigestRewrite: overlap detected, consider increasing rewrite period\n");
	return;
    }
    debug(71, 2) ("storeDigestRewrite: start rewrite #%d\n", sd_state.rewrite_count + 1);
    /* make new store entry */
    url = internalLocalUri("/squid-internal-periodic/", StoreDigestFileName);
    flags = null_request_flags;
    flags.cachable = 1;
    sd_state.rewrite_lock = e = storeCreateEntry(url, url, flags, METHOD_GET);
    assert(sd_state.rewrite_lock);
    /* register with cbdata so pending events on this entry can be validated */
    cbdataAdd(sd_state.rewrite_lock, NULL, 0);
    debug(71, 3) ("storeDigestRewrite: url: %s key: %s\n", url, storeKeyText(e->key));
    e->mem_obj->request = requestLink(urlParse(METHOD_GET, url));
    /* wait for rebuild (if any) to finish */
    if (sd_state.rebuild_lock) {
	debug(71, 2) ("storeDigestRewriteStart: waiting for rebuild to finish.\n");
	return;
    }
    storeDigestRewriteResume();
}

/*
 * Second half of Rewrite: publishes the entry, fabricates an HTTP reply for
 * the digest object, swaps out the header + cblock, and schedules the
 * incremental mask swap-out.  Runs only when no Rebuild holds its lock.
 */
static void
storeDigestRewriteResume(void)
{
    StoreEntry *e = sd_state.rewrite_lock;
    assert(sd_state.rewrite_lock);
    assert(!sd_state.rebuild_lock);
    sd_state.rewrite_offset = 0;
    EBIT_SET(e->flags, ENTRY_SPECIAL);
    /* setting public key will purge old digest entry if any */
    storeSetPublicKey(e);
    /* fake reply */
    httpReplyReset(e->mem_obj->reply);
    /* content length = cblock header + raw digest bitmask */
    httpReplySetHeaders(e->mem_obj->reply, 1.0, 200, "Cache Digest OK",
	"application/cache-digest", store_digest->mask_size + sizeof(sd_state.cblock),
	squid_curtime, squid_curtime + StoreDigestRewritePeriod);
    debug(71, 3) ("storeDigestRewrite: entry expires on %d (%+d)\n",
	e->mem_obj->reply->expires, e->mem_obj->reply->expires - squid_curtime);
    storeBuffer(e);
    httpReplySwapOut(e->mem_obj->reply, e);
    storeDigestCBlockSwapOut(e);
    storeBufferFlush(e);
    eventAdd("storeDigestSwapOutStep", storeDigestSwapOutStep, sd_state.rewrite_lock, 0.0, 1);
}

/*
 * finishes swap out sequence for the digest; schedules next rewrite
 *
 * Completes the store entry, drops our references, clears the rewrite lock,
 * schedules the next periodic rewrite, and resumes a Rebuild that was
 * blocked waiting on this rewrite.
 */
static void
storeDigestRewriteFinish(StoreEntry * e)
{
    assert(e == sd_state.rewrite_lock);
    storeComplete(e);
    storeTimestampsSet(e);
    debug(71, 2) ("storeDigestRewriteFinish: digest expires at %d (%+d)\n",
	e->expires, e->expires - squid_curtime);
    /* is this the write order? @?@ */
    requestUnlink(e->mem_obj->request);
    e->mem_obj->request = NULL;
    storeUnlockObject(e);
    /*
     * note, it won't really get free()'d here because we used
     * MEM_DONTFREE in the call to cbdataAdd().
     */
    cbdataFree(sd_state.rewrite_lock);
    sd_state.rewrite_lock = e = NULL;
    sd_state.rewrite_count++;
    eventAdd("storeDigestRewriteStart", storeDigestRewriteStart, NULL, (double)
	StoreDigestRewritePeriod, 1);
    /* resume pending Rebuild if any */
    if (sd_state.rebuild_lock)
	storeDigestRebuildResume();
}

/*
 * swaps out one digest "chunk" per invocation; schedules next swap out
 *
 * Appends up to StoreDigestSwapOutChunkSize bytes of the digest bitmask to
 * the store entry per event callback, re-queueing itself until the whole
 * mask has been written, then finishes the rewrite.
 */
static void
storeDigestSwapOutStep(void *data)
{
    StoreEntry *e = data;
    int chunk_size = StoreDigestSwapOutChunkSize;
    assert(e);
    assert(e == sd_state.rewrite_lock);
    /* _add_ check that nothing bad happened while we were waiting @?@ @?@ */
    if (sd_state.rewrite_offset + chunk_size > store_digest->mask_size)
	chunk_size = store_digest->mask_size - sd_state.rewrite_offset;
    storeAppend(e, store_digest->mask + sd_state.rewrite_offset, chunk_size);
    debug(71, 3) ("storeDigestSwapOutStep: size: %d offset: %d chunk: %d bytes\n",
	store_digest->mask_size, sd_state.rewrite_offset, chunk_size);
    sd_state.rewrite_offset += chunk_size;
    /* are we done ? */
    if (sd_state.rewrite_offset >= store_digest->mask_size)
	storeDigestRewriteFinish(e);
    else
	eventAdd("storeDigestSwapOutStep", storeDigestSwapOutStep, e, 0.0, 1);
}

/*
 * Builds the on-the-wire digest control block (cblock) in network byte
 * order and appends it to the store entry, ahead of the raw bitmask.
 */
static void
storeDigestCBlockSwapOut(StoreEntry * e)
{
    memset(&sd_state.cblock, 0, sizeof(sd_state.cblock));
    /* multi-byte fields are sent big-endian (htons/htonl) */
    sd_state.cblock.ver.current = htons(CacheDigestVer.current);
    sd_state.cblock.ver.required = htons(CacheDigestVer.required);
    sd_state.cblock.capacity = htonl(store_digest->capacity);
    sd_state.cblock.count = htonl(store_digest->count);
    sd_state.cblock.del_count = htonl(store_digest->del_count);
    sd_state.cblock.mask_size = htonl(store_digest->mask_size);
    sd_state.cblock.bits_per_entry = (unsigned char) StoreDigestBitsPerEntry;
    sd_state.cblock.hash_func_count = (unsigned char) CacheDigestHashFuncCount;
    storeAppend(e, (char *) &sd_state.cblock, sizeof(sd_state.cblock));
}

/*
 * calculates digest capacity
 *
 * Returns the number of entries to size the digest for: the current
 * StoreEntry count if known, otherwise an estimate from configured cache
 * size / average object size, floored at lo_cap.
 */
static int
storeDigestCalcCap(void)
{
    /*
     * To-Do: Bloom proved that the optimal filter utilization is 50% (half of
     * the bits are off). However, we do not have a formula to calculate the
     * number of _entries_ we want to pre-allocate for.
     */
    const int hi_cap = Config.Swap.maxSize / Config.Store.avgObjectSize;
    const int lo_cap = 1 + store_swap_size / Config.Store.avgObjectSize;
    const int e_count = memInUse(MEM_STOREENTRY);
    int cap = e_count ? e_count : hi_cap;
    debug(71, 2) ("storeDigestCalcCap: have: %d, want %d entries; limits: [%d, %d]\n",
	e_count, cap, lo_cap, hi_cap);
    if (cap < lo_cap)
	cap = lo_cap;
    /* do not enforce hi_cap limit, average-based estimation may be wrong
     *if (cap > hi_cap)
     * cap = hi_cap;
     */
    return cap;
}

/*
 * returns true if we actually resized the digest
 *
 * Resizes only when the desired capacity differs from the current one by
 * more than 10%, to avoid churning the digest on minor fluctuations.
 */
static int
storeDigestResize(void)
{
    const int cap = storeDigestCalcCap();
    int diff;
    assert(store_digest);
    diff = abs(cap - store_digest->capacity);
    debug(71, 2) ("storeDigestResize: %d -> %d; change: %d (%d%%)\n",
	store_digest->capacity, cap, diff,
	xpercentInt(diff, store_digest->capacity));
    /* avoid minor adjustments */
    if (diff <= store_digest->capacity / 10) {
	debug(71, 2) ("storeDigestResize: small change, will not resize.\n");
	return 0;
    } else {
	debug(71, 2) ("storeDigestResize: big change, resizing.\n");
	cacheDigestChangeCap(store_digest, cap);
	return 1;
    }
}

#endif /* USE_CACHE_DIGESTS */