peer_digest.c
/*
 * $Id: peer_digest.c,v 1.70 1999/01/29 21:28:17 wessels Exp $
 *
 * DEBUG: section 72    Peer Digest Routines
 * AUTHOR: Alex Rousskov
 *
 * SQUID Internet Object Cache  http://squid.nlanr.net/Squid/
 * ----------------------------------------------------------
 *
 *  Squid is the result of efforts by numerous individuals from the
 *  Internet community.  Development is led by Duane Wessels of the
 *  National Laboratory for Applied Network Research and funded by the
 *  National Science Foundation.  Squid is Copyrighted (C) 1998 by
 *  Duane Wessels and the University of California San Diego.  Please
 *  see the COPYRIGHT file for full details.  Squid incorporates
 *  software developed and/or copyrighted by other sources.  Please see
 *  the CREDITS file for full details.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"

#if USE_CACHE_DIGESTS

/* local types */

/* local prototypes */
static time_t peerDigestIncDelay(const PeerDigest * pd);
static time_t peerDigestNewDelay(const StoreEntry * e);
static void peerDigestSetCheck(PeerDigest * pd, time_t delay);
static void peerDigestClean(PeerDigest *);
static EVH peerDigestCheck;
static void peerDigestRequest(PeerDigest * pd);
static STCB peerDigestFetchReply;
static STCB peerDigestSwapInHeaders;
static STCB peerDigestSwapInCBlock;
static STCB peerDigestSwapInMask;
static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name);
static void peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err);
static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err);
static void peerDigestFetchFinish(DigestFetchState * fetch, int err);
static void peerDigestFetchSetStats(DigestFetchState * fetch);
static int peerDigestSetCBlock(PeerDigest * pd, const char *buf);
static int peerDigestUseful(const PeerDigest * pd);

/* local constants */
#define StoreDigestCBlockSize sizeof(StoreDigestCBlock)

/* min interval for requesting digests from a given peer */
static const time_t PeerDigestReqMinGap = 5 * 60;    /* seconds */
/* min interval for requesting digests (cumulative request stream) */
static const time_t GlobDigestReqMinGap = 1 * 60;    /* seconds */

/* local vars */
static time_t pd_last_req_time = 0;    /* last call to Check */

/* initialize peer digest */
static void
peerDigestInit(PeerDigest * pd, peer * p)
{
    assert(pd && p);
    memset(pd, 0, sizeof(*pd));
    pd->peer = p;
    /* if peer disappears, we will know its name */
    stringInit(&pd->host, p->host);
    pd->times.initialized = squid_curtime;
}

static void
peerDigestClean(PeerDigest * pd)
{
    assert(pd);
    if (pd->cd)
        cacheDigestDestroy(pd->cd);
    stringClean(&pd->host);
}

/* allocate new peer digest, call Init, and lock everything */
PeerDigest *
peerDigestCreate(peer * p)
{
    PeerDigest *pd;
    assert(p);
    pd = memAllocate(MEM_PEER_DIGEST);
    cbdataAdd(pd, memFree, MEM_PEER_DIGEST);
    peerDigestInit(pd, p);
    cbdataLock(pd->peer);    /* we will use the peer */
    return pd;
}

/* call Clean and free/unlock everything */
void
peerDigestDestroy(PeerDigest * pd)
{
    peer *p;
    assert(pd);
    p = pd->peer;
    pd->peer = NULL;
    /* inform peer (if any) that we are gone */
    if (cbdataValid(p))
        peerNoteDigestGone(p);
    cbdataUnlock(p);    /* must unlock, valid or not */
    peerDigestClean(pd);
    cbdataFree(pd);
}

/* called by peer to indicate that somebody actually needs this digest */
void
peerDigestNeeded(PeerDigest * pd)
{
    assert(pd);
    assert(!pd->flags.needed);
    assert(!pd->cd);
    pd->flags.needed = 1;
    pd->times.needed = squid_curtime;
    peerDigestSetCheck(pd, 0);    /* check asap */
}

/* currently we do not have a reason to disable without destroying */
#if FUTURE_CODE
/* disables peer for good */
static void
peerDigestDisable(PeerDigest * pd)
{
    debug(72, 2) ("peerDigestDisable: peer %s disabled for good\n",
        strBuf(pd->host));
    pd->times.disabled = squid_curtime;
    pd->times.next_check = -1;    /* never */
    pd->flags.usable = 0;
    if (pd->cd) {
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }
    /* we do not destroy the pd itself to preserve its "history" and stats */
}
#endif

/* increment retry delay [after an unsuccessful attempt] */
static time_t
peerDigestIncDelay(const PeerDigest * pd)
{
    assert(pd);
    return pd->times.retry_delay > 0 ?
        2 * pd->times.retry_delay :    /* exponential backoff */
        PeerDigestReqMinGap;    /* minimal delay */
}

/* artificially increases Expires: setting to avoid race conditions;
 * returns the delay till that [increased] expiration time */
static time_t
peerDigestNewDelay(const StoreEntry * e)
{
    assert(e);
    if (e->expires > 0)
        return e->expires + PeerDigestReqMinGap - squid_curtime;
    return PeerDigestReqMinGap;
}

/* registers next digest verification */
static void
peerDigestSetCheck(PeerDigest * pd, time_t delay)
{
    eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1);
    pd->times.next_check = squid_curtime + delay;
    debug(72, 3) ("peerDigestSetCheck: will check peer %s in %d secs\n",
        strBuf(pd->host), delay);
}

/*
 * called when the peer is about to disappear or has already disappeared
 */
void
peerDigestNotePeerGone(PeerDigest * pd)
{
    if (pd->flags.requested) {
        debug(72, 2) ("peerDigest: peer %s gone, will destroy after fetch.\n",
            strBuf(pd->host));
        /* do nothing now, the fetching chain will notice and take action */
    } else {
        debug(72, 2) ("peerDigest: peer %s is gone, destroying now.\n",
            strBuf(pd->host));
        peerDigestDestroy(pd);
    }
}

/* callback for eventAdd() (with peer digest locked)
 * request a new digest if our copy is too old or if we lack one;
 * schedule the next check otherwise */
static void
peerDigestCheck(void *data)
{
    PeerDigest *pd = data;
    time_t req_time;
    /*
     * you can't assert(cbdataValid(pd)) -- if it's not valid this
     * function never gets called
     */
    assert(!pd->flags.requested);
    pd->times.next_check = 0;    /* unknown */
    if (!cbdataValid(pd->peer)) {
        peerDigestNotePeerGone(pd);
        return;
    }
    debug(72, 3) ("peerDigestCheck: peer %s:%d\n", pd->peer->host, pd->peer->http_port);
    debug(72, 3) ("peerDigestCheck: time: %d, last received: %d (%+d)\n",
        squid_curtime, pd->times.received, (squid_curtime - pd->times.received));
    /* decide when we should send the request:
     * request now unless too close to other requests */
    req_time = squid_curtime;
    /* per-peer limit */
    if (req_time - pd->times.received < PeerDigestReqMinGap) {
        debug(72, 2) ("peerDigestCheck: %s, avoiding close peer requests (%d < %d secs).\n",
            strBuf(pd->host), req_time - pd->times.received, PeerDigestReqMinGap);
        req_time = pd->times.received + PeerDigestReqMinGap;
    }
    /* global limit */
    if (req_time - pd_last_req_time < GlobDigestReqMinGap) {
        debug(72, 2) ("peerDigestCheck: %s, avoiding close requests (%d < %d secs).\n",
            strBuf(pd->host), req_time - pd_last_req_time, GlobDigestReqMinGap);
        req_time = pd_last_req_time + GlobDigestReqMinGap;
    }
    if (req_time <= squid_curtime)
        peerDigestRequest(pd);    /* will set pd->flags.requested */
    else
        peerDigestSetCheck(pd, req_time - squid_curtime);
}
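/*
 * A digest fetch is driven by a chain of storeClientCopy() callbacks:
 * peerDigestRequest() issues the HTTP request for the peer's digest,
 * peerDigestFetchReply() waits for and parses the reply headers
 * (choosing between a 304 refresh of the old entry and a 200 carrying
 * a fresh digest), and the SwapIn* callbacks declared above then swap
 * in the stored headers, the digest control block, and the digest bit
 * mask.  Each callback checks peerDigestFetchedEnough() on entry and
 * terminates the fetch via peerDigestFetchStop() or
 * peerDigestFetchAbort().
 */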
/* ask store for a digest */
static void
peerDigestRequest(PeerDigest * pd)
{
    peer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url;
    const cache_key *key;
    request_t *req;
    DigestFetchState *fetch = NULL;
    pd->req_result = NULL;
    pd->flags.requested = 1;
    /* compute future request components */
    url = internalRemoteUri(p->host, p->http_port, "/squid-internal-periodic/", StoreDigestFileName);
    key = storeKeyPublic(url, METHOD_GET);
    debug(72, 2) ("peerDigestRequest: %s key: %s\n", url, storeKeyText(key));
    req = urlParse(METHOD_GET, url);
    assert(req);
    /* add custom headers */
    assert(!req->header.len);
    httpHeaderPutStr(&req->header, HDR_ACCEPT, StoreDigestMimeStr);
    httpHeaderPutStr(&req->header, HDR_ACCEPT, "text/html");
    /* create fetch state structure */
    fetch = memAllocate(MEM_DIGEST_FETCH_STATE);
    cbdataAdd(fetch, memFree, MEM_DIGEST_FETCH_STATE);
    fetch->request = requestLink(req);
    fetch->pd = pd;
    fetch->offset = 0;
    /* update timestamps */
    fetch->start_time = squid_curtime;
    pd->times.requested = squid_curtime;
    pd_last_req_time = squid_curtime;
    req->flags.cachable = 1;
    /* the rest is based on clientProcessExpired() */
    req->flags.refresh = 1;
    old_e = fetch->old_entry = storeGet(key);
    if (old_e) {
        debug(72, 5) ("peerDigestRequest: found old entry\n");
        storeLockObject(old_e);
        storeCreateMemObject(old_e, url, url);
        storeClientListAdd(old_e, fetch);
    }
    e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    storeClientListAdd(e, fetch);
    /* set lastmod to trigger IMS request if possible */
    if (old_e)
        e->lastmod = old_e->lastmod;
    /* push towards peer cache */
    debug(72, 3) ("peerDigestRequest: forwarding to fwdStart...\n");
    fwdStart(-1, e, req, no_addr, no_addr);
    cbdataLock(fetch);
    cbdataLock(fetch->pd);
    storeClientCopy(e, 0, 0, 4096, memAllocate(MEM_4K_BUF),
        peerDigestFetchReply, fetch);
}

/* wait for full http headers to be received then parse them */
static void
peerDigestFetchReply(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = data;
    PeerDigest *pd = fetch->pd;
    assert(pd && buf);
    assert(!fetch->offset);
    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
        return;
    if (headersEnd(buf, size)) {
        http_status status;
        HttpReply *reply = fetch->entry->mem_obj->reply;
        assert(reply);
        httpReplyParse(reply, buf);
        status = reply->sline.status;
        debug(72, 3) ("peerDigestFetchReply: %s status: %d, expires: %d (%+d)\n",
            strBuf(pd->host), status, reply->expires, reply->expires - squid_curtime);
        /* this "if" is based on clientHandleIMSReply() */
        if (status == HTTP_NOT_MODIFIED) {
            request_t *r = NULL;
            /* our old entry is fine */
            assert(fetch->old_entry);
            if (!fetch->old_entry->mem_obj->request)
                fetch->old_entry->mem_obj->request = r =
                    requestLink(fetch->entry->mem_obj->request);
            assert(fetch->old_entry->mem_obj->request);
            httpReplyUpdateOnNotModified(fetch->old_entry->mem_obj->reply, reply);
            storeTimestampsSet(fetch->old_entry);
            /* get rid of 304 reply */
            storeUnregister(fetch->entry, fetch);
            storeUnlockObject(fetch->entry);
            fetch->entry = fetch->old_entry;
            fetch->old_entry = NULL;
            /* preserve request -- we need its size to update counters */
            /* requestUnlink(r); */
            /* fetch->entry->mem_obj->request = NULL; */
        } else if (status == HTTP_OK) {
            /* get rid of old entry if any */
            if (fetch->old_entry) {
                debug(72, 3) ("peerDigestFetchReply: got new digest, releasing old one\n");
                storeUnregister(fetch->old_entry, fetch);
                storeReleaseRequest(fetch->old_entry);
                storeUnlockObject(fetch->old_entry);
                fetch->old_entry = NULL;
            }
        } else {
            /* some kind of a bug */
            peerDigestFetchAbort(fetch, buf, httpStatusLineReason(&reply->sline));
            return;
        }
        /* must have a ready-to-use store entry if we got here */
        /* can we stay with the old in-memory digest? */
        if (status == HTTP_NOT_MODIFIED && fetch->pd->cd)
            peerDigestFetchStop(fetch, buf, "Not modified");
        else
            storeClientCopy(fetch->entry,    /* have to swap in */
                0, 0, SM_PAGE_SIZE, buf, peerDigestSwapInHeaders, fetch);
    } else {
        /* need more data, do we have space? */
        if (size >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, buf, "reply header too big");
        else
            storeClientCopy(fetch->entry, size, 0, SM_PAGE_SIZE, buf,
                peerDigestFetchReply, fetch);
    }
}

/* fetch headers from disk, pass on to SwapInCBlock */
static void
peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = data;
    size_t hdr_size;
    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
        return;
    assert(!fetch->offset);
    if ((hdr_size = headersEnd(buf, size))) {
        assert(fetch->entry->mem_obj->reply);
        if (!fetch->entry->mem_obj->reply->sline.status)
            httpReplyParse(fetch->entry->mem_obj->reply, buf);
        if (fetch->entry->mem_obj->reply->sline.status != HTTP_OK) {
            debug(72, 1) ("peerDigestSwapInHeaders: %s status %d got cached!\n",
                strBuf(fetch->pd->host), fetch->entry->mem_obj->reply->sline.status);
            peerDigestFetchAbort(fetch, buf, "internal status error");
            return;
        }
        fetch->offset += hdr_size;
        storeClientCopy(fetch->entry, size, fetch->offset,
            SM_PAGE_SIZE, buf,
            peerDigestSwapInCBlock, fetch);
    } else {
        /* need more data, do we have space? */
        if (size >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, buf, "stored header too big");
        else
            storeClientCopy(fetch->entry, size, 0, SM_PAGE_SIZE, buf,
                peerDigestSwapInHeaders, fetch);
    }
}

static void
peerDigestSwapInCBlock(void *data, char *buf, ssize_t size)