
📄 cache.c

📁 A very famous browser
💻 C
📖 Page 1 of 2
/* Cache subsystem */
/* $Id: cache.c,v 1.194.2.4 2005/05/02 21:14:03 jonas Exp $ */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <string.h>

#include "elinks.h"

#include "bfu/dialog.h"
#include "cache/cache.h"
#include "cache/dialogs.h"
#include "config/options.h"
#include "main.h"
#include "protocol/protocol.h"
#include "protocol/proxy.h"
#include "protocol/uri.h"
#include "sched/connection.h"
#include "util/error.h"
#include "util/memory.h"
#include "util/object.h"
#include "util/string.h"
#include "util/types.h"

/* The list of cache entries */
static INIT_LIST_HEAD(cache_entries);

static long cache_size;
static int id_counter = 1;

/* Change 0 to 1 to enable cache debugging features (redirect stderr
 * to a file). */
#if 0
#define DEBUG_CACHE
#endif

#ifdef DEBUG_CACHE

#define dump_frag(frag, count) \
do { \
	DBG(" [%d] f=%p offset=%li length=%li real_length=%li", \
	      count, frag, frag->offset, frag->length, frag->real_length); \
} while (0)

#define dump_frags(entry, comment) \
do { \
	struct fragment *frag; \
        int count = 0;	\
 \
	DBG("%s: url=%s, cache_size=%li", comment, entry->url, cache_size); \
	foreach (frag, entry->frag) \
		dump_frag(frag, ++count); \
} while (0)

#else
#define dump_frags(entry, comment)
#endif /* DEBUG_CACHE */

static int
is_entry_used(struct cache_entry *cached)
{
	struct connection *conn;

	foreach (conn, queue)
		if (conn->cached == cached)
			return 1;

	return 0;
}

long
cache_info(int type)
{
	int i = 0;
	struct cache_entry *cached;

	switch (type) {
		case INFO_BYTES:
			return cache_size;
		case INFO_FILES:
			foreach (cached, cache_entries) i++;
			return i;
		case INFO_LOCKED:
			foreach (cached, cache_entries)
				i += is_object_used(cached);
			return i;
		case INFO_LOADING:
			foreach (cached, cache_entries)
				i += is_entry_used(cached);
			return i;
	}

	return 0;
}

struct cache_entry *
find_in_cache(struct uri *uri)
{
	struct cache_entry *cached;
	int proxy = (uri->protocol == PROTOCOL_PROXY);

	foreach (cached, cache_entries) {
		struct uri *c_uri;

		if (!cached->valid) continue;

		c_uri = proxy ? cached->proxy_uri : cached->uri;
		if (!compare_uri(c_uri, uri, URI_BASE))
			continue;

		/* Move it to the top of the list. */
		del_from_list(cached);
		add_to_list(cache_entries, cached);

		return cached;
	}

	return NULL;
}

struct cache_entry *
get_cache_entry(struct uri *uri)
{
	struct cache_entry *cached = find_in_cache(uri);

	assertm(!uri->fragment, "Fragment in URI (%s)", struri(uri));

	if (cached) return cached;

	shrink_memory(0);

	cached = mem_calloc(1, sizeof(*cached));
	if (!cached) return NULL;

	cached->uri = get_proxied_uri(uri);
	if (!cached->uri) {
		mem_free(cached);
		return NULL;
	}

	cached->proxy_uri = get_proxy_uri(uri, NULL);
	if (!cached->proxy_uri) {
		done_uri(cached->uri);
		mem_free(cached);
		return NULL;
	}

	cached->incomplete = 1;
	cached->valid = 1;

	init_list(cached->frag);
	cached->id = id_counter++;
	object_nolock(cached, "cache_entry"); /* Debugging purpose. */

	add_to_list(cache_entries, cached);

	cached->box_item = add_listbox_leaf(&cache_browser, NULL, cached);

	return cached;
}

static int
cache_entry_has_expired(struct cache_entry *cached)
{
	return cached->max_age <= time(NULL);
}

struct cache_entry *
get_validated_cache_entry(struct uri *uri, enum cache_mode cache_mode)
{
	struct cache_entry *cached;

	/* We have to check if something should be reloaded */
	if (cache_mode > CACHE_MODE_NORMAL)
		return NULL;

	/* We only consider complete entries */
	cached = find_in_cache(uri);
	if (!cached || cached->incomplete)
		return NULL;

	/* Check if the entry can be deleted */
	/* FIXME: This does not make sense to me. Why should the usage pattern
	 * of the cache entry matter? Only reason I can think of is to avoid
	 * reloading when spawning a new tab which could potentially be a big
	 * penalty but shouldn't that be taken care of on a higher level?
	 * --jonas */
	if (is_object_used(cached)) {
#if 0
		/* Never use expired entries. */
		if (cached->expire && cache_entry_has_expired(cached))
			return NULL;
#endif
		return cached;
	}

	/* A bit of a gray zone. Delete the entry if it has the strictest
	 * cache mode and we don't want the most aggressive mode or we have to
	 * remove the redirect or the entry expired. Please enlighten me.
	 * --jonas */
	if ((cached->cache_mode == CACHE_MODE_NEVER && cache_mode != CACHE_MODE_ALWAYS)
	    || (cached->redirect && !get_opt_bool("document.cache.cache_redirects"))
	    || (cached->expire && cache_entry_has_expired(cached))) {
		delete_cache_entry(cached);
		return NULL;
	}

	return cached;
}

int
cache_entry_is_valid(struct cache_entry *cached)
{
	struct cache_entry *valid_cached;

	foreach (valid_cached, cache_entries) {
		if (valid_cached == cached)
			return 1;
	}

	return 0;
}

struct cache_entry *
follow_cached_redirects(struct cache_entry *cached)
{
	int redirects = 0;

	while (cached) {
		if (!cached->redirect) {
			/* XXX: This is not quite true, but does that difference
			 * matter here? */
			return cached;
		}

		if (++redirects > MAX_REDIRECTS) break;

		cached = find_in_cache(cached->redirect);
	}

	return NULL;
}

struct cache_entry *
get_redirected_cache_entry(struct uri *uri)
{
	struct cache_entry *cached = find_in_cache(uri);

	return cached ? follow_cached_redirects(cached) : NULL;
}

static inline void
enlarge_entry(struct cache_entry *cached, int size)
{
	cached->data_size += size;
	assertm(cached->data_size >= 0,
		"cache entry data_size underflow: %ld", cached->data_size);
	if_assert_failed { cached->data_size = 0; }

	cache_size += size;
	assertm(cache_size >= 0, "cache_size underflow: %ld", cache_size);
	if_assert_failed { cache_size = 0; }
}

#define CACHE_PAD(x) (((x) | 0x3fff) + 1)

/* One byte is reserved for data in struct fragment. */
#define FRAGSIZE(x) (sizeof(struct fragment) + (x) - 1)

/* We store the fragments themselves in a private vault, safely separated from
 * the rest of memory structures. If we lived in the main libc memory pool, we
 * would trigger annoying pathological behaviour like artificially enlarging
 * the memory pool to 50M, then securing it with some stupid cookie record at
 * the top and then no matter how you flush the cache the data segment is still
 * 50M big.
 *
 * Cool, but we don't want that, so fragments (where the big data is stored)
 * live in their little mmap()ed worlds. There is some overhead, but if we
 * assume a single fragment per cache entry and page size (mmap() allocation
 * granularity) 4096, for a squad of ten 1kb documents this amounts to 30kb.
 * That's not *that* horrible when you realize that the freshmeat front page
 * takes 300kb in memory and we usually do not deal with documents so small
 * that max. 4kb overhead would be visible there.
 *
 * The alternative would be of course to manage an entire custom memory pool,
 * but that is feasible only when we are able to resize it efficiently. We
 * aren't, except on Linux.
 *
 * Of course for all this to really completely prevent the pathological cases,
 * we need to stuff the rendered documents in too, because they seem to account
 * for the major memory bursts. */

static struct fragment *
frag_alloc(size_t size)
{
	struct fragment *f = mem_mmap_alloc(FRAGSIZE(size));

	if (!f) return NULL;
	memset(f, 0, FRAGSIZE(size));
	return f;
}

static struct fragment *
frag_realloc(struct fragment *f, size_t size)
{
	return mem_mmap_realloc(f, FRAGSIZE(f->real_length), FRAGSIZE(size));
}

static void
frag_free(struct fragment *f)
{
	mem_mmap_free(f, FRAGSIZE(f->real_length));
}

/* Concatenate overlapping fragments. */
static void
remove_overlaps(struct cache_entry *cached, struct fragment *f, int *trunc)
{
	int f_end_offset = f->offset + f->length;

	/* Iterate thru all fragments we still overlap to. */
	while (list_has_next(cached->frag, f)
		&& f_end_offset > f->next->offset) {
		struct fragment *nf;
		int end_offset = f->next->offset + f->next->length;

		if (f_end_offset < end_offset) {
			/* We end before end of the following fragment, though.
			 * So try to append overlapping part of that fragment
			 * to us. */
			nf = frag_realloc(f, end_offset - f->offset);
			if (nf) {
				nf->prev->next = nf;
				nf->next->prev = nf;
				f = nf;

				if (memcmp(f->data + f->next->offset - f->offset,
					   f->next->data,
					   f->offset + f->length - f->next->offset))
					*trunc = 1;

				memcpy(f->data + f->length,
				       f->next->data + f_end_offset - f->next->offset,
				       end_offset - f_end_offset);
				enlarge_entry(cached, end_offset - f_end_offset);
				f->length = f->real_length = end_offset - f->offset;
			}

		} else {
			/* We will just discard this, it's a complete subset of
			 * our new fragment. */
			if (memcmp(f->data + f->next->offset - f->offset,
				   f->next->data,
				   f->next->length))
				*trunc = 1;
		}

		/* Remove the fragment, it influences our new one! */
		nf = f->next;
		enlarge_entry(cached, -nf->length);
		del_from_list(nf);
		frag_free(nf);
	}
}

/* Note that this function is maybe overcommented, but I'm certainly not
 * unhappy from that. */
int
add_fragment(struct cache_entry *cached, int offset,
	     unsigned char *data, int length)
{
	struct fragment *f, *nf;
	int trunc = 0;
	int end_offset;

	if (!length) return 0;

	end_offset = offset + length;
	if (cached->length < end_offset)
		cached->length = end_offset;

	/* id marks each entry, and changes each time it's modified;
	 * used in HTML renderer. */
	cached->id = id_counter++;

	/* Possibly insert the new data in the middle of existing fragment. */
	foreach (f, cached->frag) {
		int ret = 0;
		int f_end_offset = f->offset + f->length;

		/* No intersection? */
		if (f->offset > offset) break;
		if (f_end_offset < offset) continue;

		if (end_offset > f_end_offset) {
			/* Overlap - we end further than original fragment. */

			if (end_offset - f->offset <= f->real_length) {
				/* We fit here, so let's enlarge it by delta of
				 * old and new end.. */
				enlarge_entry(cached, end_offset - f_end_offset);
				/* ..and length is now total length. */
				f->length = end_offset - f->offset;
				ret = 1; /* It was enlarged. */
			} else {
				/* We will reduce fragment length only to the
				 * starting non-interjecting size and add new
				 * fragment directly after this one. */
				f->length = offset - f->offset;
				f = f->next;
				break;
			}

		} /* else We are subset of original fragment. */

		/* Copy the stuff over there. */
		memcpy(f->data + offset - f->offset, data, length);

		remove_overlaps(cached, f, &trunc);

		/* We truncate the entry even if the data contents is the
		 * same as what we have in the fragment, because that does
		 * not mean that what is going to follow won't differ. This
		 * is a serious problem when rendering HTML frame with onload
		 * snippets - we "guess" the rest of the document here,
		 * interpret the snippet, then it turns out in the real
		 * document the snippet is different and we are in trouble.
		 *
		 * Debugging this took me about 1.5 day (really), the diff with
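
The listing continues on page 2. As far as this page shows, remove_overlaps() and add_fragment() treat the downloaded body as a list of fragments sorted by offset: new data is copied into the first fragment it intersects, and any following fragments the grown fragment now covers are merged into it and freed. The standalone sketch below illustrates that interval-merge idea only; the types and names (range, merge_ranges) are simplified assumptions for illustration, not the ELinks structures, which are a linked list of mmap()ed buffers.

/* Simplified illustration of the overlap-merge step in remove_overlaps().
 * Not ELinks code: struct range and merge_ranges are hypothetical. */
#include <stdio.h>

struct range {
	int offset;
	int length;
};

/* Merge into ranges[i] every following range it strictly overlaps,
 * mirroring the `f_end_offset > f->next->offset` test above.
 * Assumes the array is sorted by offset; returns the new count. */
static int
merge_ranges(struct range *ranges, int n, int i)
{
	while (i + 1 < n && ranges[i].offset + ranges[i].length > ranges[i + 1].offset) {
		int end = ranges[i + 1].offset + ranges[i + 1].length;

		/* Extend the current range if the next one reaches further. */
		if (end > ranges[i].offset + ranges[i].length)
			ranges[i].length = end - ranges[i].offset;

		/* Drop the swallowed range (like del_from_list() + frag_free()). */
		for (int j = i + 1; j < n - 1; j++)
			ranges[j] = ranges[j + 1];
		n--;
	}
	return n;
}

int main(void)
{
	struct range r[] = { { 0, 100 }, { 80, 50 }, { 200, 20 } };
	int n = merge_ranges(r, 3, 0);

	for (int i = 0; i < n; i++)
		printf("offset=%d length=%d\n", r[i].offset, r[i].length);
	/* Prints: offset=0 length=130, then offset=200 length=20. */
	return 0;
}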

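The long comment before frag_alloc() estimates roughly 30kb of overhead when ten 1kb documents each get their own mmap()ed fragment at 4096-byte granularity. The quick check below reproduces that arithmetic; PAGE_SIZE and FRAG_HEADER are assumptions for illustration, since the real header size is sizeof(struct fragment) from cache/cache.h, which is not part of this listing.

/* Back-of-the-envelope check of the "ten 1kb documents ~ 30kb overhead"
 * figure.  PAGE_SIZE and FRAG_HEADER are assumed values. */
#include <stdio.h>

#define PAGE_SIZE   4096L  /* assumed mmap() allocation granularity */
#define FRAG_HEADER 32L    /* assumed size of struct fragment bookkeeping */

static long round_to_pages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
}

int main(void)
{
	long docs = 10, doc_size = 1024;
	long payload = docs * doc_size;
	/* doc_size + FRAG_HEADER - 1 mirrors FRAGSIZE(doc_size). */
	long mapped = docs * round_to_pages(doc_size + FRAG_HEADER - 1);

	/* payload=10240, mapped=40960, overhead=30720 (~30kb). */
	printf("payload=%ld mapped=%ld overhead=%ld\n",
	       payload, mapped, mapped - payload);
	return 0;
}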