cache.c
		 * all the debugging print commands amounted to about 20kb (gdb
		 * wasn't much useful since it stalled the download, de facto
		 * eliminating the bad behaviour). */
		truncate_entry(cached, end_offset, 0);
		dump_frags(cached, "add_fragment");
		return ret;
	}

	/* Make up a new fragment. */
	nf = frag_alloc(CACHE_PAD(length));
	if (!nf) return -1;

	nf->offset = offset;
	nf->length = length;
	nf->real_length = CACHE_PAD(length);
	memcpy(nf->data, data, length);
	add_at_pos(f->prev, nf);

	enlarge_entry(cached, length);
	remove_overlaps(cached, nf, &trunc);
	if (trunc) truncate_entry(cached, end_offset, 0);

	dump_frags(cached, "add_fragment");

	return 1;
}

struct fragment *
get_cache_fragment(struct cache_entry *cached)
{
	struct fragment *first_frag, *adj_frag, *frag, *new_frag;
	int new_frag_len;

	if (list_empty(cached->frag))
		return NULL;

	first_frag = cached->frag.next;
	if (first_frag->offset)
		return NULL;

	/* Find the first pair of fragments with a gap in between. It will be
	 * used to figure out what sequence of fragments to include in the
	 * defragmentation. Adjacent fragments are fine; truly overlapping
	 * ones are an internal error, since add_fragment() merges those. */
	for (adj_frag = first_frag->next;
	     adj_frag != (void *) &cached->frag;
	     adj_frag = adj_frag->next) {
		long overlay = adj_frag->offset
			     - (adj_frag->prev->offset + adj_frag->prev->length);

		if (overlay > 0) break;
		if (overlay == 0) continue;

		INTERNAL("fragments overlay");
		return NULL;
	}

	/* The sequence ends right after the first fragment, so no
	 * defragmentation is needed. */
	if (adj_frag == first_frag->next)
		return first_frag;

	/* Calculate the length of the defragmented fragment. */
	for (new_frag_len = 0, frag = first_frag;
	     frag != adj_frag;
	     frag = frag->next)
		new_frag_len += frag->length;

	/* XXX: Even if the defragmentation fails because of an allocation
	 * failure, just fall back to returning the first fragment and
	 * pretend all is well. */
	/* FIXME: Is this terribly brain-dead? It corresponds to the semantic
	 * of the code this extended version of the old defrag_entry() is
	 * supposed to replace. --jonas */
	new_frag = frag_alloc(new_frag_len);
	if (!new_frag)
		return first_frag->length ? first_frag : NULL;

	new_frag->length = new_frag_len;
	new_frag->real_length = new_frag_len;

	for (new_frag_len = 0, frag = first_frag;
	     frag != adj_frag;
	     frag = frag->next) {
		struct fragment *tmp = frag;

		memcpy(new_frag->data + new_frag_len, frag->data, frag->length);
		new_frag_len += frag->length;

		/* Back up one node so the loop increment steps past the
		 * fragment we are about to free. */
		frag = frag->prev;
		del_from_list(tmp);
		frag_free(tmp);
	}

	add_to_list(cached->frag, new_frag);
	dump_frags(cached, "get_cache_fragment");

	return new_frag;
}

static void
delete_fragment(struct cache_entry *cached, struct fragment *f)
{
	while ((void *) f != &cached->frag) {
		struct fragment *tmp = f->next;

		enlarge_entry(cached, -f->length);
		del_from_list(f);
		frag_free(f);
		f = tmp;
	}
}
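/* A minimal usage sketch (not from the original file) of how a caller
 * might consume get_cache_fragment(). The entry must begin at offset 0;
 * on allocation failure the function falls back to the first fragment, so
 * the returned length can be shorter than cached->length. process_data()
 * is a hypothetical stand-in for the real consumer. */
#if 0
	struct fragment *fragment = get_cache_fragment(cached);

	if (fragment) {
		/* fragment->data holds fragment->length contiguous bytes,
		 * starting at offset 0 of the entry. */
		process_data(fragment->data, fragment->length);
	}
#endif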
void
truncate_entry(struct cache_entry *cached, int offset, int final)
{
	struct fragment *f;

	if (cached->length > offset) {
		cached->length = offset;
		cached->incomplete = 1;
	}

	foreach (f, cached->frag) {
		long size = offset - f->offset;

		/* XXX: is zero length fragment really legal here ? --Zas */
		assert(f->length >= 0);

		if (size >= f->length) continue;

		if (size > 0) {
			enlarge_entry(cached, -(f->length - size));
			f->length = size;

			if (final) {
				struct fragment *nf;

				nf = frag_realloc(f, f->length);
				if (nf) {
					nf->next->prev = nf;
					nf->prev->next = nf;
					f = nf;
					f->real_length = f->length;
				}
			}

			f = f->next;
		}

		delete_fragment(cached, f);

		dump_frags(cached, "truncate_entry");
		return;
	}
}

void
free_entry_to(struct cache_entry *cached, int offset)
{
	struct fragment *f;

	foreach (f, cached->frag) {
		if (f->offset + f->length <= offset) {
			struct fragment *tmp = f;

			enlarge_entry(cached, -f->length);
			f = f->prev;
			del_from_list(tmp);
			frag_free(tmp);

		} else if (f->offset < offset) {
			long size = offset - f->offset;

			enlarge_entry(cached, -size);
			f->length -= size;
			memmove(f->data, f->data + size, f->length);
			f->offset = offset;

		} else break;
	}
}

void
delete_entry_content(struct cache_entry *cached)
{
	enlarge_entry(cached, -cached->data_size);

	while (cached->frag.next != (void *) &cached->frag) {
		struct fragment *f = cached->frag.next;

		del_from_list(f);
		frag_free(f);
	}
	cached->id = id_counter++;
	cached->length = 0;
	cached->incomplete = 1;

	mem_free_set(&cached->last_modified, NULL);
	mem_free_set(&cached->etag, NULL);
}

void
delete_cache_entry(struct cache_entry *cached)
{
	assertm(!is_object_used(cached), "deleting locked cache entry");
	assertm(!is_entry_used(cached), "deleting loading cache entry");

	delete_entry_content(cached);
	del_from_list(cached);

	if (cached->box_item) done_listbox_item(&cache_browser, cached->box_item);
	if (cached->uri) done_uri(cached->uri);
	if (cached->proxy_uri) done_uri(cached->proxy_uri);
	if (cached->redirect) done_uri(cached->redirect);

	mem_free_if(cached->head);
	mem_free_if(cached->content_type);
	mem_free_if(cached->last_modified);
	mem_free_if(cached->ssl_info);
	mem_free_if(cached->encoding_info);
	mem_free_if(cached->etag);

	mem_free(cached);
}
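/* A hedged sketch (not from the original file) of the caller-side
 * discipline the assertions in delete_cache_entry() imply: check both
 * usage predicates before deleting an entry, as garbage_collection()
 * below does. */
#if 0
	if (!is_object_used(cached) && !is_entry_used(cached))
		delete_cache_entry(cached);
#endif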
struct uri *
redirect_cache(struct cache_entry *cached, unsigned char *location,
	       int get, int incomplete)
{
	unsigned char *uristring;

	/* XXX: I am a little puzzled whether we should only use the cache
	 * entry's URI if it is valid. Hopefully always using it won't hurt.
	 * Currently we handle directory redirects, where "/" should be
	 * appended, specially; dunno if join_urls() could be made to handle
	 * that. --jonas */

	/* XXX: We are assuming here that incomplete will only be zero when
	 * doing these fake redirects whose only purpose is to add an ending
	 * slash *cough* dirseparator to the end of the URI. */
	if (incomplete == 0 && location[0] == '/' && location[1] == 0) {
		/* To be sure, use get_uri_string() to get rid of post data. */
		uristring = get_uri_string(cached->uri, URI_ORIGINAL);
		if (uristring) add_to_strn(&uristring, location);
	} else {
		uristring = join_urls(cached->uri, location);
	}

	if (!uristring) return NULL;

	/* Only add the post data if the redirect should not use the GET
	 * method. This is tied to the HTTP handling of the 303 and (if
	 * protocol.http.bugs.broken_302_redirect is enabled) the 302 status
	 * code handling. */
	if (cached->uri->post
	    && !cached->redirect_get
	    && !get) {
		/* XXX: Add POST_CHAR and post data assuming URI components
		 * belong to one string. */

		/* To be certain we don't append post data twice in some
		 * conditions... --Zas */
		assert(!strchr(uristring, POST_CHAR));

		add_to_strn(&uristring, cached->uri->post - 1);
	}

	if (cached->redirect) done_uri(cached->redirect);
	cached->redirect = get_uri(uristring, 0);
	cached->redirect_get = get;
	if (incomplete >= 0) cached->incomplete = incomplete;

	mem_free(uristring);

	return cached->redirect;
}
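/* Hedged examples (not from the original file) of the two call styles the
 * comments in redirect_cache() describe. The variable names are made up;
 * passing incomplete == -1 leaves cached->incomplete untouched. */
#if 0
	/* A real redirect taken from a Location: response header. */
	redirect_cache(cached, location_header, use_get_method, -1);

	/* The fake redirect whose only purpose is to append the ending
	 * dirseparator; here incomplete is 0. */
	redirect_cache(cached, "/", 0, 0);
#endif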
void
garbage_collection(int whole)
{
	struct cache_entry *cached;
	/* We recompute cache_size when scanning cache entries, to ensure
	 * consistency. */
	long old_cache_size = 0;
	/* The maximal cache size tolerated by the user. Note that this only
	 * covers the size of the "just stored" unused cache entries; used
	 * cache entries are not counted toward it. */
	long opt_cache_size = get_opt_long("document.cache.memory.size");
	/* The low-threshold cache size. Basically, when the cache size is
	 * higher than opt_cache_size, we free the cache so that there is no
	 * more than this value in the cache anymore. This is to make sure we
	 * aren't cleaning the cache too frequently when working with a lot
	 * of small cache entries but rather free more and then let it grow a
	 * little more as well. */
	long gc_cache_size = opt_cache_size * MEMORY_CACHE_GC_PERCENT / 100;
	/* The cache size we aim to reach. */
	long new_cache_size = cache_size;
#ifdef DEBUG_CACHE
	/* Whether we've hit a used (unfreeable) entry when collecting
	 * garbage. */
	int obstacle_entry = 0;
#endif

#ifdef DEBUG_CACHE
	DBG("gc whole=%d opt_cache_size=%ld gc_cache_size=%ld",
	    whole, opt_cache_size, gc_cache_size);
#endif

	if (!whole && cache_size <= opt_cache_size) return;


	/* Scanning cache, pass #1:
	 * Weed out the used cache entries from @new_cache_size, so that we
	 * will work only with the unused entries from then on. Also ensure
	 * that @cache_size is in sync. */

	foreach (cached, cache_entries) {
		old_cache_size += cached->data_size;

		if (!is_object_used(cached) && !is_entry_used(cached))
			continue;

		new_cache_size -= cached->data_size;

		assertm(new_cache_size >= 0, "cache_size (%ld) underflow: %ld",
			cache_size, new_cache_size);
		if_assert_failed { new_cache_size = 0; }
	}

	assertm(old_cache_size == cache_size,
		"cache_size out of sync: %ld != (actual) %ld",
		cache_size, old_cache_size);
	if_assert_failed { cache_size = old_cache_size; }

	if (!whole && new_cache_size <= opt_cache_size) return;


	/* Scanning cache, pass #2:
	 * Mark potential targets for destruction, from the oldest to the
	 * newest. */

	foreachback (cached, cache_entries) {
		/* Have we already shrunk enough? */
		if (!whole && new_cache_size <= gc_cache_size)
			goto shrinked_enough;

		/* Skip used cache entries. */
		if (is_object_used(cached) || is_entry_used(cached)) {
#ifdef DEBUG_CACHE
			obstacle_entry = 1;
#endif
			cached->gc_target = 0;
			continue;
		}

		/* FIXME: Optionally take cached->max_age into consideration,
		 * but that will probably complicate things too much. We'd
		 * have to sort entries to prioritize removing the oldest
		 * entries. */

		/* Mark me for destruction, sir. */
		cached->gc_target = 1;
		new_cache_size -= cached->data_size;

		assertm(new_cache_size >= 0, "cache_size (%ld) underflow: %ld",
			cache_size, new_cache_size);
		if_assert_failed { new_cache_size = 0; }
	}

	/* If we'd free the whole cache... */
	assertm(new_cache_size == 0,
		"cache_size (%ld) overflow: %ld",
		cache_size, new_cache_size);
	if_assert_failed { new_cache_size = 0; }

shrinked_enough:

	/* Now turn around and start walking in the opposite direction. */
	cached = cached->next;

	/* Something is strange when we decided all is ok before dropping any
	 * cache entry. */
	if ((void *) cached == &cache_entries) return;


	if (!whole) {
		struct cache_entry *entry;

		/* Scanning cache, pass #3:
		 * Walk back in the cache and unmark the cache entries which
		 * could still fit into the cache. */

		/* This makes sense when the newest entry is HUGE and after
		 * it, there's just plenty of tiny entries. By this point, all
		 * the tiny entries would be marked for deletion even though
		 * it'd be enough to free the huge entry. This actually fixes
		 * that situation. */

		for (entry = cached;
		     (void *) entry != &cache_entries;
		     entry = entry->next) {
			long newer_cache_size = new_cache_size + entry->data_size;

			if (newer_cache_size > gc_cache_size)
				continue;

			new_cache_size = newer_cache_size;
			entry->gc_target = 0;
		}
	}


	/* Scanning cache, pass #4:
	 * Destroy the marked entries. So sad, but that's life, bro'. */

	for (; (void *) cached != &cache_entries; ) {
		cached = cached->next;
		if (cached->prev->gc_target)
			delete_cache_entry(cached->prev);
	}


#ifdef DEBUG_CACHE
	if ((whole || !obstacle_entry) && cache_size > gc_cache_size) {
		DBG("garbage collection doesn't work, cache size %ld > %ld, "
		    "document.cache.memory.size set to: %ld bytes",
		    cache_size, gc_cache_size,
		    get_opt_long("document.cache.memory.size"));
	}
#endif
}
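/* A hedged usage sketch (not from the original file) of the two modes of
 * garbage_collection(): whole == 0 trims unused entries down to the
 * MEMORY_CACHE_GC_PERCENT threshold, while whole != 0 drops every unused
 * entry regardless of the configured cache size. */
#if 0
	/* Periodic trim, e.g. after new data has been cached. */
	garbage_collection(0);

	/* Full flush, e.g. when purging the cache or shutting down. */
	garbage_collection(1);
#endif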