📄 vmscan.c
	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}
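/*
 * Illustrative sketch, not part of the original vmscan.c: a hypothetical
 * caller of remove_mapping() following the contract in the comment above.
 * The page must be locked and the caller must hold exactly one reference;
 * on success the pagecache reference has already been dropped, so the
 * caller's reference is the last one.  The function name below is made up
 * for illustration only.
 */
static int example_try_detach(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	VM_BUG_ON(!PageLocked(page));

	/* Fails (returns 0) if the page is dirty or has extra references. */
	if (mapping && remove_mapping(mapping, page))
		return 1;	/* detached from the pagecache */
	return 0;		/* leave the page where it is */
}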
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
#ifdef CONFIG_UNEVICTABLE_LRU
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page, NULL)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In the event of a race, the worst case is that we end up
		 * with an unevictable page on the [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_is_file_cache(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on the zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
	}

	/*
	 * The page's status can change while we move it among the LRU lists.
	 * If an evictable page ends up on the unevictable list it will never
	 * be freed, so check again after adding it to the list.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/*
		 * Otherwise someone else dropped this page from the LRU, so
		 * it will be freed or put back onto the LRU again.  There is
		 * nothing to do here.
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}

#else /* CONFIG_UNEVICTABLE_LRU */

void putback_lru_page(struct page *page)
{
	int lru;

	VM_BUG_ON(PageLRU(page));

	lru = !!TestClearPageActive(page) + page_is_file_cache(page);
	lru_cache_add_lru(page, lru);
	put_page(page);
}
#endif /* CONFIG_UNEVICTABLE_LRU */
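/*
 * Illustrative sketch, not part of the original vmscan.c: the usual pairing
 * of isolate_lru_page() with putback_lru_page() described above.
 * isolate_lru_page() takes its own page reference on success (0 return),
 * and putback_lru_page() drops it again; lru_lock must not be held and
 * interrupts must be enabled around the putback.  The function name below
 * is hypothetical.
 */
static void example_isolate_and_putback(struct page *page)
{
	if (isolate_lru_page(page))
		return;		/* page was not on an LRU list */

	/*
	 * ... work on the isolated page here (e.g. migration or mlock
	 * processing), with the page safely off every LRU list ...
	 */

	/* Re-add to the appropriate LRU and drop the isolation ref. */
	putback_lru_page(page);
}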
/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc,
					enum pageout_io sync_writeback)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			/*
			 * Synchronous reclaim is performed in two passes,
			 * first an asynchronous pass over the list to
			 * start parallel writeback, and a second synchronous
			 * pass to wait for the IO to complete.  Wait here
			 * for any page for which writeback has already
			 * started.
			 */
			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
				wait_on_page_writeback(page);
			else
				goto keep_locked;
		}

		referenced = page_referenced(page, 1, sc->mem_cgroup);
		/* In active use or really unfreeable?  Activate it. */
		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
					referenced && page_mapping_inuse(page))
			goto activate_locked;

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sync_writeback)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.
				 * Go ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is
		 * actually clean (all its buffers are clean).  This happens
		 * if the buffers were written out directly, with submit_bh().
		 * ext3 will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references).
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;
		if (!pagevec_add(&freed_pvec, page)) {
			__pagevec_free(&freed_pvec);
			pagevec_reinit(&freed_pvec);
		}
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_free(&freed_pvec);
	count_vm_events(PGACTIVATE, pgactivate);
	return nr_reclaimed;
}
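/*
 * Illustrative sketch, not part of the original vmscan.c: the two-pass
 * pattern that the PageWriteback() comment inside shrink_page_list()
 * describes.  An asynchronous pass starts writeback; pages that could not
 * be reclaimed are spliced back onto page_list, and a second pass with
 * PAGEOUT_IO_SYNC makes shrink_page_list() wait for that I/O.  The real
 * caller adds further checks (congestion waiting, lumpy-reclaim
 * conditions) that are omitted here; the function name is hypothetical.
 */
static unsigned long example_two_pass_reclaim(struct list_head *page_list,
					      struct scan_control *sc)
{
	unsigned long nr_freed;

	/* Pass 1: kick off writeback without waiting for it. */
	nr_freed = shrink_page_list(page_list, sc, PAGEOUT_IO_ASYNC);

	/* Pass 2: wait for the writeback started above, then reclaim. */
	if (!list_empty(page_list))
		nr_freed += shrink_page_list(page_list, sc, PAGEOUT_IO_SYNC);

	return nr_freed;
}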
/* LRU Isolation modes. */
#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, int mode, int file)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
		return ret;

	if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
		return ret;

	/*
	 * When this function is being called for lumpy reclaim, we
	 * initially look into all LRU pages, active, inactive and
	 * unevictable; only give shrink_page_list evictable pages.
	 */
	if (PageUnevictable(page))
		return ret;

	ret = -EBUSY;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
		mem_cgroup_del_lru(page);
	}

	return ret;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 * @order:	The caller's attempted allocation order
 * @mode:	One of the LRU isolation modes
 * @file:	True [1] if isolating file [!anon] pages
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned, int order, int mode, int file)
{
	unsigned long nr_taken = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		unsigned long pfn;
		unsigned long end_pfn;
		unsigned long page_pfn;
		int zone_id;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode, file)) {
		case 0:
			list_move(&page->lru, dst);
			nr_taken++;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}

		if (!order)
			continue;

		/*
		 * Attempt to take all pages in the order aligned region
		 * surrounding the tag page.  Only take those pages of
		 * the same active state as that tag page.  We may safely
		 * round the target page pfn down to the requested order,
		 * as the mem_map is guaranteed valid out to MAX_ORDER;
		 * if that page is in a different zone we will detect
		 * it from its zone id and abort this block scan.
		 */
		zone_id = page_zone_id(page);
		page_pfn = page_to_pfn(page);
		pfn = page_pfn & ~((1 << order) - 1);
		end_pfn = pfn + (1 << order);
		for (; pfn < end_pfn; pfn++) {
			struct page *cursor_page;

			/* The target page is in the block, ignore it. */
			if (unlikely(pfn == page_pfn))
				continue;

			/* Avoid holes within the zone. */
			if (unlikely(!pfn_valid_within(pfn)))
				break;

			cursor_page = pfn_to_page(pfn);

			/* Check that we have not crossed a zone boundary. */
			if (unlikely(page_zone_id(cursor_page) != zone_id))
				continue;
			switch (__isolate_lru_page(cursor_page, mode, file)) {
			case 0:
				list_move(&cursor_page->lru, dst);
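/*
 * Illustrative sketch, not part of the original vmscan.c (whose listing is
 * cut off above): the locking pattern implied by the isolate_lru_pages()
 * comment, i.e. "Appropriate locks must be held before calling this
 * function".  A caller holds zone->lru_lock only while detaching a batch,
 * then works on the batch with the lock dropped.  The function name, the
 * choice of the inactive file list and the zone->lru[] field access are
 * assumptions based on this kernel era, not a copy of the real caller.
 */
static unsigned long example_isolate_batch(struct zone *zone,
					   unsigned long nr_to_scan,
					   struct list_head *dst,
					   int order, int mode, int file)
{
	unsigned long nr_taken, nr_scanned;

	spin_lock_irq(&zone->lru_lock);
	nr_taken = isolate_lru_pages(nr_to_scan,
				     &zone->lru[LRU_INACTIVE_FILE].list,
				     dst, &nr_scanned, order, mode, file);
	spin_unlock_irq(&zone->lru_lock);

	/* The batch on *dst can now be processed without the LRU lock. */
	return nr_taken;
}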