
📄 rmap.c

📁 The latest, most stable Linux memory-management module source code
💻 C
📖 Page 1 of 3
	}
	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked,
			struct mem_cgroup *mem_cont)
{
	int referenced = 0;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont);
		else if (is_locked)
			referenced += page_referenced_file(page, mem_cont);
		else if (!trylock_page(page))
			referenced++;
		else {
			if (page->mapping)
				referenced +=
					page_referenced_file(page, mem_cont);
			unlock_page(page);
		}
	}

	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	struct anon_vma *anon_vma = vma->anon_vma;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}

#ifdef CONFIG_DEBUG_VM
/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page:	the page to add the mapping to
 * @vma:	the vm area being duplicated
 * @address:	the user virtual address mapped
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
 * quicker.
 *
 * The caller needs to hold the pte lock.
 */
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	if (PageAnon(page))
		__page_check_anon_rmap(page, vma, address);
	atomic_inc(&page->_mapcount);
}
#endif

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		/*
		 * Now that the last pte has gone, s390 must transfer dirty
		 * flag from storage key to struct page.  We can usually skip
		 * this if the page is anon, so about to be freed; but perhaps
		 * not if it's in swapcache - there might be another pte slot
		 * containing the swap entry, but page not yet written to swap.
		 */
		if ((!PageAnon(page) || PageSwapCache(page)) &&
		    page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}
		if (PageAnon(page))
			mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page,
			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration) {
		if (vma->vm_flags & VM_LOCKED) {
			ret = SWAP_MLOCK;
			goto out_unmap;
		}
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
		} else if (PAGE_MIGRATION) {
			/*
			 * Store the pfn of the page in a special migration
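The listing breaks off above at the page boundary (the file continues on page 2). For orientation only, here is a minimal, hedged sketch of how a reclaim-side caller is expected to combine page_referenced() and try_to_unmap() as declared in this era's <linux/rmap.h>. The helper name example_try_reclaim_page and its simplified keep/drop policy are assumptions for illustration; it is not part of rmap.c, and the real policy in mm/vmscan.c is considerably more involved.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/memcontrol.h>

/*
 * Illustrative sketch only (not kernel code from rmap.c): returns 1 if the
 * page was unmapped and may be reclaimed, 0 if it should be kept.
 */
static int example_try_reclaim_page(struct page *page,
				    struct mem_cgroup *mem_cont)
{
	/* We pass is_locked == 1 below, so we must hold the page lock. */
	if (!trylock_page(page))
		return 0;

	/* Count and clear "accessed" hints from every pte mapping the page. */
	if (page_referenced(page, 1, mem_cont)) {
		unlock_page(page);
		return 0;	/* recently used: keep (reactivate) it */
	}

	/* Detach every pte; SWAP_FAIL/SWAP_MLOCK/SWAP_AGAIN mean back off. */
	if (page_mapped(page) && try_to_unmap(page, 0) != SWAP_SUCCESS) {
		unlock_page(page);
		return 0;
	}

	unlock_page(page);
	return 1;	/* caller may now write back or free the page */
}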
