
rmap.c

The latest and most stable Linux memory-management module source code
Language: C
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else if (PAGE_MIGRATION && migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
 * rather than unmapping them.  If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * MLOCK_PAGES => feature is configured.
	 * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);   /* no-op if already mlocked */
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}

/*
 * common handling for pages mapped in VM_LOCKED vmas
 */
static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
{
	int mlocked = 0;

	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			mlocked++;	/* really mlocked the page */
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return mlocked;
}

/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
 * rmap method
 * @page: the page to unmap/unlock
 * @unlock:  request for unlock rather than unmap [unlikely]
 * @migration:  unmapping for migration - ignored if @unlock
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_anon(struct page *page, int unlock, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned int mlocked = 0;
	int ret = SWAP_AGAIN;

	if (MLOCK_PAGES && unlikely(unlock))
		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!((vma->vm_flags & VM_LOCKED) &&
			      page_mapped_in_vma(page, vma)))
				continue;  /* must visit all unlocked vmas */
			ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
		} else {
			ret = try_to_unmap_one(page, vma, migration);
			if (ret == SWAP_FAIL || !page_mapped(page))
				break;
		}
		if (ret == SWAP_MLOCK) {
			mlocked = try_to_mlock_page(page, vma);
			if (mlocked)
				break;	/* stop if actually mlocked page */
		}
	}

	page_unlock_anon_vma(anon_vma);

	if (mlocked)
		ret = SWAP_MLOCK;	/* actually mlocked the page */
	else if (ret == SWAP_MLOCK)
		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */

	return ret;
}

/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @unlock:  request for unlock rather than unmap [unlikely]
 * @migration:  unmapping for migration - ignored if @unlock
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_file(struct page *page, int unlock, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;
	unsigned int mlocked = 0;

	if (MLOCK_PAGES && unlikely(unlock))
		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!((vma->vm_flags & VM_LOCKED) &&
						page_mapped_in_vma(page, vma)))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;
		} else {
			ret = try_to_unmap_one(page, vma, migration);
			if (ret == SWAP_FAIL || !page_mapped(page))
				goto out;
		}
		if (ret == SWAP_MLOCK) {
			mlocked = try_to_mlock_page(page, vma);
			if (mlocked)
				break;  /* stop if actually mlocked page */
		}
	}

	if (mlocked)
		goto out;

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!(vma->vm_flags & VM_LOCKED))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
			goto out;		/* no need to look further */
		}
		if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (!MLOCK_PAGES && !migration &&
			    (vma->vm_flags & VM_LOCKED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while ( cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				ret = try_to_unmap_cluster(cursor, &mapcount,
								vma, page);
				if (ret == SWAP_MLOCK)
					mlocked = 2;	/* to return below */
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);

	if (mlocked)
		ret = SWAP_MLOCK;	/* actually mlocked the page */
	else if (ret == SWAP_MLOCK)
		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */

	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @migration: migration flag
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, 0, migration);
	else
		ret = try_to_unmap_file(page, 0, migration);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}

#ifdef CONFIG_UNEVICTABLE_LRU
/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_SUCCESS	- no vma's holding page mlocked.
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (PageAnon(page))
		return try_to_unmap_anon(page, 1, 0);
	else
		return try_to_unmap_file(page, 1, 0);
}
#endif
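
For context, the return codes documented above for try_to_unmap() are interpreted by the page-reclaim path (shrink_page_list() in mm/vmscan.c of this kernel generation). The sketch below is only an illustration of that contract, not code from this file or from vmscan.c; the helper name reclaim_one_page() is hypothetical and the snippet assumes the usual kernel headers for struct page, page_mapped() and the SWAP_* constants.

/*
 * Illustrative sketch (hypothetical helper): how a reclaim caller might
 * react to try_to_unmap().  Failure or a missed mapping keeps the page,
 * an mlocked page goes to the unevictable list, success allows pageout.
 */
static int reclaim_one_page(struct page *page)
{
	if (page_mapped(page)) {
		switch (try_to_unmap(page, 0)) {
		case SWAP_FAIL:
			return 0;	/* unswappable: reactivate the page */
		case SWAP_AGAIN:
			return 0;	/* missed a mapping: keep and retry later */
		case SWAP_MLOCK:
			return 0;	/* mlocked: cull to the unevictable list */
		case SWAP_SUCCESS:
			break;		/* all ptes removed: safe to page out */
		}
	}
	return 1;			/* proceed with writeback/free */
}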
