
migrate.c

Linux memory-management module source code (page migration, mm/migrate.c)
Language: C
Page 1 of 2
/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		putback_lru_page(page);
		count++;
	}
	return count;
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
		struct page *old, struct page *new)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	unsigned long addr = page_address_in_vma(new, vma);

	if (addr == -EFAULT)
		return;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		return;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
		goto out;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, pte);

out:
	pte_unmap_unlock(ptep, ptl);
}
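
/*
 * Illustrative sketch only, not part of the original file: the migration
 * entries that remove_migration_pte() restores are installed on the unmap
 * side, in try_to_unmap_one(), roughly as below. Error handling, rmap
 * accounting and TLB/cache maintenance are omitted.
 */
#if 0
	pte_t pteval = ptep_clear_flush(vma, addr, ptep);
	swp_entry_t entry = make_migration_entry(page, pte_write(pteval));

	/* Remember which page was mapped and whether the pte was writable */
	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
#endif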
/*
 * Note that remove_file_migration_ptes will only work on regular mappings,
 * Nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
	struct vm_area_struct *vma;
	struct address_space *mapping = page_mapping(new);
	struct prio_tree_iter iter;
	pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	if (!mapping)
		return;

	spin_lock(&mapping->i_mmap_lock);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
		remove_migration_pte(vma, old, new);

	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	mapping = (unsigned long)new->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
		return;

	/*
	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_migration_pte(vma, old, new);

	spin_unlock(&anon_vma->lock);
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	if (PageAnon(new))
		remove_anon_migration_ptes(old, new);
	else
		remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even failed, page fault
	 * will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
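
/*
 * Illustrative sketch only, not part of the original file: the do_swap_page()
 * caller mentioned above reaches migration_entry_wait() roughly like this
 * before letting the fault be retried.
 */
#if 0
	entry = pte_to_swp_entry(orig_pte);
	if (is_migration_entry(entry)) {
		migration_entry_wait(mm, pmd, address);
		goto out;	/* fault will be retried */
	}
#endif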
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + !!PagePrivate(page);
	if (page_count(page) != expected_count ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);

	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	int anon;

	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else
		unevictable_migrate_page(newpage, page);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	/* page->mapping contains a flag for PageAnon() */
	anon = PageAnon(page);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
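
/*
 * Illustrative sketch only, not part of the original file: filesystems opt in
 * to page migration through the ->migratepage method of their
 * address_space_operations; ones with no special requirements can point it
 * straight at the generic migrate_page() above. The readpage/writepage
 * helpers named here are hypothetical.
 */
#if 0
static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.writepage	= example_writepage,
	.migratepage	= migrate_page,
};
#endif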
#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (PagePrivate(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page.*/
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
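
The listing breaks off here and continues on page 2. From user space, the migration machinery above is normally exercised through the move_pages(2) system call (or indirectly via mempolicy and cpuset changes). Below is a minimal user-space sketch, assuming libnuma and its development headers are installed and that NUMA node 0 exists; the file name move_one_page.c is only illustrative. Build with: gcc move_one_page.c -lnuma

/* move_one_page.c - migrate one of our own pages to NUMA node 0 via libnuma */
#include <numa.h>
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *buf = NULL;
	void *pages[1];
	int nodes[1] = { 0 };		/* target NUMA node; adjust for your topology */
	int status[1] = { -1 };		/* filled with the node each page ended up on */

	if (numa_available() < 0) {
		fprintf(stderr, "NUMA is not available on this system\n");
		return 1;
	}

	if (posix_memalign(&buf, page_size, page_size) != 0)
		return 1;
	memset(buf, 0, page_size);	/* touch the page so it is actually allocated */

	pages[0] = buf;
	/* move_pages(2): a pid of 0 means the calling process */
	if (numa_move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
		perror("numa_move_pages");
	else
		printf("page is now on node %d\n", status[0]);

	free(buf);
	return 0;
}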
