📄 filemap.c
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/locks.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/swapctl.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/iobuf.h>
#include <linux/compiler.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mman.h>

#include <linux/highmem.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

atomic_t page_cache_size = ATOMIC_INIT(0);
unsigned int page_hash_bits;
struct page **page_hash_table;

int vm_max_readahead = 31;
int vm_min_readahead = 3;
EXPORT_SYMBOL(vm_max_readahead);
EXPORT_SYMBOL(vm_min_readahead);

spinlock_t pagecache_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

/*
 * NOTE: to avoid deadlocking you must never acquire the pagemap_lru_lock
 * with the pagecache_lock held.
 *
 * Ordering:
 *	swap_lock ->
 *		pagemap_lru_lock ->
 *			pagecache_lock
 */
spinlock_t pagemap_lru_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

#define CLUSTER_PAGES		(1 << page_cluster)
#define CLUSTER_OFFSET(x)	(((x) >> page_cluster) << page_cluster)

static void FASTCALL(add_page_to_hash_queue(struct page * page, struct page **p));
static void add_page_to_hash_queue(struct page * page, struct page **p)
{
	struct page *next = *p;

	*p = page;
	page->next_hash = next;
	page->pprev_hash = p;
	if (next)
		next->pprev_hash = &page->next_hash;
	if (page->buffers)
		PAGE_BUG(page);
	atomic_inc(&page_cache_size);
}

static inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
{
	struct list_head *head = &mapping->clean_pages;

	mapping->nrpages++;
	list_add(&page->list, head);
	page->mapping = mapping;
}

static inline void remove_page_from_inode_queue(struct page * page)
{
	struct address_space * mapping = page->mapping;

	mapping->nrpages--;
	list_del(&page->list);
	page->mapping = NULL;
}

static inline void remove_page_from_hash_queue(struct page * page)
{
	struct page *next = page->next_hash;
	struct page **pprev = page->pprev_hash;

	if (next)
		next->pprev_hash = pprev;
	*pprev = next;
	page->pprev_hash = NULL;
	atomic_dec(&page_cache_size);
}

/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.
 */
void __remove_inode_page(struct page *page)
{
	if (PageDirty(page))
		BUG();
	remove_page_from_inode_queue(page);
	remove_page_from_hash_queue(page);
}

void remove_inode_page(struct page *page)
{
	if (!PageLocked(page))
		PAGE_BUG(page);

	spin_lock(&pagecache_lock);
	__remove_inode_page(page);
	spin_unlock(&pagecache_lock);
}

static inline int sync_page(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
		return mapping->a_ops->sync_page(page);
	return 0;
}

/*
 * Add a page to the dirty page list.
 */
void set_page_dirty(struct page *page)
{
	if (!test_and_set_bit(PG_dirty, &page->flags)) {
		struct address_space *mapping = page->mapping;

		if (mapping) {
			spin_lock(&pagecache_lock);
			list_del(&page->list);
			list_add(&page->list, &mapping->dirty_pages);
			spin_unlock(&pagecache_lock);

			if (mapping->host)
				mark_inode_dirty_pages(mapping->host);
		}
	}
}

/**
 * invalidate_inode_pages - Invalidate all the unlocked pages of one inode
 * @inode: the inode whose pages we want to invalidate
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 */
void invalidate_inode_pages(struct inode * inode)
{
	struct list_head *head, *curr;
	struct page * page;

	head = &inode->i_mapping->clean_pages;

	spin_lock(&pagemap_lru_lock);
	spin_lock(&pagecache_lock);
	curr = head->next;

	while (curr != head) {
		page = list_entry(curr, struct page, list);
		curr = curr->next;

		/* We cannot invalidate something in dirty.. */
		if (PageDirty(page))
			continue;

		/* ..or locked */
		if (TryLockPage(page))
			continue;

		if (page->buffers && !try_to_free_buffers(page, 0))
			goto unlock;

		if (page_count(page) != 1)
			goto unlock;

		__lru_cache_del(page);
		__remove_inode_page(page);
		UnlockPage(page);
		page_cache_release(page);
		continue;
unlock:
		UnlockPage(page);
		continue;
	}

	spin_unlock(&pagecache_lock);
	spin_unlock(&pagemap_lru_lock);
}

/* Use the filesystem's flushpage method if it provides one, else the generic block one. */
static int do_flushpage(struct page *page, unsigned long offset)
{
	int (*flushpage) (struct page *, unsigned long);
	flushpage = page->mapping->a_ops->flushpage;
	if (flushpage)
		return (*flushpage)(page, offset);
	return block_flushpage(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (page->buffers)
		do_flushpage(page, partial);
}

static void truncate_complete_page(struct page *page)
{
	/* Leave it on the LRU if it gets converted into anonymous buffers */
	if (!page->buffers || do_flushpage(page, 0))
		lru_cache_del(page);

	/*
	 * We remove the page from the page cache _after_ we have
	 * destroyed all buffer-cache references to it. Otherwise some
	 * other process might think this inode page is not in the
	 * page cache and creates a buffer-cache alias to it causing
	 * all sorts of fun problems ...
	 */
	ClearPageDirty(page);
	ClearPageUptodate(page);
	remove_inode_page(page);
	page_cache_release(page);
}

static int FASTCALL(truncate_list_pages(struct list_head *, unsigned long, unsigned *));
static int truncate_list_pages(struct list_head *head, unsigned long start, unsigned *partial)
{
	struct list_head *curr;
	struct page * page;
	int unlocked = 0;

restart:
	curr = head->prev;
	while (curr != head) {
		unsigned long offset;

		page = list_entry(curr, struct page, list);
		offset = page->index;

		/* Is this one of the pages to truncate? */
		if ((offset >= start) || (*partial && (offset + 1) == start)) {
			int failed;

			page_cache_get(page);
			failed = TryLockPage(page);

			list_del(head);
			if (!failed)
				/* Restart after this page */
				list_add_tail(head, curr);
			else
				/* Restart on this page */
				list_add(head, curr);

			spin_unlock(&pagecache_lock);
			unlocked = 1;

			if (!failed) {
				if (*partial && (offset + 1) == start) {
					truncate_partial_page(page, *partial);
					*partial = 0;
				} else
					truncate_complete_page(page);

				UnlockPage(page);
			} else
				wait_on_page(page);

			page_cache_release(page);

			if (current->need_resched) {
				__set_current_state(TASK_RUNNING);
				schedule();
			}

			spin_lock(&pagecache_lock);
			goto restart;
		}
		curr = curr->prev;
	}
	return unlocked;
}

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Truncate the page cache at a set offset, removing the pages
 * that are beyond that offset (and zeroing out partial pages).
 * If any page is locked we wait for it to become unlocked.
 */
void truncate_inode_pages(struct address_space * mapping, loff_t lstart)
{
	unsigned long start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	int unlocked;

	spin_lock(&pagecache_lock);
	do {
		unlocked = truncate_list_pages(&mapping->clean_pages, start, &partial);
		unlocked |= truncate_list_pages(&mapping->dirty_pages, start, &partial);
		unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial);
	} while (unlocked);
	/* Traversed all three lists without dropping the lock */
	spin_unlock(&pagecache_lock);
}

static inline int invalidate_this_page2(struct page * page,
					struct list_head * curr,
					struct list_head * head)
{
	int unlocked = 1;

	/*
	 * The page is locked and we hold the pagecache_lock as well
	 * so both page_count(page) and page->buffers stay constant here.
	 */
	if (page_count(page) == 1 + !!page->buffers) {
		/* Restart after this page */
		list_del(head);
		list_add_tail(head, curr);

		page_cache_get(page);
		spin_unlock(&pagecache_lock);
		truncate_complete_page(page);
	} else {
		if (page->buffers) {
			/* Restart after this page */
			list_del(head);
			list_add_tail(head, curr);

			page_cache_get(page);
			spin_unlock(&pagecache_lock);
			block_invalidate_page(page);
		} else
			unlocked = 0;

		ClearPageDirty(page);
		ClearPageUptodate(page);
	}

	return unlocked;
}

static int FASTCALL(invalidate_list_pages2(struct list_head *));
static int invalidate_list_pages2(struct list_head *head)
{
	struct list_head *curr;
	struct page * page;
	int unlocked = 0;

restart:
	curr = head->prev;
	while (curr != head) {
		page = list_entry(curr, struct page, list);

		if (!TryLockPage(page)) {
			int __unlocked;

			__unlocked = invalidate_this_page2(page, curr, head);
			UnlockPage(page);
			unlocked |= __unlocked;
			if (!__unlocked) {
				curr = curr->prev;
				continue;
			}
		} else {
			/* Restart on this page */
			list_del(head);
			list_add(head, curr);

			page_cache_get(page);
			spin_unlock(&pagecache_lock);
			unlocked = 1;
			wait_on_page(page);
		}

		page_cache_release(page);
		if (current->need_resched) {
			__set_current_state(TASK_RUNNING);
			schedule();
		}

		spin_lock(&pagecache_lock);
		goto restart;
	}
	return unlocked;
}

/**
 * invalidate_inode_pages2 - Clear all the dirty bits around if it can't
 * free the pages because they're mapped.
 * @mapping: the address_space whose pages we want to invalidate
 */
void invalidate_inode_pages2(struct address_space * mapping)
{
	int unlocked;

	spin_lock(&pagecache_lock);
	do {
		unlocked = invalidate_list_pages2(&mapping->clean_pages);
		unlocked |= invalidate_list_pages2(&mapping->dirty_pages);
		unlocked |= invalidate_list_pages2(&mapping->locked_pages);
	} while (unlocked);
	spin_unlock(&pagecache_lock);
}

/*
 * Walk the hash chain looking for the page of @mapping at index @offset.
 * Caller must hold the pagecache_lock; returns NULL if the page is not present.
 */
static inline struct page * __find_page_nolock(struct address_space *mapping, unsigned long offset, struct page *page)
{
	goto inside;

	for (;;) {
		page = page->next_hash;
inside:
		if (!page)
			goto not_found;
		if (page->mapping != mapping)
			continue;
		if (page->index == offset)
			break;
	}

not_found:
	return page;
}

/*
 * Run @fn on every page with buffers in the index range [start, end) on the given list.
 */
static int do_buffer_fdatasync(struct list_head *head, unsigned long start, unsigned long end, int (*fn)(struct page *))
{
	struct list_head *curr;
	struct page *page;
	int retval = 0;

	spin_lock(&pagecache_lock);
	curr = head->next;
	while (curr != head) {
		page = list_entry(curr, struct page, list);
		curr = curr->next;
		if (!page->buffers)
			continue;
		if (page->index >= end)
			continue;
		if (page->index < start)
			continue;

		page_cache_get(page);
		spin_unlock(&pagecache_lock);
		lock_page(page);

		/* The buffers could have been freed while we waited for the page lock */
		if (page->buffers)
			retval |= fn(page);

		UnlockPage(page);
		spin_lock(&pagecache_lock);
		curr = page->list.next;
		page_cache_release(page);
	}
	spin_unlock(&pagecache_lock);

	return retval;
}

/*
 * Two-stage data sync: first start the IO, then go back and
 * collect the information..
 */
int generic_buffer_fdatasync(struct inode *inode, unsigned long start_idx, unsigned long end_idx)
{
	int retval;

	/* writeout dirty buffers on pages from both clean and dirty lists */
	retval = do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, writeout_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, writeout_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, writeout_one_page);

	/* now wait for locked buffers on pages from both clean and dirty lists */
	retval |= do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, waitfor_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, waitfor_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, waitfor_one_page);

	return retval;
}

/*
 * In-memory filesystems have to fail their