memory.c
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
        unsigned long address, pte_t *page_table, pte_t pte)
{
        struct page *old_page, *new_page;

        old_page = pte_page(pte);
        if (!VALID_PAGE(old_page))
                goto bad_wp_page;

        /*
         * We can avoid the copy if:
         * - we're the only user (count == 1)
         * - the only other user is the swap cache,
         *   and the only swap cache user is itself,
         *   in which case we can just continue to
         *   use the same swap cache (it will be
         *   marked dirty).
         */
        switch (page_count(old_page)) {
        case 2:
                /*
                 * Lock the page so that no one can look it up from
                 * the swap cache, grab a reference and start using it.
                 * Can not do lock_page, holding page_table_lock.
                 */
                if (!PageSwapCache(old_page) || TryLockPage(old_page))
                        break;
                if (is_page_shared(old_page)) {
                        UnlockPage(old_page);
                        break;
                }
                UnlockPage(old_page);
                /* FallThrough */
        case 1:
                flush_cache_page(vma, address);
                establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
                spin_unlock(&mm->page_table_lock);
                return 1;       /* Minor fault */
        }

        /*
         * Ok, we need to copy. Oh, well..
         */
        spin_unlock(&mm->page_table_lock);
        new_page = page_cache_alloc();
        if (!new_page)
                return -1;
        spin_lock(&mm->page_table_lock);

        /*
         * Re-check the pte - we dropped the lock
         */
        if (pte_same(*page_table, pte)) {
                if (PageReserved(old_page))
                        ++mm->rss;
                break_cow(vma, old_page, new_page, address, page_table);

                /* Free the old page.. */
                new_page = old_page;
        }
        spin_unlock(&mm->page_table_lock);
        page_cache_release(new_page);
        return 1;       /* Minor fault */

bad_wp_page:
        spin_unlock(&mm->page_table_lock);
        printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n", address, (unsigned long)old_page);
        return -1;
}
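/*
 * For reference: do_wp_page() relies on two small helpers that live earlier
 * in this file but are not part of this excerpt. The sketch below reproduces
 * their general shape from the 2.4-era source; treat it as a reminder of what
 * they do (install a pte while keeping the TLB/MMU cache coherent, and copy
 * the old page before mapping the new one writable), not as the verbatim code.
 */
static inline void establish_pte(struct vm_area_struct * vma, unsigned long address,
        pte_t *page_table, pte_t entry)
{
        set_pte(page_table, entry);
        flush_tlb_page(vma, address);
        update_mmu_cache(vma, address, entry);
}

static inline void break_cow(struct vm_area_struct * vma, struct page * old_page,
        struct page * new_page, unsigned long address, pte_t *page_table)
{
        copy_cow_page(old_page, new_page, address);
        flush_page_to_ram(new_page);
        flush_cache_page(vma, address);
        establish_pte(vma, address, page_table,
                pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
}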
static void vmtruncate_list(struct vm_area_struct *mpnt,
        unsigned long pgoff, unsigned long partial)
{
        do {
                struct mm_struct *mm = mpnt->vm_mm;
                unsigned long start = mpnt->vm_start;
                unsigned long end = mpnt->vm_end;
                unsigned long len = end - start;
                unsigned long diff;

                /* mapping wholly truncated? */
                if (mpnt->vm_pgoff >= pgoff) {
                        flush_cache_range(mm, start, end);
                        zap_page_range(mm, start, len);
                        flush_tlb_range(mm, start, end);
                        continue;
                }

                /* mapping wholly unaffected? */
                len = len >> PAGE_SHIFT;
                diff = pgoff - mpnt->vm_pgoff;
                if (diff >= len)
                        continue;

                /* Ok, partially affected.. */
                start += diff << PAGE_SHIFT;
                len = (len - diff) << PAGE_SHIFT;
                flush_cache_range(mm, start, end);
                zap_page_range(mm, start, len);
                flush_tlb_range(mm, start, end);
        } while ((mpnt = mpnt->vm_next_share) != NULL);
}
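/*
 * Worked example of the arithmetic above (assuming 4096-byte pages, i.e.
 * PAGE_SHIFT == PAGE_CACHE_SHIFT == 12 -- the common case, not a guarantee):
 * truncating a file to offset 10000 gives
 *
 *      pgoff   = (10000 + 4095) >> 12 = 3      (first file page wholly past EOF)
 *      partial = 10000 & 4095         = 1808   (bytes still valid in the last kept page)
 *
 * For a vma with vm_pgoff == 1 that maps 8 pages, diff = 3 - 1 = 2, so the
 * first two mapped pages stay in place and the remaining six are zapped
 * starting at vm_start + (2 << 12).
 */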
/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page. Ugly, but necessary.
 */
void vmtruncate(struct inode * inode, loff_t offset)
{
        unsigned long partial, pgoff;
        struct address_space *mapping = inode->i_mapping;
        unsigned long limit;

        if (inode->i_size < offset)
                goto do_expand;
        inode->i_size = offset;
        truncate_inode_pages(mapping, offset);
        spin_lock(&mapping->i_shared_lock);
        if (!mapping->i_mmap && !mapping->i_mmap_shared)
                goto out_unlock;

        pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        partial = (unsigned long)offset & (PAGE_CACHE_SIZE - 1);

        if (mapping->i_mmap != NULL)
                vmtruncate_list(mapping->i_mmap, pgoff, partial);
        if (mapping->i_mmap_shared != NULL)
                vmtruncate_list(mapping->i_mmap_shared, pgoff, partial);

out_unlock:
        spin_unlock(&mapping->i_shared_lock);
        /* this should go into ->truncate */
        inode->i_size = offset;
        if (inode->i_op && inode->i_op->truncate)
                inode->i_op->truncate(inode);
        return;

do_expand:
        limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
        if (limit != RLIM_INFINITY) {
                if (inode->i_size >= limit) {
                        send_sig(SIGXFSZ, current, 0);
                        goto out;
                }
                if (offset > limit) {
                        send_sig(SIGXFSZ, current, 0);
                        offset = limit;
                }
        }
        inode->i_size = offset;
        if (inode->i_op && inode->i_op->truncate)
                inode->i_op->truncate(inode);
out:
        return;
}
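/*
 * User-visible effect, as a quick illustration (ordinary user-space C, not
 * kernel code; file name and sizes are arbitrary, error checking omitted for
 * brevity). After the second ftruncate(), vmtruncate() has zapped the ptes
 * beyond the new end of file, so touching that part of the mapping faults
 * again and, with the page now past EOF, the process gets SIGBUS.
 */
#if 0   /* illustrative user-space program, kept out of the kernel build */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/trunc-demo", O_CREAT | O_RDWR | O_TRUNC, 0600);
        char *p;

        ftruncate(fd, 8192);                    /* two 4 KB pages             */
        p = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        memset(p, 'x', 8192);                   /* both pages mapped, dirty   */

        ftruncate(fd, 4096);                    /* -> vmtruncate() zaps page 1 */
        printf("%c\n", p[0]);                   /* still fine: page 0 kept    */
        p[4096] = 'y';                          /* beyond EOF: raises SIGBUS  */
        return 0;
}
#endif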
/*
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time. We also make sure to queue
 * the 'original' request together with the readahead ones...
 */
void swapin_readahead(swp_entry_t entry)
{
        int i, num;
        struct page *new_page;
        unsigned long offset;

        /*
         * Get the number of handles we should do readahead io to. Also,
         * grab temporary references on them, releasing them as io completes.
         */
        num = valid_swaphandles(entry, &offset);
        for (i = 0; i < num; offset++, i++) {
                /* Don't block on I/O for read-ahead */
                if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster
                                * (1 << page_cluster)) {
                        while (i++ < num)
                                swap_free(SWP_ENTRY(SWP_TYPE(entry), offset++));
                        break;
                }
                /* Ok, do the async read-ahead now */
                new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset), 0);
                if (new_page != NULL)
                        page_cache_release(new_page);
                swap_free(SWP_ENTRY(SWP_TYPE(entry), offset));
        }
        return;
}
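/*
 * Example of what the loop above reads (page_cluster == 4 is an illustrative
 * value; it is sized at boot from the amount of memory): valid_swaphandles()
 * hands back up to 1 << 4 = 16 swap slots from the cluster-aligned block
 * around the faulting entry, and SWP_ENTRY(SWP_TYPE(entry), offset) rebuilds
 * each sibling entry on the same swap device, just with consecutive offsets.
 * The nr_async_pages check only throttles the read-ahead; the slot the fault
 * actually needs is read (synchronously if necessary) by the caller,
 * do_swap_page() below.
 */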
static int do_swap_page(struct mm_struct * mm,
        struct vm_area_struct * vma, unsigned long address,
        pte_t * page_table, swp_entry_t entry, int write_access)
{
        struct page *page = lookup_swap_cache(entry);
        pte_t pte;

        if (!page) {
                lock_kernel();
                swapin_readahead(entry);
                page = read_swap_cache(entry);
                unlock_kernel();
                if (!page)
                        return -1;

                flush_page_to_ram(page);
                flush_icache_page(vma, page);
        }

        mm->rss++;

        pte = mk_pte(page, vma->vm_page_prot);

        /*
         * Freeze the "shared"ness of the page, ie page_count + swap_count.
         * Must lock page before transferring our swap count to already
         * obtained page count.
         */
        lock_page(page);
        swap_free(entry);
        if (write_access && !is_page_shared(page))
                pte = pte_mkwrite(pte_mkdirty(pte));
        UnlockPage(page);

        set_pte(page_table, pte);
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, address, pte);
        return 1;       /* Minor fault */
}
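/*
 * Concrete walk-through of the "freeze the sharedness" step above (the counts
 * quoted describe the intended idea, not exact values for every kernel
 * revision): suppose the swap entry is referenced by the ptes of two
 * processes, so its swap count is 2. Our swap_free(entry) drops only the
 * reference that the new pte replaces; one swap reference remains, so
 * is_page_shared() still reports the page as shared and the pte is installed
 * read-only even for a write fault, leaving the write to be resolved later by
 * do_wp_page(). Only when no other swap or page reference is left can the pte
 * be made writable and dirty right here, saving that extra fault.
 */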
/*
 * This only needs the MM semaphore
 */
static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
{
        struct page *page = NULL;
        pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));

        if (write_access) {
                page = alloc_page(GFP_HIGHUSER);
                if (!page)
                        return -1;
                clear_user_highpage(page, addr);
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
                mm->rss++;
                flush_page_to_ram(page);
        }

        set_pte(page_table, entry);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, addr, entry);
        return 1;       /* Minor fault */
}
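/*
 * What this looks like from user space (illustrative snippet, not kernel
 * code): the first read of a fresh anonymous mapping is backed by the shared
 * zero page mapped read-only, and only a write gives the process a private,
 * zeroed page of its own.
 *
 *      char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      char c = p[0];  // read fault: do_anonymous_page() maps ZERO_PAGE read-only
 *      p[0] = 1;       // write fault: do_wp_page() copies it into a private page
 */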
/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * This is called with the MM semaphore held.
 */
static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
        unsigned long address, int write_access, pte_t *page_table)
{
        struct page * new_page;
        pte_t entry;

        if (!vma->vm_ops || !vma->vm_ops->nopage)
                return do_anonymous_page(mm, vma, page_table, write_access, address);

        /*
         * The third argument is "no_share", which tells the low-level code
         * to copy, not share the page even if sharing is possible. It's
         * essentially an early COW detection.
         */
        new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
        if (new_page == NULL)   /* no page was available -- SIGBUS */
                return 0;
        if (new_page == NOPAGE_OOM)
                return -1;
        ++mm->rss;
        /*
         * This silly early PAGE_DIRTY setting removes a race
         * due to the bad i386 page protection. But it's valid
         * for other architectures too.
         *
         * Note that if write_access is true, we either now have
         * an exclusive copy of the page, or this is a shared mapping,
         * so we can make it writable and dirty to avoid having to
         * handle that later.
         */
        flush_page_to_ram(new_page);
        flush_icache_page(vma, new_page);
        entry = mk_pte(new_page, vma->vm_page_prot);
        if (write_access) {
                entry = pte_mkwrite(pte_mkdirty(entry));
        } else if (page_count(new_page) > 1 &&
                        !(vma->vm_flags & VM_SHARED))
                entry = pte_wrprotect(entry);
        set_pte(page_table, entry);

        /* no need to invalidate: a not-present page shouldn't be cached */
        update_mmu_cache(vma, address, entry);
        return 2;       /* Major fault */
}
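/*
 * Sketch of the other side of the ->nopage contract, as a 2.4-style character
 * driver might implement it (everything prefixed "mydrv_" is hypothetical;
 * the buffer is assumed to be physically contiguous kernel memory, e.g. from
 * __get_free_pages). Returning NULL makes do_no_page() report SIGBUS, and the
 * returned page must carry a reference for the mapping being set up.
 */
static char *mydrv_buffer;              /* hypothetical driver buffer */
#define MYDRV_BUF_SIZE  (64 * 1024)     /* hypothetical buffer size   */

static struct page *mydrv_nopage(struct vm_area_struct *vma,
        unsigned long address, int no_share)
{
        unsigned long offset;
        struct page *page;

        offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
        if (offset >= MYDRV_BUF_SIZE)
                return NULL;            /* out of range -> SIGBUS    */

        page = virt_to_page(mydrv_buffer + offset);
        get_page(page);                 /* reference for the new pte */
        return page;
}

static struct vm_operations_struct mydrv_vm_ops = {
        nopage: mydrv_nopage,
};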
/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures). The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * Note the "page_table_lock". It is to protect against kswapd removing
 * pages from under us. Note that kswapd only ever _removes_ pages, never
 * adds them. As such, once we have noticed that the page is not present,
 * we can drop the lock early.
 *
 * The adding of pages is protected by the MM semaphore (which we hold),
 * so we don't need to worry about a page suddenly being added into
 * our VM.
 */
static inline int handle_pte_fault(struct mm_struct *mm,
        struct vm_area_struct * vma, unsigned long address,
        int write_access, pte_t * pte)
{
        pte_t entry;

        /*
         * We need the page table lock to synchronize with kswapd
         * and the SMP-safe atomic PTE updates.
         */
        spin_lock(&mm->page_table_lock);
        entry = *pte;
        if (!pte_present(entry)) {
                /*
                 * If it truly wasn't present, we know that kswapd
                 * and the PTE updates will not touch it later. So
                 * drop the lock.
                 */
                spin_unlock(&mm->page_table_lock);
                if (pte_none(entry))
                        return do_no_page(mm, vma, address, write_access, pte);
                return do_swap_page(mm, vma, address, pte, pte_to_swp_entry(entry), write_access);
        }

        if (write_access) {
                if (!pte_write(entry))
                        return do_wp_page(mm, vma, address, pte, entry);

                entry = pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
        establish_pte(vma, address, pte, entry);
        spin_unlock(&mm->page_table_lock);
        return 1;
}
/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
        unsigned long address, int write_access)
{
        int ret = -1;
        pgd_t *pgd;
        pmd_t *pmd;

        pgd = pgd_offset(mm, address);
        pmd = pmd_alloc(pgd, address);

        if (pmd) {
                pte_t * pte = pte_alloc(pmd, address);
                if (pte)
                        ret = handle_pte_fault(mm, vma, address, write_access, pte);
        }
        return ret;
}
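/*
 * For orientation, this is roughly how an architecture's page fault handler
 * consumes the return value (modelled on the i386 do_page_fault() of the same
 * era; paraphrased, not copied): 1 and 2 are minor/major faults, 0 means the
 * ->nopage handler had no page (SIGBUS), and a negative value means out of
 * memory.
 *
 *      switch (handle_mm_fault(mm, vma, address, write)) {
 *      case 1:
 *              tsk->min_flt++;
 *              break;
 *      case 2:
 *              tsk->maj_flt++;
 *              break;
 *      case 0:
 *              goto do_sigbus;
 *      default:
 *              goto out_of_memory;
 *      }
 */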
/*
 * Simplistic page force-in..
 */
int make_pages_present(unsigned long addr, unsigned long end)
{
        int write;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct * vma;

        vma = find_vma(mm, addr);
        write = (vma->vm_flags & VM_WRITE) != 0;
        if (addr >= end)
                BUG();
        do {
                if (handle_mm_fault(mm, vma, addr, write) < 0)
                        return -1;
                addr += PAGE_SIZE;
        } while (addr < end);
        return 0;
}
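/*
 * Note the implicit contract: callers must pass a non-empty range that is
 * already covered by a single existing vma, since neither the find_vma()
 * result nor the vma's end is re-checked inside the loop. The typical user is
 * mlock(): once a region has been marked VM_LOCKED, the mlock path faults it
 * in with something along these lines (a sketch of the 2.4-era flow, not the
 * exact source):
 *
 *      vma->vm_flags |= VM_LOCKED;
 *      make_pages_present(start, end);         // fault in every page now
 */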