/*
* linux/mm/mmap.c
*
* Written by obz.
*/
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
/* description of effects of mapping type and prot in current implementation.
* this is due to the limited x86 page protection hardware. The expected
* behavior is in parens:
*
* map_type     prot
*              PROT_NONE      PROT_READ      PROT_WRITE      PROT_EXEC
* MAP_SHARED   r: (no) no     r: (yes) yes   r: (no) yes     r: (no) yes
*              w: (no) no     w: (no) no     w: (yes) yes    w: (no) no
*              x: (no) no     x: (no) yes    x: (no) yes     x: (yes) yes
*
* MAP_PRIVATE  r: (no) no     r: (yes) yes   r: (no) yes     r: (no) yes
*              w: (no) no     w: (no) no     w: (copy) copy  w: (no) no
*              x: (no) no     x: (no) yes    x: (no) yes     x: (yes) yes
*
*/
pgprot_t protection_map[16] = {
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
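/* The table above is encoded in protection_map[]: the index is the low
 * four bits of a vma's vm_flags (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED),
 * which is exactly the protection_map[vma->vm_flags & 0x0f] lookup done in
 * do_mmap_pgoff() below.  The __Pxxx/__Sxxx entries come from the
 * architecture's pgtable headers; on x86, for instance, __P011 (private,
 * read+write) expands to a read-only, copy-on-write protection, matching
 * the "copy" column in the table.
 */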
int sysctl_overcommit_memory;
/* Check that a process has enough memory to allocate a
* new virtual mapping.
*/
int vm_enough_memory(long pages)
{
/* Stupid algorithm to decide if we have enough memory: while
* simple, it hopefully works in most obvious cases.. Easy to
* fool it, but this should catch most mistakes.
*/
/* 23/11/98 NJC: Somewhat less stupid version of algorithm,
* which tries to do "TheRightThing". Instead of using half of
* (buffers+cache), use the minimum values. Allow an extra 2%
* of num_physpages for safety margin.
*/
long free;
/* Sometimes we want to use more memory than we have. */
if (sysctl_overcommit_memory)
return 1;
free = atomic_read(&buffermem_pages);
free += atomic_read(&page_cache_size);
free += nr_free_pages();
free += nr_swap_pages;
return free > pages;
}
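/* Note that "pages" is a page count, not a byte count: callers in this file
 * pass len >> PAGE_SHIFT (see sys_brk() and do_mmap_pgoff() below), so a
 * 1 MB request with 4 KB pages asks for 256 pages.  This version simply sums
 * buffer, page-cache, free and swap pages; the 2% safety margin mentioned in
 * the changelog comment above is not actually applied here.
 */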
/* Remove one vm structure from the inode's i_mapping address space. */
static inline void __remove_shared_vm_struct(struct vm_area_struct *vma)
{
struct file * file = vma->vm_file;
if (file) {
struct inode *inode = file->f_dentry->d_inode;
if (vma->vm_flags & VM_DENYWRITE)
atomic_inc(&inode->i_writecount);
if(vma->vm_next_share)
vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
*vma->vm_pprev_share = vma->vm_next_share;
}
}
static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
{
lock_vma_mappings(vma);
__remove_shared_vm_struct(vma);
unlock_vma_mappings(vma);
}
void lock_vma_mappings(struct vm_area_struct *vma)
{
struct address_space *mapping;
mapping = NULL;
if (vma->vm_file)
mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
if (mapping)
spin_lock(&mapping->i_shared_lock);
}
void unlock_vma_mappings(struct vm_area_struct *vma)
{
struct address_space *mapping;
mapping = NULL;
if (vma->vm_file)
mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
if (mapping)
spin_unlock(&mapping->i_shared_lock);
}
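/* lock_vma_mappings()/unlock_vma_mappings() take the i_shared_lock of the
 * file's address_space, which serializes changes to the vm_next_share /
 * vm_pprev_share chain that __remove_shared_vm_struct() unlinks above.
 * Anonymous vmas have no vm_file, so mapping stays NULL and no lock is taken.
 */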
/*
* sys_brk() for the most part doesn't need the global kernel
* lock, except when an application is doing something nasty
* like trying to un-brk an area that has already been mapped
* to a regular file. In this case, the unmapping will need
* to invoke file system routines that need the global lock.
*/
asmlinkage unsigned long sys_brk(unsigned long brk)
{
unsigned long rlim, retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
down(&mm->mmap_sem);
if (brk < mm->end_code)
goto out;
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
if (oldbrk == newbrk)
goto set_brk;
/* Always allow shrinking brk. */
if (brk <= mm->brk) {
if (!do_munmap(mm, newbrk, oldbrk-newbrk))
goto set_brk;
goto out;
}
/* Check against rlimit.. */
rlim = current->rlim[RLIMIT_DATA].rlim_cur;
if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
goto out;
/* Check against existing mmap mappings. */
if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
goto out;
/* Check if we have enough memory.. */
if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
goto out;
/* Ok, looks good - let it rip. */
if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
goto out;
set_brk:
mm->brk = brk;
out:
retval = mm->brk;
up(&mm->mmap_sem);
return retval;
}
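/* Illustrative user-level view (a sketch, not kernel code): with the heap
 * currently ending at mm->brk,
 *
 *     cur = brk(0);              // 0 < mm->end_code, so this just reports mm->brk
 *     brk(cur + 64 * 1024);      // grow: validated, then mapped via do_brk()
 *     brk(cur);                  // shrink back: released via do_munmap()
 *
 * Any failure (rlimit, overlap with an existing mapping, not enough memory)
 * leaves mm->brk unchanged and, like success, returns the current break.
 */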
/* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
* internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
* into "VM_xxx".
*/
static inline unsigned long vm_flags(unsigned long prot, unsigned long flags)
{
#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
unsigned long prot_bits, flag_bits;
prot_bits =
_trans(prot, PROT_READ, VM_READ) |
_trans(prot, PROT_WRITE, VM_WRITE) |
_trans(prot, PROT_EXEC, VM_EXEC);
flag_bits =
_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
_trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
return prot_bits | flag_bits;
#undef _trans
}
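/* On i386 the PROT_* values equal the corresponding VM_* values
 * (e.g. PROT_READ == VM_READ == 0x1), and likewise for these MAP_* flags,
 * so _trans() collapses to a plain mask; the conditional only matters on
 * architectures where the userspace and internal bit values differ.
 */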
unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long pgoff)
{
struct mm_struct * mm = current->mm;
struct vm_area_struct * vma;
int correct_wcount = 0;
int error;
if (file && (!file->f_op || !file->f_op->mmap))
return -ENODEV;
if ((len = PAGE_ALIGN(len)) == 0)
return addr;
if (len > TASK_SIZE || addr > TASK_SIZE-len)
return -EINVAL;
/* offset overflow? */
if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
return -EINVAL;
/* Too many mappings? */
if (mm->map_count > MAX_MAP_COUNT)
return -ENOMEM;
/* mlock MCL_FUTURE? */
if (mm->def_flags & VM_LOCKED) {
unsigned long locked = mm->locked_vm << PAGE_SHIFT;
locked += len;
if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
return -EAGAIN;
}
/* Do simple checking here so the lower-level routines won't have
* to. We assume access permissions have been handled by the open
* of the memory object, so we don't do any here.
*/
if (file != NULL) {
switch (flags & MAP_TYPE) {
case MAP_SHARED:
if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE))
return -EACCES;
/* Make sure we don't allow writing to an append-only file.. */
if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & FMODE_WRITE))
return -EACCES;
/* make sure there are no mandatory locks on the file. */
if (locks_verify_locked(file->f_dentry->d_inode))
return -EAGAIN;
/* fall through */
case MAP_PRIVATE:
if (!(file->f_mode & FMODE_READ))
return -EACCES;
break;
default:
return -EINVAL;
}
}
/* Obtain the address to map to. We verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
if (flags & MAP_FIXED) {
if (addr & ~PAGE_MASK)
return -EINVAL;
} else {
addr = get_unmapped_area(addr, len);
if (!addr)
return -ENOMEM;
}
/* Determine the object being mapped and call the appropriate
* specific mapper. The address has already been validated, but the
* range has not yet been unmapped; the old mappings are cleared below.
*/
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!vma)
return -ENOMEM;
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags(prot,flags) | mm->def_flags;
if (file) {
VM_ClearReadHint(vma);
vma->vm_raend = 0;
if (file->f_mode & FMODE_READ)
vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (flags & MAP_SHARED) {
vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
/* This looks strange, but when we don't have the file open
* for writing, we can demote the shared mapping to a simpler
* private mapping. That also takes care of a security hole
* with ptrace() writing to a shared mapping without write
* permissions.
*
* We leave the VM_MAYSHARE bit on, just to get correct output
* from /proc/xxx/maps..
*/
if (!(file->f_mode & FMODE_WRITE))
vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
}
} else {
vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (flags & MAP_SHARED)
vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
}
vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
vma->vm_ops = NULL;
vma->vm_pgoff = pgoff;
vma->vm_file = NULL;
vma->vm_private_data = NULL;
/* Clear old maps */
error = -ENOMEM;
if (do_munmap(mm, addr, len))
goto free_vma;
/* Check against address space limit. */
if ((mm->total_vm << PAGE_SHIFT) + len
> current->rlim[RLIMIT_AS].rlim_cur)
goto free_vma;
/* Private writable mapping? Check memory availability.. */
if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
!(flags & MAP_NORESERVE) &&
!vm_enough_memory(len >> PAGE_SHIFT))
goto free_vma;
if (file) {
if (vma->vm_flags & VM_DENYWRITE) {
error = deny_write_access(file);
if (error)
goto free_vma;
correct_wcount = 1;
}
vma->vm_file = file;
get_file(file);
error = file->f_op->mmap(file, vma);
if (error)
goto unmap_and_free_vma;
} else if (flags & MAP_SHARED) {
error = shmem_zero_setup(vma);
if (error)
goto free_vma;
}
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
* f_op->mmap method. -DaveM
*/
flags = vma->vm_flags;
addr = vma->vm_start;
insert_vm_struct(mm, vma);
if (correct_wcount)
atomic_inc(&file->f_dentry->d_inode->i_writecount);
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED) {
mm->locked_vm += len >> PAGE_SHIFT;
make_pages_present(addr, addr + len);
}
return addr;
unmap_and_free_vma:
if (correct_wcount)
atomic_inc(&file->f_dentry->d_inode->i_writecount);
vma->vm_file = NULL;
fput(file);
/* Undo any partial mapping done by a device driver. */
flush_cache_range(mm, vma->vm_start, vma->vm_end);
zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
flush_tlb_range(mm, vma->vm_start, vma->vm_end);
free_vma:
kmem_cache_free(vm_area_cachep, vma);
return error;
}
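/* Callers usually reach do_mmap_pgoff() through the do_mmap() wrapper in
 * <linux/mm.h>, which (roughly) checks that the byte offset is page aligned
 * and does not overflow, then calls
 *
 *     do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
 *
 * On success the mapped address is returned; on failure a negative errno
 * value is returned in the same unsigned long.
 */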
/* Get an address range which is currently unmapped.
* For mmap() without MAP_FIXED and shmat() with addr=0.
* Return value 0 means ENOMEM.
*/
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
{
struct vm_area_struct * vmm;
if (len > TASK_SIZE)
return 0;
if (!addr)
addr = TASK_UNMAPPED_BASE;
addr = PAGE_ALIGN(addr);
for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return 0;
if (!vmm || addr + len <= vmm->vm_start)
return addr;
addr = vmm->vm_end;
}
}
#endif
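/* The default implementation above is a simple first-fit search: starting at
 * the hint (or TASK_UNMAPPED_BASE when no hint is given), it walks the vma
 * list in address order and returns the first hole of at least len bytes
 * below TASK_SIZE.  A return of 0 means no such hole exists, which
 * do_mmap_pgoff() turns into -ENOMEM.
 */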
#define vm_avl_empty (struct vm_area_struct *) NULL
#include "mmap_avl.c"
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
{
struct vm_area_struct *vma = NULL;
if (mm) {
/* Check the cache first. */
/* (Cache hit rate is typically around 35%.) */
vma = mm->mmap_cache;
if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
if (!mm->mmap_avl) {
/* Go through the linear list. */
vma = mm->mmap;
while (vma && vma->vm_end <= addr)
vma = vma->vm_next;
} else {
/* Then go through the AVL tree quickly. */
struct vm_area_struct * tree = mm->mmap_avl;
vma = NULL;
for (;;) {
if (tree == vm_avl_empty)
break;
if (tree->vm_end > addr) {
vma = tree;
if (tree->vm_start <= addr)
break;
tree = tree->vm_avl_left;
} else
tree = tree->vm_avl_right;
}
}
if (vma)
mm->mmap_cache = vma;
}
}
return vma;
}
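/* mmap_cache is a one-entry cache of the last vma returned; per the comment
 * above it hits roughly a third of the time.  Both the list walk and the AVL
 * descent keep the lowest vma whose vm_end is above addr, so the returned vma
 * may start above addr (i.e. addr need not lie inside it).
 */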
/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev)
{
if (mm) {
if (!mm->mmap_avl) {
/* Go through the linear list. */
struct vm_area_struct * prev = NULL;
struct vm_area_struct * vma = mm->mmap;
while (vma && vma->vm_end <= addr) {
prev = vma;
vma = vma->vm_next;
}
*pprev = prev;
return vma;
} else {
/* Go through the AVL tree quickly. */
struct vm_area_struct * vma = NULL;
struct vm_area_struct * last_turn_right = NULL;
struct vm_area_struct * prev = NULL;
struct vm_area_struct * tree = mm->mmap_avl;
for (;;) {
if (tree == vm_avl_empty)
break;
if (tree->vm_end > addr) {
vma = tree;
prev = last_turn_right;
if (tree->vm_start <= addr)
break;
tree = tree->vm_avl_left;
} else {
last_turn_right = tree;
tree = tree->vm_avl_right;
}
}
if (vma) {
if (vma->vm_avl_left != vm_avl_empty) {
prev = vma->vm_avl_left;
while (prev->vm_avl_right != vm_avl_empty)
prev = prev->vm_avl_right;
}
if ((prev ? prev->vm_next : mm->mmap) != vma)
printk("find_vma_prev: tree inconsistent with list\n");
*pprev = prev;
return vma;
}
}
}
*pprev = NULL;
return NULL;
}