mmap.c
/*
 * do_mmap_pgoff -- the core of mmap(): validate the request, pick an
 * address, clear any old mappings in the range, and install a new
 * vm_area_struct.  Called with mm->mmap_sem held for writing.
 */
unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long pgoff)
{
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * vma, * prev;
        unsigned int vm_flags;
        int correct_wcount = 0;
        int error;
        rb_node_t ** rb_link, * rb_parent;

        if (file && (!file->f_op || !file->f_op->mmap))
                return -ENODEV;

        if ((len = PAGE_ALIGN(len)) == 0)
                return addr;

        if (len > TASK_SIZE)
                return -EINVAL;

        /* offset overflow? */
        if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
                return -EINVAL;

        /* Too many mappings? */
        if (mm->map_count > MAX_MAP_COUNT)
                return -ENOMEM;

        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
        addr = get_unmapped_area(file, addr, len, pgoff, flags);
        if (addr & ~PAGE_MASK)
                return addr;

        /* Do simple checking here so the lower-level routines won't have
         * to. we assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */
        vm_flags = calc_vm_flags(prot,flags) | mm->def_flags |
                   VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
                unsigned long locked = mm->locked_vm << PAGE_SHIFT;
                locked += len;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        return -EAGAIN;
        }

        if (file) {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        /* Make sure we don't allow writing to an append-only file.. */
                        if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        /* make sure there are no mandatory locks on the file. */
                        if (locks_verify_locked(file->f_dentry->d_inode))
                                return -EAGAIN;

                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

                        /* fall through */
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
                        break;

                default:
                        return -EINVAL;
                }
        } else {
                vm_flags |= VM_SHARED | VM_MAYSHARE;
                switch (flags & MAP_TYPE) {
                default:
                        return -EINVAL;
                case MAP_PRIVATE:
                        vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
                        /* fall through */
                case MAP_SHARED:
                        break;
                }
        }

        /* Clear old maps */
        error = -ENOMEM;
munmap_back:
        vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
        if (vma && vma->vm_start < addr + len) {
                if (do_munmap(mm, addr, len))
                        return -ENOMEM;
                goto munmap_back;
        }

        /* Check against address space limit. */
        if ((mm->total_vm << PAGE_SHIFT) + len
            > current->rlim[RLIMIT_AS].rlim_cur)
                return -ENOMEM;

        /* Private writable mapping? Check memory availability.. */
        if ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
            !(flags & MAP_NORESERVE) &&
            !vm_enough_memory(len >> PAGE_SHIFT))
                return -ENOMEM;

        /* Can we just expand an old anonymous mapping? */
        if (!file && !(vm_flags & VM_SHARED) && rb_parent)
                if (vma_merge(mm, prev, rb_parent, addr, addr + len, vm_flags))
                        goto out;

        /* Determine the object being mapped and call the appropriate
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!vma)
                return -ENOMEM;

        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
        vma->vm_page_prot = protection_map[vm_flags & 0x0f];
        vma->vm_ops = NULL;
        vma->vm_pgoff = pgoff;
        vma->vm_file = NULL;
        vma->vm_private_data = NULL;
        vma->vm_raend = 0;

        if (file) {
                error = -EINVAL;
                if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                        goto free_vma;
                if (vm_flags & VM_DENYWRITE) {
                        error = deny_write_access(file);
                        if (error)
                                goto free_vma;
                        correct_wcount = 1;
                }
                vma->vm_file = file;
                get_file(file);
                error = file->f_op->mmap(file, vma);
                if (error)
                        goto unmap_and_free_vma;
        } else if (flags & MAP_SHARED) {
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
        }

        /* Can addr have changed??
         *
         * Answer: Yes, several device drivers can do it in their
         *         f_op->mmap method. -DaveM
         */
        addr = vma->vm_start;

        vma_link(mm, vma, prev, rb_link, rb_parent);
        if (correct_wcount)
                atomic_inc(&file->f_dentry->d_inode->i_writecount);

out:
        mm->total_vm += len >> PAGE_SHIFT;
        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += len >> PAGE_SHIFT;
                make_pages_present(addr, addr + len);
        }
        return addr;

unmap_and_free_vma:
        if (correct_wcount)
                atomic_inc(&file->f_dentry->d_inode->i_writecount);
        vma->vm_file = NULL;
        fput(file);

        /* Undo any partial mapping done by a device driver. */
        zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
free_vma:
        kmem_cache_free(vm_area_cachep, vma);
        return error;
}
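/*
 * Illustration (not part of mmap.c): a minimal sketch of how an
 * architecture's mmap entry point typically drives do_mmap_pgoff() in 2.4
 * -- resolve the fd, take mm->mmap_sem for writing, then call in.  It
 * follows the shape of the i386 do_mmap2() helper; treat it as a sketch
 * under those assumptions, not the exact code of any particular port.
 */
static inline long example_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        int error = -EBADF;
        struct file * file = NULL;

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);                /* pin the struct file for the mapping */
                if (!file)
                        goto out;
        }

        down_write(&current->mm->mmap_sem);     /* do_mmap_pgoff expects mmap_sem held */
        error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);                     /* do_mmap_pgoff took its own reference */
out:
        return error;
}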
/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *      if (ret & ~PAGE_MASK)
 *              error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA
static inline unsigned long arch_get_unmapped_area(struct file *filp,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct vm_area_struct *vma;

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(current->mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);

        for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (!vma || addr + len <= vma->vm_start)
                        return addr;
                addr = vma->vm_end;
        }
}
#else
extern unsigned long arch_get_unmapped_area(struct file *, unsigned long,
        unsigned long, unsigned long, unsigned long);
#endif

unsigned long get_unmapped_area(struct file *file, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        if (flags & MAP_FIXED) {
                if (addr > TASK_SIZE - len)
                        return -ENOMEM;
                if (addr & ~PAGE_MASK)
                        return -EINVAL;
                return addr;
        }

        if (file && file->f_op && file->f_op->get_unmapped_area)
                return file->f_op->get_unmapped_area(file, addr, len, pgoff, flags);

        return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}
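/*
 * Illustration (not part of mmap.c): the "ugly calling convention" above
 * seen from the caller's side.  Every valid mapping address is page
 * aligned, so an error value such as -ENOMEM can be recognized purely by
 * its low bits.  The helper name and out-parameter are invented for the
 * example.
 */
static long example_reserve(struct file *file, unsigned long hint,
        unsigned long len, unsigned long pgoff, unsigned long flags,
        unsigned long *addrp)
{
        unsigned long addr = get_unmapped_area(file, hint, len, pgoff, flags);

        if (addr & ~PAGE_MASK)          /* low bits set => -errno, not an address */
                return (long) addr;     /* hand back the error value unchanged */

        *addrp = addr;                  /* page-aligned candidate address */
        return 0;
}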
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
{
        struct vm_area_struct *vma = NULL;

        if (mm) {
                /* Check the cache first. */
                /* (Cache hit rate is typically around 35%.) */
                vma = mm->mmap_cache;
                if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
                        rb_node_t * rb_node;

                        rb_node = mm->mm_rb.rb_node;
                        vma = NULL;

                        while (rb_node) {
                                struct vm_area_struct * vma_tmp;

                                vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);

                                if (vma_tmp->vm_end > addr) {
                                        vma = vma_tmp;
                                        if (vma_tmp->vm_start <= addr)
                                                break;
                                        rb_node = rb_node->rb_left;
                                } else
                                        rb_node = rb_node->rb_right;
                        }
                        if (vma)
                                mm->mmap_cache = vma;
                }
        }
        return vma;
}

/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
        struct vm_area_struct **pprev)
{
        if (mm) {
                /* Go through the RB tree quickly. */
                struct vm_area_struct * vma;
                rb_node_t * rb_node, * rb_last_right, * rb_prev;

                rb_node = mm->mm_rb.rb_node;
                rb_last_right = rb_prev = NULL;
                vma = NULL;

                while (rb_node) {
                        struct vm_area_struct * vma_tmp;

                        vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);

                        if (vma_tmp->vm_end > addr) {
                                vma = vma_tmp;
                                rb_prev = rb_last_right;
                                if (vma_tmp->vm_start <= addr)
                                        break;
                                rb_node = rb_node->rb_left;
                        } else {
                                rb_last_right = rb_node;
                                rb_node = rb_node->rb_right;
                        }
                }
                if (vma) {
                        if (vma->vm_rb.rb_left) {
                                rb_prev = vma->vm_rb.rb_left;
                                while (rb_prev->rb_right)
                                        rb_prev = rb_prev->rb_right;
                        }
                        *pprev = NULL;
                        if (rb_prev)
                                *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
                        if ((rb_prev ? (*pprev)->vm_next : mm->mmap) != vma)
                                BUG();
                        return vma;
                }
        }
        *pprev = NULL;
        return NULL;
}

struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr)
{
        struct vm_area_struct * vma;
        unsigned long start;

        addr &= PAGE_MASK;
        vma = find_vma(mm,addr);
        if (!vma)
                return NULL;
        if (vma->vm_start <= addr)
                return vma;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
        if (vma->vm_flags & VM_LOCKED) {
                make_pages_present(addr, start);
        }
        return vma;
}
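/*
 * Illustration (not part of mmap.c): find_vma() returns the first vma with
 * addr < vm_end, which may lie entirely *above* addr, so a caller that
 * needs "addr is actually mapped" must also test vm_start -- the same
 * pattern the page-fault handlers use.  The caller is assumed to hold
 * mm->mmap_sem; the helper name is invented for the example.
 */
static int example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        if (!vma)                       /* nothing at or above addr */
                return 0;
        if (vma->vm_start > addr)       /* nearest vma starts above addr: a hole */
                return 0;
        return 1;                       /* vm_start <= addr < vm_end */
}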
/* Normal function to fix up a mapping
 * This function is the default for when an area has no specific
 * function.  This may be used as part of a more specific routine.
 * This function works out what part of an area is affected and
 * adjusts the mapping information.  Since the actual page
 * manipulation is done in do_mmap(), none need be done here,
 * though it would probably be more appropriate.
 *
 * By the time this function is called, the area struct has been
 * removed from the process mapping list, so it needs to be
 * reinserted if necessary.
 *
 * The 4 main cases are:
 *    Unmapping the whole area
 *    Unmapping from the start of the segment to a point in it
 *    Unmapping from an intermediate point to the end
 *    Unmapping between two intermediate points, making a hole.
 *
 * Case 4 involves the creation of 2 new areas, for each side of
 * the hole.  If possible, we reuse the existing area rather than
 * allocate a new one, and the return indicates whether the old
 * area was reused.
 */
static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
        struct vm_area_struct *area, unsigned long addr, size_t len,
        struct vm_area_struct *extra)
{
        struct vm_area_struct *mpnt;
        unsigned long end = addr + len;

        area->vm_mm->total_vm -= len >> PAGE_SHIFT;
        if (area->vm_flags & VM_LOCKED)
                area->vm_mm->locked_vm -= len >> PAGE_SHIFT;

        /* Unmapping the whole area. */
        if (addr == area->vm_start && end == area->vm_end) {
                if (area->vm_ops && area->vm_ops->close)
                        area->vm_ops->close(area);
                if (area->vm_file)
                        fput(area->vm_file);
                kmem_cache_free(vm_area_cachep, area);
                return extra;
        }

        /* Work out to one of the ends. */
        if (end == area->vm_end) {
                /*
                 * here area isn't visible to the semaphore-less readers
                 * so we don't need to update it under the spinlock.
                 */
                area->vm_end = addr;
                lock_vma_mappings(area);
                spin_lock(&mm->page_table_lock);
        } else if (addr == area->vm_start) {
                area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
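/*
 * Illustration (not part of mmap.c): the four cases described in the
 * unmap_fixup() comment above, written out as a pure classification helper
 * so the boundary tests are explicit.  The enum and function are invented
 * for the example only.
 */
enum example_unmap_case {
        EXAMPLE_UNMAP_WHOLE,    /* [addr, end) covers the entire vma       */
        EXAMPLE_UNMAP_HEAD,     /* starts at vm_start, ends inside the vma */
        EXAMPLE_UNMAP_TAIL,     /* starts inside the vma, runs to vm_end   */
        EXAMPLE_UNMAP_HOLE,     /* strictly inside: splits the vma in two  */
};

static enum example_unmap_case example_classify_unmap(unsigned long vm_start,
        unsigned long vm_end, unsigned long addr, unsigned long len)
{
        unsigned long end = addr + len;

        if (addr == vm_start && end == vm_end)
                return EXAMPLE_UNMAP_WHOLE;
        if (addr == vm_start)
                return EXAMPLE_UNMAP_HEAD;
        if (end == vm_end)
                return EXAMPLE_UNMAP_TAIL;
        return EXAMPLE_UNMAP_HOLE;      /* needs the second vm_area_struct ("extra") */
}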