nommu.c
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		if (flags & MAP_SHARED)
			vm_flags |= VM_MAYSHARE | VM_SHARED;
		else if ((((vm_flags & capabilities) ^ vm_flags) &
			  BDI_CAP_VMFLAGS) == 0)
			vm_flags |= VM_MAYSHARE;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process */
	if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return ret;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting an ENOSYS error indicates that direct mmap isn't
	 * possible (as opposed to tried but failed) so we'll fall
	 * through to making a private copy of the data and mapping
	 * that if we can */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len)
{
	struct page *pages;
	unsigned long total, point, n, rlen;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share */
	if (vma->vm_file) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return ret;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	rlen = PAGE_ALIGN(len);

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page */
	order = get_order(rlen);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
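	/* illustrative numbers (not from the original source): for a
	 * five-page request, get_order() rounds up to order 3, so eight
	 * pages are allocated and point below becomes 5; the trim loop
	 * then frees an order-1 block (pages 6-7) and an order-0 block
	 * (page 5), leaving exactly the five pages that were asked for */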
	atomic_add(total, &mmap_pages_allocated);

	point = rlen >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end = region->vm_start + rlen;
	region->vm_top = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < rlen)
			memset(base + ret, 0, rlen - ret);

	} else {
		/* if it's an anonymous mapping, then just clear it */
		memset(base, 0, rlen);
	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_end);
	region->vm_start = vma->vm_start = 0;
	region->vm_end = vma->vm_end = 0;
	region->vm_top = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas();
	return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	atomic_set(&region->vm_usage, 1);
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_node);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = file;
		get_file(file);
		vma->vm_file = file;
		get_file(file);
		if (vm_flags & VM_EXECUTABLE) {
			added_exe_file_vma(current->mm);
			vma->vm_mm = current->mm;
		}
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
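		/* illustrative numbers (not from the original source): if an
		 * existing shareable region covers file pages [2..6), a
		 * request for pages [3..5) passes the subset test in the loop
		 * below (3 >= 2 && 5 <= 6) and is placed at
		 * pregion->vm_start + (1 << PAGE_SHIFT); a request for pages
		 * [1..4) overlaps inexactly and is only tolerated on
		 * BDI_CAP_MAP_DIRECT backings, where the driver or filesystem
		 * arbitrates the sharing itself */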
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (pregion->vm_file->f_path.dentry->d_inode !=
			    file->f_path.dentry->d_inode)
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			atomic_inc(&pregion->vm_usage);
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					atomic_dec(&pregion->vm_usage);
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping */
		if (file && file->f_op->get_unmapped_area) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR((void *) addr)) {
				ret = addr;
				if (ret != (unsigned long) -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = (unsigned long) -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len);
	if (ret < 0)
		goto error_put_region;

	add_nommu_region(region);

	/* okay... we have a mapping; now we have to register it */
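	/* note (editorial): both paths converge here - a freshly created
	 * mapping falls through and is charged to total_vm below, while
	 * reuse of an existing region jumped straight to the share: label */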
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	up_write(&nommu_region_sem);

	if (prot & PROT_EXEC)
		flush_icache_range(result, result + len);

	kleave(" = %lx", result);
	return result;

error_put_region:
	__put_nommu_region(region);
	if (vma) {
		if (vma->vm_file) {
			fput(vma->vm_file);
			if (vma->vm_flags & VM_EXECUTABLE)
				removed_exe_file_vma(vma->vm_mm);
		}
		kmem_cache_free(vm_area_cachep, vma);
	}
	kleave(" = %d [pr]", ret);
	return ret;

error_just_free:
	up_write(&nommu_region_sem);
error:
	fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	fput(vma->vm_file);
	if (vma->vm_flags & VM_EXECUTABLE)
		removed_exe_file_vma(vma->vm_mm);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
EXPORT_SYMBOL(do_mmap_pgoff);

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions that have a single
	 * owner */
	if (vma->vm_file ||
	    atomic_read(&vma->vm_region->vm_usage) != 1)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
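/*
 * Sketch (hypothetical, not part of nommu.c): the get_unmapped_area hook
 * polled in do_mmap_pgoff() above is how a quasi-memory character device
 * satisfies MAP_SHARED directly, by reporting where its storage already
 * sits. Returning -ENOSYS instead sends the caller down the
 * BDI_CAP_MAP_COPY fallback. All mydev_* names are invented for
 * illustration.
 */
static unsigned long mydev_base, mydev_size;	/* hypothetical device window */

static unsigned long mydev_get_unmapped_area(struct file *file,
					     unsigned long addr,
					     unsigned long len,
					     unsigned long pgoff,
					     unsigned long flags)
{
	unsigned long offset = pgoff << PAGE_SHIFT;

	/* refuse mappings that run off the end of the device's memory */
	if (offset > mydev_size || len > mydev_size - offset)
		return (unsigned long) -EINVAL;

	/* share the device's own storage by handing back its address */
	return mydev_base + offset;
}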