
📄 mlock.c

📁 The latest stable Linux memory-management module source code
💻 C
📖 Page 1 of 2
 *  For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
{
        vma->vm_flags &= ~VM_LOCKED;
        __mlock_vma_pages_range(vma, start, end, 0);
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        pgoff_t pgoff;
        int nr_pages;
        int ret = 0;
        int lock = newflags & VM_LOCKED;

        if (newflags == vma->vm_flags ||
                        (vma->vm_flags & (VM_IO | VM_PFNMAP)))
                goto out;       /* don't set VM_LOCKED,  don't count */

        if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current)) {
                if (lock)
                        make_pages_present(start, end);
                goto out;       /* don't set VM_LOCKED,  don't count */
        }

        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma));
        if (*prev) {
                vma = *prev;
                goto success;
        }

        if (start != vma->vm_start) {
                ret = split_vma(mm, vma, start, 1);
                if (ret)
                        goto out;
        }

        if (end != vma->vm_end) {
                ret = split_vma(mm, vma, end, 0);
                if (ret)
                        goto out;
        }

success:
        /*
         * Keep track of amount of locked VM.
         */
        nr_pages = (end - start) >> PAGE_SHIFT;
        if (!lock)
                nr_pages = -nr_pages;
        mm->locked_vm += nr_pages;

        /*
         * vm_flags is protected by the mmap_sem held in write mode.
         * It's okay if try_to_unmap_one unmaps a page just after we
         * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
         */
        vma->vm_flags = newflags;

        if (lock) {
                ret = __mlock_vma_pages_range(vma, start, end, 1);

                if (ret > 0) {
                        mm->locked_vm -= ret;
                        ret = 0;
                } else
                        ret = __mlock_posix_error_return(ret); /* translate if needed */
        } else {
                __mlock_vma_pages_range(vma, start, end, 0);
        }

out:
        *prev = vma;
        return ret;
}

static int do_mlock(unsigned long start, size_t len, int on)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct *vma, *prev;
        int error;

        len = PAGE_ALIGN(len);
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;
        vma = find_vma_prev(current->mm, start, &prev);
        if (!vma || vma->vm_start > start)
                return -ENOMEM;

        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned int newflags;

                /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

                newflags = vma->vm_flags | VM_LOCKED;
                if (!on)
                        newflags &= ~VM_LOCKED;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        break;
                nstart = tmp;
                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        break;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        break;
                }
        }
        return error;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
        unsigned long locked;
        unsigned long lock_limit;
        int error = -ENOMEM;

        if (!can_do_mlock())
                return -EPERM;

        lru_add_drain_all();    /* flush pagevec */

        down_write(&current->mm->mmap_sem);
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;

        locked = len >> PAGE_SHIFT;
        locked += current->mm->locked_vm;

        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        /* check against resource limits */
        if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
                error = do_mlock(start, len, 1);
        up_write(&current->mm->mmap_sem);
        return error;
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
        int ret;

        down_write(&current->mm->mmap_sem);
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;
        ret = do_mlock(start, len, 0);
        up_write(&current->mm->mmap_sem);
        return ret;
}

static int do_mlockall(int flags)
{
        struct vm_area_struct *vma, *prev = NULL;
        unsigned int def_flags = 0;

        if (flags & MCL_FUTURE)
                def_flags = VM_LOCKED;
        current->mm->def_flags = def_flags;
        if (flags == MCL_FUTURE)
                goto out;

        for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
                unsigned int newflags;

                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
                        newflags &= ~VM_LOCKED;

                /* Ignore errors */
                mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
        }
out:
        return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
        unsigned long lock_limit;
        int ret = -EINVAL;

        if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
                goto out;

        ret = -EPERM;
        if (!can_do_mlock())
                goto out;

        lru_add_drain_all();    /* flush pagevec */

        down_write(&current->mm->mmap_sem);

        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        ret = -ENOMEM;
        if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
            capable(CAP_IPC_LOCK))
                ret = do_mlockall(flags);
        up_write(&current->mm->mmap_sem);
out:
        return ret;
}

SYSCALL_DEFINE0(munlockall)
{
        int ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mlockall(0);
        up_write(&current->mm->mmap_sem);
        return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
        unsigned long lock_limit, locked;
        int allowed = 0;

        locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        if (lock_limit == RLIM_INFINITY)
                allowed = 1;
        lock_limit >>= PAGE_SHIFT;
        spin_lock(&shmlock_user_lock);
        if (!allowed &&
            locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
                goto out;
        get_uid(user);
        user->locked_shm += locked;
        allowed = 1;
out:
        spin_unlock(&shmlock_user_lock);
        return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
        spin_lock(&shmlock_user_lock);
        user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        spin_unlock(&shmlock_user_lock);
        free_uid(user);
}

void *alloc_locked_buffer(size_t size)
{
        unsigned long rlim, vm, pgsz;
        void *buffer = NULL;

        pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

        down_write(&current->mm->mmap_sem);

        rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
        vm   = current->mm->total_vm + pgsz;
        if (rlim < vm)
                goto out;

        rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
        vm   = current->mm->locked_vm + pgsz;
        if (rlim < vm)
                goto out;

        buffer = kzalloc(size, GFP_KERNEL);
        if (!buffer)
                goto out;

        current->mm->total_vm  += pgsz;
        current->mm->locked_vm += pgsz;

 out:
        up_write(&current->mm->mmap_sem);
        return buffer;
}

void release_locked_buffer(void *buffer, size_t size)
{
        unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

        down_write(&current->mm->mmap_sem);

        current->mm->total_vm  -= pgsz;
        current->mm->locked_vm -= pgsz;

        up_write(&current->mm->mmap_sem);
}

void free_locked_buffer(void *buffer, size_t size)
{
        release_locked_buffer(buffer, size);
        kfree(buffer);
}
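
For context, here is a minimal userspace sketch (not part of mlock.c; the buffer name and sizes are illustrative) showing how the SYSCALL_DEFINE2(mlock, ...) and SYSCALL_DEFINE2(munlock, ...) entry points above are reached. mlock(2) page-aligns the range and checks it against RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK, so failures surface as ENOMEM (over the limit) or EPERM (can_do_mlock() refused):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        struct rlimit rl;
        void *buf;

        if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0 && rl.rlim_cur != RLIM_INFINITY)
                printf("RLIMIT_MEMLOCK soft limit: %llu bytes\n",
                       (unsigned long long)rl.rlim_cur);

        /* One page-aligned page, so the kernel locks exactly one page. */
        if (posix_memalign(&buf, (size_t)page, (size_t)page) != 0)
                return 1;
        memset(buf, 0, (size_t)page);

        if (mlock(buf, (size_t)page) != 0) {
                /* ENOMEM: over RLIMIT_MEMLOCK; EPERM: can_do_mlock() refused */
                perror("mlock");
                free(buf);
                return 1;
        }

        /* ... buf stays resident (VM_LOCKED) until munlock()/exit() ... */

        munlock(buf, (size_t)page);     /* do_mlock(start, len, 0) clears VM_LOCKED */
        free(buf);
        return 0;
}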
