⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 sys_ia32.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 5 页
字号:
/*
 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
 *
 * Copyright (C) 2000		VA Linux Co
 * Copyright (C) 2000		Don Dugger <n0ano@valinux.com>
 * Copyright (C) 1999		Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 1997,1998	Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997		David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2004		Gordon Jin <gordon.jin@intel.com>
 *
 * These routines maintain argument size conversion between 32bit and 64bit
 * environment.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
#include <linux/eventpoll.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/stat.h>
#include <linux/ipc.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/mman.h>

#include <asm/intrinsics.h>
#include <asm/semaphore.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#include "ia32priv.h"

#include <net/scm.h>
#include <net/sock.h>

#define DEBUG	0

#if DEBUG
# define DBG(fmt...)	printk(KERN_DEBUG fmt)
#else
# define DBG(fmt...)
#endif

#define ROUND_UP(x,a)	((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
#define OFFSET4K(a)		((a) & 0xfff)
#define PAGE_START(addr)	((addr) & PAGE_MASK)
#define MINSIGSTKSZ_IA32	2048

/* Clamp 32-bit uids/gids that don't fit the 16-bit fields of struct compat_stat. */
#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))

/*
 * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore
 * while doing so.
 */
/* XXX make per-mm: */
static DECLARE_MUTEX(ia32_mmap_sem);

/*
 * ia32 execve: fetch the filename, temporarily restore 64-bit map-base,
 * task-size and io/tss kernel registers (the exec'd image may be a 64-bit
 * process), then hand off to compat_do_execve().  On failure, roll every
 * per-thread value back so the still-running ia32 task is unaffected.
 */
asmlinkage long
sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
	      struct pt_regs *regs)
{
	long error;
	char *filename;
	unsigned long old_map_base, old_task_size, tssd;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;

	/* save the current (ia32) values so they can be restored on failure */
	old_map_base  = current->thread.map_base;
	old_task_size = current->thread.task_size;
	tssd = ia64_get_kr(IA64_KR_TSSD);

	/* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
	current->thread.map_base  = DEFAULT_MAP_BASE;
	current->thread.task_size = DEFAULT_TASK_SIZE;
	ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
	ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);

	error = compat_do_execve(filename, argv, envp, regs);
	putname(filename);

	if (error < 0) {
		/* oops, execve failed, switch back to old values... */
		ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
		ia64_set_kr(IA64_KR_TSSD, tssd);
		current->thread.map_base  = old_map_base;
		current->thread.task_size = old_task_size;
	}
	return error;
}

/*
 * Copy a 64-bit kstat into a 32-bit 'struct compat_stat' user buffer.
 *
 * Returns -EOVERFLOW when the file size exceeds the non-LFS limit or the
 * device numbers don't fit the old 16-bit encoding, -EFAULT on a bad user
 * buffer, 0 on success.  uid/gid are clamped to 16 bits via high2lowuid/gid.
 */
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	int err;

	if ((u64) stat->size > MAX_NON_LFS ||
	    !old_valid_dev(stat->dev) ||
	    !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	/* zero the whole buffer first so padding/unwritten fields don't leak */
	if (clear_user(ubuf, sizeof(*ubuf)))
		return -EFAULT;

	err  = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
	err |= __put_user(stat->ino, &ubuf->st_ino);
	err |= __put_user(stat->mode, &ubuf->st_mode);
	err |= __put_user(stat->nlink, &ubuf->st_nlink);
	err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
	err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
	err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
	err |= __put_user(stat->size, &ubuf->st_size);
	err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
	err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
	err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
	err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
	err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
	err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
	err |= __put_user(stat->blksize, &ubuf->st_blksize);
	err |= __put_user(stat->blocks, &ubuf->st_blocks);
	return err;
}

#if PAGE_SHIFT > IA32_PAGE_SHIFT

/*
 * Translate the VM_* flags of the vma covering @addr into PROT_* bits.
 * Returns 0 when @vma is NULL or does not cover @addr.
 */
static int
get_page_prot (struct vm_area_struct *vma, unsigned long addr)
{
	int prot = 0;

	if (!vma || vma->vm_start > addr)
		return 0;

	if (vma->vm_flags & VM_READ)
		prot |= PROT_READ;
	if (vma->vm_flags & VM_WRITE)
		prot |= PROT_WRITE;
	if (vma->vm_flags & VM_EXEC)
		prot |= PROT_EXEC;
	return prot;
}

/*
 * Map a subpage by creating an anonymous page that contains the union of the old page and
 * the subpage.
*/
static unsigned long
mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
	      loff_t off)
{
	/* NOTE(review): presumably called with ia32_mmap_sem held — verify against callers. */
	void *page = NULL;
	struct inode *inode;
	unsigned long ret = 0;
	struct vm_area_struct *vma = find_vma(current->mm, start);
	int old_prot = get_page_prot(vma, start);

	DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
	    file, start, end, prot, flags, off);

	/* Optimize the case where the old mmap and the new mmap are both anonymous */
	/* (old_prot & PROT_WRITE) implies vma != NULL: get_page_prot() returns 0 for NULL vma */
	if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
		if (clear_user((void __user *) start, end - start)) {
			ret = -EFAULT;
			goto out;
		}
		goto skip_mmap;
	}

	/* scratch page used to preserve the parts of the old page outside [start,end) */
	page = (void *) get_zeroed_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* NOTE(review): copy_from_user/copy_to_user return values ignored below;
	 * a partial copy is silently tolerated — confirm this is intentional. */
	if (old_prot)
		copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);

	/* replace the whole IA64 page with a fresh writable anonymous mapping */
	down_write(&current->mm->mmap_sem);
	{
		ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
			      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
	}
	up_write(&current->mm->mmap_sem);

	if (IS_ERR((void *) ret))
		goto out;

	if (old_prot) {
		/* copy back the old page contents.  */
		if (offset_in_page(start))
			copy_to_user((void __user *) PAGE_START(start), page,
				     offset_in_page(start));
		if (offset_in_page(end))
			copy_to_user((void __user *) end, page + offset_in_page(end),
				     PAGE_SIZE - offset_in_page(end));
	}

	if (!(flags & MAP_ANONYMOUS)) {
		/* read the file contents */
		inode = file->f_dentry->d_inode;
		if (!inode->i_fop || !file->f_op->read
		    || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
		{
			ret = -EINVAL;
			goto out;
		}
	}
 skip_mmap:
	/* if the caller didn't ask for write access, drop PROT_WRITE again but keep
	 * whatever protections the old mapping already granted */
	if (!(prot & PROT_WRITE))
		ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
  out:
	if (page)
		free_page((unsigned long) page);
	return ret;
}

/* SLAB cache for partial_page structures */
kmem_cache_t *partial_page_cachep;

/*
 * init partial_page_list.
 * Returns NULL when the kmalloc() fails.
*/struct partial_page_list*ia32_init_pp_list(void){	struct partial_page_list *p;	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)		return p;	p->pp_head = NULL;	p->ppl_rb = RB_ROOT;	p->pp_hint = NULL;	atomic_set(&p->pp_count, 1);	return p;}/* * Search for the partial page with @start in partial page list @ppl. * If finds the partial page, return the found partial page. * Else, return 0 and provide @pprev, @rb_link, @rb_parent to * be used by later __ia32_insert_pp(). */static struct partial_page *__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,	struct partial_page **pprev, struct rb_node ***rb_link,	struct rb_node **rb_parent){	struct partial_page *pp;	struct rb_node **__rb_link, *__rb_parent, *rb_prev;	pp = ppl->pp_hint;	if (pp && pp->base == start)		return pp;	__rb_link = &ppl->ppl_rb.rb_node;	rb_prev = __rb_parent = NULL;	while (*__rb_link) {		__rb_parent = *__rb_link;		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);		if (pp->base == start) {			ppl->pp_hint = pp;			return pp;		} else if (pp->base < start) {			rb_prev = __rb_parent;			__rb_link = &__rb_parent->rb_right;		} else {			__rb_link = &__rb_parent->rb_left;		}	}	*rb_link = __rb_link;	*rb_parent = __rb_parent;	*pprev = NULL;	if (rb_prev)		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);	return NULL;}/* * insert @pp into @ppl. */static void__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,	 struct partial_page *prev, struct rb_node **rb_link,	struct rb_node *rb_parent){	/* link list */	if (prev) {		pp->next = prev->next;		prev->next = pp;	} else {		ppl->pp_head = pp;		if (rb_parent)			pp->next = rb_entry(rb_parent,				struct partial_page, pp_rb);		else			pp->next = NULL;	}	/* link rb */	rb_link_node(&pp->pp_rb, rb_parent, rb_link);	rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);	ppl->pp_hint = pp;}/* * delete @pp from partial page list @ppl. 
*/
static void
__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
	struct partial_page *prev)
{
	/* unlink from the singly-linked list, repairing the hint if it pointed at @pp */
	if (prev) {
		prev->next = pp->next;
		if (ppl->pp_hint == pp)
			ppl->pp_hint = prev;
	} else {
		ppl->pp_head = pp->next;
		if (ppl->pp_hint == pp)
			ppl->pp_hint = pp->next;
	}
	/* unlink from the rb-tree and free the node */
	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
	kmem_cache_free(partial_page_cachep, pp);
}

/* Return the in-order rb-tree predecessor of @pp, or NULL if it is the first node. */
static struct partial_page *
__pp_prev(struct partial_page *pp)
{
	struct rb_node *prev = rb_prev(&pp->pp_rb);
	if (prev)
		return rb_entry(prev, struct partial_page, pp_rb);
	else
		return NULL;
}

/*
 * Delete partial pages with address between @start and @end.
 * @start and @end are page aligned.
 */
static void
__ia32_delete_pp_range(unsigned int start, unsigned int end)
{
	struct partial_page *pp, *prev;
	struct rb_node **rb_link, *rb_parent;

	if (start >= end)
		return;

	pp = __ia32_find_pp(current->thread.ppl, start, &prev,
					&rb_link, &rb_parent);
	if (pp)
		prev = __pp_prev(pp);
	else {
		/* no exact match at @start: begin with the first entry after @prev */
		if (prev)
			pp = prev->next;
		else
			pp = current->thread.ppl->pp_head;
	}

	/* walk the sorted list, deleting every entry whose base is below @end */
	while (pp && pp->base < end) {
		struct partial_page *tmp = pp->next;
		__ia32_delete_pp(current->thread.ppl, pp, prev);
		pp = tmp;
	}
}

/*
 * Set the range between @start and @end in bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 */
static int
__ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
	struct partial_page *pp, *prev;
	struct rb_node ** rb_link, *rb_parent;
	unsigned int pstart, start_bit, end_bit, i;

	/* each bit covers one IA32 (4K) page inside the enclosing IA64 page */
	pstart = PAGE_START(start);
	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
	if (end_bit == 0)
		/* @end fell exactly on the next IA64 page boundary */
		end_bit = PAGE_SIZE / IA32_PAGE_SIZE;

	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
					&rb_link, &rb_parent);
	if (pp) {
		for (i = start_bit; i < end_bit; i++)
			set_bit(i, &pp->bitmap);
		/*
		 * Check: if this partial page has been set to a full page,
		 * then delete it.
		 */
		if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
				PAGE_SIZE/IA32_PAGE_SIZE) {
			__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
		}
		return 0;
	}

	/*
	 * MAP_FIXED may lead to overlapping mmap.
	 * In this case, the requested mmap area may already mmaped as a full
	 * page. So check vma before adding a new partial page.
	 */
	if (flags & MAP_FIXED) {
		struct vm_area_struct *vma = find_vma(current->mm, pstart);
		if (vma && vma->vm_start <= pstart)
			return 0;
	}

	/* new a partial_page */
	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->base = pstart;
	pp->bitmap = 0;
	for (i=start_bit; i<end_bit; i++)
		set_bit(i, &(pp->bitmap));
	pp->next = NULL;
	__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
	return 0;
}

/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_set_pp().
 * Takes mmap_sem for writing; the __ia32_* helpers above rely on that.
 */
static void
ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
	down_write(&current->mm->mmap_sem);
	if (flags & MAP_FIXED) {
		/*
		 * MAP_FIXED may lead to overlapping mmap. When this happens,
		 * a series of complete IA64 pages results in deletion of
		 * old partial pages in that range.
		 */
		__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
	}

	if (end < PAGE_ALIGN(start)) {
		/* the whole range lies inside a single IA64 page */
		__ia32_set_pp(start, end, flags);
	} else {
		/* handle the partial head and tail pages separately */
		if (offset_in_page(start))
			__ia32_set_pp(start, PAGE_ALIGN(start), flags);
		if (offset_in_page(end))
			__ia32_set_pp(PAGE_START(end), end, flags);
	}
	up_write(&current->mm->mmap_sem);
}

/*
 * Unset the range between @start and @end in bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 * After doing that, if the bitmap becomes 0, free the partial page and
 * 	return 1, else return 0;
 * If the partial page is not found in the list, then
 * 	if the vma exists, the full page is converted to a partial page;
 *	else return -ENOMEM.
*/static int__ia32_unset_pp(unsigned int start, unsigned int end){	struct partial_page *pp, *prev;	struct rb_node ** rb_link, *rb_parent;	unsigned int pstart, start_bit, end_bit, i;	struct vm_area_struct *vma;	pstart = PAGE_START(start);	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;	if (end_bit == 0)		end_bit = PAGE_SIZE / IA32_PAGE_SIZE;	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,					&rb_link, &rb_parent);	if (pp) {		for (i = start_bit; i < end_bit; i++)			clear_bit(i, &pp->bitmap);		if (pp->bitmap == 0) {			__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -