
📄 sys.c

📁 Linux 2.6.19 kernel source code (before patching)
💻 C
📖 Page 1 of 4
/* excerpt begins mid-file, in the tail of groups_sort() */
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}

/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 *	SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}

/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
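Note that sys_getgroups above returns only the group count when called with gidsetsize == 0, which is how user programs size their buffer before fetching the list. A minimal userspace sketch of that pattern (a hypothetical demo, not part of sys.c; assumes the glibc getgroups(2) wrapper):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>

int main(void)
{
	/* size query: corresponds to the gidsetsize == 0 path in the kernel */
	int n = getgroups(0, NULL);
	if (n < 0) {
		perror("getgroups");
		return 1;
	}

	gid_t *list = malloc(n * sizeof(gid_t));
	if (list == NULL)
		return 1;

	/* second call copies the supplementary group list out */
	if (getgroups(n, list) < 0) {
		perror("getgroups");
		return 1;
	}
	for (int i = 0; i < n; i++)
		printf("supplementary gid: %u\n", (unsigned)list[i]);
	free(list);
	return 0;
}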
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->nodename, tmp, len);
		utsname()->nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(utsname()->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, utsname()->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->domainname, tmp, len);
		utsname()->domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
}

#endif
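sys_getrlimit above simply snapshots signal->rlim[resource] under the group leader's task lock. From user space that is the familiar getrlimit(2) call; a hypothetical sketch (not part of sys.c) reading the open-files limit:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	/* mirrors sys_getrlimit: the kernel copies out the current
	 * soft (rlim_cur) and hard (rlim_max) values for the resource */
	if (getrlimit(RLIMIT_NOFILE, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	printf("RLIMIT_NOFILE: cur=%lu max=%lu\n",
	       (unsigned long)rl.rlim_cur, (unsigned long)rl.rlim_max);
	return 0;
}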
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry.  But we use the zero value to mean "it was
		 * never set".  So let's cheat and make it one second
		 * instead
		 */
		new_rlim.rlim_cur = 1;
	}

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
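The "zero means one second" cheat in sys_setrlimit is observable from user space: a process that sets rlim_cur = 0 for RLIMIT_CPU still gets roughly one second of CPU time before SIGXCPU arrives. A hypothetical userspace sketch (not part of sys.c; the soft limit delivers SIGXCPU, which we catch):

#include <stdio.h>
#include <signal.h>
#include <sys/resource.h>

static volatile sig_atomic_t got_xcpu;

static void on_xcpu(int sig)
{
	(void)sig;
	got_xcpu = 1;
}

int main(void)
{
	struct rlimit rl;

	signal(SIGXCPU, on_xcpu);
	if (getrlimit(RLIMIT_CPU, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	rl.rlim_cur = 0;	/* kernel silently rewrites this to 1 second */
	if (setrlimit(RLIMIT_CPU, &rl) != 0) {
		perror("setrlimit");
		return 1;
	}
	while (!got_xcpu)
		;	/* burn CPU until the soft limit fires */
	printf("SIGXCPU delivered after roughly one second of CPU time\n");
	return 0;
}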
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded,
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return;
	}

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			r->ru_inblock = p->signal->cinblock;
			r->ru_oublock = p->signal->coublock;

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			r->ru_inblock += p->signal->inblock;
			r->ru_oublock += p->signal->oublock;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				r->ru_inblock += task_io_get_inblock(t);
				r->ru_oublock += task_io_get_oublock(t);
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
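Note that sys_getrusage only accepts RUSAGE_SELF and RUSAGE_CHILDREN; RUSAGE_BOTH is reachable only from inside the kernel, since the syscall filters it out with -EINVAL. A hypothetical userspace sketch (not part of sys.c) reading the caller's own counters, which k_getrusage assembles from the signal struct plus every live thread:

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) != 0) {
		perror("getrusage");
		return 1;
	}
	printf("user time: %ld.%06ld s\n",
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
	printf("sys time:  %ld.%06ld s\n",
	       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
	printf("minor/major faults: %ld/%ld\n", ru.ru_minflt, ru.ru_majflt);
	return 0;
}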
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = current->mm->dumpable;
			break;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 1) {
				error = -EINVAL;
				break;
			}
			current->mm->dumpable = arg2;
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
				return -EFAULT;
			return 0;
		}
		case PR_GET_ENDIAN:
			error = GET_ENDIAN(current, arg2);
			break;
		case PR_SET_ENDIAN:
			error = SET_ENDIAN(current, arg2);
			break;

		default:
			error = -EINVAL;
			break;
	}
	return error;
}

asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *cache)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	if (cache) {
		/*
		 * The cache is not needed for this implementation,
		 * but make sure user programs pass something
		 * valid. vsyscall implementations can instead make
		 * good use of the cache. Only use t0 and t1 because
		 * these are available in both 32bit and 64bit ABI (no
		 * need for a compat_getcpu). 32bit has enough
		 * padding
		 */
		unsigned long t0, t1;
		get_user(t0, &cache->blob[0]);
		get_user(t1, &cache->blob[1]);
		t0++;
		t1++;
		put_user(t0, &cache->blob[0]);
		put_user(t1, &cache->blob[1]);
	}
	return err ? -EFAULT : 0;
}
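The PR_SET_NAME/PR_GET_NAME cases above operate on task_struct.comm, a 16-byte buffer including the trailing NUL, which is why sys_prctl copies at most sizeof(comm)-1 characters and names are silently truncated to 15 characters. A hypothetical userspace sketch (not part of sys.c; assumes the glibc prctl(2) wrapper from <sys/prctl.h>):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	char name[16];	/* matches sizeof(task_struct.comm) */

	/* names longer than 15 characters are silently truncated */
	if (prctl(PR_SET_NAME, "demo-thread") != 0) {
		perror("prctl(PR_SET_NAME)");
		return 1;
	}
	if (prctl(PR_GET_NAME, name) != 0) {
		perror("prctl(PR_GET_NAME)");
		return 1;
	}
	printf("task comm: %s\n", name);	/* expect "demo-thread" */
	return 0;
}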
