
📄 syscall.c

📁 xenomai: an excellent real-time patch for Linux
💻 C
📖 Page 1 of 5
/**
 * @file
 * This file is part of the Xenomai project.
 *
 * @note Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/ioport.h>
#include <nucleus/pod.h>
#include <nucleus/heap.h>
#include <nucleus/shadow.h>
#include <nucleus/registry.h>
#include <native/syscall.h>
#include <native/task.h>
#include <native/timer.h>
#include <native/sem.h>
#include <native/event.h>
#include <native/mutex.h>
#include <native/cond.h>
#include <native/queue.h>
#include <native/heap.h>
#include <native/alarm.h>
#include <native/intr.h>
#include <native/pipe.h>

/* This file implements the Xenomai syscall wrappers;
 *
 * o Unchecked uaccesses are used to fetch args since the syslib is
 * trusted. We currently assume that the caller's memory is locked and
 * committed.
 *
 * o All skin services (re-)check the object descriptor they are
 * passed; so there is no race between a call to xnregistry_fetch()
 * where the user-space handle is converted to a descriptor pointer,
 * and the use of it in the actual syscall.
 */

static int __muxid;

static int __rt_bind_helper (struct task_struct *curr,
                             struct pt_regs *regs,
                             xnhandle_t *handlep,
                             unsigned magic,
                             void **objaddrp)
{
    char name[XNOBJECT_NAME_LEN];
    RTIME timeout;
    void *objaddr;
    spl_t s;
    int err;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg2(regs),sizeof(name)))
        return -EFAULT;

    __xn_strncpy_from_user(curr,name,(const char __user *)__xn_reg_arg2(regs),sizeof(name) - 1);
    name[sizeof(name) - 1] = '\0';

    __xn_copy_from_user(curr,&timeout,(void __user *)__xn_reg_arg3(regs),sizeof(timeout));

    err = xnregistry_bind(name,timeout,handlep);

    if (!err)
        {
        xnlock_get_irqsave(&nklock,s);

        objaddr = xnregistry_fetch(*handlep);

        /* Also validate the type of the bound object. */

        if (xeno_test_magic(objaddr,magic))
            {
            if (objaddrp)
                *objaddrp = objaddr;
            }
        else
            err = -EACCES;

        xnlock_put_irqrestore(&nklock,s);
        }

    return err;
}

static RT_TASK *__rt_task_current (struct task_struct *curr)
{
    xnthread_t *thread = xnshadow_thread(curr);

    /* Don't call rt_task_self() which does not know about relaxed
       tasks, but rather use the shadow information directly. */

    if (!thread || xnthread_get_magic(thread) != XENO_SKIN_MAGIC)
        return NULL;

    return thread2rtask(thread); /* Convert TCB pointers. */
}

/*
 * int __rt_task_create(struct rt_arg_bulk *bulk,
 *                      xncompletion_t __user *u_completion)
 *
 * bulk = {
 * a1: RT_TASK_PLACEHOLDER *task;
 * a2: const char *name;
 * a3: int prio;
 * }
 */

static int __rt_task_create (struct task_struct *curr, struct pt_regs *regs)
{
    xncompletion_t __user *u_completion;
    char name[XNOBJECT_NAME_LEN];
    struct rt_arg_bulk bulk;
    RT_TASK_PLACEHOLDER ph;
    int err, prio, mode;
    RT_TASK *task;

    if (xnshadow_thread(curr))
        return -EBUSY;

    __xn_copy_from_user(curr,&bulk,(void __user *)__xn_reg_arg1(regs),sizeof(bulk));

    if (!__xn_access_ok(curr,VERIFY_WRITE,bulk.a1,sizeof(ph)))
        return -EFAULT;

    if (bulk.a2)
        {
        if (!__xn_access_ok(curr,VERIFY_READ,bulk.a2,sizeof(name)))
            return -EFAULT;

        __xn_strncpy_from_user(curr,name,(const char __user *)bulk.a2,sizeof(name) - 1);
        name[sizeof(name) - 1] = '\0';
        strncpy(curr->comm,name,sizeof(curr->comm));
        curr->comm[sizeof(curr->comm) - 1] = '\0';
        }
    else
        *name = '\0';

    /* Task priority. */
    prio = bulk.a3;
    /* Task init mode & CPU affinity. */
    mode = bulk.a4 & (T_CPUMASK|T_SUSP);

    /* Completion descriptor our parent thread is pending on -- may be NULL. */
    u_completion = (xncompletion_t __user *)__xn_reg_arg2(regs);

    task = (RT_TASK *)xnmalloc(sizeof(*task));

    if (!task)
        return -ENOMEM;

    /* Force FPU support in user-space. This will lead to a no-op if
       the platform does not support it. */

    err = rt_task_create(task,name,0,prio,XNFPU|XNSHADOW|mode);

    if (err == 0)
        {
        /* Copy back the registry handle to the ph struct. */
        ph.opaque = xnthread_handle(&task->thread_base);
        ph.opaque2 = bulk.a5;   /* hidden pthread_t identifier. */
        __xn_copy_to_user(curr,(void __user *)bulk.a1,&ph,sizeof(ph));
        err = xnshadow_map(&task->thread_base,u_completion);
        }
    else
        {
        xnfree(task);
        /* Unblock and pass back error code. */

        if (u_completion)
            xnshadow_signal_completion(u_completion,err);
        }

    return err;
}

/*
 * int __rt_task_bind(RT_TASK_PLACEHOLDER *ph,
 *                    const char *name,
 *                    RTIME *timeoutp)
 */

static int __rt_task_bind (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    int err;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    err = __rt_bind_helper(curr,regs,&ph.opaque,XENO_TASK_MAGIC,NULL);

    if (!err)
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));

    return err;
}

/*
 * int __rt_task_start(RT_TASK_PLACEHOLDER *ph,
 *                     void (*entry)(void *cookie),
 *                     void *cookie)
 */

static int __rt_task_start (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    RT_TASK *task;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    task = (RT_TASK *)xnregistry_fetch(ph.opaque);

    if (!task)
        return -ESRCH;

    return rt_task_start(task,
                         (void (*)(void *))__xn_reg_arg2(regs),
                         (void *)__xn_reg_arg3(regs));
}

/*
 * int __rt_task_suspend(RT_TASK_PLACEHOLDER *ph)
 */

static int __rt_task_suspend (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    RT_TASK *task;

    if (__xn_reg_arg1(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
            return -EFAULT;

        __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

        task = (RT_TASK *)xnregistry_fetch(ph.opaque);
        }
    else
        task = __rt_task_current(curr);

    if (!task)
        return -ESRCH;

    return rt_task_suspend(task);
}

/*
 * int __rt_task_resume(RT_TASK_PLACEHOLDER *ph)
 */

static int __rt_task_resume (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    RT_TASK *task;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    task = (RT_TASK *)xnregistry_fetch(ph.opaque);

    if (!task)
        return -ESRCH;

    return rt_task_resume(task);
}

/*
 * int __rt_task_delete(RT_TASK_PLACEHOLDER *ph)
 */

static int __rt_task_delete (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    RT_TASK *task;

    if (__xn_reg_arg1(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
            return -EFAULT;

        __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

        task = (RT_TASK *)xnregistry_fetch(ph.opaque);
        }
    else
        task = __rt_task_current(curr);

    if (!task)
        return -ESRCH;

    return rt_task_delete(task); /* TCB freed in delete hook. */
}

/*
 * int __rt_task_yield(void)
 */

static int __rt_task_yield (struct task_struct *curr, struct pt_regs *regs)
{
    return rt_task_yield();
}

/*
 * int __rt_task_set_periodic(RT_TASK_PLACEHOLDER *ph,
 *                            RTIME idate,
 *                            RTIME period)
 */

static int __rt_task_set_periodic (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    RTIME idate, period;
    RT_TASK *task;

    if (__xn_reg_arg1(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
            return -EFAULT;

        __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

        task = (RT_TASK *)xnregistry_fetch(ph.opaque);
        }
    else
        task = __rt_task_current(curr);

    if (!task)
        return -ESRCH;

    __xn_copy_from_user(curr,&idate,(void __user *)__xn_reg_arg2(regs),sizeof(idate));

    __xn_copy_from_user(curr,&period,(void __user *)__xn_reg_arg3(regs),sizeof(period));

    return rt_task_set_periodic(task,idate,period);
}

/*
 * int __rt_task_wait_period(unsigned long *overruns_r)
 */

static int __rt_task_wait_period (struct task_struct *curr, struct pt_regs *regs)
{
    unsigned long overruns;
    int err;

    if (__xn_reg_arg1(regs) &&
        !__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(overruns)))
        return -EFAULT;

    err = rt_task_wait_period(&overruns);

    if (__xn_reg_arg1(regs) && (err == 0 || err == -ETIMEDOUT))
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&overruns,sizeof(overruns));

    return err;
}

/*
 * int __rt_task_set_priority(RT_TASK_PLACEHOLDER *ph,
 *                            int prio)
 */

static int __rt_task_set_priority (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    RT_TASK *task;
    int prio;

    if (__xn_reg_arg1(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
            return -EFAULT;

        __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

        task = (RT_TASK *)xnregistry_fetch(ph.opaque);
        }
    else
        task = __rt_task_current(curr);

    if (!task)
        return -ESRCH;

    prio = __xn_reg_arg2(regs);

    return rt_task_set_priority(task,prio);
}

/*
 * int __rt_task_sleep(RTIME delay)
 */

static int __rt_task_sleep (struct task_struct *curr, struct pt_regs *regs)
{
    RTIME delay;

    __xn_copy_from_user(curr,&delay,(void __user *)__xn_reg_arg1(regs),sizeof(delay));

    return rt_task_sleep(delay);
}

/*
 * int __rt_task_sleep_until(RTIME date)
 */

static int __rt_task_sleep_until (struct task_struct *curr, struct pt_regs *regs)
{
    RTIME date;

    __xn_copy_from_user(curr,&date,(void __user *)__xn_reg_arg1(regs),sizeof(date));

    return rt_task_sleep_until(date);
}

/*
 * int __rt_task_unblock(RT_TASK_PLACEHOLDER *ph)
 */

static int __rt_task_unblock (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    RT_TASK *task;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    task = (RT_TASK *)xnregistry_fetch(ph.opaque);

    if (!task)
        return -ESRCH;

    return rt_task_unblock(task);
}

/*
 * int __rt_task_inquire(RT_TASK_PLACEHOLDER *ph,
 *                       RT_TASK_INFO *infop)
 */

static int __rt_task_inquire (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    RT_TASK_INFO info;
    RT_TASK *task;
    int err;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg2(regs),sizeof(info)))
        return -EFAULT;

    if (__xn_reg_arg1(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
            return -EFAULT;

        __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

        task = (RT_TASK *)xnregistry_fetch(ph.opaque);
        }
    else
        task = __rt_task_current(curr);

    if (!task)
        return -ESRCH;

    err = rt_task_inquire(task,&info);

    if (!err)
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg2(regs),&info,sizeof(info));

    return err;
}

/*
 * int __rt_task_notify(RT_TASK_PLACEHOLDER *ph,
 *                      rt_sigset_t signals)
 */

static int __rt_task_notify (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    rt_sigset_t signals;
    RT_TASK *task;

    if (__xn_reg_arg1(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
            return -EFAULT;

        __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

        task = (RT_TASK *)xnregistry_fetch(ph.opaque);
        }
    else
        task = __rt_task_current(curr);

    if (!task)
        return -ESRCH;

    signals = (rt_sigset_t)__xn_reg_arg2(regs);

    return rt_task_notify(task,signals);
}

/*
 * int __rt_task_set_mode(int clrmask,
 *                        int setmask,
 *                        int *mode_r)
 */

static int __rt_task_set_mode (struct task_struct *curr, struct pt_regs *regs)
{
    int err, setmask, clrmask, mode_r;

    if (__xn_reg_arg3(regs) &&
        !__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg3(regs),sizeof(int)))
        return -EFAULT;

    clrmask = __xn_reg_arg1(regs);
    setmask = __xn_reg_arg2(regs);

    err = rt_task_set_mode(clrmask & ~T_PRIMARY,setmask & ~T_PRIMARY,&mode_r);

    if (err)
        return err;

    if (__xn_reg_arg3(regs))
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg3(regs),&mode_r,sizeof(mode_r));

    if ((clrmask & T_PRIMARY) != 0)
        xnshadow_relax(0);

    return 0;
}
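For context, these kernel-side wrappers are reached from user space through the ordinary native-skin calls (rt_task_create(), rt_task_start(), rt_task_set_periodic(), rt_task_wait_period(), ...); the Xenomai user-space library marshals the placeholder and bulk arguments into the registers the wrappers read back with __xn_reg_argN(). The sketch below is not part of syscall.c; it is a minimal, hypothetical user-space counterpart, assuming a Xenomai 2.x native-skin userland (built against native/task.h and native/timer.h and linked with the native skin library). Note the mlockall() call, which matches the wrappers' stated assumption that the caller's memory is locked and committed.

/* Hypothetical user-space sketch (not part of syscall.c): a minimal
 * periodic task driven by the native skin calls whose kernel-side
 * wrappers appear above. Task name, priority, and period are
 * illustrative choices. */
#include <sys/mman.h>
#include <unistd.h>
#include <native/task.h>
#include <native/timer.h>

static RT_TASK demo_task;   /* descriptor filled in by rt_task_create() */

static void demo(void *cookie)
{
    unsigned long overruns;

    /* 1 ms period starting now; lands in __rt_task_set_periodic()
     * with a NULL placeholder, i.e. the current task. */
    rt_task_set_periodic(NULL, TM_NOW, rt_timer_ns2ticks(1000000));

    for (;;) {
        /* Blocks until the next period; maps to __rt_task_wait_period(). */
        if (rt_task_wait_period(&overruns))
            break;
    }
}

int main(void)
{
    /* The wrappers use unchecked uaccesses and expect locked memory. */
    mlockall(MCL_CURRENT | MCL_FUTURE);

    /* These two calls end up in __rt_task_create() and __rt_task_start(). */
    if (rt_task_create(&demo_task, "demo", 0, 50, T_FPU))
        return 1;
    if (rt_task_start(&demo_task, demo, NULL))
        return 1;

    pause();   /* keep the process alive while the RT task runs */
    return 0;
}

A task registered by name this way can also be looked up from another process with rt_task_bind(), which resolves the name through the registry via __rt_bind_helper() and __rt_task_bind() above.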
