
📄 syscall.c

📁 xenomai, an excellent real-time patch for Linux
💻 C
📖 Page 1 of 5
/*
 * int __rt_task_self(RT_TASK_PLACEHOLDER *ph)
 */

static int __rt_task_self (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    RT_TASK *task;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    task = __rt_task_current(curr);

    if (!task)
        /* Calls on behalf of a non-task context beget an error for
           the user-space interface. */
        return -ESRCH;

    ph.opaque = xnthread_handle(&task->thread_base); /* Copy back the task handle. */

    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));

    return 0;
}

/*
 * int __rt_task_slice(RT_TASK_PLACEHOLDER *ph,
 *                     RTIME quantum)
 */

static int __rt_task_slice (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK_PLACEHOLDER ph;
    RT_TASK *task;
    RTIME quantum;

    if (__xn_reg_arg1(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
            return -EFAULT;

        __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

        task = (RT_TASK *)xnregistry_fetch(ph.opaque);
        }
    else
        task = __rt_task_current(curr);

    if (!task)
        return -ESRCH;

    __xn_copy_from_user(curr,&quantum,(void __user *)__xn_reg_arg2(regs),sizeof(quantum));

    return rt_task_slice(task,quantum);
}

#ifdef CONFIG_XENO_OPT_NATIVE_MPS

/*
 * int __rt_task_send(RT_TASK_PLACEHOLDER *ph,
 *                    RT_TASK_MCB *mcb_s,
 *                    RT_TASK_MCB *mcb_r,
 *                    RTIME timeout)
 */

static int __rt_task_send (struct task_struct *curr, struct pt_regs *regs)
{
    char tmp_buf[RT_MCB_FSTORE_LIMIT];
    RT_TASK_MCB mcb_s, mcb_r;
    caddr_t tmp_area, data_r;
    RT_TASK_PLACEHOLDER ph;
    RT_TASK *task;
    RTIME timeout;
    size_t xsize;
    ssize_t err;

    if (__xn_reg_arg1(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
            return -EFAULT;

        __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

        task = (RT_TASK *)xnregistry_fetch(ph.opaque);
        }
    else
        task = __rt_task_current(curr);

    if (!task)
        return -ESRCH;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg2(regs),sizeof(mcb_s)))
        return -EFAULT;

    __xn_copy_from_user(curr,&mcb_s,(void __user *)__xn_reg_arg2(regs),sizeof(mcb_s));

    if (mcb_s.size > 0 && !__xn_access_ok(curr,VERIFY_READ,mcb_s.data,mcb_s.size))
        return -EFAULT;

    if (__xn_reg_arg3(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg3(regs),sizeof(mcb_r)))
            return -EFAULT;

        __xn_copy_from_user(curr,&mcb_r,(void __user *)__xn_reg_arg3(regs),sizeof(mcb_r));

        if (mcb_r.size > 0 && !__xn_access_ok(curr,VERIFY_WRITE,mcb_r.data,mcb_r.size))
            return -EFAULT;
        }
    else
        {
        mcb_r.data = NULL;
        mcb_r.size = 0;
        }

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg4(regs),sizeof(timeout)))
        return -EFAULT;

    __xn_copy_from_user(curr,&timeout,(void __user *)__xn_reg_arg4(regs),sizeof(timeout));

    xsize = mcb_s.size + mcb_r.size;
    data_r = mcb_r.data;

    if (xsize > 0)
        {
        /* Try optimizing a bit here: if the cumulated message sizes
           (initial+reply) can fit into our local buffer, use it;
           otherwise, take the slow path and fetch a larger buffer
           from the system heap. Most messages are expected to be
           short enough to fit on the stack anyway. */

        if (xsize <= sizeof(tmp_buf))
            tmp_area = tmp_buf;
        else
            {
            tmp_area = xnmalloc(xsize);

            if (!tmp_area)
                return -ENOMEM;
            }

        if (mcb_s.size > 0)
            __xn_copy_from_user(curr,tmp_area,(void __user *)mcb_s.data,mcb_s.size);

        mcb_s.data = tmp_area;
        mcb_r.data = tmp_area + mcb_s.size;
        }
    else
        tmp_area = NULL;

    err = rt_task_send(task,&mcb_s,&mcb_r,timeout);

    if (err > 0)
        __xn_copy_to_user(curr,(void __user *)data_r,mcb_r.data,mcb_r.size);

    if (__xn_reg_arg3(regs))
        {
        mcb_r.data = data_r;
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg3(regs),&mcb_r,sizeof(mcb_r));
        }

    if (tmp_area && tmp_area != tmp_buf)
        xnfree(tmp_area);

    return err;
}

/*
 * int __rt_task_receive(RT_TASK_MCB *mcb_r,
 *                       RTIME timeout)
 */

static int __rt_task_receive (struct task_struct *curr, struct pt_regs *regs)
{
    char tmp_buf[RT_MCB_FSTORE_LIMIT];
    caddr_t tmp_area, data_r;
    RT_TASK_MCB mcb_r;
    RTIME timeout;
    int err;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(mcb_r)))
        return -EFAULT;

    __xn_copy_from_user(curr,&mcb_r,(void __user *)__xn_reg_arg1(regs),sizeof(mcb_r));

    if (mcb_r.size > 0 && !__xn_access_ok(curr,VERIFY_WRITE,mcb_r.data,mcb_r.size))
        return -EFAULT;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg2(regs),sizeof(timeout)))
        return -EFAULT;

    __xn_copy_from_user(curr,&timeout,(void __user *)__xn_reg_arg2(regs),sizeof(timeout));

    data_r = mcb_r.data;

    if (mcb_r.size > 0)
        {
        /* Same optimization as in __rt_task_send(): if the size of
           the reply message can fit into our local buffer, use it;
           otherwise, take the slow path and fetch a larger buffer
           from the system heap. */

        if (mcb_r.size <= sizeof(tmp_buf))
            tmp_area = tmp_buf;
        else
            {
            tmp_area = xnmalloc(mcb_r.size);

            if (!tmp_area)
                return -ENOMEM;
            }

        mcb_r.data = tmp_area;
        }
    else
        tmp_area = NULL;

    err = rt_task_receive(&mcb_r,timeout);

    if (err > 0 && mcb_r.size > 0)
        __xn_copy_to_user(curr,(void __user *)data_r,mcb_r.data,mcb_r.size);

    mcb_r.data = data_r;
    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&mcb_r,sizeof(mcb_r));

    if (tmp_area && tmp_area != tmp_buf)
        xnfree(tmp_area);

    return err;
}

/*
 * int __rt_task_reply(int flowid,
 *                     RT_TASK_MCB *mcb_s)
 */

static int __rt_task_reply (struct task_struct *curr, struct pt_regs *regs)
{
    char tmp_buf[RT_MCB_FSTORE_LIMIT];
    RT_TASK_MCB mcb_s;
    caddr_t tmp_area;
    int flowid, err;

    flowid = __xn_reg_arg1(regs);

    if (__xn_reg_arg2(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg2(regs),sizeof(mcb_s)))
            return -EFAULT;

        __xn_copy_from_user(curr,&mcb_s,(void __user *)__xn_reg_arg2(regs),sizeof(mcb_s));

        if (mcb_s.size > 0 && !__xn_access_ok(curr,VERIFY_READ,mcb_s.data,mcb_s.size))
            return -EFAULT;
        }
    else
        {
        mcb_s.data = NULL;
        mcb_s.size = 0;
        }

    if (mcb_s.size > 0)
        {
        /* Same optimization as in __rt_task_send(): if the size of
           the reply message can fit into our local buffer, use it;
           otherwise, take the slow path and fetch a larger buffer
           from the system heap. */

        if (mcb_s.size <= sizeof(tmp_buf))
            tmp_area = tmp_buf;
        else
            {
            tmp_area = xnmalloc(mcb_s.size);

            if (!tmp_area)
                return -ENOMEM;
            }

        __xn_copy_from_user(curr,tmp_area,(void __user *)mcb_s.data,mcb_s.size);

        mcb_s.data = tmp_area;
        }
    else
        tmp_area = NULL;

    err = rt_task_reply(flowid,&mcb_s);

    if (tmp_area && tmp_area != tmp_buf)
        xnfree(tmp_area);

    return err;
}

#else /* !CONFIG_XENO_OPT_NATIVE_MPS */

#define __rt_task_send     __rt_call_not_available
#define __rt_task_receive  __rt_call_not_available
#define __rt_task_reply    __rt_call_not_available

#endif /* CONFIG_XENO_OPT_NATIVE_MPS */

/*
 * int __rt_timer_set_mode(RTIME *tickvalp)
 */

static int __rt_timer_set_mode (struct task_struct *curr, struct pt_regs *regs)
{
    RTIME tickval;

    __xn_copy_from_user(curr,&tickval,(void __user *)__xn_reg_arg1(regs),sizeof(tickval));

    return rt_timer_set_mode(tickval);
}

/*
 * int __rt_timer_read(RTIME *timep)
 */

static int __rt_timer_read (struct task_struct *curr, struct pt_regs *regs)
{
    RTIME now = rt_timer_read();

    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&now,sizeof(now));

    return 0;
}

/*
 * int __rt_timer_tsc(RTIME *tscp)
 */

static int __rt_timer_tsc (struct task_struct *curr, struct pt_regs *regs)
{
    RTIME tsc = rt_timer_tsc();

    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&tsc,sizeof(tsc));

    return 0;
}

/*
 * int __rt_timer_ns2ticks(SRTIME *ticksp, SRTIME *nsp)
 */

static int __rt_timer_ns2ticks (struct task_struct *curr, struct pt_regs *regs)
{
    SRTIME ns, ticks;

    __xn_copy_from_user(curr,&ns,(void __user *)__xn_reg_arg2(regs),sizeof(ns));

    ticks = rt_timer_ns2ticks(ns);

    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ticks,sizeof(ticks));

    return 0;
}

/*
 * int __rt_timer_ns2tsc(SRTIME *ticksp, SRTIME *nsp)
 */

static int __rt_timer_ns2tsc (struct task_struct *curr, struct pt_regs *regs)
{
    SRTIME ns, ticks;

    __xn_copy_from_user(curr,&ns,(void __user *)__xn_reg_arg2(regs),sizeof(ns));

    ticks = rt_timer_ns2tsc(ns);

    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ticks,sizeof(ticks));

    return 0;
}

/*
 * int __rt_timer_ticks2ns(SRTIME *nsp, SRTIME *ticksp)
 */

static int __rt_timer_ticks2ns (struct task_struct *curr, struct pt_regs *regs)
{
    SRTIME ticks, ns;

    __xn_copy_from_user(curr,&ticks,(void __user *)__xn_reg_arg2(regs),sizeof(ticks));

    ns = rt_timer_ticks2ns(ticks);

    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ns,sizeof(ns));

    return 0;
}

/*
 * int __rt_timer_tsc2ns(SRTIME *nsp, SRTIME *ticksp)
 */

static int __rt_timer_tsc2ns (struct task_struct *curr, struct pt_regs *regs)
{
    SRTIME ticks, ns;

    __xn_copy_from_user(curr,&ticks,(void __user *)__xn_reg_arg2(regs),sizeof(ticks));

    ns = rt_timer_tsc2ns(ticks);

    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ns,sizeof(ns));

    return 0;
}

/*
 * int __rt_timer_inquire(RT_TIMER_INFO *info)
 */

static int __rt_timer_inquire (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TIMER_INFO info;
    int err;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(info)))
        return -EFAULT;

    err = rt_timer_inquire(&info);

    if (!err)
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&info,sizeof(info));

    return err;
}

/*
 * int __rt_timer_spin(RTIME *nsp)
 */

static int __rt_timer_spin (struct task_struct *curr, struct pt_regs *regs)
{
    RTIME ns;

    __xn_copy_from_user(curr,&ns,(void __user *)__xn_reg_arg1(regs),sizeof(ns));

    rt_timer_spin(ns);

    return 0;
}

#ifdef CONFIG_XENO_OPT_NATIVE_SEM

/*
 * int __rt_sem_create(RT_SEM_PLACEHOLDER *ph,
 *                     const char *name,
 *                     unsigned icount,
 *                     int mode)
 */

static int __rt_sem_create (struct task_struct *curr, struct pt_regs *regs)
{
    char name[XNOBJECT_NAME_LEN];
    RT_SEM_PLACEHOLDER ph;
    unsigned icount;
    int err, mode;
    RT_SEM *sem;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    if (__xn_reg_arg2(regs))
        {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg2(regs),sizeof(name)))
            return -EFAULT;

        __xn_strncpy_from_user(curr,name,(const char __user *)__xn_reg_arg2(regs),sizeof(name) - 1);
        name[sizeof(name) - 1] = '\0';
        }
    else
        *name = '\0';

    /* Initial semaphore value. */
    icount = (unsigned)__xn_reg_arg3(regs);
    /* Creation mode. */
    mode = (int)__xn_reg_arg4(regs);

    sem = (RT_SEM *)xnmalloc(sizeof(*sem));

    if (!sem)
        return -ENOMEM;

    err = rt_sem_create(sem,name,icount,mode);

    if (err == 0)
        {
        sem->cpid = curr->pid;
        /* Copy back the registry handle to the ph struct. */
        ph.opaque = sem->handle;
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));
        }
    else
        xnfree(sem);

    return err;
}

/*
 * int __rt_sem_bind(RT_SEM_PLACEHOLDER *ph,
 *                   const char *name,
 *                   RTIME *timeoutp)
 */

static int __rt_sem_bind (struct task_struct *curr, struct pt_regs *regs)
{
    RT_SEM_PLACEHOLDER ph;
    int err;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    err = __rt_bind_helper(curr,regs,&ph.opaque,XENO_SEM_MAGIC,NULL);

    if (!err)
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));

    return err;
}

/*
 * int __rt_sem_delete(RT_SEM_PLACEHOLDER *ph)
 */

static int __rt_sem_delete (struct task_struct *curr, struct pt_regs *regs)
{
    RT_SEM_PLACEHOLDER ph;
    RT_SEM *sem;
    int err;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    sem = (RT_SEM *)xnregistry_fetch(ph.opaque);

    if (!sem)
        return -ESRCH;

    err = rt_sem_delete(sem);

    if (!err && sem->cpid)
        xnfree(sem);

    return err;
}

/*
 * int __rt_sem_p(RT_SEM_PLACEHOLDER *ph,
 *                RTIME *timeoutp)
 */

static int __rt_sem_p (struct task_struct *curr, struct pt_regs *regs)
{
    RT_SEM_PLACEHOLDER ph;
    RTIME timeout;
    RT_SEM *sem;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));
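The listing stops here at the page boundary, inside __rt_sem_p(). For context on the message-passing (MPS) wrappers above, here is a minimal user-space sketch of the round trip that __rt_task_send(), __rt_task_receive() and __rt_task_reply() marshal arguments for, assuming the standard native-skin headers. The task handle, payload layout and helper names (server_loop, client_call) are illustrative and not taken from this file.

/* Hedged sketch only: a server task pends for requests and replies on the
 * incoming flow, while a client sends a message and waits for the reply. */
#include <string.h>
#include <native/task.h>

static RT_TASK server;                    /* assumed created/started elsewhere */

static void server_loop(void *cookie)
{
    char inbuf[64];
    static char ack[] = "ok";
    RT_TASK_MCB req, rep;
    int flowid;

    (void)cookie;

    for (;;) {
        req.data = inbuf;                 /* buffer for the incoming payload */
        req.size = sizeof(inbuf);
        flowid = rt_task_receive(&req, TM_INFINITE);
        if (flowid < 0)
            break;                        /* e.g. interrupted or deleted */

        rep.data = ack;                   /* payload copied back to the sender */
        rep.size = sizeof(ack);
        rt_task_reply(flowid, &rep);
    }
}

static ssize_t client_call(const char *msg, char *repbuf, size_t replen)
{
    RT_TASK_MCB mcb_s, mcb_r;

    mcb_s.opcode = 0;                     /* application-defined request code */
    mcb_s.data = (caddr_t)msg;
    mcb_s.size = strlen(msg) + 1;
    mcb_r.data = repbuf;                  /* where the reply payload lands */
    mcb_r.size = replen;

    /* Returns the reply size on success, a negative error code otherwise. */
    return rt_task_send(&server, &mcb_s, &mcb_r, TM_INFINITE);
}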
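The timer wrappers are thin shims around rt_timer_read(), rt_timer_tsc() and the ns/tick conversion helpers. A hedged sketch of how user code typically combines them; measure_ns() is a made-up name for illustration.

#include <native/timer.h>

/* Time a code section and report the elapsed duration in nanoseconds.
 * The conversion is effectively a no-op when the timer runs in oneshot
 * (nanosecond) mode. Illustrative only. */
SRTIME measure_ns(void (*fn)(void))
{
    RTIME start = rt_timer_read();        /* current time, in clock ticks */

    fn();

    return rt_timer_ticks2ns(rt_timer_read() - start);
}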
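Likewise, the semaphore wrappers that begin at the end of this page back the rt_sem_* services. A hedged sketch of the create/pend/release/delete cycle they serve; the semaphore name and timeout value are illustrative.

#include <native/sem.h>

int guarded_section(void)
{
    RT_SEM sem;
    int err;

    /* Counting semaphore, initial count 1, FIFO wait queue. */
    err = rt_sem_create(&sem, "DemoSem", 1, S_FIFO);
    if (err)
        return err;

    /* Pend with a bounded timeout (interpreted in clock ticks,
       i.e. nanoseconds when the timer is in oneshot mode). */
    err = rt_sem_p(&sem, 1000000);
    if (!err) {
        /* ... protected work ... */
        rt_sem_v(&sem);                   /* release the semaphore */
    }

    rt_sem_delete(&sem);
    return err;
}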
