⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 syscall.c

📁 xenomai 很好的linux实时补丁
💻 C
📖 第 1 页 / 共 5 页
字号:
    /* NOTE(review): this chunk opens inside __rt_cond_create(); the
       function signature lies above the visible portion of the file. */
    char name[XNOBJECT_NAME_LEN];
    RT_COND_PLACEHOLDER ph;
    RT_COND *cond;
    int err;

    /* The placeholder must be writable so the new handle can be copied back. */
    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
	return -EFAULT;

    if (__xn_reg_arg2(regs))
	{
	/* Optional object name: bounded copy, forcibly NUL-terminated. */
	if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg2(regs),sizeof(name)))
	    return -EFAULT;

	__xn_strncpy_from_user(curr,name,(const char __user *)__xn_reg_arg2(regs),sizeof(name) - 1);
	name[sizeof(name) - 1] = '\0';
	}
    else
	*name = '\0';

    cond = (RT_COND *)xnmalloc(sizeof(*cond));

    if (!cond)
	return -ENOMEM;

    err = rt_cond_create(cond,name);

    if (err == 0)
	{
	/* Record the creator's pid: a non-zero cpid later tells the
	   deletion path that this descriptor was kernel-allocated on
	   behalf of a user-space caller and must be xnfree'd. */
	cond->cpid = curr->pid;
	/* Copy back the registry handle to the ph struct. */
	ph.opaque = cond->handle;
	__xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));
	}
    else
	xnfree(cond);

    return err;
}

/*
 * int __rt_cond_bind(RT_COND_PLACEHOLDER *ph,
 *                    const char *name,
 *                    RTIME *timeoutp)
 */

/* Bind to an existing named condition variable: resolve the name into
   a registry handle and copy it back into the caller's placeholder. */
static int __rt_cond_bind (struct task_struct *curr, struct pt_regs *regs)
{
    RT_COND_PLACEHOLDER ph;
    int err;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
	return -EFAULT;

    /* The helper reads name/timeout from the registers and may block
       until the object gets registered. */
    err = __rt_bind_helper(curr,regs,&ph.opaque,XENO_COND_MAGIC,NULL);

    if (!err)
	__xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));

    return err;
}

/*
 * int __rt_cond_delete(RT_COND_PLACEHOLDER *ph)
 */

static int __rt_cond_delete (struct task_struct *curr, struct pt_regs *regs)
{
    RT_COND_PLACEHOLDER ph;
    RT_COND *cond;
    int err;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
	return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    cond = (RT_COND *)xnregistry_fetch(ph.opaque);

    if (!cond)
	return -ESRCH;

    err = rt_cond_delete(cond);

    /* Only free descriptors the kernel allocated for a user-space
       creator (cpid != 0 was set in __rt_cond_create). */
    if (!err && cond->cpid)
	xnfree(cond);

    return err;
}

/*
 * int __rt_cond_wait(RT_COND_PLACEHOLDER *cph,
 *                    RT_MUTEX_PLACEHOLDER *mph,
 *                    RTIME *timeoutp)
 */

static int __rt_cond_wait
(struct task_struct *curr, struct pt_regs *regs){    RT_COND_PLACEHOLDER cph, mph;    RT_MUTEX *mutex;    RT_COND *cond;    RTIME timeout;    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(cph)) ||	!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg2(regs),sizeof(mph)))	return -EFAULT;    __xn_copy_from_user(curr,&cph,(void __user *)__xn_reg_arg1(regs),sizeof(cph));    __xn_copy_from_user(curr,&mph,(void __user *)__xn_reg_arg2(regs),sizeof(mph));    cond = (RT_COND *)xnregistry_fetch(cph.opaque);    if (!cond)	return -ESRCH;    mutex = (RT_MUTEX *)xnregistry_fetch(mph.opaque);    if (!mutex)	return -ESRCH;    __xn_copy_from_user(curr,&timeout,(void __user *)__xn_reg_arg3(regs),sizeof(timeout));    return rt_cond_wait(cond,mutex,timeout);}/* * int __rt_cond_signal(RT_COND_PLACEHOLDER *ph) */static int __rt_cond_signal (struct task_struct *curr, struct pt_regs *regs){    RT_COND_PLACEHOLDER ph;    RT_COND *cond;    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))	return -EFAULT;    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));    cond = (RT_COND *)xnregistry_fetch(ph.opaque);    if (!cond)	return -ESRCH;    return rt_cond_signal(cond);}/* * int __rt_cond_broadcast(RT_COND_PLACEHOLDER *ph) */static int __rt_cond_broadcast (struct task_struct *curr, struct pt_regs *regs){    RT_COND_PLACEHOLDER ph;    RT_COND *cond;    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))	return -EFAULT;    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));    cond = (RT_COND *)xnregistry_fetch(ph.opaque);    if (!cond)	return -ESRCH;    return rt_cond_broadcast(cond);}/* * int __rt_cond_inquire(RT_COND_PLACEHOLDER *ph, *                       RT_COND_INFO *infop) */static int __rt_cond_inquire (struct task_struct *curr, struct pt_regs *regs){    RT_COND_PLACEHOLDER ph;    RT_COND_INFO info;    RT_COND *cond;    int err;    if 
(!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
	return -EFAULT;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg2(regs),sizeof(info)))
	return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    cond = (RT_COND *)xnregistry_fetch(ph.opaque);

    if (!cond)
	return -ESRCH;

    err = rt_cond_inquire(cond,&info);

    /* Only copy the snapshot back on success. */
    if (!err)
	__xn_copy_to_user(curr,(void __user *)__xn_reg_arg2(regs),&info,sizeof(info));

    return err;
}

#else /* !CONFIG_XENO_OPT_NATIVE_COND */

/* Condition variable support compiled out: route every entry point to
   the common "service not available" handler. */
#define __rt_cond_create    __rt_call_not_available
#define __rt_cond_bind      __rt_call_not_available
#define __rt_cond_delete    __rt_call_not_available
#define __rt_cond_wait      __rt_call_not_available
#define __rt_cond_signal    __rt_call_not_available
#define __rt_cond_broadcast __rt_call_not_available
#define __rt_cond_inquire   __rt_call_not_available

#endif /* CONFIG_XENO_OPT_NATIVE_COND */

#ifdef CONFIG_XENO_OPT_NATIVE_QUEUE

/*
 * int __rt_queue_create(RT_QUEUE_PLACEHOLDER *ph,
 *                       const char *name,
 *                       size_t poolsize,
 *                       size_t qlimit,
 *                       int mode)
 */

static int __rt_queue_create (struct task_struct *curr, struct pt_regs *regs)
{
    char name[XNOBJECT_NAME_LEN];
    RT_QUEUE_PLACEHOLDER ph;
    size_t poolsize, qlimit;
    int err, mode;
    RT_QUEUE *q;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
	return -EFAULT;

    if (__xn_reg_arg2(regs))
	{
	/* Optional queue name: bounded copy, forcibly NUL-terminated. */
	if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg2(regs),sizeof(name)))
	    return -EFAULT;

	__xn_strncpy_from_user(curr,name,(const char __user *)__xn_reg_arg2(regs),sizeof(name) - 1);
	name[sizeof(name) - 1] = '\0';
	}
    else
	*name = '\0';

    /* Size of memory pool. */
    poolsize = (size_t)__xn_reg_arg3(regs);
    /* Queue limit. */
    qlimit = (size_t)__xn_reg_arg4(regs);
    /* Creation mode.
*/
    mode = (int)__xn_reg_arg5(regs);

    q = (RT_QUEUE *)xnmalloc(sizeof(*q));

    if (!q)
	return -ENOMEM;

    err = rt_queue_create(q,name,poolsize,qlimit,mode);

    if (err)
	goto free_and_fail;

    /* Non-zero cpid marks the descriptor as kernel-allocated for a
       user-space creator; deletion will xnfree it. */
    q->cpid = curr->pid;

    /* Copy back the registry handle to the ph struct. */
    ph.opaque = q->handle;
    ph.opaque2 = &q->bufpool;
    ph.mapsize = xnheap_size(&q->bufpool);
    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));

    return 0;

 free_and_fail:

    xnfree(q);

    return err;
}

/*
 * int __rt_queue_bind(RT_QUEUE_PLACEHOLDER *ph,
 *                     const char *name,
 *                     RTIME *timeoutp)
 */

static int __rt_queue_bind (struct task_struct *curr, struct pt_regs *regs)
{
    RT_QUEUE_PLACEHOLDER ph;
    RT_QUEUE *q;
    int err;
    spl_t s;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
	return -EFAULT;

    xnlock_get_irqsave(&nklock,s);

    err = __rt_bind_helper(curr,regs,&ph.opaque,XENO_QUEUE_MAGIC,(void **)&q);

    if (err)
	goto unlock_and_exit;

    /* Snapshot the pool address/size while still holding nklock, so
       the descriptor cannot be deleted under our feet. */
    ph.opaque2 = &q->bufpool;
    ph.mapsize = xnheap_size(&q->bufpool);

    xnlock_put_irqrestore(&nklock,s);

    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));

    /* We might need to migrate to secondary mode now for mapping the
       pool memory to user-space; since this syscall is conforming, we
       might have entered it in primary mode.
*/    if (xnpod_primary_p())	xnshadow_relax(0);    return err; unlock_and_exit:    xnlock_put_irqrestore(&nklock,s);    return err;}/* * int __rt_queue_delete(RT_QUEUE_PLACEHOLDER *ph) */static int __rt_queue_delete (struct task_struct *curr, struct pt_regs *regs){    RT_QUEUE_PLACEHOLDER ph;    RT_QUEUE *q;    int err;    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))	return -EFAULT;    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));    q = (RT_QUEUE *)xnregistry_fetch(ph.opaque);    if (!q)	err = -ESRCH;    else	{	err = rt_queue_delete(q); /* Callee will check the queue				     descriptor for validity again. */	if (!err && q->cpid)	    xnfree(q);	}    return err;}/* * int __rt_queue_alloc(RT_QUEUE_PLACEHOLDER *ph, *                     size_t size, *                     void **bufp) */static int __rt_queue_alloc (struct task_struct *curr, struct pt_regs *regs){    RT_QUEUE_PLACEHOLDER ph;    size_t size;    RT_QUEUE *q;    int err = 0;    void *buf;    spl_t s;    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))	return -EFAULT;    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg3(regs),sizeof(buf)))	return -EFAULT;    xnlock_get_irqsave(&nklock,s);    q = (RT_QUEUE *)xnregistry_fetch(ph.opaque);    if (!q)	{	err = -ESRCH;	goto unlock_and_exit;	}    size = (size_t)__xn_reg_arg2(regs);    buf = rt_queue_alloc(q,size);    /* Convert the kernel-based address of buf to the equivalent area       into the caller's address space. 
*/
    if (buf)
	buf = ph.mapbase + xnheap_mapped_offset(&q->bufpool,buf);
    else
	err = -ENOMEM;

 unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    /* The buffer pointer is copied back on both the success and the
       failure paths. */
    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg3(regs),&buf,sizeof(buf));

    return err;
}

/*
 * int __rt_queue_free(RT_QUEUE_PLACEHOLDER *ph,
 *                     void *buf)
 */

static int __rt_queue_free (struct task_struct *curr, struct pt_regs *regs)
{
    RT_QUEUE_PLACEHOLDER ph;
    void __user *buf;
    RT_QUEUE *q;
    int err;
    spl_t s;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
	return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    buf = (void __user *)__xn_reg_arg2(regs);

    xnlock_get_irqsave(&nklock,s);

    q = (RT_QUEUE *)xnregistry_fetch(ph.opaque);

    if (!q)
	{
	err = -ESRCH;
	goto unlock_and_exit;
	}

    /* Convert the caller-based address of buf to the equivalent area
       into the kernel address space. */

    if (buf)
	{
	buf = xnheap_mapped_address(&q->bufpool,(caddr_t)buf - ph.mapbase);
	err = rt_queue_free(q,buf);
	}
    else
	err = -EINVAL;

 unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}

/*
 * int __rt_queue_send(RT_QUEUE_PLACEHOLDER *ph,
 *                     void *buf,
 *                     size_t size,
 *                     int mode)
 */

static int __rt_queue_send (struct task_struct *curr, struct pt_regs *regs)
{
    RT_QUEUE_PLACEHOLDER ph;
    void __user *buf;
    int err, mode;
    RT_QUEUE *q;
    size_t size;
    spl_t s;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
	return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    /* Buffer to send. */
    buf = (void __user *)__xn_reg_arg2(regs);
    /* Message's payload size. */
    size = (size_t)__xn_reg_arg3(regs);
    /* Sending mode.
*/
    mode = (int)__xn_reg_arg4(regs);

    xnlock_get_irqsave(&nklock,s);

    q = (RT_QUEUE *)xnregistry_fetch(ph.opaque);

    if (!q)
	{
	err = -ESRCH;
	goto unlock_and_exit;
	}

    /* Convert the caller-based address of buf to the equivalent area
       into the kernel address space. */

    if (buf)
	{
	buf = xnheap_mapped_address(&q->bufpool,(caddr_t)buf - ph.mapbase);
	err = rt_queue_send(q,buf,size,mode);
	}
    else
	err = -EINVAL;

 unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}

/*
 * int __rt_queue_recv(RT_QUEUE_PLACEHOLDER *ph,
 *                     void **bufp,
 *                     RTIME *timeoutp)
 */

static int __rt_queue_recv (struct task_struct *curr, struct pt_regs *regs)
{
    RT_QUEUE_PLACEHOLDER ph;
    RTIME timeout;
    RT_QUEUE *q;
    void *buf;
    int err;
    spl_t s;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
	return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg2(regs),sizeof(buf)))
	return -EFAULT;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg3(regs),sizeof(timeout)))
	return -EFAULT;

    __xn_copy_from_user(curr,&timeout,(void __user *)__xn_reg_arg3(regs),sizeof(timeout));

    xnlock_get_irqsave(&nklock,s);

    q = (RT_QUEUE *)xnregistry_fetch(ph.opaque);

    if (!q)
	{
	err = -ESRCH;
	goto unlock_and_exit;
	}

    err = (int)rt_queue_recv(q,&buf,timeout);
    /* NOTE(review): the visible chunk ends here; the remainder of
       __rt_queue_recv (buffer address conversion, copy-back to user
       space, unlock and return) lies past this point in the file. */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -