syscall.c
    /* Convert the caller-based address of buf to the equivalent area
       into the kernel address space. */

    if (err >= 0) {
        /* Convert the kernel-based address of buf to the equivalent area
           into the caller's address space. */
        buf = ph.mapbase + xnheap_mapped_offset(&q->bufpool,buf);
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg2(regs),&buf,sizeof(buf));
    }

unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}

/*
 * int __rt_queue_inquire(RT_QUEUE_PLACEHOLDER *ph,
 *                        RT_QUEUE_INFO *infop)
 */

static int __rt_queue_inquire (struct task_struct *curr, struct pt_regs *regs)
{
    RT_QUEUE_PLACEHOLDER ph;
    RT_QUEUE_INFO info;
    RT_QUEUE *q;
    int err;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg2(regs),sizeof(info)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    q = (RT_QUEUE *)xnregistry_fetch(ph.opaque);

    if (!q)
        return -ESRCH;

    err = rt_queue_inquire(q,&info);

    if (!err)
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg2(regs),&info,sizeof(info));

    return err;
}

#else /* !CONFIG_XENO_OPT_NATIVE_QUEUE */

#define __rt_queue_create   __rt_call_not_available
#define __rt_queue_bind     __rt_call_not_available
#define __rt_queue_delete   __rt_call_not_available
#define __rt_queue_alloc    __rt_call_not_available
#define __rt_queue_free     __rt_call_not_available
#define __rt_queue_send     __rt_call_not_available
#define __rt_queue_recv     __rt_call_not_available
#define __rt_queue_inquire  __rt_call_not_available

#endif /* CONFIG_XENO_OPT_NATIVE_QUEUE */

#ifdef CONFIG_XENO_OPT_NATIVE_HEAP

/*
 * int __rt_heap_create(RT_HEAP_PLACEHOLDER *ph,
 *                      const char *name,
 *                      size_t heapsize,
 *                      int mode)
 */

static int __rt_heap_create (struct task_struct *curr, struct pt_regs *regs)
{
    char name[XNOBJECT_NAME_LEN];
    RT_HEAP_PLACEHOLDER ph;
    size_t heapsize;
    int err, mode;
    RT_HEAP *heap;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    if (__xn_reg_arg2(regs)) {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg2(regs),sizeof(name)))
            return -EFAULT;

        __xn_strncpy_from_user(curr,name,(const char __user *)__xn_reg_arg2(regs),sizeof(name) - 1);
        name[sizeof(name) - 1] = '\0';
    } else
        *name = '\0';

    /* Size of heap space. */
    heapsize = (size_t)__xn_reg_arg3(regs);
    /* Creation mode. */
    mode = (int)__xn_reg_arg4(regs);

    heap = (RT_HEAP *)xnmalloc(sizeof(*heap));

    if (!heap)
        return -ENOMEM;

    err = rt_heap_create(heap,name,heapsize,mode);

    if (err)
        goto free_and_fail;

    heap->cpid = curr->pid;

    /* Copy back the registry handle to the ph struct. */
    ph.opaque = heap->handle;
    ph.opaque2 = &heap->heap_base;
    ph.mapsize = xnheap_size(&heap->heap_base);
    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));

    return 0;

free_and_fail:

    xnfree(heap);

    return err;
}

/*
 * int __rt_heap_bind(RT_HEAP_PLACEHOLDER *ph,
 *                    const char *name,
 *                    RTIME *timeoutp)
 */

static int __rt_heap_bind (struct task_struct *curr, struct pt_regs *regs)
{
    RT_HEAP_PLACEHOLDER ph;
    RT_HEAP *heap;
    int err;
    spl_t s;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    xnlock_get_irqsave(&nklock,s);

    err = __rt_bind_helper(curr,regs,&ph.opaque,XENO_HEAP_MAGIC,(void **)&heap);

    if (err)
        goto unlock_and_exit;

    ph.opaque2 = &heap->heap_base;
    ph.mapsize = xnheap_size(&heap->heap_base);

    xnlock_put_irqrestore(&nklock,s);

    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));

    /* We might need to migrate to secondary mode now for mapping the
       heap memory to user-space; since this syscall is conforming, we
       might have entered it in primary mode. */
    if (xnpod_primary_p())
        xnshadow_relax(0);

    return err;

unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}

/*
 * int __rt_heap_delete(RT_HEAP_PLACEHOLDER *ph)
 */

static int __rt_heap_delete (struct task_struct *curr, struct pt_regs *regs)
{
    RT_HEAP_PLACEHOLDER ph;
    RT_HEAP *heap;
    int err;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    heap = (RT_HEAP *)xnregistry_fetch(ph.opaque);

    if (!heap)
        err = -ESRCH;
    else {
        /* Callee will check the heap descriptor for validity again. */
        err = rt_heap_delete(heap);

        if (!err && heap->cpid)
            xnfree(heap);
    }

    return err;
}

/*
 * int __rt_heap_alloc(RT_HEAP_PLACEHOLDER *ph,
 *                     size_t size,
 *                     RTIME timeout,
 *                     void **bufp)
 */

static int __rt_heap_alloc (struct task_struct *curr, struct pt_regs *regs)
{
    RT_HEAP_PLACEHOLDER ph;
    RT_HEAP *heap;
    RTIME timeout;
    size_t size;
    int err = 0;
    void *buf;
    spl_t s;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg3(regs),sizeof(timeout)))
        return -EFAULT;

    __xn_copy_from_user(curr,&timeout,(void __user *)__xn_reg_arg3(regs),sizeof(timeout));

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg4(regs),sizeof(buf)))
        return -EFAULT;

    xnlock_get_irqsave(&nklock,s);

    heap = (RT_HEAP *)xnregistry_fetch(ph.opaque);

    if (!heap) {
        err = -ESRCH;
        goto unlock_and_exit;
    }

    size = (size_t)__xn_reg_arg2(regs);

    err = rt_heap_alloc(heap,size,timeout,&buf);

    /* Convert the kernel-based address of buf to the equivalent area
       into the caller's address space. */

    if (!err)
        buf = ph.mapbase + xnheap_mapped_offset(&heap->heap_base,buf);

unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    __xn_copy_to_user(curr,(void __user *)__xn_reg_arg4(regs),&buf,sizeof(buf));

    return err;
}

/*
 * int __rt_heap_free(RT_HEAP_PLACEHOLDER *ph,
 *                    void *buf)
 */

static int __rt_heap_free (struct task_struct *curr, struct pt_regs *regs)
{
    RT_HEAP_PLACEHOLDER ph;
    void __user *buf;
    RT_HEAP *heap;
    int err;
    spl_t s;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    buf = (void __user *)__xn_reg_arg2(regs);

    xnlock_get_irqsave(&nklock,s);

    heap = (RT_HEAP *)xnregistry_fetch(ph.opaque);

    if (!heap) {
        err = -ESRCH;
        goto unlock_and_exit;
    }

    /* Convert the caller-based address of buf to the equivalent area
       into the kernel address space. */

    if (buf) {
        buf = xnheap_mapped_address(&heap->heap_base,(caddr_t)buf - ph.mapbase);
        err = rt_heap_free(heap,buf);
    } else
        err = -EINVAL;

unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}

/*
 * int __rt_heap_inquire(RT_HEAP_PLACEHOLDER *ph,
 *                       RT_HEAP_INFO *infop)
 */

static int __rt_heap_inquire (struct task_struct *curr, struct pt_regs *regs)
{
    RT_HEAP_PLACEHOLDER ph;
    RT_HEAP_INFO info;
    RT_HEAP *heap;
    int err;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg2(regs),sizeof(info)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    heap = (RT_HEAP *)xnregistry_fetch(ph.opaque);

    if (!heap)
        return -ESRCH;

    err = rt_heap_inquire(heap,&info);

    if (!err)
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg2(regs),&info,sizeof(info));

    return err;
}

#else /* !CONFIG_XENO_OPT_NATIVE_HEAP */

#define __rt_heap_create   __rt_call_not_available
#define __rt_heap_bind     __rt_call_not_available
#define __rt_heap_delete   __rt_call_not_available
#define __rt_heap_alloc    __rt_call_not_available
#define __rt_heap_free     __rt_call_not_available
#define __rt_heap_inquire  __rt_call_not_available

#endif /* CONFIG_XENO_OPT_NATIVE_HEAP */

#ifdef CONFIG_XENO_OPT_NATIVE_ALARM

void rt_alarm_handler (RT_ALARM *alarm, void *cookie)
{
    /* Wake up all tasks waiting for the alarm. */
    xnsynch_flush(&alarm->synch_base,0);
}

EXPORT_SYMBOL(rt_alarm_handler);

/*
 * int __rt_alarm_create(RT_ALARM_PLACEHOLDER *ph,
 *                       const char *name)
 */

static int __rt_alarm_create (struct task_struct *curr, struct pt_regs *regs)
{
    char name[XNOBJECT_NAME_LEN];
    RT_ALARM_PLACEHOLDER ph;
    RT_ALARM *alarm;
    int err;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    if (__xn_reg_arg2(regs)) {
        if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg2(regs),sizeof(name)))
            return -EFAULT;

        __xn_strncpy_from_user(curr,name,(const char __user *)__xn_reg_arg2(regs),sizeof(name) - 1);
        name[sizeof(name) - 1] = '\0';
    } else
        *name = '\0';

    alarm = (RT_ALARM *)xnmalloc(sizeof(*alarm));

    if (!alarm)
        return -ENOMEM;

    err = rt_alarm_create(alarm,name,&rt_alarm_handler,NULL);

    if (err == 0) {
        alarm->cpid = curr->pid;
        /* Copy back the registry handle to the ph struct. */
        ph.opaque = alarm->handle;
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg1(regs),&ph,sizeof(ph));
    } else
        xnfree(alarm);

    return err;
}

/*
 * int __rt_alarm_delete(RT_ALARM_PLACEHOLDER *ph)
 */

static int __rt_alarm_delete (struct task_struct *curr, struct pt_regs *regs)
{
    RT_ALARM_PLACEHOLDER ph;
    RT_ALARM *alarm;
    int err;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    alarm = (RT_ALARM *)xnregistry_fetch(ph.opaque);

    if (!alarm)
        return -ESRCH;

    err = rt_alarm_delete(alarm);

    if (!err && alarm->cpid)
        xnfree(alarm);

    return err;
}

/*
 * int __rt_alarm_start(RT_ALARM_PLACEHOLDER *ph,
 *                      RTIME value,
 *                      RTIME interval)
 */

static int __rt_alarm_start (struct task_struct *curr, struct pt_regs *regs)
{
    RT_ALARM_PLACEHOLDER ph;
    RTIME value, interval;
    RT_ALARM *alarm;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    alarm = (RT_ALARM *)xnregistry_fetch(ph.opaque);

    if (!alarm)
        return -ESRCH;

    __xn_copy_from_user(curr,&value,(void __user *)__xn_reg_arg2(regs),sizeof(value));
    __xn_copy_from_user(curr,&interval,(void __user *)__xn_reg_arg3(regs),sizeof(interval));

    return rt_alarm_start(alarm,value,interval);
}

/*
 * int __rt_alarm_stop(RT_ALARM_PLACEHOLDER *ph)
 */

static int __rt_alarm_stop (struct task_struct *curr, struct pt_regs *regs)
{
    RT_ALARM_PLACEHOLDER ph;
    RT_ALARM *alarm;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    alarm = (RT_ALARM *)xnregistry_fetch(ph.opaque);

    if (!alarm)
        return -ESRCH;

    return rt_alarm_stop(alarm);
}

/*
 * int __rt_alarm_wait(RT_ALARM_PLACEHOLDER *ph)
 */

static int __rt_alarm_wait (struct task_struct *curr, struct pt_regs *regs)
{
    RT_TASK *task = xeno_current_task();
    RT_ALARM_PLACEHOLDER ph;
    RT_ALARM *alarm;
    int err = 0;
    spl_t s;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    xnlock_get_irqsave(&nklock,s);

    alarm = xeno_h2obj_validate(xnregistry_fetch(ph.opaque),XENO_ALARM_MAGIC,RT_ALARM);

    if (!alarm) {
        err = xeno_handle_error(alarm,XENO_ALARM_MAGIC,RT_ALARM);
        goto unlock_and_exit;
    }

    if (xnthread_base_priority(&task->thread_base) != XNCORE_IRQ_PRIO)
        /* Renice the waiter above all regular tasks if needed. */
        xnpod_renice_thread(&task->thread_base,XNCORE_IRQ_PRIO);

    xnsynch_sleep_on(&alarm->synch_base,XN_INFINITE);

    if (xnthread_test_flags(&task->thread_base,XNRMID))
        err = -EIDRM;   /* Alarm deleted while pending. */
    else if (xnthread_test_flags(&task->thread_base,XNBREAK))
        err = -EINTR;   /* Unblocked. */

unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}

/*
 * int __rt_alarm_inquire(RT_ALARM_PLACEHOLDER *ph,
 *                        RT_ALARM_INFO *infop)
 */

static int __rt_alarm_inquire (struct task_struct *curr, struct pt_regs *regs)
{
    RT_ALARM_PLACEHOLDER ph;
    RT_ALARM_INFO info;
    RT_ALARM *alarm;
    int err;

    if (!__xn_access_ok(curr,VERIFY_READ,__xn_reg_arg1(regs),sizeof(ph)))
        return -EFAULT;

    if (!__xn_access_ok(curr,VERIFY_WRITE,__xn_reg_arg2(regs),sizeof(info)))
        return -EFAULT;

    __xn_copy_from_user(curr,&ph,(void __user *)__xn_reg_arg1(regs),sizeof(ph));

    alarm = (RT_ALARM *)xnregistry_fetch(ph.opaque);

    if (!alarm)
        return -ESRCH;

    err = rt_alarm_inquire(alarm,&info);

    if (!err)
        __xn_copy_to_user(curr,(void __user *)__xn_reg_arg2(regs),&info,sizeof(info));

    return err;
}
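The handlers above only marshal arguments between user space and the kernel-side rt_heap_* services; applications reach them through the native skin library. The sketch below is not part of syscall.c: it is a hypothetical user-space counterpart, assuming the documented Xenomai 2.x native heap API (rt_heap_create/alloc/free/delete, built against libnative), with made-up names and sizes. It exercises the same path handled by __rt_heap_create(), __rt_heap_alloc(), __rt_heap_free() and __rt_heap_delete() above, where the kernel translates each block between its kernel address and the caller's heap mapping (ph.mapbase) before copying the pointer back.

/* Hypothetical user-space counterpart (assumes Xenomai 2.x libnative). */
#include <stdio.h>
#include <sys/mman.h>
#include <native/heap.h>
#include <native/timer.h>

int main(void)
{
    RT_HEAP heap;   /* user-side descriptor, i.e. the placeholder the syscalls fill in */
    void *block;
    int err;

    mlockall(MCL_CURRENT | MCL_FUTURE);   /* usual requirement for Xenomai user tasks */

    /* H_MAPPABLE requests a heap that can be mapped into this process;
       ph.mapsize/ph.mapbase in the kernel code describe that mapping. */
    err = rt_heap_create(&heap, "demo_heap", 8192, H_MAPPABLE);
    if (err)
        return 1;

    /* The kernel handler rewrites the block pointer from a kernel address
       to an address inside our mapping before returning it. */
    err = rt_heap_alloc(&heap, 1024, TM_INFINITE, &block);
    if (!err) {
        printf("block at %p\n", block);
        rt_heap_free(&heap, block);
    }

    rt_heap_delete(&heap);
    return 0;
}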
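Similarly, rt_alarm_handler() above simply flushes the alarm's synchronization object on each expiry, and __rt_alarm_wait() blocks the calling shadow task on that object (boosted to XNCORE_IRQ_PRIO) until the next expiry. A minimal user-space sketch of that pattern, again assuming the Xenomai 2.x native API and illustrative names, priorities and periods, might look like this:

/* Hypothetical user-space use of the alarm wait path (assumes Xenomai 2.x libnative). */
#include <stdio.h>
#include <sys/mman.h>
#include <native/task.h>
#include <native/alarm.h>

int main(void)
{
    RT_TASK main_task;
    RT_ALARM alarm;
    int i, err;

    mlockall(MCL_CURRENT | MCL_FUTURE);

    /* Turn the calling Linux thread into a Xenomai shadow task so it may
       block on the alarm's synch object in primary mode. */
    rt_task_shadow(&main_task, "alarm_waiter", 50, 0);

    err = rt_alarm_create(&alarm, "demo_alarm");
    if (err)
        return 1;

    /* First expiry after 1 ms, then every 1 ms (nanosecond values assumed,
       i.e. the default aperiodic timebase). */
    rt_alarm_start(&alarm, 1000000, 1000000);

    for (i = 0; i < 10; i++) {
        /* Each expiry runs rt_alarm_handler() in the kernel, which flushes
           synch_base and releases this wait. */
        if (rt_alarm_wait(&alarm))
            break;
    }

    rt_alarm_delete(&alarm);
    return 0;
}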