mutex.c
/**
 * @file
 * This file is part of the Xenomai project.
 *
 * @note Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * \ingroup mutex
 */

/*!
 * \ingroup native
 * \defgroup mutex Mutex services.
 *
 * Mutex services.
 *
 * A mutex is a MUTual EXclusion object, and is useful for protecting
 * shared data structures from concurrent modifications, and
 * implementing critical sections and monitors.
 *
 * A mutex has two possible states: unlocked (not owned by any task),
 * and locked (owned by one task). A mutex can never be owned by two
 * different tasks simultaneously. A task attempting to lock a mutex
 * that is already locked by another task is blocked until the latter
 * unlocks the mutex first.
 *
 * Xenomai mutex services enforce a priority inheritance protocol in
 * order to solve priority inversions.
 *
 *@{*/

#include <nucleus/pod.h>
#include <nucleus/registry.h>
#include <native/task.h>
#include <native/mutex.h>

#ifdef CONFIG_XENO_EXPORT_REGISTRY

static int __mutex_read_proc (char *page,
                              char **start,
                              off_t off,
                              int count,
                              int *eof,
                              void *data)
{
    RT_MUTEX *mutex = (RT_MUTEX *)data;
    char *p = page;
    int len;
    spl_t s;

    xnlock_get_irqsave(&nklock,s);

    if (mutex->owner) {
        xnpholder_t *holder;

        /* Locked mutex -- dump owner and waiters, if any. */

        p += sprintf(p,"=locked by %s depth=%d\n",
                     xnthread_name(&mutex->owner->thread_base),
                     mutex->lockcnt);

        holder = getheadpq(xnsynch_wait_queue(&mutex->synch_base));

        while (holder) {
            xnthread_t *sleeper = link2thread(holder,plink);
            p += sprintf(p,"+%s\n",xnthread_name(sleeper));
            holder = nextpq(xnsynch_wait_queue(&mutex->synch_base),holder);
        }
    } else
        /* Mutex unlocked. */
        p += sprintf(p,"=unlocked\n");

    xnlock_put_irqrestore(&nklock,s);

    len = (p - page) - off;

    if (len <= off + count)
        *eof = 1;

    *start = page + off;

    if (len > count)
        len = count;

    if (len < 0)
        len = 0;

    return len;
}

extern xnptree_t __native_ptree;

static xnpnode_t __mutex_pnode = {
    .dir = NULL,
    .type = "mutexes",
    .entries = 0,
    .read_proc = &__mutex_read_proc,
    .write_proc = NULL,
    .root = &__native_ptree,
};

#elif defined(CONFIG_XENO_OPT_REGISTRY)

static xnpnode_t __mutex_pnode = {
    .type = "mutexes"
};

#endif /* CONFIG_XENO_EXPORT_REGISTRY */
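/*
 * Illustrative usage sketch -- not part of the original mutex.c. It
 * shows how a caller would guard a critical section with the services
 * documented in this file; the identifiers __demo_lock and
 * __demo_update() are hypothetical, while RT_MUTEX, rt_mutex_lock(),
 * rt_mutex_unlock() and TM_INFINITE come from the native skin API
 * declared in <native/mutex.h>. Kept inside #if 0 so it does not
 * affect the build.
 */
#if 0
static RT_MUTEX __demo_lock;    /* created elsewhere with rt_mutex_create() */

static void __demo_update (void)
{
    /* Block until the mutex is granted; while it is owned, the
       priority inheritance protocol guards against priority
       inversion. */
    if (rt_mutex_lock(&__demo_lock,TM_INFINITE) != 0)
        return;

    /* ... modify the shared data here ... */

    /* Each successful rt_mutex_lock() must be paired with an
       rt_mutex_unlock(); a nesting count makes recursion safe. */
    rt_mutex_unlock(&__demo_lock);
}
#endif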
/**
 * @fn int rt_mutex_create(RT_MUTEX *mutex,const char *name)
 *
 * @brief Create a mutex.
 *
 * Create a mutual exclusion object that allows multiple tasks to
 * synchronize access to a shared resource. A mutex is left in an
 * unlocked state after creation.
 *
 * @param mutex The address of a mutex descriptor Xenomai will use to
 * store the mutex-related data. This descriptor must always be valid
 * while the mutex is active; therefore it must be allocated in
 * permanent memory.
 *
 * @param name An ASCII string standing for the symbolic name of the
 * mutex. When non-NULL and non-empty, this string is copied to a safe
 * place into the descriptor, and passed to the registry package if
 * enabled for indexing the created mutex.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -ENOMEM is returned if the system fails to get enough dynamic
 * memory from the global real-time heap in order to register the
 * mutex.
 *
 * - -EEXIST is returned if the @a name is already in use by some
 * registered object.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - Kernel-based task
 * - User-space task
 *
 * Rescheduling: possible.
 */

int rt_mutex_create (RT_MUTEX *mutex,
                     const char *name)
{
    int err = 0;

    if (xnpod_asynch_p())
        return -EPERM;

    xnsynch_init(&mutex->synch_base,XNSYNCH_PRIO|XNSYNCH_PIP);
    mutex->handle = 0;  /* i.e. (still) unregistered mutex. */
    mutex->magic = XENO_MUTEX_MAGIC;
    mutex->owner = NULL;
    mutex->lockcnt = 0;
    xnobject_copy_name(mutex->name,name);

#if defined(__KERNEL__) && defined(CONFIG_XENO_OPT_PERVASIVE)
    mutex->cpid = 0;
#endif /* __KERNEL__ && CONFIG_XENO_OPT_PERVASIVE */

#ifdef CONFIG_XENO_OPT_REGISTRY
    /* <!> Since xnregistry_enter() may reschedule, only register
       complete objects, so that the registry cannot return handles to
       half-baked objects... */

    if (name) {
        xnpnode_t *pnode = &__mutex_pnode;

        if (!*name) {
            /* Since this is an anonymous object (empty name on entry)
               from user-space, it gets registered under a unique
               internal name but is not exported through /proc. */
            xnobject_create_name(mutex->name,sizeof(mutex->name),(void*)mutex);
            pnode = NULL;
        }

        err = xnregistry_enter(mutex->name,mutex,&mutex->handle,pnode);

        if (err)
            rt_mutex_delete(mutex);
    }
#endif /* CONFIG_XENO_OPT_REGISTRY */

    return err;
}

/**
 * @fn int rt_mutex_delete(RT_MUTEX *mutex)
 *
 * @brief Delete a mutex.
 *
 * Destroy a mutex and release all the tasks currently pending on it.
 * A mutex exists in the system since rt_mutex_create() has been
 * called to create it, so this service must be called in order to
 * destroy it afterwards.
 *
 * @param mutex The descriptor address of the affected mutex.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a mutex is not a mutex descriptor.
 *
 * - -EIDRM is returned if @a mutex is a deleted mutex descriptor.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - Kernel-based task
 * - User-space task
 *
 * Rescheduling: possible.
 */

int rt_mutex_delete (RT_MUTEX *mutex)
{
    int err = 0, rc;
    spl_t s;

    if (xnpod_asynch_p())
        return -EPERM;

    xnlock_get_irqsave(&nklock,s);

    mutex = xeno_h2obj_validate(mutex,XENO_MUTEX_MAGIC,RT_MUTEX);

    if (!mutex) {
        err = xeno_handle_error(mutex,XENO_MUTEX_MAGIC,RT_MUTEX);
        goto unlock_and_exit;
    }

    rc = xnsynch_destroy(&mutex->synch_base);

#ifdef CONFIG_XENO_OPT_REGISTRY
    if (mutex->handle)
        xnregistry_remove(mutex->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

    xeno_mark_deleted(mutex);

    if (rc == XNSYNCH_RESCHED)
        /* Some task has been woken up as a result of the deletion:
           reschedule now. */
        xnpod_schedule();

 unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}
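/*
 * Illustrative lifetime sketch -- not part of the original mutex.c. It
 * mirrors the rt_mutex_create()/rt_mutex_delete() contract documented
 * above: the descriptor lives in permanent (static) storage, creation
 * errors are propagated, and deletion releases any task still pending
 * on the mutex. The identifiers __demo_mutex, __demo_module_init() and
 * __demo_module_exit() are hypothetical. Kept inside #if 0 so it does
 * not affect the build.
 */
#if 0
static RT_MUTEX __demo_mutex;   /* must stay valid while the mutex is active */

static int __demo_module_init (void)
{
    /* A non-empty name also indexes the mutex in the registry:
       -EEXIST means the name is already taken, -ENOMEM that the
       registration could not get memory, -EPERM that we were called
       from an asynchronous context. */
    return rt_mutex_create(&__demo_mutex,"demo_mutex");
}

static void __demo_module_exit (void)
{
    /* Releases any task pending on the mutex, then marks the
       descriptor as deleted. */
    rt_mutex_delete(&__demo_mutex);
}
#endif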
/**
 * @fn int rt_mutex_lock(RT_MUTEX *mutex, RTIME timeout)
 *
 * @brief Acquire a mutex.
 *
 * Attempt to lock a mutex. The calling task is blocked until the
 * mutex is available, in which case it is locked on behalf of the
 * caller before this service returns. Mutexes have an ownership
 * property, which means that their current owner is tracked. Xenomai
 * mutexes are implicitly recursive and implement the priority
 * inheritance protocol.
 *
 * Since a nested locking count is maintained for the current owner,
 * rt_mutex_lock() and rt_mutex_unlock() must be used in pairs.
 *
 * Tasks pend on mutexes by priority order.
 *
 * @param mutex The descriptor address of the mutex to acquire.
 *
 * @param timeout The number of clock ticks to wait for the mutex to
 * be available to the calling task (see note). Passing TM_INFINITE
 * causes the caller to block indefinitely until the mutex is
 * available. Passing TM_NONBLOCK causes the service to return
 * immediately without waiting if the mutex is still locked by another
 * task.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a mutex is not a mutex descriptor.
 *
 * - -EIDRM is returned if @a mutex is a deleted mutex descriptor,
 * including if the deletion occurred while the caller was sleeping on
 * it.
 *
 * - -EWOULDBLOCK is returned if @a timeout is equal to TM_NONBLOCK
 * and the mutex is not immediately available.
 */
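/*
 * Illustrative acquisition sketch -- not part of the original mutex.c.
 * It exercises the timeout forms documented for rt_mutex_lock(): a
 * non-blocking attempt first, then an unbounded wait as a fallback.
 * The identifier __demo_try_then_wait() is hypothetical. Kept inside
 * #if 0 so it does not affect the build.
 */
#if 0
static void __demo_try_then_wait (RT_MUTEX *mutex)
{
    int err;

    /* First try a non-blocking acquisition... */
    err = rt_mutex_lock(mutex,TM_NONBLOCK);

    if (err == -EWOULDBLOCK)
        /* ...another task owns it: block until it is released. A
           finite tick count could be passed instead for a bounded
           wait. */
        err = rt_mutex_lock(mutex,TM_INFINITE);

    if (!err) {
        /* ... critical section ... */
        rt_mutex_unlock(mutex);
    }
}
#endif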