/* synchronization.c */
/*
** Copyright (C) 2006 Tamir Michael
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <XC167.h>
#include "synchronization.h"
#include "system_messages.h"
#include "rtos_services.h"
extern int16s g_running_task ;
extern task_info g_tcb[MAX_TASKS] ;
extern prio_array *s_primary_ready_to_run_tasks ;
static int16s s_number_of_mutexes = 0 ;
static int16s s_number_of_semaphores = 0 ;
static mutex_info s_mutexes[MAX_MUTEXES] ; // system Mutexs
static semaphore_info s_semaphores[MAX_SEMAPHORES] ; // system Semaphores
// Makes a blocked task schedulable again: enqueues it on the ready-to-run
// queue for 'a_priority', sets the matching bit in the priority bitmap and
// marks the TCB eTaskReady. Returns the result of queue_enqueue().
// Caller must hold interrupts disabled (manipulates shared scheduler state).
static int16s scheduler_declare_task_ready(int16s a_task_index, int8u a_priority)
{
int16s l_result = queue_enqueue(&s_primary_ready_to_run_tasks->priority_queues[a_priority], a_task_index) ;
// BUG FIX: the bitmap bit must correspond to the queue the task was just
// enqueued on. The original set the bit from g_tcb[a_task_index].priority;
// had a caller ever passed a priority different from the TCB's, the bitmap
// and the queue would disagree and the scheduler could overlook the task.
s_primary_ready_to_run_tasks->priority_bitmap |= 1 << a_priority ;
g_tcb[a_task_index].status = eTaskReady ;
return l_result ;
}
// Looks up an allocated mutex by its id. On a hit, stores the table slot in
// '*a_index' and returns 1; returns 0 when no allocated mutex carries 'a_id'.
static int8u find_mutex(int16s a_id, int16u *a_index)
{
int16u l_slot ;
for (l_slot = 0; l_slot < MAX_MUTEXES; l_slot++)
{
if ( (s_mutexes[l_slot].id == a_id) && (s_mutexes[l_slot].state != eMutexNotAllocated) )
{
*a_index = l_slot ;
return 1 ;
}
}
return 0 ;
}
// returns the index of the next free mutex
static int16s find_next_available_mutex_index()
{
int16s l_mutex ;
for (l_mutex = 0; l_mutex < MAX_MUTEXES; l_mutex++)
{
if (s_mutexes[l_mutex].state == eMutexNotAllocated)
{
return l_mutex ;
}
}
return ERR_MUTEX_NOT_FOUND ;
}
// Looks up an allocated semaphore by its id. On a hit, stores the table slot
// in '*a_index' and returns 1; returns 0 when no allocated semaphore has 'a_id'.
static int8u find_semaphore(int16s a_id, int16u *a_index)
{
int16u l_slot ;
for (l_slot = 0; l_slot < MAX_SEMAPHORES; l_slot++)
{
if ( (s_semaphores[l_slot].id == a_id) && (s_semaphores[l_slot].state != eSemaphoreNotAllocated) )
{
*a_index = l_slot ;
return 1 ;
}
}
return 0 ;
}
// returns the index of the next free semaphore
static int16s find_next_available_semaphore_index()
{
int16s l_index ;
semaphore_info *lp_semaphore ;
for (l_index = 0; l_index < MAX_MUTEXES; l_index++)
{
lp_semaphore = s_semaphores + l_index ;
if (lp_semaphore->state == eSemaphoreNotAllocated)
{
return l_index ;
}
}
return ERR_SEMAPHORE_NOT_FOUND ;
}
// Creates a counting semaphore. 'a_max_locks_allowed' is how many tasks may
// hold it simultaneously before further lockers must block/wait.
// Returns the new semaphore's id, ERR_DEADLOCK_RISK for a count below 1, or
// ERR_MAX_SEMAPHORES_ALLOCATED / ERR_SEMAPHORE_NOT_FOUND when no slot is free.
int32s rtos_allocate_semaphore(int16s a_max_locks_allowed)
{
int16s l_id ;
int8u l_saved_interrupts ;
// 'a_max_locks_allowed' lives on the caller's stack; no guarding needed for it.
if (a_max_locks_allowed < 1)
{
return ERR_DEADLOCK_RISK ;
}
// kernel data is not reentrant - shut out ISRs while the table is updated
rtos_save_and_disable_interrupts(&l_saved_interrupts) ;
if (s_number_of_semaphores >= MAX_SEMAPHORES)
{
l_id = ERR_MAX_SEMAPHORES_ALLOCATED ;
}
else if ( (l_id = find_next_available_semaphore_index()) != ERR_SEMAPHORE_NOT_FOUND)
{
semaphore_info *lp_semaphore = &s_semaphores[l_id] ;
lp_semaphore->owner = ERR_SEMAPHORE_NOT_FOUND ; // an owner is assigned at the first lock
lp_semaphore->state = eSemaphoreUnlocked ;
lp_semaphore->refCnt = 0 ;
lp_semaphore->id = l_id ;
lp_semaphore->max_locks_allowed = a_max_locks_allowed ;
s_number_of_semaphores++ ;
}
rtos_restore_interrupts(l_saved_interrupts) ;
return l_id ;
}
// Frees a previously allocated semaphore. Deallocation is refused while the
// semaphore is locked (tasks may be blocked on it). Returns 'a_id' on
// success, ERR_SEMAPHORE_LOCKED if still held, or ERR_SEMAPHORE_NOT_FOUND.
int32s rtos_deallocate_semaphore(int16s a_id)
{
int16u l_slot ;
int8u l_saved_interrupts ;
int32s l_status = ERR_SEMAPHORE_NOT_FOUND ;
// kernel data is not reentrant - shut out ISRs while the table is updated
rtos_save_and_disable_interrupts(&l_saved_interrupts) ;
if (find_semaphore(a_id, &l_slot) )
{
semaphore_info *lp_semaphore = &s_semaphores[l_slot] ;
if (lp_semaphore->state == eSemaphoreLocked)
{
software_warning("%s %s %d", resolve_system_message(ERR_SEMAPHORE_LOCKED), __FILE__, __LINE__ ) ;
l_status = ERR_SEMAPHORE_LOCKED ;
}
else
{
// scrub the slot so find_semaphore() no longer matches it
lp_semaphore->id = 0 ;
lp_semaphore->owner = ERR_SEMAPHORE_NOT_FOUND ;
lp_semaphore->state = eSemaphoreNotAllocated ;
s_number_of_semaphores-- ;
l_status = a_id ;
}
}
rtos_restore_interrupts(l_saved_interrupts) ;
return l_status ;
}
// Acquires semaphore 'a_id' for the running task. If no lock slot is free the
// task is queued on the semaphore's blocked list and the scheduler is invoked;
// after being unblocked it retries via the goto loop below. Returns 'a_id' on
// success or ERR_SEMAPHORE_NOT_FOUND if 'a_id' is not an allocated semaphore.
// Must not be called from interrupt context (REFUSE_INTERRUPT_CONTEXT).
int16s rtos_semaphore_lock(int16s a_id)
{
int16u l_index ;
int8u l_interrupts_status ;
REFUSE_INTERRUPT_CONTEXT
// the kernel is not reentrant; protect static/global data from ISRs
rtos_save_and_disable_interrupts(&l_interrupts_status) ;
// first check if the semaphore was allocated.
if ( find_semaphore(a_id, &l_index) )
{
semaphore_info *lp_semaphore ;
lp_semaphore = s_semaphores + l_index ;
// WAR STORY
// using an assembly jump here proved to make the stability of the target too volatile when changing
// memory models (for example, medium->large). using 'goto' makes the compiler use the right JMP instruction.
// The two labels below form a retry loop: block -> reschedule -> retry.
goto standard_lock_attempt ;
lock_attempt_was_unsuccessful: // the program jumps to this label only if the lock attempt failed. note: skipped by a
// normal call to the function.
scheduler_reschedule() ; // skipped when function is called. executed only when jumping to this label (see below).
// NOTE(review): on the retry path, interrupts were re-enabled before the
// jump to 'lock_attempt_was_unsuccessful' (see below), so the retry
// examines lp_semaphore with interrupts ENABLED, and the final
// rtos_restore_interrupts() then runs with an already-restored status.
// This looks deliberate (the task must be preemptible while blocked),
// but the unprotected retry window should be confirmed against the
// scheduler's guarantees.
standard_lock_attempt: // if the function is called, this is where execution begins
// attempt to lock the semaphore
if (lp_semaphore->state == eSemaphoreUnlocked)
{
// first locker becomes the owner; track the task's total held locks too
lp_semaphore->owner = g_running_task ;
lp_semaphore->state = eSemaphoreLocked ;
lp_semaphore->refCnt++ ;
g_tcb[g_running_task].lock_ref_counter++ ;
}
else if (lp_semaphore->state == eSemaphoreLocked && (lp_semaphore->owner == g_running_task) ) // check if the running task attempts to lock a semaphore that it already owns
{
lp_semaphore->refCnt++ ; // increment the reference counter. a task that has locked a semaphore 'x' times must release it 'x' times
}
// check whether the semaphore is locked but still has lock slots available for tasks (the maximum number of locks has not been reached yet)
else if (lp_semaphore->state == eSemaphoreLocked && (lp_semaphore->refCnt < lp_semaphore->max_locks_allowed) )
{
lp_semaphore->refCnt++ ; // lock is granted to the calling task
}
else // if a task is not allowed to lock a semaphore, and it is not the owner, it will have to wait
{
// park the running task on the semaphore's priority-ordered wait queue
if (priority_queue_insert(&lp_semaphore->blocked_tasks, (*(g_tcb + g_running_task)).priority, g_running_task) == ERR_QUEUE_FULL)
{
software_error("%s %s %d", resolve_system_message(ERR_QUEUE_FULL), __FILE__, __LINE__ ) ;
}
g_tcb[g_running_task].status = eTaskBlockedSemaphore ;
g_tcb[g_running_task].blocked_on_primitive = l_index ;
// interrupts must be enabled before rescheduling, or the task switch
// could never occur while this task is blocked
rtos_restore_interrupts(l_interrupts_status) ;
// WAR STORY
// using an assembly jump here proved to make the stability of the target too volatile when changing
// memory models (for example, medium->large). using 'goto' makes the compiler use the right JMP instruction
goto lock_attempt_was_unsuccessful ;
}
}
else // report an error
{
software_warning("%s %s %d", resolve_system_message(ERR_SEMAPHORE_NOT_FOUND), __FILE__, __LINE__ ) ;
a_id = ERR_SEMAPHORE_NOT_FOUND ;
}
rtos_restore_interrupts(l_interrupts_status) ;
return a_id ;
}
// Releases one lock reference of semaphore 'a_id'. When the reference count
// reaches zero the semaphore is unlocked and, if tasks are blocked on it, the
// highest-priority waiter is made ready and a reschedule is forced. Always
// returns 0. Must not be called from interrupt context.
int16s rtos_semaphore_unlock(int16s a_id)
{
int16u l_index ;
int8u l_interrupts_status ;
int8u l_reschedule = 0 ;
REFUSE_INTERRUPT_CONTEXT
// the kernel is not reentrant; protect static/global data from ISRs
rtos_save_and_disable_interrupts(&l_interrupts_status) ;
if (find_semaphore(a_id, &l_index) )
{
semaphore_info *lp_semaphore ;
lp_semaphore = s_semaphores + l_index ;
if (lp_semaphore->refCnt > 0)
{
lp_semaphore->refCnt-- ; // always decrement the reference counter, no matter what task unlocks the semaphore
}
if ( (lp_semaphore->state == eSemaphoreLocked) && (lp_semaphore->refCnt == 0) )
{
int16s l_unblocked_task ;
// select a task to be returned to the running queue, so that the scheduler can select it
l_unblocked_task = (int16s)priority_queue_minimum_data_extract(&lp_semaphore->blocked_tasks) ;
if (l_unblocked_task != ERR_QUEUE_EMPTY) // there is at least one task blocked on the semaphore - prepare it to be scheduled
{
int16s l_result ;
if ( (l_result = scheduler_declare_task_ready(l_unblocked_task, g_tcb[l_unblocked_task].priority)) != NO_ERROR)
{
software_error("%s %s %d", resolve_system_message(l_result), __FILE__, __LINE__ ) ;
}
l_reschedule = 1 ;
}
lp_semaphore->owner = ERR_SEMAPHORE_NOT_FOUND ;
lp_semaphore->state = eSemaphoreUnlocked ;
// NOTE(review): this decrements the RUNNING task's held-lock counter,
// but with max_locks_allowed > 1 the task performing the final unlock
// is not necessarily the owner that incremented it in
// rtos_semaphore_lock() - confirm the counter stays balanced.
g_tcb[g_running_task].lock_ref_counter-- ;
}
}
else
{
software_warning("%s %s %d", resolve_system_message(ERR_SEMAPHORE_NOT_FOUND), __FILE__, __LINE__ ) ;
}
rtos_restore_interrupts(l_interrupts_status) ;
// this prevents the unlocking task from quickly relocking the semaphore if this call returns, and a locking attempt
// is made very quickly thereafter, before the next task switch occurs. this call forces a task switch.
// reschedule only if a task has been unblocked. otherwise, this task is allowed to continue to consume its time-slice.
if (l_reschedule == 1)
{
scheduler_reschedule() ;
}
return 0 ;
}
// creates a mutex lock. returns its id or -1 upon failure.
int16s rtos_allocate_mutex()
{
int16s l_id = ERR_MUTEX_NOT_FOUND ;
int8u l_interrupts_status ;
// the kernel is not reentrant; protect static/global data from ISRs
rtos_save_and_disable_interrupts(&l_interrupts_status) ;
if (s_number_of_mutexes < MAX_MUTEXES)
{
if ( (l_id = find_next_available_mutex_index() ) != ERR_MUTEX_NOT_FOUND)
{
mutex_info *lp_mutex ;
lp_mutex = s_mutexes + l_id ;
lp_mutex->owner = ERR_MUTEX_NOT_FOUND ; // an owner will be set once this Mutex is locked
lp_mutex->state = eMutexUnlocked ;
lp_mutex->refCnt = 0 ;
lp_mutex->id = l_id ;
l_id = lp_mutex->id ;
/* NOTE(review): the source was truncated here by the extraction (web-page UI
** text replaced the remainder of the file). The tail of rtos_allocate_mutex()
** - presumably incrementing s_number_of_mutexes, restoring interrupts and
** returning l_id - and any following functions are missing; recover them from
** the original file before building. */