
// synchronization.c
// from the package: embedded operating system EOS (Embedded Operating System), a general-purpose class of system software
// (the excerpt below is truncated and begins partway through what appears to be rtos_allocate_mutex)
			s_number_of_mutexes++ ;
		}
	}
	else
	{
		l_id = ERR_MAX_MUTEXES_ALLOCATED ;
	}

	rtos_restore_interrupts(l_interrupts_status) ;

	return l_id ;
}

// release a previously allocated mutex. returns a negative error code (ERR_MUTEX_NOT_FOUND or ERR_MUTEX_LOCKED) on failure.
int16s rtos_deallocate_mutex(int16s a_id)
{
	int16s 		l_result = ERR_MUTEX_NOT_FOUND ; // int16s to match the return type
	int16u 		l_index ;
	int8u  		l_interrupts_status ;
	mutex_info *lp_mutex ;

	// the kernel is not reentrant; protect static/global data from ISRs
	rtos_save_and_disable_interrupts(&l_interrupts_status) ;

	// deallocating a mutex is allowed only if it is not locked
	if (find_mutex(a_id, &l_index) )
	{
		lp_mutex = s_mutexes + l_index ;

		if (lp_mutex->state == eMutexLocked)
		{
			software_warning("%s %s %d", resolve_system_message(ERR_MUTEX_LOCKED), __FILE__, __LINE__ ) ;
			rtos_restore_interrupts(l_interrupts_status) ;

			return ERR_MUTEX_LOCKED ;
		}

		lp_mutex->id = 0 ;
		lp_mutex->owner = ERR_MUTEX_NOT_FOUND ;
		lp_mutex->state = eMutexNotAllocated ;

		l_result = a_id ;

		s_number_of_mutexes-- ;
	}
	
	rtos_restore_interrupts(l_interrupts_status) ;

	return l_result ;
}

// WAR STORY
// interrupts cannot wait, sleep, or meddle with the task scheduler, as they must be as fast as possible.
// the macro REFUSE_INTERRUPT_CONTEXT invokes a software error if this (or any other) system call is called
// from interrupt context.
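// SKETCH (assumption, not part of the original source): REFUSE_INTERRUPT_CONTEXT is assumed to test a kernel
// flag and bail out with a software error. the flag name g_interrupt_nesting and the error code
// ERR_CALLED_FROM_ISR below are hypothetical; only software_error and resolve_system_message appear in this file.
//
// #define REFUSE_INTERRUPT_CONTEXT                                                  \
//     if (g_interrupt_nesting > 0)                                                  \
//     {                                                                             \
//         software_error("%s %s %d", resolve_system_message(ERR_CALLED_FROM_ISR),   \
//                        __FILE__, __LINE__) ;                                      \
//         return ERR_CALLED_FROM_ISR ;                                              \
//     }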
int16s rtos_mutex_lock(int16s a_id)
{
	int16u	l_index ;
	int8u 	l_interrupts_status ;

	REFUSE_INTERRUPT_CONTEXT
		
	// the kernel is not reentrant; protect static/global data from ISRs
	rtos_save_and_disable_interrupts(&l_interrupts_status) ;

	// first check if the mutex was allocated.
	if ( find_mutex(a_id, &l_index) )
	{
		mutex_info *lp_mutex ;

		lp_mutex = s_mutexes + l_index ;

		// when the function is entered via a normal call, skip the next 'scheduler_reschedule' call. it shall be
		// called only if a mutex lock attempt has failed, because then it makes the first instruction after it the
		// return address of the calling task, allowing the task to retry the lock attempt.

		// WAR STORY
		// a previous implementation of this function called 'scheduler_reschedule' in a loop, in case the mutex was
		// acquired by another task while this call took place. that led to a "chainsaw effect" when observed with
		// the logic analyzer: if a task that owned the lock ran out of time-slice, the tasks that were waiting for
		// that lock had indeed already inserted their id into the blocked_tasks queue of the mutex, but continued to
		// poll the mutex status to get the lock. by calling 'scheduler_reschedule' the return address of the task
		// became that polling loop, and that meant that they never truly gave up the CPU (the original intention was
		// that once a task cannot lock a mutex, it inserts itself into the blocked tasks queue of the mutex pending
		// an unlock operation by the lock owner. by polling they never left the ready administration until they
		// ran out of time-slice!). that led to rapid rescheduling until they themselves ran out of
		// time-slice, after which an administration switch was made. the current implementation calls
		// 'scheduler_reschedule' ONLY ONCE, after the lock attempt has failed. the first statement after the call
		// becomes the return address. that is why assembly jumps were used here ('scheduler_reschedule' needs to be
		// skipped upon a regular call).
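		// SKETCH (illustrative, not part of the original source): the shape of the two approaches described above.
		//
		//     // old, broken: the polling loop itself became the task's return address
		//     // while (lp_mutex->state == eMutexLocked) { scheduler_reschedule() ; }
		//
		//     // current: reschedule exactly once per failed attempt; the retry path is only reachable
		//     // through the 'lock_attempt_was_unsuccessful' label below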

		// WAR STORY
		// using an assembly jump here proved to make the stability of the target too volatile when changing
		// memory models (for example, medium->large). using 'goto' makes the compiler use the right JMP instruction.

		// a regular call must not regenerate a return address by invoking 'scheduler_reschedule' unless the mutex
		// is locked. so jump unconditionally over it to 'standard_lock_attempt'.
		goto standard_lock_attempt ;
			
lock_attempt_was_unsuccessful: // the program jumps to this label only if the lock attempt failed. note: skipped by a
							   // normal call to the function.
		scheduler_reschedule() ; // skipped when the function is called normally. executed only when jumping to this label (see below).
		
standard_lock_attempt: // if the function is called, this is where execution begins

	 	// attempt to lock the mutex
		if (lp_mutex->state == eMutexUnlocked) // note: the lock is never relinquished while a task is waiting for the mutex, in order to prevent a race
		// condition (another task might lock it before the dequeued task (from the mutex's queue) gets a chance to lock it). after the next owner of the lock is
		// scheduled (because it is being put in the running queue), it is restarted at this function (after the call to 'scheduler_reschedule', see below - that is
		// the new return address of the task) and locks the mutex. a mutex is freed only if there is no task waiting for it.
		{
			lp_mutex->owner = g_running_task ; 
			lp_mutex->state = eMutexLocked ;
			lp_mutex->refCnt++ ;
			g_tcb[g_running_task].lock_ref_counter++ ;
		}
		else if (lp_mutex->state == eMutexLocked && (lp_mutex->owner == g_running_task) ) // check if the running task attempts to lock a mutex that it already owns
		{
			lp_mutex->refCnt++ ; // increment the reference counter. a task that has locked a mutex 'x' times must release it 'x' times
		} 
		else // the mutex is locked and the running task is not the owner, so it will have to wait
		{
			// move task to the waiting tasks queue of the mutex. this task does not get any CPU time unless moved back to the running queue.
			// remember that g_running_task is absent from the running queue (because it was dequeued)
			
			//if (queue_enqueue(&lp_mutex->blocked_tasks, g_running_task) == ERR_QUEUE_FULL)
			if (priority_queue_insert(&lp_mutex->blocked_tasks, (*(g_tcb + g_running_task)).priority, g_running_task) == ERR_QUEUE_FULL)
			{
				software_error("%s %s %d", resolve_system_message(ERR_QUEUE_FULL), __FILE__, __LINE__ ) ;
			}
			g_tcb[g_running_task].status = eTaskBlockedMutex ;
			g_tcb[g_running_task].blocked_on_primitive = l_index ;

			rtos_restore_interrupts(l_interrupts_status) ;
			
			// WAR STORY
			// using an assembly jump here proved to make the stability of the target too volatile when changing
			// memory models (for example, medium->large). using 'goto' makes the compiler use the right JMP instruction.

			// after the task has been marked as 'blocked' and put in the blocked tasks queue, it is time
			// to call 'scheduler_reschedule'. note that this call is made in order to execute the entire
			// lock procedure all over again for tasks that failed to lock the mutex. calling 'scheduler_reschedule'
			// from the label ensures that the first statement after it will serve as a return address for the next
			// time the calling task is scheduled.
			goto lock_attempt_was_unsuccessful ;
		}
	}
	else // report an error
	{
		software_warning("%s %s %d (%d)", resolve_system_message(ERR_MUTEX_NOT_FOUND), __FILE__, __LINE__, a_id ) ;

		a_id = ERR_MUTEX_NOT_FOUND ;
	}

	rtos_restore_interrupts(l_interrupts_status) ;

	return a_id ;
}
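
// USAGE SKETCH (illustrative, not part of the original source): recursive locking. a task that has locked a
// mutex 'x' times must unlock it 'x' times before another task can acquire it. the allocator call below is
// assumed from the (truncated) rtos_allocate_mutex above, and the example task body is hypothetical.
//
//     int16s l_id = rtos_allocate_mutex() ;  // assumed allocator signature
//     rtos_mutex_lock(l_id) ;                // refCnt: 0 -> 1, owner = g_running_task
//     rtos_mutex_lock(l_id) ;                // same owner: no blocking, refCnt: 1 -> 2
//     rtos_mutex_unlock(l_id) ;              // refCnt: 2 -> 1, the mutex stays locked
//     rtos_mutex_unlock(l_id) ;              // refCnt: 1 -> 0, unlocked; a waiter (if any) is readied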

// never use from within interrupt context. see comments for rtos_mutex_lock.
int16s rtos_mutex_unlock(int16s a_id)
{
	int16u 	l_index ;
	int8u 	l_interrupts_status ;
	int8u	l_reschedule = 0 ;
	
	REFUSE_INTERRUPT_CONTEXT

	// the kernel is not reentrant; protect static/global data from ISRs
	rtos_save_and_disable_interrupts(&l_interrupts_status) ;

	if (find_mutex(a_id, &l_index) )
	{
		mutex_info *lp_mutex ;

		lp_mutex = s_mutexes + l_index ;

		if (lp_mutex->refCnt > 0)
		{
			lp_mutex->refCnt-- ; // always decrement the reference counter, no matter what task unlocks the mutex
		}

		if ( (lp_mutex->state == eMutexLocked) && (lp_mutex->refCnt == 0) )
		{
			int16s l_unblocked_task ;
			
			// select a task to be returned to the running queue, so that the scheduler can select it
			
			//l_unblocked_task = (int16s)queue_dequeue(&lp_mutex->blocked_tasks) ;
			l_unblocked_task = (int16s)priority_queue_minimum_data_extract(&lp_mutex->blocked_tasks) ;
			
			if (l_unblocked_task != ERR_QUEUE_EMPTY) // there is at least one task blocked on the mutex - prepare it to be scheduled
			{
				int16s l_result ;
				// WAR STORY
				// this is the most fair approach: if a task has been waiting and the lock has become available,
				// select the next task from the blocked queue (now a priority queue; see the commented-out FIFO
				// dequeue above) and put it in the right slot in the ready list, for later selection. however, this
				// introduces a problem of possible starvation - the task needs the lock again, but it might be
				// locked by another task once it is scheduled again. different approaches, such as granting the CPU
				// to the extracted task in the next task switch, are doomed to fail, because they might introduce a
				// priority inversion: if the unblocked task was a low priority task that plans to keep the lock for
				// a while, it might be preempted by a high priority task which will have to wait.
				if ( (l_result = scheduler_declare_task_ready(l_unblocked_task, g_tcb[l_unblocked_task].priority)) != NO_ERROR)
				{
					software_error("%s %s %d", resolve_system_message(l_result), __FILE__, __LINE__ ) ;
				}
				l_reschedule = 1 ;
			}			
			lp_mutex->owner = ERR_MUTEX_NOT_FOUND ;
			lp_mutex->state = eMutexUnlocked ;
			g_tcb[g_running_task].lock_ref_counter-- ;
		}
	}
	else
	{
		software_warning("%s %s %d", resolve_system_message(ERR_MUTEX_NOT_FOUND), __FILE__, __LINE__ ) ;
	}
	
	rtos_restore_interrupts(l_interrupts_status) ;
		
	// this prevents the unlocking task from quickly relocking the mutex if this call returns and a locking attempt
	// is made very soon thereafter, before the next task switch occurs. this call forces a task switch.
	// reschedule only if a task has been unblocked. otherwise, this task is allowed to continue to consume its time-slice.
	if (l_reschedule == 1)
	{
		scheduler_reschedule() ; 
	}

	return 0 ;
}
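
// ILLUSTRATIVE TIMELINE (assumption, not part of the original source): why the forced reschedule above matters.
// without it, the unlocking task could re-acquire the mutex before the readied waiter ever runs:
//
//     task A: rtos_mutex_unlock(m) ;  // task B is moved from the mutex's blocked queue to the ready list
//     task A: rtos_mutex_lock(m) ;    // without the forced task switch, A relocks immediately and B starves
//
// the scheduler_reschedule() call hands the CPU over right after the unlock instead.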

int16s rtos_is_mutex_locked(int16s a_id)
{
	int16u	l_index ;
	int16s 	l_locked = 1 ; // if the mutex cannot be found, report it as locked (the conservative default)
	int8u 	l_interrupts_status ;

	// the kernel is not reentrant; protect static/global data from ISRs
	rtos_save_and_disable_interrupts(&l_interrupts_status) ;

	if (find_mutex(a_id, &l_index) )
	{
		l_locked = (s_mutexes[l_index].state == eMutexLocked) ;
	}
	
	rtos_restore_interrupts(l_interrupts_status) ;
		
	return l_locked ;
}

int16s rtos_is_semaphore_locked(int16s a_id)
{
	int16u	l_index ;
	int16s 	l_locked = ERR_SEMAPHORE_NOT_FOUND ;
	int8u 	l_interrupts_status ;

	// the kernel is not reentrant; protect static/global data from ISRs
	rtos_save_and_disable_interrupts(&l_interrupts_status) ;

	if (find_semaphore(a_id, &l_index) )
	{
		l_locked = (s_semaphores[l_index].state == eSemaphoreLocked) ;
	}
	
	rtos_restore_interrupts(l_interrupts_status) ;
		
	return l_locked ;
}

int16s	rtos_get_semaphore_ref_count(int16s a_id)
{
	int16u	l_index ;
	int16s 	l_ref = ERR_SEMAPHORE_NOT_FOUND ;
	int8u 	l_interrupts_status ;

	// the kernel is not reentrant; protect static/global data from ISRs
	rtos_save_and_disable_interrupts(&l_interrupts_status) ;

	if (find_semaphore(a_id, &l_index) )
	{
		l_ref = s_semaphores[l_index].refCnt ;
	}
	
	rtos_restore_interrupts(l_interrupts_status) ;
		
	return l_ref ;
}

int16s	rtos_mutex_get_owner(int16s a_id)
{
	int16u	l_index ;
	int16s 	l_owner = ERR_MUTEX_NOT_FOUND ; // int16s and the mutex (not semaphore) error code, to match this function
	int8u 	l_interrupts_status ;

	// the kernel is not reentrant; protect static/global data from ISRs
	rtos_save_and_disable_interrupts(&l_interrupts_status) ;

	if (find_mutex(a_id, &l_index) )
	{
		l_owner = s_mutexes[l_index].owner ;
	}
	
	rtos_restore_interrupts(l_interrupts_status) ;
		
	return l_owner ;
}

int16s rtos_mutex_set_blocked_task_priority(int16s a_mutex_id, int16s a_task_id, int8u a_priority)
{
	int16u	l_index ;
	int16s 	l_result = ERR_MUTEX_NOT_FOUND ;
	int8u   l_interrupts_status ;

	// the kernel is not reentrant; protect static/global data from ISRs
	rtos_save_and_disable_interrupts(&l_interrupts_status) ;

	if (find_mutex(a_mutex_id, &l_index) )
	{
		priority_queue_info *lp_queue = &s_mutexes[l_index].blocked_tasks ;
		
		l_result = priority_queue_update_key(lp_queue, a_priority, a_task_id) ;
	}
	
	rtos_restore_interrupts(l_interrupts_status) ;
	
	return l_result ;
}

int16s rtos_semaphore_set_blocked_task_priority(int16s a_semaphore_id, int16s a_task_id, int8u a_priority)
{
	int16u	l_index ;
	int16s 	l_result = ERR_SEMAPHORE_NOT_FOUND ;
	int8u   l_interrupts_status ;

	// the kernel is not reentrant; protect static/global data from ISRs
	rtos_save_and_disable_interrupts(&l_interrupts_status) ;

	if (find_semaphore(a_semaphore_id, &l_index) )
	{
		priority_queue_info *lp_queue = &s_semaphores[l_index].blocked_tasks ;
		
		l_result = priority_queue_update_key(lp_queue, a_priority, a_task_id) ;
	}
	
	rtos_restore_interrupts(l_interrupts_status) ;
	
	return l_result ;
}
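
// USAGE SKETCH (illustrative, not part of the original source): the two set_blocked_task_priority calls above
// keep a blocked queue's keys consistent when a blocked task's priority changes, e.g. from a set-priority
// system call or a priority-inheritance scheme. the task id, mutex id, and priority below are hypothetical.
//
//     // task 7, currently blocked on mutex 3, has been boosted to priority 1
//     if (rtos_mutex_set_blocked_task_priority(3, 7, 1) != NO_ERROR)
//     {
//         software_warning("%s %s %d", resolve_system_message(ERR_MUTEX_NOT_FOUND), __FILE__, __LINE__) ;
//     }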
