// dynsched.cpp
{
frame_size_ =
ACE::minimum_frame_size (frame_size_,
ordered_task_entries_ [i]->
effective_period ());
}
}
return status;
}
// calculate the utilization and critical set
// parameters over the ordered dispatch entries
ACE_DynScheduler::status_t
ACE_DynScheduler::calculate_utilization_params (void)
{
critical_set_frame_size_ = 0;
utilization_ = 0.0;
critical_set_utilization_ = 0.0;
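  // start at the priority level of the first dispatch entry (the ordered
  // dispatch entries are assumed to run from highest to lowest priority);
  // -1 indicates that no priority queue has been guaranteed schedulable yet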
minimum_priority_queue_ =
ordered_dispatch_entries_ [0]->priority ();
minimum_guaranteed_priority_queue_ = -1;
// iterate through the ordered dispatch entries, calculating frame size and utilization
for (u_int i = 0; i < dispatch_entry_count_; ++i)
{
// if we've just finished examining another priority level
if (minimum_priority_queue_ != ordered_dispatch_entries_ [i]->priority ())
{
// update parameters for the previous priority level
update_priority_level_params ();
// update the minimum priority queue
minimum_priority_queue_ = ordered_dispatch_entries_ [i]->priority ();
}
// Only consider computation times of dispatches of
// OPERATION and REMOTE_DEPENDANT descriptors.
if (((ordered_dispatch_entries_ [i]->task_entry ().info_type () ==
RtecScheduler::OPERATION) ||
(ordered_dispatch_entries_ [i]->task_entry ().info_type () ==
RtecScheduler::REMOTE_DEPENDANT)) &&
(ordered_dispatch_entries_ [i]->task_entry ().effective_period () > 0))
{
utilization_ +=
ACE_static_cast (double,
ACE_UINT64_DBLCAST_ADAPTER (ordered_dispatch_entries_ [i]->
task_entry ().rt_info ()->worst_case_execution_time)) /
ACE_static_cast (double, ordered_dispatch_entries_ [i]->
task_entry ().effective_period ());
}
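      // Illustration with hypothetical numbers: a dispatch with a worst
      // case execution time of 2ms and an effective period of 10ms adds
      // 2/10 = 0.2 to utilization_; a second dispatch with 5ms and 20ms
      // adds 0.25, for a cumulative utilization of 0.45, below the 1.0
      // schedulability bound checked below.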
}
// update parameters for the lowest priority level
update_priority_level_params ();
// if the critical set's utilization is strictly below 1.0 (allowing
// DBL_EPSILON of slack for floating point roundoff), it is schedulable
return (1.0 - critical_set_utilization_ > DBL_EPSILON)
? SUCCEEDED : ST_UTILIZATION_BOUND_EXCEEDED;
}
void
ACE_DynScheduler::update_priority_level_params ()
{
// if we've just finished examining a critical priority level
if (minimum_priority_queue_ <= minimum_critical_priority ())
{
// update the information about the critical set
critical_set_frame_size_ = frame_size_;
critical_set_utilization_ = utilization_;
}
// if the lowest priority level considered is schedulable
if (1.0 - utilization_ > DBL_EPSILON)
{
// the minimum guaranteed priority queue is the minimum considered so far
minimum_guaranteed_priority_queue_ = minimum_priority_queue_;
}
}
ACE_DynScheduler::status_t
ACE_DynScheduler::setup_task_entries (void)
{
// store number of tasks, based on registrations
tasks (ACE_static_cast (u_int, rt_info_entries_.size ()));
// bail out if there are no tasks registered
if (tasks () == 0)
{
return ST_NO_TASKS_REGISTERED;
}
// clear the decks of any previous scheduling information
reset ();
// allocate new table of task entries (wrappers for rt_infos)
size_t task_count = tasks ();
ACE_NEW_RETURN (task_entries_, Task_Entry [task_count],
ST_VIRTUAL_MEMORY_EXHAUSTED);
// allocate new table of pointers to task entries (for sorting)
ACE_NEW_RETURN (ordered_task_entries_, Task_Entry *[task_count],
ST_VIRTUAL_MEMORY_EXHAUSTED);
// @@ TODO: this is completely bogus code, the bit-wise
// representation of a null pointer is not always a string of
// zeroes. The correct way to initialize this array is with a for
// loop.
// ACE_OS::memset (ordered_task_entries_, 0,
// sizeof (Task_Entry *) * task_count);
for (size_t j = 0; j != task_count; ++j)
ordered_task_entries_[j] = 0;
// allocate new unbounded set for pointers to the
// dispatch entries that delineate threads
ACE_NEW_RETURN (thread_delineators_, ACE_Unbounded_Set <Dispatch_Entry *>,
ST_VIRTUAL_MEMORY_EXHAUSTED);
// allocate new unbounded set for pointers to dispatch entries
ACE_NEW_RETURN (dispatch_entries_,
ACE_Unbounded_Set <Dispatch_Entry *>,
ST_VIRTUAL_MEMORY_EXHAUSTED);
// allocate new unbounded set for pointers to config info entries
ACE_NEW_RETURN (config_info_entries_,
ACE_Unbounded_Set <Config_Info *>,
ST_VIRTUAL_MEMORY_EXHAUSTED);
// set up links between rt_info_entries_, task_entries_,
// and ordered_task_entries_ tables
ACE_Unbounded_Set_Iterator <RT_Info *> iter (rt_info_entries_);
for (u_int i = 0; i < tasks (); ++i, iter.advance ())
{
RT_Info** info_entry;
// tie task entry to corresponding rt_info
if (! iter.next (info_entry))
{
return ST_BAD_INTERNAL_POINTER;
}
task_entries_ [i].rt_info (*info_entry);
// Tie rt_info to corresponding task entry: the double cast is
// needed to ensure that the size of the pointer and the size of the
// stored magic cookie are the same (see the definition of
// ptrdiff_t in ACE to grok how this works portably).
task_entries_ [i].rt_info ()->volatile_token =
ACE_static_cast (CORBA::ULongLong,
ACE_reinterpret_cast (ptrdiff_t,
&(task_entries_ [i])));
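      // (the stored cookie is read back via ACE_LONGLONG_TO_PTR in
      // relate_task_entries_recurse, below)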
// tie ordered task entry pointer to corresponding task entry
ordered_task_entries_ [i] = &(task_entries_ [i]);
}
// set up bidirectional links between task entries
return relate_task_entries ();
}
ACE_DynScheduler::status_t
ACE_DynScheduler::relate_task_entries (void)
{
status_t status = SUCCEEDED;
// do DFS traversal of the entire RT_Info handle dependency DAG, replicating
// the handle dependency DAG as a calls DAG of pointers between task
// entries (and creating its transpose, the callers DAG). This is done
// to avoid the O(n) cost of handle lookups in the RT_Infos for further
// traversal of the graph during schedule sorting. One useful side effect
// of this traversal is that it produces a topological ordering of dependencies
// in the traversal finishing times, which can be used to detect call cycles.
long time = 0;
for (u_int i = 0; i < tasks (); ++i)
{
if ((status = relate_task_entries_recurse (time, task_entries_[i]))
!= SUCCEEDED)
{
break;
}
}
return status;
}
ACE_DynScheduler::status_t
ACE_DynScheduler::relate_task_entries_recurse (long &time, Task_Entry &entry)
{
// may have entered at a non-root node previously, so this does
// not necessarily indicate a cycle in the dependency graph
if (entry.dfs_status () != Task_Entry::NOT_VISITED)
{
return SUCCEEDED;
}
// when a node is discovered, mark it as visited, increment "time" and
// store as the entry's discovery time. This is not currently used in
// the scheduling algorithms, but is left in for possible future use
// as it shows full parenthesization of entry discovery/finishing.
entry.dfs_status (Task_Entry::VISITED);
entry.discovered (++time);
u_int dependency_count = number_of_dependencies (*entry.rt_info ());
if (dependency_count > 0)
{
// traverse dependencies of underlying RT_Info
for (u_int i = 0; i < dependency_count; ++i)
{
// obtain a pointer to the corresponding Task_Entry for each dependency
RT_Info* dependency_info = 0;
lookup_rt_info(entry.rt_info ()->dependencies[i].rt_info, dependency_info);
if (! dependency_info)
{
return ST_BAD_INTERNAL_POINTER;
}
// Obtain a pointer to the Task_Entry from the dependency
// RT_Info: the double cast is needed to ensure that the size of
// the pointer and the size of the stored magic cookie are the
// same (see the definition of ptrdiff_t in ACE to grok how
// this works portably).
Task_Entry *dependency_entry_ptr =
ACE_LONGLONG_TO_PTR (Task_Entry *, dependency_info->volatile_token);
if (! dependency_entry_ptr)
{
return ST_BAD_INTERNAL_POINTER;
}
// relate the entries according to the direction of the dependency
Task_Entry_Link *link;
ACE_NEW_RETURN (link,
Task_Entry_Link (entry,
*dependency_entry_ptr,
entry.rt_info ()->dependencies[i].number_of_calls,
entry.rt_info ()->dependencies[i].dependency_type),
ST_VIRTUAL_MEMORY_EXHAUSTED);
dependency_entry_ptr->callers ().insert (link);
entry.calls ().insert (link);
      // depth first recursion on the newly identified entry,
      // propagating any failure status back up the call chain
      status_t recurse_status =
        relate_task_entries_recurse (time, *dependency_entry_ptr);
      if (recurse_status != SUCCEEDED)
        {
          return recurse_status;
        }
}
}
// when a node is finished, mark it as finished, increment "time" and
// store as the entry's finish time. This produces a topological ordering
// based on dependencies, which is used to check for call cycles.
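// For example, for hypothetical operations where A depends on B, the times
// nest as: discover A (1), discover B (2), finish B (3), finish A (4);
// sorting by descending finish time then places the caller A ahead of B.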
entry.dfs_status (Task_Entry::FINISHED);
entry.finished (++time);
return SUCCEEDED;
}
ACE_DynScheduler::status_t
ACE_DynScheduler::identify_threads (ACE_CString & unresolved_locals,
ACE_CString & unresolved_remotes)
{
u_int i, j;
ACE_DynScheduler::status_t result = SUCCEEDED;
char string_buffer [BUFSIZ];
// walk array of task entries, picking out thread delineators
for (i = 0; i < tasks_; i++)
{
// if entry has exposed threads or no callers, it may be a thread
if ((task_entries_ [i].rt_info ()->threads > 0) ||
(task_entries_ [i].callers ().is_empty ()))
{
// if its period has been set (non-zero), it's a thread delineator
if (task_entries_ [i].rt_info ()->period > 0)
{
task_entries_ [i].effective_period (task_entries_ [i].rt_info ()->period);
task_entries_ [i].is_thread_delineator (1);
// create a Dispatch_Entry for each thread of the delimiting Task_Entry
u_int thread_count = (task_entries_ [i].rt_info ()->threads > 0)
? task_entries_ [i].rt_info ()->threads : 1;
// Just use low 32 bits of effective_period. This will
// have to change when TimeBase.idl is finalized.
const TimeBase::TimeT zero = 0;
for (j = 0; j < thread_count; j++)
{
Dispatch_Entry *dispatch_ptr;
const TimeBase::TimeT effective_period =
task_entries_ [i].effective_period ();
ACE_NEW_RETURN(dispatch_ptr,
Dispatch_Entry (zero,
effective_period,
task_entries_ [i].rt_info ()->preemption_priority,
task_entries_ [i].rt_info ()->priority,
task_entries_ [i]),
ST_VIRTUAL_MEMORY_EXHAUSTED);
if ((task_entries_ [i].dispatches ().insert (Dispatch_Entry_Link (*dispatch_ptr)) < 0) ||
(dispatch_entries_->insert (dispatch_ptr) < 0) ||
(thread_delineators_->insert (dispatch_ptr) < 0))
{
return ST_VIRTUAL_MEMORY_EXHAUSTED;
}
// increase the count of thread dispatches
++ threads_;
}
}
else if (task_entries_ [i].rt_info ()->info_type == RtecScheduler::REMOTE_DEPENDANT)
{
// Warn about unresolved remote dependencies, mark the task entry
result = (result == SUCCEEDED)
? ST_UNRESOLVED_REMOTE_DEPENDENCIES
: result;
task_entries_ [i].has_unresolved_remote_dependencies (1);
ACE_DEBUG (
(LM_DEBUG,
ACE_LIB_TEXT("Warning: an operation identified by ")
ACE_LIB_TEXT("\"%s\" has unresolved remote dependencies.\n"),
ACE_TEXT_CHAR_TO_TCHAR((const char*)task_entries_ [i].rt_info ()->entry_point)));
// Record entry point in list of unresolved remote dependencies
ACE_OS::snprintf (string_buffer, BUFSIZ, "// %s\n",
                  (const char*) task_entries_ [i].rt_info ()->
                    entry_point);
unresolved_remotes +=
ACE_CString (string_buffer);
}
else
{
// A local node that no one calls and has neither a rate nor threads is suspect
ACE_DEBUG (
(LM_DEBUG,
ACE_LIB_TEXT("Error: operation \"%s\" does not specify a period or\n")
ACE_LIB_TEXT("visible threads, and is not called by any other operation.\n")
ACE_LIB_TEXT("Are there backwards dependencies.\n"),
ACE_TEXT_CHAR_TO_TCHAR((const char*)task_entries_ [i].rt_info ()->entry_point)));
result = ST_UNRESOLVED_LOCAL_DEPENDENCIES;
task_entries_ [i].has_unresolved_local_dependencies (1);
// Record entry point in list of unresolved local dependencies
ACE_OS::snprintf (string_buffer, BUFSIZ, "// %s\n",
                  (const char*) task_entries_ [i].rt_info ()->
                    entry_point);
unresolved_locals +=
ACE_CString (string_buffer);
}
}
}
return result;
}
ACE_DynScheduler::status_t
ACE_DynScheduler::check_dependency_cycles (void)
{
status_t return_status = SUCCEEDED;
// sort the pointers to entries in order of descending finish time
ACE_OS::qsort ((void *) ordered_task_entries_,
tasks (),
sizeof (Task_Entry *),
compare_entry_finish_times);
// set all the dfs_status indicators to NOT_VISITED
u_int i;
for (i = 0; i < tasks (); ++i)
{
ordered_task_entries_ [i]->dfs_status (Task_Entry::NOT_VISITED);
}
// recurse on each entry, saving most recent status if it is not SUCCEEDED
for (i = 0; i < tasks (); ++i)
{
status_t status =
check_dependency_cycles_recurse (*ordered_task_entries_ [i]);
if (status != SUCCEEDED)
{
return_status = status;
}
}
return return_status;
}
// uses strongly connected components algorithm: consider entries
// in order of finishing time from dependency DAG traversal,
// but traverse transpose graph: any entry that has a dependant
// that was not previously visited in this traversal is part
// of a dependency cycle
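// For example (hypothetical operations): if A calls B and B also calls A,
// one of them, say A, finishes later in the dependency traversal; the
// transpose traversal started at A then reaches B while B is still
// unvisited, exposing the cycle.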
ACE_DynScheduler::status_t
ACE_DynScheduler::check_dependency_cycles_recurse (Task_Entry &entry)
{
status_t return_status = SUCCEEDED;
// halt DFS recursion on callers graph if entry has already been visited
if (entry.dfs_status () != Task_Entry::NOT_VISITED)
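  {
    return return_status;
  }
  // NOTE: the original listing breaks off here.  What follows is a
  // minimal sketch of the remainder, reconstructed from the strongly
  // connected components algorithm described in the comment preceding
  // this function; the caller () accessor, the finished () getter, and
  // the ST_CYCLE_IN_DEPENDENCIES status code are assumptions, not
  // confirmed by the listing.
  // mark the entry as visited in this traversal of the transpose graph
  entry.dfs_status (Task_Entry::VISITED);
  // traverse the entries that call this entry (the transpose graph)
  for (ACE_Unbounded_Set_Iterator <Task_Entry_Link *>
         iter (entry.callers ());
       ! iter.done ();
       iter.advance ())
  {
    Task_Entry_Link **link;
    if ((! iter.next (link)) || (! link) || (! (*link)))
    {
      return ST_BAD_INTERNAL_POINTER;
    }
    Task_Entry &caller = (*link)->caller ();  // assumed accessor
    if (caller.dfs_status () == Task_Entry::NOT_VISITED)
    {
      // a dependant not previously visited in this traversal is in the
      // same strongly connected component: report a call cycle
      return_status = ST_CYCLE_IN_DEPENDENCIES;  // assumed status code
      status_t status = check_dependency_cycles_recurse (caller);
      if (status != SUCCEEDED)
      {
        return_status = status;
      }
    }
  }
  return return_status;
}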