// reconfig_sched_utils_t.cpp
// Accessor: non-zero when the current recursion is inside a
// previously detected dependency cycle, zero otherwise.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_SCC_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
in_a_cycle (void)
{
  return this->in_a_cycle_;
}
// Mutator: records whether the recursion is currently inside a
// previously detected dependency cycle (non-zero means yes).
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
TAO_RSE_SCC_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
in_a_cycle (int flag)
{
  this->in_a_cycle_ = flag;
}
/* WSOA merge - commented out
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_SCC_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
unconditional_action (TAO_Reconfig_Scheduler_Entry &rse)
{
if (rse.is_thread_delineator () &&
rse.effective_period () == 0)
{
rse.effective_period (rse.actual_rt_info ()->period);
long threads = rse.actual_rt_info ()->threads;
rse.effective_exec_multiplier (threads > 0 ? threads : 1);
}
return 0;
}
*/
// Makes sure the entry has not previously been visited in the
// reverse DFS (call graph transpose) direction.  Returns 0 if
// the actions should be applied, 1 if the entry should be left
// alone, and -1 if an error occurred.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_SCC_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
precondition (TAO_Reconfig_Scheduler_Entry &rse)
{
  // Apply the base class's precondition check first.
  int result =
    TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
      precondition (rse);

  if (result != 0)
    {
      // Was: "return 1" for any nonzero base result, which silently
      // converted a -1 (error) from the base precondition into 1
      // (skip entry), contradicting the documented -1-on-error
      // contract above.  Propagate the base result unchanged instead:
      // 1 still means "leave alone", -1 now reports the error.
      return result;
    }

  // Visit the entry only if the reverse DFS has not reached it yet.
  return (rse.rev_dfs_status () ==
            TAO_Reconfig_Scheduler_Entry::NOT_VISITED)
           ? 0
           : 1;
}
// Prefix action for the reverse DFS traversal: stamps the entry's
// reverse discovery time and marks it as currently being visited,
// before any of its successors are explored.  Returns 0 on success
// and -1 on error (this implementation cannot fail).
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_SCC_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
prefix_action (TAO_Reconfig_Scheduler_Entry &rse)
{
  // Record the discovery timestamp (post-incrementing the shared
  // DFS clock), then flag the entry as in-progress.
  rse.rev_discovered (this->DFS_time_++);
  rse.rev_dfs_status (TAO_Reconfig_Scheduler_Entry::VISITED);
  return 0;
}
// Examines each successor's reverse DFS status.  An enabled
// successor that has not yet been visited in the transpose
// traversal indicates the entry and successor share a strongly
// connected component: the pair is reported, the in-a-cycle flag is
// latched, and the cycle count is bumped once per component.
// Returns 0 on success and -1 on error (detecting a cycle is not an
// error as far as this method is concerned).
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_SCC_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
                    TAO_Reconfig_Scheduler_Entry &successor,
                    const RtecScheduler::Dependency_Info &di)
{
  ACE_UNUSED_ARG (di);

  const int successor_enabled =
    successor.enabled_state () != RtecScheduler::RT_INFO_DISABLED;
  const int not_yet_visited =
    successor.rev_dfs_status () ==
      TAO_Reconfig_Scheduler_Entry::NOT_VISITED;

  if (successor_enabled && not_yet_visited)
    {
      // Count each strongly connected component exactly once: the
      // flag stays set while the recursion remains inside the cycle.
      if (this->in_a_cycle () == 0)
        {
          this->in_a_cycle (1);
          ++this->number_of_cycles_;
        }

      ACE_DEBUG ((LM_ERROR,
                  "RT_Infos \"%s\" and \"%s\" are part of dependency cycle %d.\n",
                  entry.actual_rt_info ()->entry_point.in (),
                  successor.actual_rt_info ()->entry_point.in (),
                  this->number_of_cycles_));
    }

  return 0;
}
// Postfix action for the reverse DFS traversal: stamps the entry's
// reverse finish time and marks it finished, after all of its
// successors have been visited.  Returns 0 on success and -1 on
// error (this implementation cannot fail).
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_SCC_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
postfix_action (TAO_Reconfig_Scheduler_Entry &rse)
{
  // Record the finish timestamp (post-incrementing the shared DFS
  // clock), then flag the entry as completely explored.
  rse.rev_finished (this->DFS_time_++);
  rse.rev_dfs_status (TAO_Reconfig_Scheduler_Entry::FINISHED);
  return 0;
}
/////////////////////////////////////////
// TAO_RSE_Reverse_Propagation_Visitor //
/////////////////////////////////////////

// Constructor: simply hands the dependency and RT_Info maps through
// to the dependency visitor base class.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
TAO_RSE_Reverse_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
TAO_RSE_Reverse_Propagation_Visitor
  (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dep_map,
   ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & info_map)
  : TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK> (dep_map, info_map)
{
}
// Pre-recurse action for the reverse (call graph transpose)
// propagation pass: folds each enabled successor's aggregate
// execution time into the entry's aggregate execution time.
// Returns 1 so the caller moves on to the next successor without
// recursing, or -1 if the entry is a conjunction node (which is not
// supported).
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_Reverse_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
                    TAO_Reconfig_Scheduler_Entry &successor,
                    const RtecScheduler::Dependency_Info &di)
{
  ACE_UNUSED_ARG (di);

  // @TODO - check for conjunction nodes here and perform a
  // conjunctive function on existing rate tuples.  Idea: treat
  // conjunctive tuples as skolem functions over the possible rates
  // of their incident edges' thread delineators, so utilization can
  // be tentatively computed for rate combinations.  Open question:
  // is there a case where this makes tuple rate admission
  // non-monotonic (a higher input rate yielding lower utilization)?
  // It might require a skew in the exec times and rates; the impact
  // of phasing is also unknown.

  // Conjunction nodes represent a cut point in the graph, so no
  // propagation would happen upward from them; for now they are
  // rejected outright.
  if (entry.actual_rt_info ()->info_type == RtecScheduler::CONJUNCTION)
    {
      ACE_ERROR_RETURN ((LM_ERROR,
                         ACE_TEXT ("Conjunction Nodes are not supported currently.")),
                        -1);
    }

  // @TODO - replace the explicit WCET attribute propagation with a
  // scheduling strategy functor that propagates arbitrary execution
  // time attributes.  For conjunctions, BCET and WCET are probably
  // needed relative to the upper and lower bounds on arrival
  // waveforms.

  // Nodes are visited in topological order (called nodes before
  // calling nodes), so the successor's aggregate time is already up
  // to date; accumulate it into the entry's aggregate time.
  if (successor.enabled_state () != RtecScheduler::RT_INFO_DISABLED)
    {
      entry.aggregate_exec_time (entry.aggregate_exec_time ()
                                 + successor.aggregate_exec_time ());
    }

  // Do not recurse on the successor node; just continue on to the
  // next successor.
  return 1;
}
/////////////////////////////////////////
// TAO_RSE_Forward_Propagation_Visitor //
/////////////////////////////////////////

// Constructor: passes the dependency and RT_Info maps to the
// dependency visitor base class and zeroes the diagnostic counters
// for unresolved local/remote dependencies and thread specification
// errors.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
TAO_RSE_Forward_Propagation_Visitor
  (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dep_map,
   ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & info_map)
  : TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK> (dep_map, info_map),
    unresolved_locals_ (0),
    unresolved_remotes_ (0),
    thread_specification_errors_ (0)
{
}
// Accessor: number of nodes found with unresolved local
// dependencies.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
unresolved_locals (void)
{
  return this->unresolved_locals_;
}
// Mutator: sets the count of nodes with unresolved local
// dependencies.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
unresolved_locals (int count)
{
  this->unresolved_locals_ = count;
}
// Accessor: number of nodes found with unresolved remote
// dependencies.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
unresolved_remotes (void)
{
  return this->unresolved_remotes_;
}
// Mutator: sets the count of nodes with unresolved remote
// dependencies.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
unresolved_remotes (int count)
{
  this->unresolved_remotes_ = count;
}
// Accessor: number of nodes found with thread specification errors
// (threads declared without a period).
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
thread_specification_errors (void)
{
  return this->thread_specification_errors_;
}
// Mutator: sets the count of nodes with thread specification
// errors.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
thread_specification_errors (int count)
{
  this->thread_specification_errors_ = count;
}
// Diagnoses the entry before any of its successors are visited: a
// node still marked as a thread delineator but lacking a period
// either has a specification error (it declares threads) or has
// unresolved dependencies (remote if its RT_Info info_type says so,
// local otherwise).  The corresponding counter is bumped and the
// problem is logged.  Returns 0 on success and -1 on error (having
// unresolved dependencies or thread specification problems is not
// considered an error, at least for this method).
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
prefix_action (TAO_Reconfig_Scheduler_Entry &rse)
{
  // Only thread delineators that are still missing a period need
  // any diagnosis; everything else is fine as-is.
  if (rse.is_thread_delineator () == 0
      || rse.actual_rt_info ()->period != 0)
    {
      return 0;
    }

  if (rse.actual_rt_info ()->threads != 0)
    {
      // Specification error: any RT_Info that specifies threads
      // must also specify a period.
      ++this->thread_specification_errors_;
      ACE_DEBUG ((LM_ERROR,
                  "RT_Info \"%s\" specifies %1d "
                  "threads, but no period.\n",
                  rse.actual_rt_info ()->entry_point.in (),
                  rse.actual_rt_info ()->threads));
    }
  else if (rse.actual_rt_info ()->info_type ==
             RtecScheduler::REMOTE_DEPENDANT)
    {
      ++this->unresolved_remotes_;
      ACE_DEBUG ((LM_ERROR,
                  "RT_Info \"%s\" has unresolved "
                  "remote dependencies.\n",
                  rse.actual_rt_info ()->entry_point.in ()));
    }
  else
    {
      // Not a remote dependant, so the missing rate information must
      // come from unresolved *local* dependencies.
      ++this->unresolved_locals_;
      ACE_DEBUG ((LM_ERROR,
                  "RT_Info \"%s\" has unresolved "
                  "local dependencies.\n",
                  rse.actual_rt_info ()->entry_point.in ()));
    }

  return 0;
}
// Propagates effective period from entry to successor prior to
// visiting successor. Returns 0 on success and -1 on error.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
TAO_Reconfig_Scheduler_Entry &successor,
const RtecScheduler::Dependency_Info &di)
{
if (successor.enabled_state () == RtecScheduler::RT_INFO_DISABLED)
{
return 1;
}
ACE_UNUSED_ARG (di);
TAO_RT_Info_Tuple **tuple_ptr_ptr;
TUPLE_SET_ITERATOR orig_tuple_iter (entry.orig_tuple_subset ());
while (orig_tuple_iter.done () == 0)
{
if (orig_tuple_iter.next (tuple_ptr_ptr) == 0
|| tuple_ptr_ptr == 0 || *tuple_ptr_ptr == 0)
{
ACE_ERROR ((LM_ERROR,
"Failed to access tuple under iterator"));
return -1;
}
// @TODO - check for conjunction nodes here and perform conjunctive
// function on existing rate tuples.
#ifdef SCHEDULER_LOGGING
ACE_DEBUG((LM_DEBUG, "Inserting new propagated tuple for RT_Info: %d, entry_ptr: 0x%x, tuple_ptr: 0x%x\n",
// NOTE(review): the original source was truncated at this point --
// the ACE_DEBUG statement above and the remainder of
// TAO_RSE_Forward_Propagation_Visitor::pre_recurse_action are
// missing and must be restored from the upstream TAO sources.