// reconfig_sched_utils_t.cpp
//
// Template member definitions for the TAO Reconfig Scheduler utility
// visitor classes.  (Stray web-page artifacts removed from this file.)
successor.actual_rt_info ()->handle,
&successor,
(*tuple_ptr_ptr)));
#endif
// Propagate tuples disjunctively.
successor.insert_tuple (**tuple_ptr_ptr,
TAO_Reconfig_Scheduler_Entry::PROPAGATED);
successor.actual_rt_info ()->period =
(successor.actual_rt_info ()->period == 0)
? (*tuple_ptr_ptr)->period
: ACE::minimum_frame_size (successor.actual_rt_info ()->period,
(*tuple_ptr_ptr)->period);
orig_tuple_iter.advance ();
}
TUPLE_SET_ITERATOR prop_tuple_iter (entry.prop_tuple_subset ());
while (prop_tuple_iter.done () == 0)
{
if (prop_tuple_iter.next (tuple_ptr_ptr) == 0
|| tuple_ptr_ptr == 0 || *tuple_ptr_ptr == 0)
{
ACE_ERROR ((LM_ERROR,
"Failed to access tuple under iterator"));
return -1;
}
// @TODO - check for conjunction nodes here and perform conjunctive
// function on existing rate tuples.
#ifdef SCHEDULER_LOGGING
ACE_DEBUG((LM_DEBUG, "Inserting new propagated tuple for RT_Info: %d, entry_ptr: 0x%x, tuple_ptr: 0x%x\n",
successor.actual_rt_info ()->handle,
&successor,
(*tuple_ptr_ptr)));
#endif
// Propagate tuples disjunctively.
successor.insert_tuple (**tuple_ptr_ptr,
TAO_Reconfig_Scheduler_Entry::PROPAGATED);
successor.actual_rt_info ()->period =
(successor.actual_rt_info ()->period == 0)
? (*tuple_ptr_ptr)->period
: ACE::minimum_frame_size (successor.actual_rt_info ()->period,
(*tuple_ptr_ptr)->period);
prop_tuple_iter.advance ();
}
// Do not recurse on the successor node, just continue to the next successor.
return 1;
}
////////////////////////////////////
// class TAO_RSE_Priority_Visitor //
////////////////////////////////////
// Constructor.
//
// @param handles          Handle count — presumably the number of RT_Info
//                         entries covered by @a entry_ptr_array; confirm
//                         against the class declaration in the header.
// @param entry_ptr_array  Array of scheduler entry pointers.  The visitor
//                         walks this array (via first_subpriority_entry_)
//                         when it back-patches subpriorities, so the array
//                         is expected to be in visitation order.
//
// os_priority_ starts at the highest OS priority available for
// SCHED_FIFO in process scope; visit () steps it down once per new
// priority level via ACE_Sched_Params::previous_priority ().
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
TAO_RSE_Priority_Visitor (RtecScheduler::handle_t handles,
                          TAO_Reconfig_Scheduler_Entry ** entry_ptr_array)
  : previous_entry_ (0),
    first_subpriority_entry_ (0),
    priority_ (0),
    subpriority_ (0),
    os_priority_ (ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
                                                  ACE_SCOPE_PROCESS)),
    handles_ (handles),
    entry_ptr_array_ (entry_ptr_array)
{
}
// Visit a scheduler entry.  This method assigns a priority and
// subpriority value to each visited entry.  Priorities are assigned in
// increasing numeric order, with lower numbers corresponding to
// higher priorities.  Subpriorities are first stored as negative
// offsets and later adjusted (here when a level closes, and in
// finish () for the last level) so that within a level they come out
// in decreasing order.
//
// @param rse  The entry to assign priority values to.  Entries are
//             expected to be visited in priority-sorted order, since a
//             new level is opened whenever the strategy's
//             compare_priority () distinguishes this entry from the
//             previous one.
//
// @return 1 if a new priority level was started at this entry,
//         0 otherwise.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::visit (TAO_Reconfig_Scheduler_Entry &rse)
{
  int result = 0;

#ifdef SCHEDULER_LOGGING
  ACE_DEBUG ((LM_DEBUG,
              "Priority_Visitor visiting %s[%d],crit=%d,period=%d\n",
              rse.actual_rt_info ()->entry_point.in(),
              rse.actual_rt_info ()->handle,
              rse.actual_rt_info ()->criticality,
              rse.actual_rt_info ()->period));
#endif

  if (previous_entry_ == 0)
    {
      // First entry visited: indicate a new priority level was assigned.
      result = 1;

      // If we're on the first node, store the start of the array
      // as the start of the priority level.
      first_subpriority_entry_ = this->entry_ptr_array_;

      // First entry in the level gets subpriority 0 (subpriority_ is 0 here).
      rse.actual_rt_info ()->preemption_subpriority = subpriority_;
    }
  else
    {
#ifdef SCHEDULER_LOGGING
      ACE_DEBUG ((LM_DEBUG,
                  "Previous entry %s[%d],crit=%d,period=%d\n",
                  previous_entry_->actual_rt_info ()->entry_point.in(),
                  previous_entry_->actual_rt_info ()->handle,
                  previous_entry_->actual_rt_info ()->criticality,
                  previous_entry_->actual_rt_info ()->period));
#endif

      // Don't change priority levels on a disabled node: disabled
      // entries stay in the current level regardless of comparison.
      if (rse.enabled_state () == RtecScheduler::RT_INFO_DISABLED
          || RECONFIG_SCHED_STRATEGY::compare_priority (*previous_entry_, rse) == 0)
        {
          // Subpriority is increased at each new node.
          ++subpriority_;

          // Store negative value of subpriority level: will be
          // adjusted by adding back in the total number of
          // subpriorities in the priority level, so the
          // subpriorities are assigned in decreasing order.
          rse.actual_rt_info ()->preemption_subpriority = - subpriority_;
        }
      else
        {
          // Indicate a new priority level was assigned.
          result = 1;

          // Iterate back through and adjust the subpriority levels
          // of the level that just closed (subpriority_ + 1 entries).
          for (int i = 0; i <= subpriority_; ++i, ++first_subpriority_entry_)
            {
              (*first_subpriority_entry_)->actual_rt_info ()->
                preemption_subpriority += subpriority_;
            }

          // Reset subpriority numbering for the new level; this entry
          // is the level's first member.
          subpriority_ = 0;
          rse.actual_rt_info ()->preemption_subpriority = subpriority_;
          ++priority_;

#ifdef SCHEDULER_LOGGING
          ACE_DEBUG ((LM_DEBUG, "New priority %d formed\n", priority_));
#endif

          // Step down to the next lower OS priority for the new level.
          os_priority_ = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
                                                              os_priority_,
                                                              ACE_SCOPE_PROCESS);
        }
    }

  // Assign the entry's priority and subpriority values.
  rse.actual_rt_info ()->priority = os_priority_;
  rse.actual_rt_info ()->preemption_priority = priority_;

  // Remember the current entry for the next visit.
  previous_entry_ = &rse;

  return result;
}
// Finishes scheduler entry priority assignment: walks the entries of
// the final (still open) subpriority level and converts their stored
// negative relative subpriorities into final values by adding back the
// level's highest subpriority index.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::finish ()
{
  // Adjust each of the subpriority_ + 1 entries in the last level.
  int remaining = subpriority_;
  while (remaining >= 0)
    {
      (*first_subpriority_entry_)->actual_rt_info ()->
        preemption_subpriority += subpriority_;
      ++first_subpriority_entry_;
      --remaining;
    }

  // Indicate no new priority level was identified.
  return 0;
}
///////////////////////////////////////
// class TAO_Tuple_Admission_Visitor //
///////////////////////////////////////
// Constructor.
//
// @param critical_utilization_threshold     Utilization bound tested when
//                                           admitting tuples the strategy
//                                           deems critical.
// @param noncritical_utilization_threshold  Utilization bound tested when
//                                           admitting noncritical tuples.
//
// Both accumulated-utilization counters start at zero; visit ()
// increments them as tuples are admitted.
template <class RECONFIG_SCHED_STRATEGY>
TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::
TAO_Tuple_Admission_Visitor (const CORBA::Double & critical_utilization_threshold,
                             const CORBA::Double & noncritical_utilization_threshold)
  : critical_utilization_ (0.0),
    noncritical_utilization_ (0.0),
    critical_utilization_threshold_ (critical_utilization_threshold),
    noncritical_utilization_threshold_ (noncritical_utilization_threshold)
{
}
// Visit an RT_Info tuple.  This method determines the utilization
// delta the tuple would add, and — if admitting it keeps total
// utilization under the appropriate threshold — admits it, adding the
// delta to the critical or non-critical utilization depending on
// whether or not the strategy says the operation is critical.
//
// @param t  The candidate tuple.  Its volatile_token field carries the
//           owning scheduler entry's pointer (stored elsewhere and
//           recovered here via ACE_LONGLONG_TO_PTR).
//
// @return 0 in all cases (rejection is not reported as an error).
template <class RECONFIG_SCHED_STRATEGY> int
TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::visit (TAO_RT_Info_Tuple &t)
{
  // Recover the entry pointer stashed in the tuple's volatile_token.
  TAO_Reconfig_Scheduler_Entry *entry =
    ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
                         t.volatile_token);

  // Ignore disabled tuples and entries.
  if (t.enabled_state () == RtecScheduler::RT_INFO_DISABLED
      || entry->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
    {
      return 0;
    }

  // Compute the current tuple's utilization:
  // threads * aggregate execution time / period.
  CORBA::Double delta_utilization =
    (ACE_static_cast (CORBA::Double,
                      t.threads)
     * ACE_static_cast (CORBA::Double,
                        ACE_UINT64_DBLCAST_ADAPTER (entry->
                                                    aggregate_exec_time ())))
    / ACE_static_cast (CORBA::Double,
                       t.period);

  // Subtract the previous tuple's utilization (if any) for the entry,
  // so delta_utilization is the net change from re-admitting the entry
  // at this tuple's rate instead of the currently admitted one.
  if (entry->current_admitted_tuple ())
    {
      delta_utilization -=
        (ACE_static_cast (CORBA::Double,
                          entry->current_admitted_tuple ()->threads)
         * ACE_static_cast (CORBA::Double,
                            ACE_UINT64_DBLCAST_ADAPTER (entry->
                                                        aggregate_exec_time ())))
        / ACE_static_cast (CORBA::Double,
                           entry->current_admitted_tuple ()->period);
    }

  if (RECONFIG_SCHED_STRATEGY::is_critical (t))
    {
      // NOTE(review): both branches test the COMBINED critical +
      // noncritical utilization against their respective threshold —
      // presumably intentional (total load bound per class); confirm
      // against the admission-control policy this strategy implements.
      if (this->critical_utilization_ + this->noncritical_utilization_
          + delta_utilization
          < this->critical_utilization_threshold_)
        {
          // Admit: account the delta and record the tuple and period.
          this->critical_utilization_ += delta_utilization;
          entry->current_admitted_tuple (&t);
          entry->actual_rt_info ()->period = t.period;
        }
    }
  else
    {
      if (this->critical_utilization_ + this->noncritical_utilization_
          + delta_utilization
          < this->noncritical_utilization_threshold_)
        {
          // Admit: account the delta and record the tuple and period.
          this->noncritical_utilization_ += delta_utilization;
          entry->current_admitted_tuple (&t);
          entry->actual_rt_info ()->period = t.period;
        }
    }

  return 0;
}
// Accessor: accumulated utilization admitted for critical operations.
template <class RECONFIG_SCHED_STRATEGY> CORBA::Double
TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::critical_utilization ()
{
  return critical_utilization_;
}
// Accessor: accumulated utilization admitted for noncritical operations.
template <class RECONFIG_SCHED_STRATEGY> CORBA::Double
TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::noncritical_utilization ()
{
  return noncritical_utilization_;
}
// Accessor: admission threshold applied to critical operations.
template <class RECONFIG_SCHED_STRATEGY> CORBA::Double
TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::critical_utilization_threshold ()
{
  return critical_utilization_threshold_;
}
// Accessor: admission threshold applied to noncritical operations.
// (Original comment mislabeled this as a utilization accessor.)
template <class RECONFIG_SCHED_STRATEGY> CORBA::Double
TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::noncritical_utilization_threshold ()
{
  return noncritical_utilization_threshold_;
}
/////////////////////////////////////////
// TAO_RSE_Criticality_Propagation_Visitor //
/////////////////////////////////////////
// Constructor.
//
// @param dependency_map  Map of dependency sets, forwarded untouched to
//                        the TAO_RSE_Dependency_Visitor base class.
// @param rt_info_map     Map of RT_Info structures, likewise forwarded
//                        to the base class.
//
// This visitor adds no state of its own; all traversal bookkeeping
// lives in the dependency-visitor base.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
TAO_RSE_Criticality_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
TAO_RSE_Criticality_Propagation_Visitor
  (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
   ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map)
  : TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK> (dependency_map, rt_info_map)
{
}
// Propagate criticality from an entry to one of its successors: the
// successor's criticality is raised to at least the entry's.  Disabled
// successors are left untouched.  Always returns 1, so the traversal
// does not recurse on the successor and instead continues with the
// entry's next successor.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_Criticality_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
                    TAO_Reconfig_Scheduler_Entry &successor,
                    const RtecScheduler::Dependency_Info &di)
{
  ACE_UNUSED_ARG (di);

#ifdef SCHEDULER_LOGGING
  ACE_DEBUG ((LM_DEBUG,
              "Crit Prop_Visitor visiting %s[%d], successor is %s[%d]\n",
              entry.actual_rt_info ()->entry_point.in(),
              entry.actual_rt_info ()->handle,
              successor.actual_rt_info ()->entry_point.in(),
              successor.actual_rt_info ()->handle));
#endif

  if (successor.enabled_state () != RtecScheduler::RT_INFO_DISABLED)
    {
      // Take the larger of the two criticalities and store it back on
      // the successor.
      RtecScheduler::Criticality_t propagated =
        successor.actual_rt_info ()->criticality;
      if (propagated < entry.actual_rt_info ()->criticality)
        {
          propagated = entry.actual_rt_info ()->criticality;
        }
      successor.actual_rt_info ()->criticality = propagated;

#ifdef SCHEDULER_LOGGING
      ACE_DEBUG ((LM_DEBUG,
                  "Successor's new criticality is %d\n",
                  successor.actual_rt_info ()->criticality));
#endif
    }

  // Do not recurse on the successor node, just continue to the next successor.
  return 1;
}
#endif /* TAO_RECONFIG_SCHED_UTILS_T_C */
// (End of file.  Web-page keyboard-shortcut help text removed.)