// current.cpp
ACE_CHECK;
this->scheduler_ = RTScheduling::Scheduler::_narrow (scheduler_obj
ACE_ENV_ARG_PARAMETER);
ACE_CHECK;
}
// Starts a scheduling segment on the calling thread.  When no segment
// is active (empty GUID) a fresh distributable thread (DT) is created,
// registered with the local scheduler and bound into the DT map.
// Otherwise a nested segment is announced to the scheduler and a new
// Current implementation, chained to this one, is installed in TSS.
//
// @param name                  segment name; remembered for later updates.
// @param sched_param           scheduling parameter policy (may be nil).
// @param implicit_sched_param  implicit scheduling parameter policy.
// @throws CORBA::SystemException, UNSUPPORTED_SCHEDULING_DISCIPLINE
//         propagated from the scheduler via the ACE_ENV/ACE_CHECK macros.
void
TAO_RTScheduler_Current_i::begin_scheduling_segment (
    const char * name,
    CORBA::Policy_ptr sched_param,
    CORBA::Policy_ptr implicit_sched_param
    ACE_ENV_ARG_DECL)
  ACE_THROW_SPEC ((CORBA::SystemException,
                   RTScheduling::Current::UNSUPPORTED_SCHEDULING_DISCIPLINE))
{
  // An empty GUID means this is a new scheduling segment.
  if (this->guid_.length () == 0)
    {
      // Generate a GUID: the raw bytes of the process-wide counter.
      long temp = ++TAO_RTScheduler_Current::guid_counter;
      this->guid_.length (sizeof (long));
      ACE_OS::memcpy (this->guid_.get_buffer (),
                      &temp,
                      sizeof (long));

      // BUGFIX: the original read the GUID back into a local 'int'
      // that was never used; the memcpy wrote sizeof(long) bytes into
      // a 4-byte local on LP64 platforms (stack buffer overflow).
      // The dead read-back has been removed.

      // Inform the scheduler of the new scheduling segment.
      this->scheduler_->begin_new_scheduling_segment (this->guid_,
                                                      name,
                                                      sched_param,
                                                      implicit_sched_param
                                                      ACE_ENV_ARG_PARAMETER);
      ACE_CHECK;

      if (CORBA::is_nil (this->dt_.in ()))
        // Create new DT.
        this->dt_ = TAO_DistributableThread_Factory::create_DT ();

      // Add new DT to map.
      int result = this->dt_hash_->bind (this->guid_,
                                         this->dt_);

      // Error in binding to the map - cancel thread.
      if (result != 0)
        {
          this->cancel_thread (ACE_ENV_SINGLE_ARG_PARAMETER);
          ACE_CHECK;
        }

      // Remember parameters for the scheduling segment.
      this->name_ = CORBA::string_dup (name);
      this->sched_param_ = CORBA::Policy::_duplicate (sched_param);
      this->implicit_sched_param_ = CORBA::Policy::_duplicate (implicit_sched_param);
    }
  else // Nested segment.
    {
      // Honor a pending cancellation before starting more work.
      if (this->dt_->state () == RTScheduling::DistributableThread::CANCELLED)
        {
          this->cancel_thread (ACE_ENV_SINGLE_ARG_PARAMETER);
          ACE_CHECK;
        }

      // Inform scheduler of start of nested scheduling segment.
      this->scheduler_->begin_nested_scheduling_segment
        (this->guid_,
         name,
         sched_param,
         implicit_sched_param
         ACE_ENV_ARG_PARAMETER);
      ACE_CHECK;

      // Install a new Current implementation for the nested segment in
      // TSS, chained back to this one through the 'this' argument.
      TAO_TSS_Resources *tss =
        TAO_TSS_RESOURCES::instance ();

      TAO_RTScheduler_Current_i* new_current;
      ACE_NEW_THROW_EX (new_current,
                        TAO_RTScheduler_Current_i (this->orb_,
                                                   this->dt_hash_,
                                                   this->guid_,
                                                   name,
                                                   sched_param,
                                                   implicit_sched_param,
                                                   this->dt_.in (),
                                                   this),
                        CORBA::NO_MEMORY (
                          CORBA::SystemException::_tao_minor_code (
                            TAO_DEFAULT_MINOR_CODE,
                            ENOMEM),
                          CORBA::COMPLETED_NO));
      ACE_CHECK;

      tss->rtscheduler_current_impl_ = new_current;
    }
}
// Updates the scheduling parameters of the current segment: honors a
// pending cancellation first, notifies the scheduler, then records the
// new name and policies locally.
//
// @param name                  new segment name.
// @param sched_param           new scheduling parameter policy.
// @param implicit_sched_param  new implicit scheduling parameter policy.
// @throws CORBA::SystemException, UNSUPPORTED_SCHEDULING_DISCIPLINE
//         propagated from the scheduler via ACE_CHECK.
void
TAO_RTScheduler_Current_i::update_scheduling_segment (const char * name,
CORBA::Policy_ptr sched_param,
CORBA::Policy_ptr implicit_sched_param
ACE_ENV_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
RTScheduling::Current::UNSUPPORTED_SCHEDULING_DISCIPLINE))
{
// Check if DT has been cancelled
if (this->dt_->state () == RTScheduling::DistributableThread::CANCELLED)
{
// cancel_thread presumably raises; ACE_CHECK then returns early --
// TODO confirm cancel_thread's exception contract.
this->cancel_thread (ACE_ENV_SINGLE_ARG_PARAMETER);
ACE_CHECK;
}
// Let scheduler know of the updates.
this->scheduler_->update_scheduling_segment (this->guid_,
name,
sched_param,
implicit_sched_param
ACE_ENV_ARG_PARAMETER);
ACE_CHECK;
// Remember the new values.  string_dup/_duplicate take fresh
// references owned by this object (members are presumably _var types
// that release any previous value).
this->name_ = CORBA::string_dup (name);
this->sched_param_ = CORBA::Policy::_duplicate (sched_param);
this->implicit_sched_param_ = CORBA::Policy::_duplicate (implicit_sched_param);
}
// Ends the current scheduling segment.  For the outermost segment
// (no previous Current in the chain) the scheduler is told the DT is
// terminating and both the DT and this Current are cleaned up; for a
// nested segment only the nested end is reported and this Current is
// cleaned up, restoring the previous one.
//
// @param name  segment name, passed through to the scheduler.
// @throws CORBA::SystemException propagated via ACE_CHECK.
void
TAO_RTScheduler_Current_i::end_scheduling_segment (const char * name
ACE_ENV_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException))
{
// Check if DT has been cancelled
if (this->dt_->state () == RTScheduling::DistributableThread::CANCELLED)
{
this->cancel_thread (ACE_ENV_SINGLE_ARG_PARAMETER);
ACE_CHECK;
}
// previous_current_ == 0 means this is the outermost segment.
if (this->previous_current_ == 0)
{
// Let the scheduler know that the DT is
// terminating.
this->scheduler_->end_scheduling_segment(this->guid_,
name
ACE_ENV_ARG_PARAMETER);
ACE_CHECK;
// Cleanup DT.
this->cleanup_DT ();
// Cleanup current.
this->cleanup_current ();
// A Nested segment.
} else {
// Inform scheduler of end of nested
// scheduling segment, restoring the enclosing segment's
// scheduling parameter.
this->scheduler_->end_nested_scheduling_segment (this->guid_,
name,
this->previous_current_->sched_param_
ACE_ENV_ARG_PARAMETER);
ACE_CHECK;
// Cleanup current.
this->cleanup_current ();
}
}
// Spawns a new distributable thread running the given ThreadAction.
//
// Returns a reference to the new DT, or a nil reference if the
// distributable thread could not be started (and hence is not known
// to the local scheduler).
//
// @param start         entry point invoked on the new thread.
// @param data          opaque argument forwarded to the entry point.
// @param name          scheduling segment name for the new DT.
// @param sched_param   scheduling parameter; falls back to the current
//                      implicit scheduling parameter when nil/0.
// @param implicit_sched_param  implicit scheduling parameter policy.
// @param stack_size    stack size for the new native thread.
// @param base_priority CORBA priority for the new native thread.
RTScheduling::DistributableThread_ptr
TAO_RTScheduler_Current_i::spawn (RTScheduling::ThreadAction_ptr start,
                                  CORBA::VoidData data,
                                  const char* name,
                                  CORBA::Policy_ptr sched_param,
                                  CORBA::Policy_ptr implicit_sched_param,
                                  CORBA::ULong stack_size,
                                  RTCORBA::Priority base_priority
                                  ACE_ENV_ARG_DECL)
  ACE_THROW_SPEC ((CORBA::SystemException))
{
  // Check if DT has been cancelled.
  if (this->dt_->state () == RTScheduling::DistributableThread::CANCELLED)
    {
      this->cancel_thread (ACE_ENV_SINGLE_ARG_PARAMETER);
      // BUGFIX: the cancellation exception was previously ignored and
      // spawn carried on; propagate it and return nil instead.
      ACE_CHECK_RETURN (RTScheduling::DistributableThread::_nil ());
    }

  // If no scheduling parameter is specified then use the current
  // implicit scheduling parameter as the scheduling parameter.
  if (sched_param == 0)
    sched_param = this->implicit_sched_param_;

  // Create the DT and a Current implementation for the new thread.
  RTScheduling::DistributableThread_var dt =
    TAO_DistributableThread_Factory::create_DT ();

  TAO_RTScheduler_Current_i *new_current;
  ACE_NEW_RETURN (new_current,
                  TAO_RTScheduler_Current_i (this->orb_,
                                             this->dt_hash_),
                  0);
  new_current->DT (dt.in ());

  // Create new task for the new DT.
  DTTask *dttask;
  ACE_NEW_RETURN (dttask,
                  DTTask (//thread_manager_,
                          this->orb_,
                          this->dt_hash_,
                          new_current,
                          start,
                          data,
                          name,
                          sched_param,
                          implicit_sched_param),
                  0);

  if (dttask->activate_task (base_priority,
                             stack_size) == -1)
    {
      ACE_ERROR((LM_ERROR,
                 "Error in Spawning\n"));
      // BUGFIX: the original computed a nil reference here but fell
      // through and returned the unstarted DT as if spawn succeeded.
      // NOTE(review): dttask/new_current are not reclaimed on this
      // path -- ownership of a failed-to-activate task is not visible
      // here; confirm the destruction protocol before freeing them.
      return RTScheduling::DistributableThread::_nil ();
    }

  return dt._retn ();
}
// Activates the native thread that will run this task's distributable
// thread.  The CORBA base priority is mapped to a native priority via
// the ORB's PriorityMappingManager before activation.
//
// @param base_priority  CORBA priority for the new thread.
// @param stack_size     stack size handed to ACE_Task::activate.
// @return 0 on success, -1 on failure (resolution, narrowing, priority
//         mapping, or thread activation error).
int
DTTask::activate_task (RTCORBA::Priority base_priority,
                       CORBA::ULong stack_size
                       ACE_ENV_ARG_DECL)
{
  // Thread creation flags: new kernel thread, joinable, plus the
  // ORB-configured scope and scheduling policies.
  long default_flags = THR_NEW_LWP | THR_JOINABLE;
  long flags =
    default_flags |
    this->orb_->orb_params ()->scope_policy () |
    this->orb_->orb_params ()->sched_policy ();

  // Locate the ORB's priority mapping manager.
  CORBA::Object_var object =
    this->orb_->object_ref_table ().resolve_initial_references ("PriorityMappingManager"
                                                                ACE_ENV_ARG_PARAMETER);
  ACE_CHECK_RETURN (-1);

  RTCORBA::PriorityMappingManager_var mapping_manager =
    RTCORBA::PriorityMappingManager::_narrow (object.in ()
                                              ACE_ENV_ARG_PARAMETER);
  ACE_CHECK_RETURN (-1);

  RTCORBA::PriorityMapping *pm =
    mapping_manager->mapping ();

  RTCORBA::NativePriority native_priority;
  // BUGFIX: the mapping result was previously ignored -- on a failed
  // conversion native_priority was used uninitialized.
  if (pm->to_native (base_priority,
                     native_priority) == 0)
    ACE_ERROR_RETURN ((LM_ERROR,
                       ACE_TEXT ("Cannot map CORBA priority to a native priority.\n")),
                      -1);

  // ACE_Task::activate takes the per-thread stack size as an array
  // (one entry per thread; we activate exactly one thread).
  size_t stack [1];
  stack [0] = stack_size;

  if (this->activate (flags,
                      1,
                      0,               //force_active
                      native_priority, //priority
                      -1,              //grp_id
                      0,               //ACE_Task_Base
                      0,               //thread_handles
                      0,               //stack
                      stack            //stack_size
                      ) == -1)
    {
      if (ACE_OS::last_error () == EPERM)
        ACE_ERROR_RETURN ((LM_ERROR,
                           ACE_TEXT ("Insufficient privilege to run this test.\n")),
                          -1);

      // BUGFIX: the original fell through and reported success (0) for
      // any non-EPERM activation failure.
      return -1;
    }

  return 0;
}
// Constructs the task that will run a spawned distributable thread.
// Stores (and takes its own references to) everything svc() needs:
// the ORB core, the DT map, the pre-built Current implementation, the
// entry-point action, its argument, and the segment name/policies.
// NOTE(review): orb_, dt_hash_ and current_ are raw pointers -- the
// task presumably does not own them; confirm against the class
// declaration.
DTTask::DTTask (//ACE_Thread_Manager *manager,
TAO_ORB_Core *orb,
DT_Hash_Map *dt_hash,
TAO_RTScheduler_Current_i* new_current,
RTScheduling::ThreadAction_ptr start,
CORBA::VoidData data,
const char *name,
CORBA::Policy_ptr sched_param,
CORBA::Policy_ptr implicit_sched_param)
://manager_ (manager),
orb_ (orb),
dt_hash_ (dt_hash),
current_ (new_current),
start_ (RTScheduling::ThreadAction::_duplicate (start)),
data_ (data),
name_ (CORBA::string_dup (name)),
sched_param_ (CORBA::Policy::_duplicate (sched_param)),
implicit_sched_param_ (CORBA::Policy::_duplicate (implicit_sched_param))
{
}
int
DTTask::svc (void)
{
ACE_TRY_NEW_ENV
{
TAO_TSS_Resources *tss =
TAO_TSS_RESOURCES::instance ();
tss->rtscheduler_current_impl_ = this->current_;
this->current_->begin_scheduling_segment (this->name_.in (),
this->sched_param_.in (),
this->implicit_sched_param_.in ()
ACE_ENV_ARG_PARAMETER);
ACE_TRY_CHECK;
// Invoke entry point into new DT.
this->start_->_cxx_do (this->data_
ACE_ENV_ARG_PARAMETER);
ACE_TRY_CHECK;
this->current_->end_scheduling_segment (this->name_.in ()
ACE_ENV_ARG_PARAMETER);
ACE_TRY_CHECK;
}
ACE_CATCHANY
{
// NOTE(review): the body of this ACE_CATCHANY handler was destroyed by
// text extraction (replaced with code-viewer UI text).  Reconstructed
// below following the usual TAO pattern -- confirm against the
// upstream TAO_RTScheduler_Current source.
ACE_PRINT_EXCEPTION (ACE_ANY_EXCEPTION,
"Caught exception:");
return -1;
}
ACE_ENDTRY;
return 0;
}