// rtcosscheduling_pcp_manager.cpp
/// Take the pending lock off the head of the list
/// (highest priority request) and add it to the free list;
/// pending_ and free_ are the heads of the two lists
L = *(this->pending_->next());                     // copy the head node
L.next(this->pending_->next()->next());            // remember its successor
this->pending_->next()->next(this->free_->next()); // splice head onto free list
this->free_->next(this->pending_->next());
this->pending_->next(L.next());                    // advance the pending list
L.next(0);
return 1;
}
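// [Illustrative sketch, not part of the original file] The splice above is
// constant-time dummy-head list surgery: unlink the first real node of the
// pending list and push it onto the front of the free list. The same idea
// on a minimal, hypothetical node type:
namespace {
  struct Demo_Node
  {
    Demo_Node *next;
  };

  // 'pending' and 'free_list' are dummy head nodes, as pending_ and free_
  // appear to be above.
  void demo_move_head_to_free (Demo_Node *pending, Demo_Node *free_list)
  {
    Demo_Node *n = pending->next;    // highest priority request
    pending->next = n->next;         // unlink it from the pending list
    n->next = free_list->next;       // splice it onto the free list
    free_list->next = n;
  }
}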
PCP_Manager::PCP_Manager(CosSchedulingLockList *locks,
ACE_SYNCH_MUTEX *mutex,
RTCORBA::Current_var current)
: locks_(locks),
mutex_(mutex),
current_(current)
{
/// Build a system-wide unique thread ID: the process ID goes in
/// the high bits, the thread ID in the low 16 bits
this->threadID_ = (ACE_OS::getpid() << 16) + int(ACE_Thread::self());
}
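// [Illustrative sketch, not part of the original file] The packing above
// keeps IDs from different processes sharing the lock list distinct, but it
// implicitly assumes the thread ID fits in 16 bits. A hypothetical helper
// making that truncation explicit:
namespace {
  inline int pcp_pack_thread_id (int pid, int tid)
  {
    // process ID in the high bits, thread ID truncated to the low 16 bits
    return (pid << 16) + (tid & 0xFFFF);
  }
}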
void
PCP_Manager::lock(int priority_ceiling, int priority)
{
ACE_TRY_NEW_ENV
{
/// We do not want the thread to be pre-empted inside this critical
/// section, so raise its priority to the highest possible. This is
/// not strictly necessary, since the server should already be
/// running at RTCORBA::maxPriority.
this->current_->the_priority(RTCORBA::maxPriority);
this->mutex_->acquire();
ACE_TRY_CHECK;
/// create a structure to store my own lock request
CosSchedulingLockNode MyLock;
MyLock.threadID_ = this->threadID_;
MyLock.priority_ceiling_ = MyLock.elevated_priority_ = priority_ceiling;
MyLock.priority_ = priority;
/// Start by assuming we do not have the lock, then try to acquire it
int HaveLock = 0;
while (!HaveLock)
{
/// retrieve the lock with the highest priority ceiling from the list
CosSchedulingLockNode *highest_lock = this->locks_->highest_ceiling();
/// if I hold the highest ceiling or my priority is higher than the
/// highest ceiling, just get the lock
if ((highest_lock == 0) ||
(highest_lock->threadID_ == this->threadID_) ||
(highest_lock->priority_ceiling_ < priority))
{
/// Try to grant the lock; if it cannot be granted, we have
/// unfortunately run out of lock nodes
if (!this->locks_->grant_lock (MyLock))
{
ACE_ERROR ((LM_ERROR,
"Fatal error--out of lock nodes!\n"));
}
/// Lock obtained from grant_lock, don't loop again
HaveLock = 1;
}
else
{
/// Another lock with a higher ceiling is held; add this
/// request to the list of pending locks and wait on it
if (this->locks_->defer_lock(MyLock, *this->mutex_))
{
/// Done waiting: remove the request from the pending
/// lock list and retry the grant on the next loop
/// iteration
this->locks_->remove_deferred_lock (MyLock);
}
else
{
ACE_ERROR((LM_ERROR,
"Error in deferring lock\n"));
}
ACE_TRY_CHECK;
}
}
/// remove mutex on the lock list
this->mutex_->release();
ACE_TRY_CHECK;
/// At this point we hold the lock and have the right to set the
/// OS priority; do so at the priority ceiling.
this->current_->the_priority(priority_ceiling);
ACE_TRY_CHECK;
}
ACE_CATCHANY
{
ACE_PRINT_EXCEPTION(ACE_ANY_EXCEPTION,
"Error in locking the node for ServerScheduler\n");
}
ACE_ENDTRY;
}
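// [Usage sketch, not part of the original file; 'mgr', 'ceiling', and
// 'my_priority' are hypothetical names] Under the priority ceiling protocol
// a caller brackets its critical section with lock()/release_lock(); lock()
// blocks while another thread holds a lock whose ceiling is >= the caller's
// priority, and the critical section then runs at the ceiling:
//
//   mgr.lock (ceiling, my_priority);   // may wait behind a higher ceiling
//   // ... critical section executes at 'ceiling' priority ...
//   mgr.release_lock ();               // wakes the highest priority waiter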
void PCP_Manager::release_lock()
{
ACE_TRY_NEW_ENV
{
/// To prevent pre-emption in the critical section,
/// which could lead to unbounded blocking
this->current_->the_priority(RTCORBA::maxPriority);
/// Acquire the mutex guarding the lock list
this->mutex_->acquire();
ACE_TRY_CHECK;
/// remove the lock node from the list of locks
CosSchedulingLockNode L;
L.threadID_ = this->threadID_;
this->locks_->release_lock(L);
/// Done with the list, release the mutex
this->mutex_->release();
ACE_TRY_CHECK;
/// Wake the highest priority waiter by signalling its condition variable
CosSchedulingLockNode *waiter = this->locks_->highest_priority();
if (waiter)
{
waiter->condition_->signal();
}
ACE_TRY_CHECK;
/// We do not need to restore the priority here, because this thread
/// was already set to run at RTCORBA::maxPriority at the start of
/// this method
}
ACE_CATCHANY
{
ACE_PRINT_EXCEPTION(ACE_ANY_EXCEPTION,
"Error in unlocking the node for ServerScheduler\n");
}
ACE_ENDTRY;
}
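// [Illustrative sketch, not part of the original file] defer_lock() and
// release_lock() form a classic condition-variable handoff; in plain ACE
// terms (hypothetical names; ACE_Condition::wait() atomically releases and
// reacquires the associated mutex):
//
//   // waiter, inside defer_lock(), with the lock-list mutex held:
//   node->condition_->wait ();
//
//   // releaser, in release_lock() above:
//   waiter->condition_->signal ();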
PCP_Manager_Factory::PCP_Manager_Factory(char *shared_file)
{
ACE_TRY_NEW_ENV
{
#if !defined (ACE_LACKS_MMAP)
char temp_file[MAXPATHLEN + 1];
/// Get the temporary directory
if (ACE::get_temp_dir (temp_file,
MAXPATHLEN - ACE_OS::strlen(shared_file))
== -1)
ACE_ERROR ((LM_ERROR,
"Temporary path too long\n"));
ACE_TRY_CHECK;
/// Add the filename to the end
ACE_OS::strcat (temp_file, shared_file);
/// Store the path in the member variable.
this->shm_key_ = temp_file;
#ifndef ACE_LACKS_MKSTEMP
if (ACE_OS::mkstemp (this->shm_key_) == 0
#else
char *new_key = ACE_OS::mktemp (this->shm_key_);
if (ACE_OS::fopen(new_key, "w") != NULL
#endif /* ACE_LACKS_MKSTEMP */
|| (ACE_OS::unlink (this->shm_key_) == -1
#ifndef ACE_HAS_WINCE
&& errno == EPERM
#endif /* ACE_HAS_WINCE */
))
ACE_ERROR ((LM_ERROR,
"(%P|%t) %p\n",
this->shm_key_));
ACE_TRY_CHECK;
#else /* !ACE_LACKS_MMAP */
ACE_ERROR ((LM_INFO,
"mmap is not supported on this platform\n"));
#endif /* !ACE_LACKS_MMAP */
/// determine space requirements for the lock list
u_int CosSchedulingLockList_space =
LOCK_ARRAY_SIZE * sizeof (CosSchedulingLockNode);
/// allocate space in shared memory for size of the lock list
int result =
this->mem_.open(this->shm_key_, CosSchedulingLockList_space);
/// Make sure the shared memory for the CosSchedulingLockList is OK;
/// the scheduling service cannot run without it.
if (result == -1)
{
ACE_ERROR((LM_ERROR,
"Error in creating the shared memory segment to hold "
"Lock information, aborting ServerScheduler.\n"));
ACE_OS::exit(1);
}
ACE_TRY_CHECK;
/// Carve the lock array out of the shared memory segment
this->lock_array_ = ACE_static_cast(CosSchedulingLockNode *,
this->mem_.malloc(CosSchedulingLockList_space));
/// Make sure we got a valid pointer into shared memory; without
/// the lock array, locking is impossible, so abort
if (this->lock_array_ == 0)
{
ACE_ERROR((LM_ERROR,
"Error in creating Lock Array, locking impossible; aborting\n"));
ACE_OS::exit(1);
}
else
{
/// construct the new lock list in shared memory
ACE_NEW_THROW_EX(this->locks_,
CosSchedulingLockList(this->lock_array_,
LOCK_ARRAY_SIZE,
&this->mutex_),
CORBA::NO_MEMORY()
);
ACE_TRY_CHECK;
}
}
ACE_CATCHANY
{
ACE_PRINT_EXCEPTION(ACE_ANY_EXCEPTION,
"Error in Setting lock factory for ServerScheduler\n");
}
ACE_ENDTRY;
}
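// [Sketch of the general technique, not the ACE API used above] The factory
// places a fixed-size node array in a file-backed mapping so that every
// process opening the same key sees the same lock list. A plain POSIX
// equivalent of that layout (hypothetical name "/pcp_locks"; needs
// <sys/mman.h> and <fcntl.h>):
//
//   int fd = ::shm_open ("/pcp_locks", O_CREAT | O_RDWR, 0600);
//   ::ftruncate (fd, LOCK_ARRAY_SIZE * sizeof (CosSchedulingLockNode));
//   void *base = ::mmap (0, LOCK_ARRAY_SIZE * sizeof (CosSchedulingLockNode),
//                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
//   CosSchedulingLockNode *array =
//     static_cast<CosSchedulingLockNode *> (base);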
PCP_Manager_Factory::~PCP_Manager_Factory()
{
/// Destroy all the outstanding locks
this->locks_->destroy(LOCK_ARRAY_SIZE);
/// and remove the shared memory segment
this->mem_.remove();
delete this->locks_;
}