// mqueue.inl
// put() copies len bytes of *buf into the queue at priority prio
CYGPRI_KERNEL_SYNCH_MQUEUE_INLINE Cyg_Mqueue::qerr_t
Cyg_Mqueue::put( const char *buf, size_t len, unsigned int prio, bool block
#ifdef CYGFUN_KERNEL_THREADS_TIMER
                 , cyg_tick_count timeout
#endif
    )
{
    CYG_REPORT_FUNCTYPE( "err=%d");
    CYG_REPORT_FUNCARG4( "buf=%08x, len=%ld, prio=%u, block=%d",
                         buf, len, prio, block==true );
    CYG_CHECK_DATA_PTRC( buf );
    CYG_ASSERT_THISC();
    CYG_PRECONDITIONC( len <= (size_t)msgsize );
    qerr_t err;
    struct qentry *qtmp, *qent;

    // wait till a freelist entry is available
    if ( true == block ) {
#ifdef CYGFUN_KERNEL_THREADS_TIMER
        if ( timeout != 0) {
            if ( false == putsem.wait(timeout) ) {
                err = TIMEOUT;
                goto exit;
            }
        }
        else
#endif
        if ( false == putsem.wait() ) {
            err = INTR;
            goto exit;
        }
    } else {
        if ( false == putsem.trywait() ) {
            err = WOULDBLOCK;
            goto exit;
        }
    }

    // prevent preemption when fiddling with important members
    Cyg_Scheduler::lock();

    CYG_ASSERT_THISC();

    // get a queue entry from the freelist
    // don't need to check the freelist - the semaphore tells us there's
    // definitely a usable non-busy one there. It's just a question of
    // locating it.
    if (!freelist->busy) {          // fast-track common case
        qent = freelist;
        freelist = freelist->next;
    } else {
        for ( qtmp=freelist; qtmp->next->busy; qtmp=qtmp->next )
            CYG_EMPTY_STATEMENT;    // skip through
        qent = qtmp->next;
        qtmp->next = qent->next;
    }
    // now put it in place in q
    if ( NULL == q ) {
        q = qent;
        q->next = NULL;
    } else {
        struct qentry **qentp;
        // insert into queue according to prio
        for ( qentp=&q; NULL != *qentp; qentp = &((*qentp)->next) ) {
            if ((*qentp)->priority < prio)
                break;
        } // for
        qent->next = *qentp;
        *qentp = qent;
    } // else

    qent->priority = prio;  // have to set this now so when the sched is
                            // unlocked, other qent's can be added in the
                            // right place
    qent->busy = true;      // let things know this entry should be ignored
                            // until it's finished having its data copied

    // unlock the scheduler, and potentially switch threads, but
    // that's okay now. We don't want it locked for the expensive memcpy
    Cyg_Scheduler::unlock();

    qent->buflen = len;
    memcpy( qent->buf(), buf, len );

    // make available now - setting non-atomically is alright if you think
    // about it - the only thing that matters is that it's completed before
    // the post()
    qent->busy = false;

    // if we have to notify someone, we only do it if no-one's already
    // sitting waiting for a message to appear, AND if it's a transition
    // from empty to non-empty
    if ( callback != NULL && !getsem.waiting() && (0 == getsem.peek()) ) {
        getsem.post();
        callback( *this, callback_data );
    } else
        getsem.post();

    err = OK;
  exit:
    CYG_ASSERT_THISC();
    CYG_REPORT_RETVAL(err);
    return err;
} // Cyg_Mqueue::put()
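
// Usage sketch (illustrative only): a producer thread posting a fixed-size
// message. The queue object msgq, the buffer payload and the priority
// PAYLOAD_PRIO are hypothetical names; the queue is assumed to have been
// constructed with msgsize >= sizeof(payload), and the example omits the
// timeout argument, i.e. assumes CYGFUN_KERNEL_THREADS_TIMER is disabled.
//
//     char payload[16];
//     // ... fill payload ...
//     Cyg_Mqueue::qerr_t err = msgq.put( payload, sizeof(payload),
//                                        PAYLOAD_PRIO, true );
//     if ( Cyg_Mqueue::OK != err ) {
//         // blocking call: err can be INTR (interrupted wait); with the
//         // timer option and a non-zero timeout it can also be TIMEOUT,
//         // and with block==false it can be WOULDBLOCK
//     }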
//------------------------------------------------------------------------
// get() returns the oldest highest priority message in the queue in *buf
// and sets *prio to the priority (if prio is non-NULL) and *len to the
// actual message size
CYGPRI_KERNEL_SYNCH_MQUEUE_INLINE Cyg_Mqueue::qerr_t
Cyg_Mqueue::get( char *buf, size_t *len, unsigned int *prio, bool block
#ifdef CYGFUN_KERNEL_THREADS_TIMER
                 , cyg_tick_count timeout
#endif
    )
{
    CYG_REPORT_FUNCTYPE( "err=%d");
    CYG_REPORT_FUNCARG4( "buf=%08x, len=%08x, prio=%08x, block=%d",
                         buf, len, prio, block==true );
    CYG_CHECK_DATA_PTRC( buf );
    CYG_CHECK_DATA_PTRC( len );
    if ( NULL != prio )
        CYG_CHECK_DATA_PTRC( prio );
    CYG_ASSERT_THISC();

    qerr_t err;
    struct qentry *qent;

    // wait till a q entry is available
    if ( true == block ) {
#ifdef CYGFUN_KERNEL_THREADS_TIMER
        if ( timeout != 0) {
            if ( false == getsem.wait(timeout) ) {
                err = TIMEOUT;
                goto exit;
            }
        }
        else
#endif
        if ( false == getsem.wait() ) {
            err = INTR;
            goto exit;
        }
    } else {
        if ( false == getsem.trywait() ) {
            err = WOULDBLOCK;
            goto exit;
        }
    }
    // prevent preemption when fiddling with important members
    Cyg_Scheduler::lock();

    // don't need to check the q - the semaphore tells us there's
    // definitely a usable non-busy one there. It's just a question of
    // locating it.
    if ( !q->busy ) {               // fast-track the common case
        qent = q;
        q = qent->next;
    } else {
        struct qentry *qtmp;
        for ( qtmp=q; qtmp->next->busy; qtmp=qtmp->next )
            CYG_EMPTY_STATEMENT;    // skip through
        qent = qtmp->next;
        qtmp->next = qent->next;
    } // else

    // now stick at front of freelist, but marked busy
    qent->next = freelist;
    freelist = qent;
    qent->busy = true;  // don't let it truly be part of the freelist just
                        // yet till the data is copied out

    // unlock the scheduler, and potentially switch threads, but
    // that's okay now. We don't want it locked for the expensive memcpy
    Cyg_Scheduler::unlock();

    *len = qent->buflen;
    if ( NULL != prio )
        *prio = qent->priority;
    memcpy( buf, qent->buf(), *len );

    // make available now - setting non-atomically is alright if you think
    // about it - the only thing that matters is that it's completed before
    // the post()
    qent->busy = false;
    putsem.post();

    err = OK;
  exit:
    CYG_ASSERT_THISC();
    CYG_REPORT_RETVAL(err);
    return err;
} // Cyg_Mqueue::get()
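
// Usage sketch (illustrative only): a consumer thread retrieving the oldest,
// highest-priority message. msgq and rxbuf are hypothetical names; rxbuf is
// assumed to be at least msgsize bytes, since get() copies out the whole
// stored message.
//
//     char         rxbuf[16];
//     size_t       rxlen;
//     unsigned int rxprio;
//     if ( Cyg_Mqueue::OK == msgq.get( rxbuf, &rxlen, &rxprio, true ) ) {
//         // rxlen bytes of message data are now in rxbuf, sent at
//         // priority rxprio
//     }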
//------------------------------------------------------------------------
// count() returns the number of messages in the queue
inline long
Cyg_Mqueue::count()
{
    CYG_REPORT_FUNCTYPE("curmsgs=%d");
    long curmsgs = (long)getsem.peek();
    CYG_REPORT_RETVAL(curmsgs);
    return curmsgs;
} // Cyg_Mqueue::count()
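
// Usage sketch (illustrative only): polling the current queue depth, e.g. to
// report a POSIX-style mq_curmsgs value. msgq is a hypothetical queue object.
//
//     long pending = msgq.count();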
//------------------------------------------------------------------------
// Supply a callback function to call (with the supplied data argument)
// when the queue goes from empty to non-empty (unless someone's already
// doing a get()). This returns the old callback_fn, and if olddata is
// non-NULL sets it to the old data (yes, really!)
CYGPRI_KERNEL_SYNCH_MQUEUE_INLINE Cyg_Mqueue::callback_fn_t
Cyg_Mqueue::setnotify( callback_fn_t callback_fn, CYG_ADDRWORD data,
                       CYG_ADDRWORD *olddata )
{
    CYG_REPORT_FUNCTYPE("old callback=%08x");
    CYG_REPORT_FUNCARG3XV( callback_fn, data, olddata );
    if ( NULL != callback_fn )
        CYG_CHECK_FUNC_PTRC( callback_fn );
    if (NULL != olddata)
        CYG_CHECK_DATA_PTRC( olddata );

    callback_fn_t oldfn;

    // Need to prevent preemption for accessing common structures
    // Just locking the scheduler has the least overhead
    Cyg_Scheduler::lock();

    oldfn = callback;
    if (NULL != olddata)
        *olddata = callback_data;
    callback_data = data;
    callback = callback_fn;

    Cyg_Scheduler::unlock();

    CYG_REPORT_RETVAL(oldfn);
    return oldfn;
}
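
// Usage sketch (illustrative only): registering a notification callback.
// wakeup_fn, msgq and some_object are hypothetical names. As the put() code
// above shows, the callback runs from put() with the scheduler unlocked, and
// only on an empty-to-non-empty transition when no reader is already waiting.
//
//     static void wakeup_fn( Cyg_Mqueue &q, CYG_ADDRWORD data )
//     {
//         // e.g. set a flag or wake a dedicated handler thread
//     }
//
//     CYG_ADDRWORD olddata;
//     Cyg_Mqueue::callback_fn_t oldfn =
//         msgq.setnotify( &wakeup_fn, (CYG_ADDRWORD)&some_object, &olddata );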
//------------------------------------------------------------------------
#endif /* CYGONCE_KERNEL_MQUEUE_INL multiple inclusion protection */
/* EOF mqueue.inl */