sys_arch.c
u32_t
sys_arch_mbox_fetch(struct sys_mbox **mb, void **msg, u32_t timeout)
{
  u32_t time_needed = 0;
  struct sys_mbox *mbox;
  LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL));
  mbox = *mb;

  /* The mutex lock is quick so we don't bother with the timeout
     stuff here. */
  sys_arch_sem_wait(&mbox->mutex, 0);

  while (mbox->first == mbox->last) {
    sys_sem_signal(&mbox->mutex);

    /* We block while waiting for a mail to arrive in the mailbox. We
       must be prepared to timeout. */
    if (timeout != 0) {
      time_needed = sys_arch_sem_wait(&mbox->not_empty, timeout);
      if (time_needed == SYS_ARCH_TIMEOUT) {
        return SYS_ARCH_TIMEOUT;
      }
    } else {
      sys_arch_sem_wait(&mbox->not_empty, 0);
    }

    sys_arch_sem_wait(&mbox->mutex, 0);
  }

  if (msg != NULL) {
    /* Fetch the message first so the debug output shows the message
       actually being returned. */
    *msg = mbox->msgs[mbox->first % SYS_MBOX_SIZE];
    LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p msg %p\n", (void *)mbox, *msg));
  } else {
    LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p, null msg\n", (void *)mbox));
  }

  mbox->first++;

  if (mbox->wait_send) {
    sys_sem_signal(&mbox->not_full);
  }

  sys_sem_signal(&mbox->mutex);

  return time_needed;
}
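/* A minimal usage sketch (illustration only, not part of the port): a thread
 * blocking on a mailbox with a timeout.  It assumes the sys_mbox_new()
 * function defined elsewhere in this file; the identifiers my_mbox, my_msg
 * and handle_msg() are hypothetical. */
#if 0
static void
example_mbox_consumer(void)
{
  struct sys_mbox *my_mbox;
  void *my_msg;

  if (sys_mbox_new(&my_mbox, SYS_MBOX_SIZE) != ERR_OK) {
    return;
  }
  /* Wait at most 500 ms for a message; SYS_ARCH_TIMEOUT means none arrived. */
  if (sys_arch_mbox_fetch(&my_mbox, &my_msg, 500) != SYS_ARCH_TIMEOUT) {
    handle_msg(my_msg);
  }
}
#endif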
/*-----------------------------------------------------------------------------------*/
static struct sys_sem *
sys_sem_new_internal(u8_t count)
{
  struct sys_sem *sem;

  sem = (struct sys_sem *)malloc(sizeof(struct sys_sem));
  if (sem != NULL) {
    sem->c = count;
    pthread_cond_init(&(sem->cond), NULL);
    pthread_mutex_init(&(sem->mutex), NULL);
  }
  return sem;
}
/*-----------------------------------------------------------------------------------*/
err_t
sys_sem_new(struct sys_sem **sem, u8_t count)
{
  SYS_STATS_INC_USED(sem);
  *sem = sys_sem_new_internal(count);
  if (*sem == NULL) {
    return ERR_MEM;
  }
  return ERR_OK;
}
/*-----------------------------------------------------------------------------------*/
static u32_t
cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, u32_t timeout)
{
  int tdiff;
  unsigned long sec, usec;
  struct timeval rtime1, rtime2;
  struct timespec ts;
  int retval;

  if (timeout > 0) {
    /* Get a timestamp and add the timeout value. */
    gettimeofday(&rtime1, NULL);
    sec = rtime1.tv_sec;
    usec = rtime1.tv_usec;
    usec += timeout % 1000 * 1000;
    sec += (int)(timeout / 1000) + (int)(usec / 1000000);
    usec = usec % 1000000;
    ts.tv_nsec = usec * 1000;
    ts.tv_sec = sec;

    retval = pthread_cond_timedwait(cond, mutex, &ts);

    if (retval == ETIMEDOUT) {
      return SYS_ARCH_TIMEOUT;
    } else {
      /* Calculate for how long we waited for the cond. */
      gettimeofday(&rtime2, NULL);
      tdiff = (rtime2.tv_sec - rtime1.tv_sec) * 1000 +
        (rtime2.tv_usec - rtime1.tv_usec) / 1000;

      if (tdiff <= 0) {
        return 0;
      }
      return tdiff;
    }
  } else {
    pthread_cond_wait(cond, mutex);
    return SYS_ARCH_TIMEOUT;
  }
}
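/* Worked example of the conversion above (illustration only): with
 * timeout = 2500 ms and a current time of tv_sec = 10, tv_usec = 600000,
 *   usec = 600000 + (2500 % 1000) * 1000 = 1100000
 *   sec  = 10 + 2500 / 1000 + 1100000 / 1000000 = 13
 *   usec = 1100000 % 1000000 = 100000
 * giving an absolute wake-up time of 13 s + 100000 us, i.e. 2.5 s from now,
 * which is the struct timespec form that pthread_cond_timedwait() expects. */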
/*-----------------------------------------------------------------------------------*/
u32_t
sys_arch_sem_wait(struct sys_sem **s, u32_t timeout)
{
  u32_t time_needed = 0;
  struct sys_sem *sem;
  LWIP_ASSERT("invalid sem", (s != NULL) && (*s != NULL));
  sem = *s;

  pthread_mutex_lock(&(sem->mutex));
  while (sem->c <= 0) {
    if (timeout > 0) {
      time_needed = cond_wait(&(sem->cond), &(sem->mutex), timeout);

      if (time_needed == SYS_ARCH_TIMEOUT) {
        pthread_mutex_unlock(&(sem->mutex));
        return SYS_ARCH_TIMEOUT;
      }
      /* pthread_mutex_unlock(&(sem->mutex));
         return time_needed; */
    } else {
      cond_wait(&(sem->cond), &(sem->mutex), 0);
    }
  }
  sem->c--;
  pthread_mutex_unlock(&(sem->mutex));
  return time_needed;
}
/*-----------------------------------------------------------------------------------*/
void
sys_sem_signal(struct sys_sem **s)
{
  struct sys_sem *sem;
  LWIP_ASSERT("invalid sem", (s != NULL) && (*s != NULL));
  sem = *s;

  pthread_mutex_lock(&(sem->mutex));
  sem->c++;

  /* Do not let the count accumulate beyond 1: the port treats the
     semaphore as (at most) binary. */
  if (sem->c > 1) {
    sem->c = 1;
  }

  pthread_cond_broadcast(&(sem->cond));
  pthread_mutex_unlock(&(sem->mutex));
}
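/* A minimal usage sketch (illustration only): creating a semaphore with an
 * initial count of 0, signalling it from one context and waiting on it with a
 * timeout from another.  The identifiers example_sem, producer() and
 * consumer() are hypothetical. */
#if 0
static struct sys_sem *example_sem;

static void
producer(void)
{
  sys_sem_signal(&example_sem);  /* wake any threads blocked in sem_wait */
}

static void
consumer(void)
{
  if (sys_sem_new(&example_sem, 0) != ERR_OK) {
    return;
  }
  /* Block for up to 1000 ms; the return value is either the time spent
     waiting in milliseconds or SYS_ARCH_TIMEOUT. */
  if (sys_arch_sem_wait(&example_sem, 1000) == SYS_ARCH_TIMEOUT) {
    /* nothing was signalled within one second */
  }
  sys_sem_free(&example_sem);
}
#endif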
/*-----------------------------------------------------------------------------------*/
static void
sys_sem_free_internal(struct sys_sem *sem)
{
  pthread_cond_destroy(&(sem->cond));
  pthread_mutex_destroy(&(sem->mutex));
  free(sem);
}
/*-----------------------------------------------------------------------------------*/
void
sys_sem_free(struct sys_sem **sem)
{
  if ((sem != NULL) && (*sem != SYS_SEM_NULL)) {
    SYS_STATS_DEC(sem.used);
    sys_sem_free_internal(*sem);
  }
}
#endif /* !NO_SYS */
/*-----------------------------------------------------------------------------------*/
u32_t
sys_now(void)
{
  struct timeval tv;
  long sec, usec;
  u32_t msec;
  gettimeofday(&tv, NULL);

  /* Use signed arithmetic so that tv_usec < starttime.tv_usec does not
     underflow before the division. */
  sec = tv.tv_sec - starttime.tv_sec;
  usec = tv.tv_usec - starttime.tv_usec;
  msec = (u32_t)(sec * 1000 + usec / 1000);

  return msec;
}
/*-----------------------------------------------------------------------------------*/
void
sys_init(void)
{
  gettimeofday(&starttime, NULL);
}
/*-----------------------------------------------------------------------------------*/
#if SYS_LIGHTWEIGHT_PROT
/** sys_prot_t sys_arch_protect(void)
This optional function does a "fast" critical region protection and returns
the previous protection level. This function is only called during very short
critical regions. An embedded system which supports ISR-based drivers might
want to implement this function by disabling interrupts. Task-based systems
might want to implement this by using a mutex or disabling tasking. This
function should support recursive calls from the same task or interrupt. In
other words, sys_arch_protect() could be called while already protected. In
that case the return value indicates that it is already protected.
sys_arch_protect() is only required if your port is supporting an operating
system.
*/
sys_prot_t
sys_arch_protect(void)
{
  /* Note that for the UNIX port, we are using a lightweight mutex, and our
   * own counter (which is locked by the mutex). The return code is not
   * actually used. */
  if (lwprot_thread != pthread_self()) {
    /* We are locking the mutex where it has not been locked before
     * or is being locked by another thread. */
    pthread_mutex_lock(&lwprot_mutex);
    lwprot_thread = pthread_self();
    lwprot_count = 1;
  } else {
    /* It is already locked by THIS thread. */
    lwprot_count++;
  }
  return 0;
}
/*-----------------------------------------------------------------------------------*/
/** void sys_arch_unprotect(sys_prot_t pval)
This optional function does a "fast" set of critical region protection to the
value specified by pval. See the documentation for sys_arch_protect() for
more information. This function is only required if your port is supporting
an operating system.
*/
void
sys_arch_unprotect(sys_prot_t pval)
{
  LWIP_UNUSED_ARG(pval);
  if (lwprot_thread == pthread_self()) {
    if (--lwprot_count == 0) {
      lwprot_thread = (pthread_t)0xDEAD;
      pthread_mutex_unlock(&lwprot_mutex);
    }
  }
}
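/* A minimal usage sketch (illustration only): lwIP core code does not call
 * these functions directly but goes through the SYS_ARCH_PROTECT family of
 * macros from sys.h, which expand to sys_arch_protect()/sys_arch_unprotect()
 * when SYS_LIGHTWEIGHT_PROT is enabled.  The variable shared_counter and the
 * function example_protected_increment() are hypothetical. */
#if 0
static int shared_counter;

static void
example_protected_increment(void)
{
  SYS_ARCH_DECL_PROTECT(lev);   /* declares a local sys_prot_t            */
  SYS_ARCH_PROTECT(lev);        /* lev = sys_arch_protect(), may recurse  */
  shared_counter++;             /* very short critical region             */
  SYS_ARCH_UNPROTECT(lev);      /* sys_arch_unprotect(lev)                */
}
#endif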
#endif /* SYS_LIGHTWEIGHT_PROT */
/*-----------------------------------------------------------------------------------*/
#ifndef MAX_JIFFY_OFFSET
#define MAX_JIFFY_OFFSET ((~0U >> 1)-1)
#endif
#ifndef HZ
#define HZ 100
#endif
u32_t
sys_jiffies(void)
{
  struct timeval tv;
  unsigned long sec;
  long usec;

  gettimeofday(&tv, NULL);
  sec = tv.tv_sec - starttime.tv_sec;
  usec = tv.tv_usec;

  if (sec >= (MAX_JIFFY_OFFSET / HZ))
    return MAX_JIFFY_OFFSET;

  usec += 1000000L / HZ - 1;
  usec /= 1000000L / HZ;
  return HZ * sec + usec;
}
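/* Worked example of the conversion above (illustration only): with HZ = 100
 * a jiffy is 10 ms (1000000 / HZ = 10000 us).  For an elapsed time of
 * sec = 3, tv_usec = 24000:
 *   usec = 24000 + 10000 - 1 = 33999
 *   usec / 10000 = 3            (24 ms rounds up to 3 jiffies)
 *   result = 100 * 3 + 3 = 303 jiffies
 * The "+ 1000000L / HZ - 1" term rounds the microsecond part up so that any
 * partial jiffy counts as a full one. */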
#if PPP_DEBUG
#include <stdarg.h>
void ppp_trace(int level, const char *format, ...)
{
  va_list args;

  (void)level;
  va_start(args, format);
  vprintf(format, args);
  va_end(args);
}
#endif