📄 timer.c
字号:
/* --- tail of delete_handler() (function starts above this chunk) ---
   we call delete now without any locking on hash/ref_count; we can do
   that because delete_handler is only entered after the delete timer
   was installed from wait_handler, which removed transaction from hash
   table and did not destroy it because some processes were using it;
   that means that the processes currently using the transaction can
   unref and no new processes can ref -- we can wait until ref_count is
   zero safely without locking */
	delete_cell( p_cell, 0 /* don't unlock on return */ );
	DBG("DEBUG: delete_handler : done\n");
}


/***********************************************************/

/* Return the (shared-memory) timer table singleton; may be NULL before
 * tm_init_timers() or after free_timer_table(). */
struct timer_table *get_timertable()
{
	return timertable;
}


/* Detach all timer lists and free every cell still queued for deletion.
 * NOTE(review): no locks are taken here -- presumably only called at
 * shutdown when no other process touches the table; confirm at callers. */
void unlink_timer_lists()
{
	struct timer_link *tl, *end, *tmp;
	enum lists i;

	if (timertable==0) return; /* nothing to do */

	/* remember the DELETE LIST */
	tl = timertable->timers[DELETE_LIST].first_tl.next_tl;
	end = & timertable->timers[DELETE_LIST].last_tl;
	/* unlink the timer lists */
	for( i=0; i<NR_OF_TIMER_LISTS ; i++ )
		reset_timer_list( i );
	DBG("DEBUG: unlink_timer_lists : emptying DELETE list\n");
	/* deletes all cells from DELETE_LIST list
	   (they are no more accessible from entrys) */
	while (tl!=end) {
		tmp=tl->next_tl;
		free_cell( get_dele_timer_payload(tl) );
		tl=tmp;
	}
}

/* Allocate the timer table in shared memory, initialize every list
 * (sentinel links + lock) and stamp each list with its own id.
 * Returns the table, or 0 on shm allocation failure. */
struct timer_table *tm_init_timers()
{
	enum lists i;

	timertable=(struct timer_table *) shm_malloc(sizeof(struct timer_table));
	if (!timertable) {
		LOG(L_ERR, "ERROR: tm_init_timers: no shmem for timer_Table\n");
		goto error0;
	}
	memset(timertable, 0, sizeof (struct timer_table));

	/* inits the timers*/
	for( i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
		init_timer_list( i );

	/* init. timer lists: each list knows its own index */
	timertable->timers[RT_T1_TO_1].id = RT_T1_TO_1;
	timertable->timers[RT_T1_TO_2].id = RT_T1_TO_2;
	timertable->timers[RT_T1_TO_3].id = RT_T1_TO_3;
	timertable->timers[RT_T2].id      = RT_T2;
	timertable->timers[FR_TIMER_LIST].id     = FR_TIMER_LIST;
	timertable->timers[FR_INV_TIMER_LIST].id = FR_INV_TIMER_LIST;
	timertable->timers[WT_TIMER_LIST].id     = WT_TIMER_LIST;
	timertable->timers[DELETE_LIST].id       = DELETE_LIST;

	return timertable;

error0:
	return 0;
}

/* Release all per-list locks and free the shared-memory timer table.
 * Counterpart of tm_init_timers(); safe to call when the table was
 * never allocated. */
void free_timer_table()
{
	enum lists i;

	if (timertable) {
		/* the mutexs for sync the lists are released*/
		for ( i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
			release_timerlist_lock( &timertable->timers[i] );
		shm_free(timertable);
	}
}

/* Re-initialize one list to the empty state: the two sentinel links
 * (first_tl/last_tl) point at each other and the trailing sentinel gets
 * time_out -1 so it never matches an expiry comparison. */
void reset_timer_list( enum lists list_id)
{
	timertable->timers[list_id].first_tl.next_tl =
		&(timertable->timers[list_id].last_tl );
	timertable->timers[list_id].last_tl.prev_tl =
		&(timertable->timers[list_id].first_tl );
	timertable->timers[list_id].first_tl.prev_tl =
		timertable->timers[list_id].last_tl.next_tl = NULL;
	timertable->timers[list_id].last_tl.time_out = -1;
}

/* Empty the list and create its lock. */
void init_timer_list( /* struct s_table* ht, */ enum lists list_id)
{
	reset_timer_list( /* ht, */ list_id );
	init_timerlist_lock( /* ht, */ list_id );
}

/* Debug helper: walk one list and DBG-print every link.
 * NOTE(review): reads the list without taking its lock. */
void print_timer_list( enum lists list_id)
{
	struct timer* timer_list=&(timertable->timers[ list_id ]);
	struct timer_link *tl ;

	tl = timer_list->first_tl.next_tl;
	while (tl!=& timer_list->last_tl) {
		DBG("DEBUG: print_timer_list[%d]: %p, next=%p \n",
			list_id, tl, tl->next_tl);
		tl = tl->next_tl;
	}
}

/* Unlink tl from whatever list it is on and clear its link fields.
 * "unsafe" = caller must already hold the owning list's lock. No-op if
 * the link is not currently on a list. */
static void remove_timer_unsafe( struct timer_link* tl )
{
#ifdef EXTRA_DEBUG
	if (tl && is_in_timer_list2(tl) &&
		tl->timer_list->last_tl.prev_tl==0) {
		LOG( L_CRIT,
			"CRITICAL : Oh no, zero link in trailing timer element\n");
		abort();
	};
#endif
	if (is_in_timer_list2( tl )) {
#ifdef EXTRA_DEBUG
		DBG("DEBUG: unlinking timer: tl=%p, timeout=%d, group=%d\n",
			tl, tl->time_out, tl->tg);
#endif
		tl->prev_tl->next_tl = tl->next_tl;
		tl->next_tl->prev_tl = tl->prev_tl;
		tl->next_tl = 0;
		tl->prev_tl = 0;
		tl->timer_list = NULL;
	}
}

/* put a new cell into a list nr. list_id
 * Insert tl in expiry order, scanning backwards from the tail (new
 * timeouts are usually the largest, so this is typically O(1)).
 * "unsafe" = caller must hold the list lock. */
static void insert_timer_unsafe( struct timer *timer_list,
	struct timer_link *tl, unsigned int time_out )
{
	struct timer_link* ptr;

	tl->time_out = time_out;
	tl->timer_list = timer_list;

	/* walk back until an element with an earlier-or-equal expiry */
	for(ptr = timer_list->last_tl.prev_tl;
		ptr != &timer_list->first_tl; ptr = ptr->prev_tl) {
		if (ptr->time_out <= time_out) break;
	}

	/* link tl right after ptr */
	tl->prev_tl = ptr;
	tl->next_tl = ptr->next_tl;
	tl->prev_tl->next_tl = tl;
	tl->next_tl->prev_tl = tl;
	DBG("DEBUG: add_to_tail_of_timer[%d]: %p\n",timer_list->id,tl);
}

#if 0 /* not used anymore */
/* put a new cell into a list nr. list_id
 * (historic unconditional append-to-tail variant, superseded by
 * insert_timer_unsafe above) */
static void add_timer_unsafe( struct timer *timer_list,
	struct timer_link *tl, unsigned int time_out )
{
#ifdef EXTRA_DEBUG
	if (timer_list->last_tl.prev_tl==0) {
		LOG( L_CRIT,
			"CRITICAL : Oh no, zero link in trailing timer element\n");
		abort();
	};
#endif
	tl->time_out = time_out;
	tl->prev_tl = timer_list->last_tl.prev_tl;
	tl->next_tl = & timer_list->last_tl;
	timer_list->last_tl.prev_tl = tl;
	tl->prev_tl->next_tl = tl;
	tl->timer_list = timer_list;
#ifdef EXTRA_DEBUG
	if ( tl->tg != timer_group[ timer_list->id ] ) {
		LOG( L_CRIT, "CRITICAL error: changing timer group\n");
		abort();
	}
#endif
	DBG("DEBUG: add_timer_unsafe[%d]: %p\n",timer_list->id,tl);
}
#endif

/* detach items passed by the time from timer list
 * Split off (under the list lock) the leading run of links whose
 * time_out <= time and return it as a NULL-terminated chain; returns
 * NULL when nothing expired. Detached links are stamped DETACHED_LIST
 * so a concurrent set_timer() can recognize and refuse them. */
static struct timer_link *check_and_split_time_list(
	struct timer *timer_list, int time )
{
	struct timer_link *tl , *end, *ret;

	/* quick check whether it is worth entering the lock
	   (racy by design: a false negative is just retried next tick) */
	if (timer_list->first_tl.next_tl==&timer_list->last_tl ||
		( /* timer_list->first_tl.next_tl && */
			timer_list->first_tl.next_tl->time_out > time) )
		return NULL;

	/* the entire timer list is locked now -- noone else can
	   manipulate it */
	lock(timer_list->mutex);

	end = &timer_list->last_tl;
	tl = timer_list->first_tl.next_tl;
	while( tl!=end && tl->time_out <= time) {
		tl->timer_list = DETACHED_LIST;
		tl=tl->next_tl;
	}

	/* nothing to delete found */
	if (tl->prev_tl==&(timer_list->first_tl)) {
		ret = NULL;
	} else { /* we did find timers to be fired! */
		/* the detached list begins with current beginning */
		ret = timer_list->first_tl.next_tl;
		/* and we mark the end of the split list */
		tl->prev_tl->next_tl = NULL;
		/* the shortened list starts from where we suspended */
		timer_list->first_tl.next_tl = tl;
		tl->prev_tl = & timer_list->first_tl;
	}
#ifdef EXTRA_DEBUG
	if (timer_list->last_tl.prev_tl==0) {
		LOG( L_CRIT,
			"CRITICAL : Oh no, zero link in trailing timer element\n");
		abort();
	};
#endif
	/* give the list lock away */
	unlock(timer_list->mutex);

	return ret;
}

/* stop timer
 * WARNING: a reset'ed timer will be lost forever
 * (successive set_timer won't work unless you're lucky
 * an catch the race condition, the idea here is there is no
 * guarantee you can do anything after a timer_reset)
 */
void reset_timer( struct timer_link* tl )
{
	/* disqualify this timer from execution by setting its time_out
	   to zero; it will stay in timer-list until the timer process
	   starts removing outdated elements; then it will remove it but
	   not execute; there is a race condition, though -- see timer.c
	   for more details */
	tl->time_out = TIMER_DELETED;
#ifdef EXTRA_DEBUG
	DBG("DEBUG: reset_timer (group %d, tl=%p)\n", tl->tg, tl );
#endif
}

/* determine timer length and put on a correct timer list
 * WARNING: - don't try to use it to "move" a timer from one list
 *            to another, you'll run into races
 *          - reset_timer; set_timer might not work, a reset'ed timer
 *            has no set_timer guarantee, it might be lost; same for
 *            an expired timer: only it's handler can set it again,
 *            an external set_timer has no guarantee
 * ext_timeout==NULL means "use the list's default timeout". */
void set_timer( struct timer_link *new_tl, enum lists list_id,
	unsigned int* ext_timeout )
{
	unsigned int timeout;
	struct timer* list;

	if (list_id<FR_TIMER_LIST || list_id>=NR_OF_TIMER_LISTS) {
		LOG(L_CRIT, "ERROR: set_timer: unknown list: %d\n", list_id);
#ifdef EXTRA_DEBUG
		abort();
#endif
		return;
	}

	if (!ext_timeout) {
		timeout = timer_id2timeout[ list_id ];
	} else {
		timeout = *ext_timeout;
	}
	list= &(timertable->timers[ list_id ]);

	lock(list->mutex);
	/* check first if we are on the "detached" timer_routine list,
	 * if so do nothing, the timer is not valid anymore
	 * (sideffect: reset_timer ; set_timer is not safe, a reseted
	 *  timer might be lost, depending on this race condition ) */
	if (new_tl->timer_list==DETACHED_LIST){
		LOG(L_CRIT, "WARNING: set_timer called on a \"detached\" timer"
			" -- ignoring: %p\n", new_tl);
		goto end;
	}
	/* make sure I'm not already on a list */
	remove_timer_unsafe( new_tl );
	/* add_timer_unsafe( list, new_tl, get_ticks()+timeout); */
	insert_timer_unsafe( list, new_tl, get_ticks()+timeout);
end:
	unlock(list->mutex);
}

/* similar to set_timer, except it allows only one-time
   timer setting and all later attempts are ignored
   (a link that was ever armed or expired keeps
   time_out > TIMER_DELETED and is skipped). */
void set_1timer( struct timer_link *new_tl, enum lists list_id,
	unsigned int* ext_timeout )
{
	unsigned int timeout;
	struct timer* list;

	if (list_id<FR_TIMER_LIST || list_id>=NR_OF_TIMER_LISTS) {
		LOG(L_CRIT, "ERROR: set_timer: unknown list: %d\n", list_id);
#ifdef EXTRA_DEBUG
		abort();
#endif
		return;
	}

	if (!ext_timeout) {
		timeout = timer_id2timeout[ list_id ];
	} else {
		timeout = *ext_timeout;
	}
	list= &(timertable->timers[ list_id ]);

	lock(list->mutex);
	if (!(new_tl->time_out>TIMER_DELETED)) {
		/* make sure I'm not already on a list */
		/* remove_timer_unsafe( new_tl ); */
		/* add_timer_unsafe( list, new_tl, get_ticks()+timeout); */
		insert_timer_unsafe( list, new_tl, get_ticks()+timeout);

		/* set_1timer is used only by WAIT -- that's why we can
		   afford updating wait statistics; I admit its not nice
		   but it greatly utilizes existing lock */
	}
	unlock(list->mutex);
	t_stats_wait();
}


/* should be called only from timer process context,
 * else it's unsafe.
 * Remove every FR and RETR timer of transaction t (uas side, all uac
 * branches and their local CANCELs), taking each shared lock at most
 * once and only when some timer is actually armed. */
static void unlink_timers( struct cell *t )
{
	int i;
	int remove_fr, remove_retr;

	remove_fr=0; remove_retr=0;

	/* first look if we need to remove timers and play with
	   costly locks at all

	   note that is_in_timer_list2 is unsafe but it does not
	   hurt -- transaction is already dead (wait state) so that
	   noone else will install a FR/RETR timer and it can only
	   be removed from timer process itself -> it is safe to
	   use it without any protection
	*/
	if (is_in_timer_list2(&t->uas.response.fr_timer)) remove_fr=1;
	else for (i=0; i<t->nr_of_outgoings; i++)
		if (is_in_timer_list2(&t->uac[i].request.fr_timer)
			|| is_in_timer_list2(&t->uac[i].local_cancel.fr_timer)) {
				remove_fr=1;
				break;
		}
	if (is_in_timer_list2(&t->uas.response.retr_timer)) remove_retr=1;
	else for (i=0; i<t->nr_of_outgoings; i++)
		if (is_in_timer_list2(&t->uac[i].request.retr_timer)
			|| is_in_timer_list2(&t->uac[i].local_cancel.retr_timer)) {
				remove_retr=1;
				break;
		}

	/* do what we have to do....*/
	if (remove_retr) {
		/* RT_T1 lock is shared by all other RT timer
		   lists -- we can safely lock just one */
		lock(timertable->timers[RT_T1_TO_1].mutex);
		remove_timer_unsafe(&t->uas.response.retr_timer);
		for (i=0; i<t->nr_of_outgoings; i++) {
			remove_timer_unsafe(&t->uac[i].request.retr_timer);
			remove_timer_unsafe(&t->uac[i].local_cancel.retr_timer);
		}
		unlock(timertable->timers[RT_T1_TO_1].mutex);
	}
	if (remove_fr) {
		/* FR lock is shared by all other FR timer
		   lists -- we can safely lock just one */
		lock(timertable->timers[FR_TIMER_LIST].mutex);
		remove_timer_unsafe(&t->uas.response.fr_timer);
		for (i=0; i<t->nr_of_outgoings; i++) {
			remove_timer_unsafe(&t->uac[i].request.fr_timer);
			remove_timer_unsafe(&t->uac[i].local_cancel.fr_timer);
		}
		unlock(timertable->timers[FR_TIMER_LIST].mutex);
	}
}


/* Walk a detached chain, clearing each link's pointers BEFORE running
 * the handler (the handler may re-arm or free the link). Links whose
 * time_out is TIMER_DELETED (reset_timer'd) are dropped without being
 * fired. Uses `tmp_tl` and `id` from the enclosing scope. */
#define run_handler_for_each( _tl , _handler ) \
	while ((_tl))\
	{\
		/* reset the timer list linkage */\
		tmp_tl = (_tl)->next_tl;\
		(_tl)->next_tl = (_tl)->prev_tl = 0;\
		DBG("DEBUG: timer routine:%d,tl=%p next=%p\n",\
			id,(_tl),tmp_tl);\
		if ((_tl)->time_out>TIMER_DELETED) \
			(_handler)( _tl );\
		(_tl) = tmp_tl;\
	}


/* Periodic timer tick: for every list, detach the expired prefix under
 * the lock and then dispatch each expired link to the handler matching
 * the list's role (FR, retransmission, wait, delete) outside the lock. */
void timer_routine(unsigned int ticks , void * attr)
{
	/* struct timer_table *tt= (struct timer_table*)attr; */
	struct timer_link *tl, *tmp_tl;
	int    id;

	for( id=0 ; id<NR_OF_TIMER_LISTS ; id++ )
	{
		/* to waste as little time in lock as possible, detach list
		   with expired items and process them after leaving the
		   lock */
		tl=check_and_split_time_list( &timertable->timers[ id ], ticks);
		/* process items now */
		switch (id)
		{
			case FR_TIMER_LIST:
			case FR_INV_TIMER_LIST:
				run_handler_for_each(tl,final_response_handler);
				break;
			case RT_T1_TO_1:
			case RT_T1_TO_2:
			case RT_T1_TO_3:
			case RT_T2:
				run_handler_for_each(tl,retransmission_handler);
				break;
			case WT_TIMER_LIST:
				run_handler_for_each(tl,wait_handler);
				break;
			case DELETE_LIST:
				run_handler_for_each(tl,delete_handler);
				break;
		}
	}
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -