📄 uthash.h
字号:
/* (continuation of the bucket-expansion discussion above)
 * optimization. But it is more optimal, and more elegant this way. :-) */

/* HASH_EXPAND_BUCKETS(tbl): double the table's bucket array.
 *
 * Allocates a zeroed array of 2*num_buckets buckets, then walks every chain
 * in the old array and relinks each handle into the new array using its
 * cached hashv (no keys are re-hashed; HASH_TO_BKT just re-masks hashv for
 * the doubled bucket count). Finally frees the old array and updates the
 * expansion-efficiency bookkeeping:
 *   - ideal_chain_maxlen = ceil(num_items / (2*num_buckets)), computed via
 *     shift/mask since bucket counts are powers of two;
 *   - nonideal_items counts handles landing past the ideal chain depth;
 *   - if more than half the items are nonideal on two consecutive expands,
 *     noexpand is set and further doubling is inhibited (hash function is a
 *     poor fit for the key domain).
 * All working state lives in scratch fields of *tbl, so the macro needs no
 * local variables. On allocation failure uthash_fatal is invoked. */
#define HASH_EXPAND_BUCKETS(tbl)                                            \
  tbl->new_buckets = (UT_hash_bucket*)uthash_bkt_malloc(                    \
       2 * tbl->num_buckets * sizeof(struct UT_hash_bucket));               \
  if (!tbl->new_buckets) { uthash_fatal( "out of memory"); }                \
  memset(tbl->new_buckets, 0,                                               \
       2 * tbl->num_buckets * sizeof(struct UT_hash_bucket));               \
  /* ceil(num_items / new bucket count), using shift + remainder mask */    \
  tbl->ideal_chain_maxlen =                                                 \
     (tbl->num_items >> (tbl->log2_num_buckets+1)) +                        \
     ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0);                 \
  tbl->nonideal_items = 0;                                                  \
  for(tbl->bkt_i = 0; tbl->bkt_i < tbl->num_buckets; tbl->bkt_i++)          \
  {                                                                         \
      tbl->thh = tbl->buckets[ tbl->bkt_i ].hh_head;                        \
      while (tbl->thh) {                                                    \
         tbl->hh_nxt = tbl->thh->hh_next; /* save: links are rewritten */   \
         HASH_TO_BKT( tbl->thh->hashv, tbl->num_buckets*2, tbl->bkt);       \
         tbl->newbkt = &(tbl->new_buckets[ tbl->bkt ]);                     \
         if (++(tbl->newbkt->count) > tbl->ideal_chain_maxlen) {            \
           tbl->nonideal_items++;                                           \
           /* overused bucket: raise its expansion-trigger multiplier */    \
           tbl->newbkt->expand_mult = tbl->newbkt->count /                  \
                                      tbl->ideal_chain_maxlen;              \
         }                                                                  \
         /* push handle onto the head of its new bucket chain */            \
         tbl->thh->hh_prev = NULL;                                          \
         tbl->thh->hh_next = tbl->newbkt->hh_head;                          \
         if (tbl->newbkt->hh_head) tbl->newbkt->hh_head->hh_prev =          \
              tbl->thh;                                                     \
         tbl->newbkt->hh_head = tbl->thh;                                   \
         tbl->thh = tbl->hh_nxt;                                            \
      }                                                                     \
  }                                                                         \
  tbl->num_buckets *= 2;                                                    \
  tbl->log2_num_buckets++;                                                  \
  uthash_bkt_free( tbl->buckets );                                          \
  tbl->buckets = tbl->new_buckets;                                          \
  /* reset-or-increment the consecutive ineffective-expansion counter */    \
  tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1))        \
      ? (tbl->ineff_expands+1) : 0;                                         \
  if (tbl->ineff_expands > 1) {                                             \
      tbl->noexpand=1;                                                      \
      uthash_noexpand_fyi(tbl);                                             \
  }                                                                         \
  uthash_expand_fyi(tbl);

/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in.
*/
/* Convenience wrapper: sort using the default handle name "hh". */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)

/* HASH_SRT(hh,head,cmpfcn): in-place bottom-up mergesort of the
 * application-order list (the prev/next links), leaving the bucket chains
 * untouched. Run width (insize) starts at 1 and doubles each pass until a
 * pass performs <= 1 merge. cmpfcn receives two element pointers and must
 * return <= 0 when its first argument sorts no later than its second.
 * All working state lives in scratch fields of the shared UT_hash_table,
 * so the macro needs no local variables. A handle is advanced to the next
 * element's handle by adding hho (the byte offset of the handle within the
 * element) to the element's next pointer. On completion tbl->tail is
 * updated and head is reassigned to the new first element. HASH_FSCK is
 * run at the end (NOTE(review): defined elsewhere in this file; presumably
 * a consistency check that may compile to a no-op — confirm). */
#define HASH_SRT(hh,head,cmpfcn)                                            \
 if (head) {                                                                \
   (head)->hh.tbl->insize = 1;                                              \
   (head)->hh.tbl->looping = 1;                                             \
   (head)->hh.tbl->list = &((head)->hh);                                    \
   while ((head)->hh.tbl->looping) {                                        \
       (head)->hh.tbl->p = (head)->hh.tbl->list;                            \
       (head)->hh.tbl->list = NULL;                                         \
       (head)->hh.tbl->tale = NULL; /* tail of the merged output list */    \
       (head)->hh.tbl->nmerges = 0;                                         \
       while ((head)->hh.tbl->p) {                                          \
          (head)->hh.tbl->nmerges++;                                        \
          /* step q forward insize places past p to find the second run */  \
          (head)->hh.tbl->q = (head)->hh.tbl->p;                            \
          (head)->hh.tbl->psize = 0;                                        \
          for ( (head)->hh.tbl->i = 0;                                      \
                (head)->hh.tbl->i < (head)->hh.tbl->insize;                 \
                (head)->hh.tbl->i++ ) {                                     \
              (head)->hh.tbl->psize++;                                      \
              (head)->hh.tbl->q =                                           \
                  (UT_hash_handle*)(((head)->hh.tbl->q->next) ?             \
                  ((void*)((char*)((head)->hh.tbl->q->next) +               \
                  (head)->hh.tbl->hho)) : NULL);                            \
              if (! ((head)->hh.tbl->q) ) break;                            \
          }                                                                 \
          (head)->hh.tbl->qsize = (head)->hh.tbl->insize;                   \
          /* merge runs p (psize elems) and q (up to qsize elems) */        \
          while (((head)->hh.tbl->psize > 0) ||                             \
                 (((head)->hh.tbl->qsize > 0) && (head)->hh.tbl->q )) {     \
              if ((head)->hh.tbl->psize == 0) {                             \
                  /* p exhausted: take from q */                            \
                  (head)->hh.tbl->e = (head)->hh.tbl->q;                    \
                  (head)->hh.tbl->q =                                       \
                      (UT_hash_handle*)(((head)->hh.tbl->q->next) ?         \
                      ((void*)((char*)((head)->hh.tbl->q->next) +           \
                      (head)->hh.tbl->hho)) : NULL);                        \
                  (head)->hh.tbl->qsize--;                                  \
              } else if ( ((head)->hh.tbl->qsize == 0) ||                   \
                          !((head)->hh.tbl->q) ) {                          \
                  /* q exhausted: take from p */                            \
                  (head)->hh.tbl->e = (head)->hh.tbl->p;                    \
                  (head)->hh.tbl->p =                                       \
                      (UT_hash_handle*)(((head)->hh.tbl->p->next) ?         \
                      ((void*)((char*)((head)->hh.tbl->p->next) +           \
                      (head)->hh.tbl->hho)) : NULL);                        \
                  (head)->hh.tbl->psize--;                                  \
              } else if ((                                                  \
                  cmpfcn(TYPEOF(head)                                       \
                      (ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->p)),     \
                      TYPEOF(head)                                          \
                      (ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->q)))     \
                  ) <= 0) {                                                 \
                  /* p's element sorts first (<=0 keeps the sort stable) */ \
                  (head)->hh.tbl->e = (head)->hh.tbl->p;                    \
                  (head)->hh.tbl->p =                                       \
                      (UT_hash_handle*)(((head)->hh.tbl->p->next) ?         \
                      ((void*)((char*)((head)->hh.tbl->p->next) +           \
                      (head)->hh.tbl->hho)) : NULL);                        \
                  (head)->hh.tbl->psize--;                                  \
              } else {                                                      \
                  /* q's element sorts first */                             \
                  (head)->hh.tbl->e = (head)->hh.tbl->q;                    \
                  (head)->hh.tbl->q =                                       \
                      (UT_hash_handle*)(((head)->hh.tbl->q->next) ?         \
                      ((void*)((char*)((head)->hh.tbl->q->next) +           \
                      (head)->hh.tbl->hho)) : NULL);                        \
                  (head)->hh.tbl->qsize--;                                  \
              }                                                             \
              /* append e to the output list, fixing both link directions */\
              if ( (head)->hh.tbl->tale ) {                                 \
                  (head)->hh.tbl->tale->next = (((head)->hh.tbl->e) ?       \
                      ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->e) : NULL); \
              } else {                                                      \
                  (head)->hh.tbl->list = (head)->hh.tbl->e;                 \
              }                                                             \
              (head)->hh.tbl->e->prev = (((head)->hh.tbl->tale) ?           \
                  ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tale) : NULL); \
              (head)->hh.tbl->tale = (head)->hh.tbl->e;                     \
          }                                                                 \
          (head)->hh.tbl->p = (head)->hh.tbl->q;                            \
       }                                                                    \
       (head)->hh.tbl->tale->next = NULL;                                   \
       if ( (head)->hh.tbl->nmerges <= 1 ) {                                \
           /* one merge left => fully sorted; publish tail and new head */  \
           (head)->hh.tbl->looping=0;                                       \
           (head)->hh.tbl->tail = (head)->hh.tbl->tale;                     \
           (head) = TYPEOF(head)ELMT_FROM_HH((head)->hh.tbl,                \
                                             (head)->hh.tbl->list);         \
       }                                                                    \
       (head)->hh.tbl->insize *= 2;                                         \
   }                                                                        \
   HASH_FSCK(hh,head);                                                      \
 }

/* obtain a count of items in the hash (O(1); 0 for a NULL head) */
#define HASH_COUNT(head) HASH_CNT(hh,head)
#define HASH_CNT(hh,head) (head?(head->hh.tbl->num_items):0)

/* One slot of the bucket array: head of a doubly-linked chain of hash
 * handles plus its current length. */
typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;
   unsigned count;

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;

/* One table per hash: bucket array, counters, and the scratch fields that
 * the macros above use in place of local variables. */
typedef struct UT_hash_table {
   UT_hash_bucket *buckets;
   unsigned num_buckets, log2_num_buckets; /* always a power of two */
   unsigned num_items;
   struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */

   /* in an ideal situation (all buckets used equally), no bucket would have
    * more than ceil(#items/#buckets) items. that's the ideal chain length. */
   unsigned ideal_chain_maxlen;

   /* nonideal_items is the number of items in the hash whose chain position
    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
    * hash distribution; reaching them in a chain traversal takes >ideal steps */
   unsigned nonideal_items;

   /* ineffective expands occur when a bucket doubling was performed, but
    * afterward, more than half the items in the hash had nonideal chain
    * positions. If this happens on two consecutive expansions we inhibit any
    * further expansion, as it's not helping; this happens when the hash
    * function isn't a good fit for the key domain. When expansion is inhibited
    * the hash will still work, albeit no longer in constant time. */
   unsigned ineff_expands, noexpand;

   /* scratch */
   unsigned hash_scratch, bkt, bkt_i, keylen, i, j, k;
   char *key;
   struct UT_hash_handle *thh, *hh_nxt, *hh_del;

   /* scratch for bucket expansion */
   UT_hash_bucket *new_buckets, *newbkt;

   /* scratch for sort */
   int looping,nmerges,insize,psize,qsize;
   struct UT_hash_handle *p, *q, *e, *list, *tale;

} UT_hash_table;

/* Embedded in each hashable element; links the element into both the
 * application-order list (prev/next, element pointers) and its bucket
 * chain (hh_prev/hh_next, handle pointers). */
typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;
   void *prev;                       /* prev element in app order      */
   void *next;                       /* next element in app order      */
   struct UT_hash_handle *hh_prev;   /* previous hh in bucket order    */
   struct UT_hash_handle *hh_next;   /* next hh in bucket order        */
   void *key;                        /* ptr to enclosing struct's key  */
   int keylen;                       /* enclosing struct's key len     */
   unsigned hashv;                   /* result of hash-fcn(key)        */
} UT_hash_handle;

#endif /* UTHASH_H */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -