
📄 hash.c

📁 Source code of bison, the GNU lexer/parser generator
💻 C
📖 Page 1 of 2
   arguments pointing to user data, it then returns true for a pair of entries
   that compare equal, or false otherwise.  This function is internally called
   on entries which are already known to hash to the same bucket index.

   The user-supplied DATA_FREER function, when not NULL, may be later called
   with the user data as an argument, just before the entry containing the
   data gets freed.  This happens from within `hash_free' or `hash_clear'.
   You should specify this function only if you want these functions to free
   all of your `data' data.  This is typically the case when your data is
   simply an auxiliary struct that you have malloc'd to aggregate several
   values.  */

Hash_table *
hash_initialize (size_t candidate, const Hash_tuning *tuning,
                 Hash_hasher hasher, Hash_comparator comparator,
                 Hash_data_freer data_freer)
{
  Hash_table *table;

  if (hasher == NULL || comparator == NULL)
    return NULL;

  table = malloc (sizeof *table);
  if (table == NULL)
    return NULL;

  if (!tuning)
    tuning = &default_tuning;
  table->tuning = tuning;
  if (!check_tuning (table))
    {
      /* Fail if the tuning options are invalid.  This is the only occasion
         when the user gets some feedback about it.  Once the table is created,
         if the user provides invalid tuning options, we silently revert to
         using the defaults, and ignore further requests to change the tuning
         options.  */
      goto fail;
    }

  if (!tuning->is_n_buckets)
    {
      float new_candidate = candidate / tuning->growth_threshold;
      if (SIZE_MAX <= new_candidate)
        goto fail;
      candidate = new_candidate;
    }

  if (xalloc_oversized (candidate, sizeof *table->bucket))
    goto fail;
  table->n_buckets = next_prime (candidate);
  if (xalloc_oversized (table->n_buckets, sizeof *table->bucket))
    goto fail;

  table->bucket = calloc (table->n_buckets, sizeof *table->bucket);
  table->bucket_limit = table->bucket + table->n_buckets;
  table->n_buckets_used = 0;
  table->n_entries = 0;

  table->hasher = hasher;
  table->comparator = comparator;
  table->data_freer = data_freer;

  table->free_entry_list = NULL;
#if USE_OBSTACK
  obstack_init (&table->entry_stack);
#endif
  return table;

 fail:
  free (table);
  return NULL;
}

/* Make all buckets empty, placing any chained entries on the free list.
   Apply the user-specified function data_freer (if any) to the data of any
   affected entries.  */

void
hash_clear (Hash_table *table)
{
  struct hash_entry *bucket;

  for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
    {
      if (bucket->data)
        {
          struct hash_entry *cursor;
          struct hash_entry *next;

          /* Free the bucket overflow.  */
          for (cursor = bucket->next; cursor; cursor = next)
            {
              if (table->data_freer)
                (*table->data_freer) (cursor->data);
              cursor->data = NULL;

              next = cursor->next;

              /* Relinking is done one entry at a time, as it is to be expected
                 that overflows are either rare or short.  */
              cursor->next = table->free_entry_list;
              table->free_entry_list = cursor;
            }

          /* Free the bucket head.  */
          if (table->data_freer)
            (*table->data_freer) (bucket->data);
          bucket->data = NULL;
          bucket->next = NULL;
        }
    }

  table->n_buckets_used = 0;
  table->n_entries = 0;
}

/* Reclaim all storage associated with a hash table.  If a data_freer
   function has been supplied by the user when the hash table was created,
   this function applies it to the data of each entry before freeing that
   entry.  */

void
hash_free (Hash_table *table)
{
  struct hash_entry *bucket;
  struct hash_entry *cursor;
  struct hash_entry *next;

  /* Call the user data_freer function.  */
  if (table->data_freer && table->n_entries)
    {
      for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
        {
          if (bucket->data)
            {
              for (cursor = bucket; cursor; cursor = cursor->next)
                {
                  (*table->data_freer) (cursor->data);
                }
            }
        }
    }

#if USE_OBSTACK

  obstack_free (&table->entry_stack, NULL);

#else

  /* Free all bucket overflowed entries.  */
  for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
    {
      for (cursor = bucket->next; cursor; cursor = next)
        {
          next = cursor->next;
          free (cursor);
        }
    }

  /* Also reclaim the internal list of previously freed entries.  */
  for (cursor = table->free_entry_list; cursor; cursor = next)
    {
      next = cursor->next;
      free (cursor);
    }

#endif

  /* Free the remainder of the hash table structure.  */
  free (table->bucket);
  free (table);
}

/* Insertion and deletion.  */

/* Get a new hash entry for a bucket overflow, possibly by recycling a
   previously freed one.  If this is not possible, allocate a new one.  */

static struct hash_entry *
allocate_entry (Hash_table *table)
{
  struct hash_entry *new;

  if (table->free_entry_list)
    {
      new = table->free_entry_list;
      table->free_entry_list = new->next;
    }
  else
    {
#if USE_OBSTACK
      new = obstack_alloc (&table->entry_stack, sizeof *new);
#else
      new = malloc (sizeof *new);
#endif
    }

  return new;
}

/* Free a hash entry which was part of some bucket overflow,
   saving it for later recycling.  */

static void
free_entry (Hash_table *table, struct hash_entry *entry)
{
  entry->data = NULL;
  entry->next = table->free_entry_list;
  table->free_entry_list = entry;
}

/* This private function is used to help with insertion and deletion.  When
   ENTRY matches an entry in the table, return a pointer to the corresponding
   user data and set *BUCKET_HEAD to the head of the selected bucket.
   Otherwise, return NULL.  When DELETE is true and ENTRY matches an entry in
   the table, unlink the matching entry.  */

static void *
hash_find_entry (Hash_table *table, const void *entry,
                 struct hash_entry **bucket_head, bool delete)
{
  struct hash_entry *bucket
    = table->bucket + table->hasher (entry, table->n_buckets);
  struct hash_entry *cursor;

  if (! (bucket < table->bucket_limit))
    abort ();

  *bucket_head = bucket;

  /* Test for empty bucket.  */
  if (bucket->data == NULL)
    return NULL;

  /* See if the entry is the first in the bucket.  */
  if ((*table->comparator) (entry, bucket->data))
    {
      void *data = bucket->data;

      if (delete)
        {
          if (bucket->next)
            {
              struct hash_entry *next = bucket->next;

              /* Bump the first overflow entry into the bucket head, then save
                 the previous first overflow entry for later recycling.  */
              *bucket = *next;
              free_entry (table, next);
            }
          else
            {
              bucket->data = NULL;
            }
        }

      return data;
    }

  /* Scan the bucket overflow.  */
  for (cursor = bucket; cursor->next; cursor = cursor->next)
    {
      if ((*table->comparator) (entry, cursor->next->data))
        {
          void *data = cursor->next->data;

          if (delete)
            {
              struct hash_entry *next = cursor->next;

              /* Unlink the entry to delete, then save the freed entry for
                 later recycling.  */
              cursor->next = next->next;
              free_entry (table, next);
            }

          return data;
        }
    }

  /* No entry found.  */
  return NULL;
}

/* For an already existing hash table, change the number of buckets through
   specifying CANDIDATE.  The contents of the hash table are preserved.  The
   new number of buckets is automatically selected so as to _guarantee_ that
   the table may receive at least CANDIDATE different user entries, including
   those already in the table, before any other growth of the hash table size
   occurs.  If TUNING->IS_N_BUCKETS is true, then CANDIDATE specifies the
   exact number of buckets desired.  */

bool
hash_rehash (Hash_table *table, size_t candidate)
{
  Hash_table *new_table;
  struct hash_entry *bucket;
  struct hash_entry *cursor;
  struct hash_entry *next;

  new_table = hash_initialize (candidate, table->tuning, table->hasher,
                               table->comparator, table->data_freer);
  if (new_table == NULL)
    return false;

  /* Merely reuse the extra old space into the new table.  */
#if USE_OBSTACK
  obstack_free (&new_table->entry_stack, NULL);
  new_table->entry_stack = table->entry_stack;
#endif
  new_table->free_entry_list = table->free_entry_list;

  for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
    if (bucket->data)
      for (cursor = bucket; cursor; cursor = next)
        {
          void *data = cursor->data;
          struct hash_entry *new_bucket
            = (new_table->bucket
               + new_table->hasher (data, new_table->n_buckets));

          if (! (new_bucket < new_table->bucket_limit))
            abort ();

          next = cursor->next;

          if (new_bucket->data)
            {
              if (cursor == bucket)
                {
                  /* Allocate or recycle an entry, when moving from a bucket
                     header into a bucket overflow.  */
                  struct hash_entry *new_entry = allocate_entry (new_table);

                  if (new_entry == NULL)
                    return false;

                  new_entry->data = data;
                  new_entry->next = new_bucket->next;
                  new_bucket->next = new_entry;
                }
              else
                {
                  /* Merely relink an existing entry, when moving from a
                     bucket overflow into a bucket overflow.  */
                  cursor->next = new_bucket->next;
                  new_bucket->next = cursor;
                }
            }
          else
            {
              /* Free an existing entry, when moving from a bucket
                 overflow into a bucket header.  Also take care of the
                 simple case of moving from a bucket header into a bucket
                 header.  */
              new_bucket->data = data;
              new_table->n_buckets_used++;
              if (cursor != bucket)
                free_entry (new_table, cursor);
            }
        }

  free (table->bucket);
  table->bucket = new_table->bucket;
  table->bucket_limit = new_table->bucket_limit;
  table->n_buckets = new_table->n_buckets;
  table->n_buckets_used = new_table->n_buckets_used;
  table->free_entry_list = new_table->free_entry_list;
  /* table->n_entries already holds its value.  */
#if USE_OBSTACK
  table->entry_stack = new_table->entry_stack;
#endif
  free (new_table);

  return true;
}

/* If ENTRY matches an entry already in the hash table, return the pointer
   to the entry from the table.  Otherwise, insert ENTRY and return ENTRY.
   Return NULL if the storage required for insertion cannot be allocated.  */

void *
hash_insert (Hash_table *table, const void *entry)
{
  void *data;
  struct hash_entry *bucket;

  /* The caller cannot insert a NULL entry.  */
  if (! entry)
    abort ();

  /* If there's a matching entry already in the table, return that.  */
  if ((data = hash_find_entry (table, entry, &bucket, false)) != NULL)
    return data;

  /* ENTRY is not matched, it should be inserted.  */

  if (bucket->data)
    {
      struct hash_entry *new_entry = allocate_entry (table);

      if (new_entry == NULL)
        return NULL;

      /* Add ENTRY in the overflow of the bucket.  */
      new_entry->data = (void *) entry;
      new_entry->next = bucket->next;
      bucket->next = new_entry;
      table->n_entries++;
      return (void *) entry;
    }

  /* Add ENTRY right in the bucket head.  */
  bucket->data = (void *) entry;
  table->n_entries++;
  table->n_buckets_used++;

  /* If the growth threshold of the buckets in use has been reached, increase
     the table size and rehash.  There's no point in checking the number of
     entries:  if the hashing function is ill-conditioned, rehashing is not
     likely to improve it.  */

  if (table->n_buckets_used
      > table->tuning->growth_threshold * table->n_buckets)
    {
      /* Check more fully, before starting real work.  If tuning arguments
         became invalid, the second check will rely on proper defaults.  */
      check_tuning (table);
      if (table->n_buckets_used
          > table->tuning->growth_threshold * table->n_buckets)
        {
          const Hash_tuning *tuning = table->tuning;
          float candidate =
            (tuning->is_n_buckets
             ? (table->n_buckets * tuning->growth_factor)
             : (table->n_buckets * tuning->growth_factor
                * tuning->growth_threshold));

          if (SIZE_MAX <= candidate)
            return NULL;

          /* If the rehash fails, arrange to return NULL.  */
          if (!hash_rehash (table, candidate))
            entry = NULL;
        }
    }

  return (void *) entry;
}

/* If ENTRY is already in the table, remove it and return the just-deleted
   data (the user may want to deallocate its storage).  If ENTRY is not in the
   table, don't modify the table and return NULL.  */

void *
hash_delete (Hash_table *table, const void *entry)
{
  void *data;
  struct hash_entry *bucket;

  data = hash_find_entry (table, entry, &bucket, true);
  if (!data)
    return NULL;

  table->n_entries--;
  if (!bucket->data)
    {
      table->n_buckets_used--;

      /* If the shrink threshold of the buckets in use has been reached,
         rehash into a smaller table.  */

      if (table->n_buckets_used
          < table->tuning->shrink_threshold * table->n_buckets)
        {
          /* Check more fully, before starting real work.  If tuning arguments
             became invalid, the second check will rely on proper defaults.  */
          check_tuning (table);
          if (table->n_buckets_used
              < table->tuning->shrink_threshold * table->n_buckets)
            {
              const Hash_tuning *tuning = table->tuning;
              size_t candidate =
                (tuning->is_n_buckets
                 ? table->n_buckets * tuning->shrink_factor
                 : (table->n_buckets * tuning->shrink_factor
                    * tuning->growth_threshold));

              hash_rehash (table, candidate);
            }
        }
    }

  return data;
}

/* Testing.  */

#if TESTING

void
hash_print (const Hash_table *table)
{
  struct hash_entry const *bucket;

  for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
    {
      struct hash_entry *cursor;

      if (bucket)
        printf ("%lu:\n", (unsigned long int) (bucket - table->bucket));

      for (cursor = bucket; cursor; cursor = cursor->next)
        {
          char const *s = cursor->data;

          /* FIXME */
          if (s)
            printf ("  %s\n", s);
        }
    }
}

#endif /* TESTING */
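To make the API above concrete, here is a minimal usage sketch. It is not part of hash.c: it assumes the companion hash.h header from the same module (which declares Hash_table, hash_initialize and the other functions in the listing), and the string_hasher/string_comparator helpers are hand-rolled stand-ins written only for this illustration; callers normally supply their own hash and comparison functions.

/* Usage sketch only -- not part of hash.c.  Assumes the companion
   "hash.h" header from the same module.  */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include "hash.h"

/* Illustrative hasher: fold the characters of a NUL-terminated string
   and reduce the result modulo the current number of buckets.  */
static size_t
string_hasher (const void *entry, size_t n_buckets)
{
  const unsigned char *s = entry;
  size_t h = 0;
  while (*s)
    h = h * 31 + *s++;
  return h % n_buckets;
}

/* Illustrative comparator: true when both entries hold equal strings.  */
static bool
string_comparator (const void *a, const void *b)
{
  return strcmp (a, b) == 0;
}

int
main (void)
{
  /* 10 is only a capacity hint; hash_initialize rounds the bucket count
     up to a prime.  A NULL tuning selects the defaults.  The last argument
     is the data_freer; it is NULL here because the entries are string
     literals, but it could be free() if the entries were malloc'd.  */
  Hash_table *table = hash_initialize (10, NULL, string_hasher,
                                       string_comparator, NULL);
  if (!table)
    return 1;

  hash_insert (table, "shift");
  hash_insert (table, "reduce");

  /* hash_delete returns the stored pointer (or NULL when absent) so the
     caller can release any storage the entry owns.  */
  if (hash_delete (table, "shift"))
    puts ("removed \"shift\"");

  hash_free (table);   /* releases buckets, overflow entries and the table.  */
  return 0;
}

Note that inserting a string comparing equal to one already in the table returns the existing entry rather than adding a duplicate, per hash_insert's contract above, and with so few entries the growth threshold is never crossed, so no rehash is triggered.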
