
kmem_slab.c

Simple Operating Systems (SOS for short) is a simple operating system that can run on the x86 platform (including QEMU).
Language: C
Page 1 of 2
    return NULL;

  /* Add the cache to the list of slab caches */
  list_add_tail(kslab_cache_list, cache_of_ranges);

  /*
   * Add the first slab for this cache
   */
  memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE);

  /* Add the pages for the 1st slab of ranges */
  slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges
                                       + nb_pages*SOS_PAGE_SIZE
                                       - sizeof(struct sos_kslab));

  cache_add_slab(cache_of_ranges,
                 vaddr_first_slab_of_ranges,
                 slab_of_ranges);

  return cache_of_ranges;
}


struct sos_kslab_cache *
sos_kmem_cache_setup_prepare(sos_vaddr_t kernel_core_base,
                             sos_vaddr_t kernel_core_top,
                             sos_size_t  sizeof_struct_range,
                             /* results */
                             struct sos_kslab **first_struct_slab_of_caches,
                             sos_vaddr_t *first_slab_of_caches_base,
                             sos_count_t *first_slab_of_caches_nb_pages,
                             struct sos_kslab **first_struct_slab_of_ranges,
                             sos_vaddr_t *first_slab_of_ranges_base,
                             sos_count_t *first_slab_of_ranges_nb_pages)
{
  int i;
  sos_ret_t   retval;
  sos_vaddr_t vaddr;

  /* The cache of ranges we are about to allocate */
  struct sos_kslab_cache *cache_of_ranges;

  /* In the beginning, there isn't any cache */
  kslab_cache_list = NULL;
  cache_of_struct_kslab = NULL;
  cache_of_struct_kslab_cache = NULL;

  /*
   * Create the cache of caches, initialised with 1 allocated slab
   */

  /* Allocate the pages needed for the 1st slab of caches, and map them
     in kernel space, right after the kernel */
  *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
  for (i = 0, vaddr = *first_slab_of_caches_base ;
       i < NB_PAGES_IN_SLAB_OF_CACHES ;
       i++, vaddr += SOS_PAGE_SIZE)
    {
      sos_paddr_t ppage_paddr;

      ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
      SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);

      retval = sos_paging_map(ppage_paddr, vaddr,
                              FALSE,
                              SOS_VM_MAP_ATOMIC
                              | SOS_VM_MAP_PROT_READ
                              | SOS_VM_MAP_PROT_WRITE);
      SOS_ASSERT_FATAL(retval == SOS_OK);

      retval = sos_physmem_unref_physpage(ppage_paddr);
      SOS_ASSERT_FATAL(retval == FALSE);
    }

  /* Create the cache of caches */
  *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
  cache_of_struct_kslab_cache
    = create_cache_of_caches(*first_slab_of_caches_base,
                             NB_PAGES_IN_SLAB_OF_CACHES);
  SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);

  /* Retrieve the slab that should have been allocated */
  *first_struct_slab_of_caches
    = list_get_head(cache_of_struct_kslab_cache->slab_list);

  /*
   * Create the cache of ranges, initialised with 1 allocated slab
   */
  *first_slab_of_ranges_base = vaddr;
  /* Allocate the 1st slab */
  for (i = 0, vaddr = *first_slab_of_ranges_base ;
       i < NB_PAGES_IN_SLAB_OF_RANGES ;
       i++, vaddr += SOS_PAGE_SIZE)
    {
      sos_paddr_t ppage_paddr;

      ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
      SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);

      retval = sos_paging_map(ppage_paddr, vaddr,
                              FALSE,
                              SOS_VM_MAP_ATOMIC
                              | SOS_VM_MAP_PROT_READ
                              | SOS_VM_MAP_PROT_WRITE);
      SOS_ASSERT_FATAL(retval == SOS_OK);

      retval = sos_physmem_unref_physpage(ppage_paddr);
      SOS_ASSERT_FATAL(retval == FALSE);
    }

  /* Create the cache of ranges */
  *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
  cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
                                           sizeof_struct_range,
                                           NB_PAGES_IN_SLAB_OF_RANGES);
  SOS_ASSERT_FATAL(cache_of_ranges != NULL);

  /* Retrieve the slab that should have been allocated */
  *first_struct_slab_of_ranges
    = list_get_head(cache_of_ranges->slab_list);

  /*
   * Create the cache of slabs, without any allocated slab yet
   */
  cache_of_struct_kslab
    = sos_kmem_cache_create("off-slab slab structures",
                            sizeof(struct sos_kslab),
                            1,
                            0,
                            SOS_KSLAB_CREATE_MAP);
  SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);

  return cache_of_ranges;
}


sos_ret_t
sos_kmem_cache_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
                            struct sos_kmem_range *first_range_of_caches,
                            struct sos_kslab *first_struct_slab_of_ranges,
                            struct sos_kmem_range *first_range_of_ranges)
{
  first_struct_slab_of_caches->range = first_range_of_caches;
  first_struct_slab_of_ranges->range = first_range_of_ranges;
  return SOS_OK;
}


struct sos_kslab_cache *
sos_kmem_cache_create(const char* name,
                      sos_size_t  obj_size,
                      sos_count_t pages_per_slab,
                      sos_count_t min_free_objs,
                      sos_ui32_t  cache_flags)
{
  struct sos_kslab_cache *new_cache;

  /* Allocate the new cache */
  new_cache = (struct sos_kslab_cache*)
    sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
                         0/* NOT ATOMIC */);
  if (! new_cache)
    return NULL;

  if (cache_initialize(new_cache, name, obj_size,
                       pages_per_slab, min_free_objs,
                       cache_flags))
    {
      /* Something was wrong */
      sos_kmem_cache_free((sos_vaddr_t)new_cache);
      return NULL;
    }

  /* Add the cache to the list of slab caches */
  list_add_tail(kslab_cache_list, new_cache);

  /* If min_free_objs is set, pre-allocate a slab */
  if (min_free_objs)
    {
      if (cache_grow(new_cache, 0 /* Not atomic */) != SOS_OK)
        {
          sos_kmem_cache_destroy(new_cache);
          return NULL; /* Not enough memory */
        }
    }

  return new_cache;
}


sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache)
{
  int nb_slabs;
  struct sos_kslab *slab;

  if (! kslab_cache)
    return -SOS_EINVAL;

  /* Refuse to destroy the cache if there are any objects still
     allocated */
  list_foreach(kslab_cache->slab_list, slab, nb_slabs)
    {
      if (slab->nb_free != kslab_cache->nb_objects_per_slab)
        return -SOS_EBUSY;
    }

  /* Remove all the slabs */
  while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
    {
      cache_release_slab(slab, TRUE);
    }

  /* Remove the cache */
  return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
}


sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
                                 sos_ui32_t alloc_flags)
{
  sos_vaddr_t obj_vaddr;
  struct sos_kslab * slab_head;
#define ALLOC_RET return

  /* If the slab at the head of the slabs' list has no free object,
     then the other slabs don't either => need to allocate a new
     slab */
  if ((! kslab_cache->slab_list)
      || (! list_get_head(kslab_cache->slab_list)->free))
    {
      if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
        /* Not enough memory or blocking alloc */
        ALLOC_RET( (sos_vaddr_t)NULL);
    }

  /* Here: we are sure that list_get_head(kslab_cache->slab_list)
     exists *AND* that list_get_head(kslab_cache->slab_list)->free is
     NOT NULL */
  slab_head = list_get_head(kslab_cache->slab_list);
  SOS_ASSERT_FATAL(slab_head != NULL);

  /* Allocate the object at the head of the slab at the head of the
     slabs' list */
  obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
  slab_head->nb_free --;
  kslab_cache->nb_free_objects --;

  /* If needed, reset object's contents */
  if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
    memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);

  /* Slab is now full ? */
  if (slab_head->free == NULL)
    {
      /* Transfer it to the tail of the slabs' list */
      struct sos_kslab *slab;
      slab = list_pop_head(kslab_cache->slab_list);
      list_add_tail(kslab_cache->slab_list, slab);
    }

  /*
   * For caches that require a minimum amount of free objects left,
   * allocate a slab if needed.
   *
   * Notice the "== min_free_objects - 1": we did not write
   * "< min_free_objects" because, for the cache of kmem structures,
   * that would lead to a chicken-and-egg problem: cache_grow below
   * would call cache_alloc again for the kmem_vmm cache, bringing us
   * back here with the same cache. If the test were
   * "< min_free_objects", we would call cache_grow for the kmem_vmm
   * cache again and again, until we reached the bottom of the stack
   * (infinite recursion). With the exact "==" test, cache_grow is
   * only called the first time.
   */
  if ((kslab_cache->min_free_objects > 0)
      && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1)))
    {
      /* Allocate a new slab now */
      if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
        {
          /* Not enough free memory or blocking alloc => undo the
             allocation */
          sos_kmem_cache_free(obj_vaddr);
          ALLOC_RET( (sos_vaddr_t)NULL);
        }
    }

  ALLOC_RET(obj_vaddr);
}


/**
 * Helper function to free the object located at the given address.
 *
 * @param empty_slab is the address of the slab to release, if removing
 * the object causes the slab to become empty.
 */
inline static
sos_ret_t
free_object(sos_vaddr_t vaddr,
            struct sos_kslab ** empty_slab)
{
  struct sos_kslab_cache *kslab_cache;

  /* Lookup the slab containing the object in the slabs' list */
  struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);

  /* By default, consider that the slab will not become empty */
  *empty_slab = NULL;

  /* Did not find the slab */
  if (! slab)
    return -SOS_EINVAL;

  SOS_ASSERT_FATAL(slab->cache);
  kslab_cache = slab->cache;

  /*
   * Check whether the address really could mark the start of an actual
   * allocated object
   */
  /* Address multiple of an object's size ? */
  if (( (vaddr - slab->first_object)
        % kslab_cache->alloc_obj_size) != 0)
    return -SOS_EINVAL;
  /* Address not too large ? */
  if (( (vaddr - slab->first_object)
        / kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
    return -SOS_EINVAL;

  /*
   * Ok: we now release the object
   */

  /* We found a full slab => it won't be full any more => move it
     to the head of the slabs' list */
  if (! slab->free)
    {
      list_delete(kslab_cache->slab_list, slab);
      list_add_head(kslab_cache->slab_list, slab);
    }

  /* Release the object */
  list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
  slab->nb_free++;
  kslab_cache->nb_free_objects++;
  SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);

  /* Cause the slab to be released if it becomes empty, and if we are
     allowed to do it */
  if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
      && (kslab_cache->nb_free_objects - slab->nb_free
          >= kslab_cache->min_free_objects))
    {
      *empty_slab = slab;
    }

  return SOS_OK;
}


sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
{
  sos_ret_t retval;
  struct sos_kslab *empty_slab;

  /* Remove the object from the slab */
  retval = free_object(vaddr, & empty_slab);
  if (retval != SOS_OK)
    return retval;

  /* Remove the slab and the underlying range if needed */
  if (empty_slab != NULL)
    return cache_release_slab(empty_slab, TRUE);

  return SOS_OK;
}


struct sos_kmem_range *
sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
{
  sos_ret_t retval;
  struct sos_kslab *empty_slab;

  /* Remove the object from the slab */
  retval = free_object((sos_vaddr_t)the_range, & empty_slab);
  if (retval != SOS_OK)
    return NULL;

  /* Remove the slab BUT NOT the underlying range if needed */
  if (empty_slab != NULL)
    {
      struct sos_kmem_range *empty_range = empty_slab->range;
      SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
      SOS_ASSERT_FATAL(empty_range != NULL);
      return empty_range;
    }

  return NULL;
}
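
For orientation, here is a minimal sketch of how a kernel subsystem might drive the cache API listed above: create a cache, allocate one object, and free it again. It is not part of kmem_slab.c; struct my_object, the my_subsystem_* functions and the -SOS_ENOMEM return value are illustrative assumptions, while the sos_kmem_cache_* calls follow the signatures shown above.

/* Hypothetical usage sketch -- not part of kmem_slab.c */
struct my_object          /* invented type, for illustration only */
{
  int  id;
  char payload[52];
};

static struct sos_kslab_cache *my_object_cache;

sos_ret_t my_subsystem_setup(void)
{
  /* One page per slab, no reserved free objects, objects zeroed on
     allocation (SOS_KSLAB_CREATE_ZERO is tested in cache_alloc above) */
  my_object_cache = sos_kmem_cache_create("my objects",
                                          sizeof(struct my_object),
                                          1 /* pages per slab */,
                                          0 /* min free objects */,
                                          SOS_KSLAB_CREATE_ZERO);
  if (! my_object_cache)
    return -SOS_ENOMEM; /* assuming SOS defines this error code */

  return SOS_OK;
}

void my_subsystem_use(void)
{
  /* Non-atomic allocation: cache_grow may be called to add a slab */
  struct my_object *obj
    = (struct my_object*) sos_kmem_cache_alloc(my_object_cache,
                                               0 /* not atomic */);
  if (! obj)
    return;

  obj->id = 42;
  /* ... use the object ... */

  /* Return the object to its slab; the slab (and its underlying
     range) is released automatically if it becomes empty */
  sos_kmem_cache_free((sos_vaddr_t)obj);
}

Note the list discipline the code maintains: slabs with free objects stay at the head of slab_list and full slabs are moved to the tail, so sos_kmem_cache_alloc only ever has to inspect the head slab, and free_object moves a previously full slab back to the head.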
