/* kmem_slab.c */
/* Copyright (C) 2000  Thomas Petazzoni
   Copyright (C) 2004  David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307,
   USA.
*/
#include <sos/macros.h>
#include <sos/klibc.h>
#include <sos/list.h>
#include <sos/assert.h>
#include <hwcore/paging.h>
#include <sos/physmem.h>
#include <sos/kmem_vmm.h>

#include "kmem_slab.h"

/* Dimensioning constants */
#define NB_PAGES_IN_SLAB_OF_CACHES 1
#define NB_PAGES_IN_SLAB_OF_RANGES 1

#ifndef MAX_PAGES_PER_SLAB
/* Upper bound on the size of a slab, in pages. The value below is an
   assumption: cache_initialize() uses this macro, but its definition
   is not part of this excerpt. */
#  define MAX_PAGES_PER_SLAB 32
#endif

/** The structure of a slab cache */
struct sos_kslab_cache
{
  char *name;

  /* Non-mutable characteristics of this slab */
  sos_size_t  original_obj_size; /* asked object size */
  sos_size_t  alloc_obj_size;    /* actual object size, taking the
                                    alignment constraints into account */
  sos_count_t nb_objects_per_slab;
  sos_count_t nb_pages_per_slab;
  sos_count_t min_free_objects;

/* Slab cache flags */
// #define SOS_KSLAB_CREATE_MAP  (1<<0) /* See kmem_slab.h */
// #define SOS_KSLAB_CREATE_ZERO (1<<1) /* See kmem_slab.h */
#define ON_SLAB (1<<31) /* struct sos_kslab is included inside the slab */
  sos_ui32_t  flags;

  /* Supervision data (updated at run-time) */
  sos_count_t nb_free_objects;

  /* The list of slabs owned by this cache */
  struct sos_kslab *slab_list; /* head = non full, tail = full */

  /* The caches are linked together on the kslab_cache_list */
  struct sos_kslab_cache *prev, *next;
};


/** The structure of a slab */
struct sos_kslab
{
  /** Number of free objects on this slab */
  sos_count_t nb_free;

  /** The list of these free objects */
  struct sos_kslab_free_object *free;

  /** The address of the associated range structure */
  struct sos_kmem_range *range;

  /** Virtual start address of this range */
  sos_vaddr_t first_object;

  /** Slab cache owning this slab */
  struct sos_kslab_cache *cache;

  /** Links to the other slabs managed by the same cache */
  struct sos_kslab *prev, *next;
};


/** The structure of the free objects in the slab */
struct sos_kslab_free_object
{
  struct sos_kslab_free_object *prev, *next;
};

/** The cache of slab caches */
static struct sos_kslab_cache *cache_of_struct_kslab_cache;

/** The cache of slab structures for non-ON_SLAB caches */
static struct sos_kslab_cache *cache_of_struct_kslab;

/** The list of slab caches */
static struct sos_kslab_cache *kslab_cache_list;


/* Helper function to initialize a cache structure */
static sos_ret_t
cache_initialize(/*out*/struct sos_kslab_cache *the_cache,
                 const char* name,
                 sos_size_t  obj_size,
                 sos_count_t pages_per_slab,
                 sos_count_t min_free_objs,
                 sos_ui32_t  cache_flags)
{
  unsigned int space_left;
  sos_size_t   alloc_obj_size;

  if (obj_size <= 0)
    return -SOS_EINVAL;

  /* Default allocation size is the requested one */
  alloc_obj_size = obj_size;

  /* Make sure the requested size is large enough to store a
     free_object structure */
  if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
    alloc_obj_size = sizeof(struct sos_kslab_free_object);

  /* Align obj_size on 4 bytes */
  alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));

  /* Make sure the supplied number of pages per slab is consistent
     with the actual allocated object size */
  if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
    return -SOS_EINVAL;

  /* Refuse too large slabs */
  if (pages_per_slab > MAX_PAGES_PER_SLAB)
    return -SOS_ENOMEM;

  /* Fill in the cache structure */
  memset(the_cache, 0x0, sizeof(struct sos_kslab_cache));
  the_cache->name              = (char*)name;
  the_cache->flags             = cache_flags;
  the_cache->original_obj_size = obj_size;
  the_cache->alloc_obj_size    = alloc_obj_size;
  the_cache->min_free_objects  = min_free_objs;
  the_cache->nb_pages_per_slab = pages_per_slab;

  /* Small objects => the slab structure is allocated directly inside
     the slab */
  if (alloc_obj_size <= sizeof(struct sos_kslab))
    the_cache->flags |= ON_SLAB;

  /*
   * Compute the space left once the maximum number of objects
   * have been allocated in the slab
   */
  space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
  if (the_cache->flags & ON_SLAB)
    space_left -= sizeof(struct sos_kslab);
  the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
  space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;

  /* Make sure a single slab is large enough to contain the minimum
     number of objects requested */
  if (the_cache->nb_objects_per_slab < min_free_objs)
    return -SOS_EINVAL;

  /* If there is now enough room left for both the objects and the
     slab structure, then make the slab structure ON_SLAB */
  if (space_left >= sizeof(struct sos_kslab))
    the_cache->flags |= ON_SLAB;

  return SOS_OK;
}
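
/*
 * Worked example for cache_initialize(). The figures are illustrative
 * only: they assume an ia32 target with SOS_PAGE_SIZE = 4096 and
 * sizeof(struct sos_kslab) = 28 (4-byte pointers and counters).
 *
 *   1-page cache of 16-byte objects (16 <= sizeof(struct sos_kslab),
 *   so the cache is automatically made ON_SLAB):
 *     space_left          = 1*4096 - 28   = 4068
 *     nb_objects_per_slab = 4068 / 16     = 254
 *     leftover            = 4068 - 254*16 = 4
 *
 *   254 objects fit in each slab; the 4 leftover bytes stay unused.
 */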

/** Helper function to add a new slab for the given cache. */
static sos_ret_t
cache_add_slab(struct sos_kslab_cache *kslab_cache,
               sos_vaddr_t vaddr_slab,
               struct sos_kslab *slab)
{
  int i;

  /* Set up the slab structure */
  memset(slab, 0x0, sizeof(struct sos_kslab));
  slab->cache = kslab_cache;

  /* Establish the address of the first free object */
  slab->first_object = vaddr_slab;

  /* Account for this new slab in the cache */
  slab->nb_free = kslab_cache->nb_objects_per_slab;
  kslab_cache->nb_free_objects += slab->nb_free;

  /* Build the list of free objects */
  for (i = 0 ; i < kslab_cache->nb_objects_per_slab ; i++)
    {
      sos_vaddr_t obj_vaddr;

      /* Set the object's address */
      obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;

      /* Add it to the list of free objects */
      list_add_tail(slab->free,
                    (struct sos_kslab_free_object *)obj_vaddr);
    }

  /* Add the slab to the cache's slab list: add it at the head of the
     list, since this slab is not full */
  list_add_head(kslab_cache->slab_list, slab);

  return SOS_OK;
}
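
/*
 * Resulting layout of a freshly added ON_SLAB slab (sketch, not to
 * scale; for OFF-slab caches the struct sos_kslab is instead
 * allocated from cache_of_struct_kslab):
 *
 *   first_object                                       end of range
 *   |                                                             |
 *   [ obj 0 ][ obj 1 ] ... [ obj N-1 ][ unused ][struct sos_kslab ]
 *
 * Object i starts at first_object + i*alloc_obj_size, and each free
 * object holds a struct sos_kslab_free_object linking it into
 * slab->free.
 */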

/** Helper function to allocate a new slab for the given kslab_cache */
static sos_ret_t
cache_grow(struct sos_kslab_cache *kslab_cache,
           sos_ui32_t alloc_flags)
{
  sos_ui32_t range_alloc_flags;

  struct sos_kmem_range *new_range;
  sos_vaddr_t new_range_start;

  struct sos_kslab *new_slab;

  /*
   * Set up the flags for the range allocation
   */
  range_alloc_flags = 0;

  /* Atomic ? */
  if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)
    range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;

  /* Need physical mapping NOW ? */
  if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP | SOS_KSLAB_CREATE_ZERO))
    range_alloc_flags |= SOS_KMEM_VMM_MAP;

  /* Allocate the range */
  new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
                                     range_alloc_flags,
                                     & new_range_start);
  if (! new_range)
    return -SOS_ENOMEM;

  /* Allocate the slab structure */
  if (kslab_cache->flags & ON_SLAB)
    {
      /* Slab structure is ON the slab: simply set its address to the
         end of the range */
      sos_vaddr_t slab_vaddr
        = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
          - sizeof(struct sos_kslab);
      new_slab = (struct sos_kslab*)slab_vaddr;
    }
  else
    {
      /* Slab structure is OFF the slab: allocate it from the cache of
         slab structures */
      sos_vaddr_t slab_vaddr
        = sos_kmem_cache_alloc(cache_of_struct_kslab, alloc_flags);
      if (! slab_vaddr)
        {
          sos_kmem_vmm_del_range(new_range);
          return -SOS_ENOMEM;
        }
      new_slab = (struct sos_kslab*)slab_vaddr;
    }

  cache_add_slab(kslab_cache, new_range_start, new_slab);
  new_slab->range = new_range;

  /* Set the backlink from the range to this slab */
  sos_kmem_vmm_set_slab(new_range, new_slab);

  return SOS_OK;
}


/**
 * Helper function to release a slab
 *
 * The corresponding range is always deleted, unless must_del_range_now
 * is not set. That happens only when the function gets called from
 * sos_kmem_cache_release_struct_range(), to avoid deep recursions.
 */
static sos_ret_t
cache_release_slab(struct sos_kslab *slab,
                   sos_bool_t must_del_range_now)
{
  struct sos_kslab_cache *kslab_cache = slab->cache;
  struct sos_kmem_range *range = slab->range;

  SOS_ASSERT_FATAL(kslab_cache != NULL);
  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);

  /* First, remove the slab from the cache's list of slabs */
  list_delete(kslab_cache->slab_list, slab);
  slab->cache->nb_free_objects -= slab->nb_free;

  /* Release the slab structure if it is OFF slab */
  if (! (slab->cache->flags & ON_SLAB))
    sos_kmem_cache_free((sos_vaddr_t)slab);

  /* Ok, the range is not bound to any slab anymore */
  sos_kmem_vmm_set_slab(range, NULL);

  /* Always delete the range now, unless we are told not to do so (see
     sos_kmem_cache_release_struct_range() below) */
  if (must_del_range_now)
    return sos_kmem_vmm_del_range(range);

  return SOS_OK;
}
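
/*
 * Usage sketch for the public allocation entry points (illustrative
 * only: 'my_cache' would come from the cache-creation entry point
 * declared in kmem_slab.h, which is not part of this excerpt):
 *
 *   sos_vaddr_t obj = sos_kmem_cache_alloc(my_cache, 0);
 *   if (obj)
 *     {
 *       ... use the object ...
 *       sos_kmem_cache_free(obj);
 *     }
 */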

/**
 * Helper function to create the initial cache of caches, with a very
 * first slab in it, so that new cache structures can be simply
 * allocated.
 * @return the cache structure for the cache of caches
 */
static struct sos_kslab_cache *
create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
                       int nb_pages)
{
  /* The preliminary cache structure needed to allocate the first
     slab in the cache of caches (allocated on the stack !) */
  struct sos_kslab_cache fake_cache_of_caches;

  /* The real cache structure for the cache of caches */
  struct sos_kslab_cache *real_cache_of_caches;

  /* The kslab structure for this very first slab */
  struct sos_kslab *slab_of_caches;

  /* Init the cache structure for the cache of caches */
  if (cache_initialize(& fake_cache_of_caches,
                       "Caches",
                       sizeof(struct sos_kslab_cache),
                       nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
    /* Something wrong with the parameters */
    return NULL;

  memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE);

  /* Add the pages for the 1st slab of caches */
  slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
                                       + nb_pages*SOS_PAGE_SIZE
                                       - sizeof(struct sos_kslab));

  /* Add the abovementioned 1st slab to the cache of caches */
  cache_add_slab(& fake_cache_of_caches,
                 vaddr_first_slab_of_caches,
                 slab_of_caches);

  /* Now allocate a cache structure that will become the real cache of
     caches, i.e. a cache structure allocated INSIDE the cache of
     caches, not on the stack */
  real_cache_of_caches
    = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
                                                     0);
  /* Initialize it */
  memcpy(real_cache_of_caches, & fake_cache_of_caches,
         sizeof(struct sos_kslab_cache));

  /* Update the slab's 'cache' field */
  slab_of_caches->cache = real_cache_of_caches;

  /* Add the cache to the list of slab caches */
  list_add_tail(kslab_cache_list, real_cache_of_caches);

  return real_cache_of_caches;
}


/**
 * Helper function to create the initial cache of ranges, with a very
 * first slab in it, so that new kmem_range structures can be simply
 * allocated.
 * @return the cache of kmem_range
 */
static struct sos_kslab_cache *
create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges,
                       sos_size_t  sizeof_struct_range,
                       int nb_pages)
{
  /* The cache structure for the cache of kmem_range */
  struct sos_kslab_cache *cache_of_ranges;

  /* The kslab structure for the very first slab of ranges */
  struct sos_kslab *slab_of_ranges;

  cache_of_ranges = (struct sos_kslab_cache*)
    sos_kmem_cache_alloc(cache_of_struct_kslab_cache, 0);
  if (! cache_of_ranges)
    return NULL;

  /* Init the cache structure for the cache of ranges, with min
     objects per slab = 2 !!! */
  if (cache_initialize(cache_of_ranges,
                       "struct kmem_range",
                       sizeof_struct_range,
                       nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
    /* Something wrong with the parameters */
    return NULL;