📄 kmem_vmm.c
字号:
/* Copyright (C) 2000 Thomas Petazzoni
   Copyright (C) 2004 David Decotigny

   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2 of the License, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
   more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc., 59
   Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#include <sos/list.h>
#include <sos/physmem.h>
#include <hwcore/paging.h>
#include <sos/assert.h>

#include "kmem_vmm.h"

/** The structure of a range of kernel-space virtual addresses */
struct sos_kmem_range
{
  sos_vaddr_t base_vaddr;  /* First virtual address of the range */
  sos_count_t nb_pages;    /* Size of the range, in pages */

  /* The slab owning this range, or NULL */
  struct sos_kslab *slab;

  /* Chaining in the free/used range lists (managed by the list.h macros) */
  struct sos_kmem_range *prev, *next;
};

/* Size of the range descriptor, exported so that other modules can
   size their allocations without seeing the structure definition */
const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);

/** The ranges are SORTED in (strictly) ascending base addresses */
static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;

/** The slab cache for the kmem ranges */
static struct sos_kslab_cache *kmem_range_cache;

/**
 * Helper function to get the closest preceding or containing range
 * for the given virtual address.
 *
 * @param the_list List of ranges, sorted by ascending base address
 * @param vaddr    Virtual address to locate
 * @return The last range whose base address is <= vaddr, or NULL when
 *         vaddr lies strictly before the first range (or the list is
 *         empty)
 */
static struct sos_kmem_range *
get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
                                 sos_vaddr_t vaddr)
{
  int nb_elements;
  struct sos_kmem_range *a_range, *ret_range;

  /* kmem_range list is kept SORTED, so we can stop scanning as soon
     as we reach a range whose base address exceeds vaddr: the answer
     is then the previously visited range (possibly NULL) */
  ret_range = NULL;
  list_foreach(the_list, a_range, nb_elements)
    {
      if (vaddr < a_range->base_vaddr)
        return ret_range;
      ret_range = a_range;
    }

  /* This will always be the LAST range in the kmem area */
  return ret_range;
}

/**
 * Helper
 * function to lookup a free range large enough to hold nb_pages
 * pages (first fit)
 *
 * @return The first free range with nb_pages or more pages, or NULL
 */
static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
{
  int nb_elements;
  struct sos_kmem_range *r;

  /* First-fit scan of the (sorted) free-range list */
  list_foreach(kmem_free_range_list, r, nb_elements)
    {
      if (r->nb_pages >= nb_pages)
        return r;
    }

  /* No free range is large enough */
  return NULL;
}

/**
 * Helper function to add a_range in the_list, in strictly ascending order.
 *
 * @return The (possibly) new head of the_list
 */
static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
                                           struct sos_kmem_range *a_range)
{
  struct sos_kmem_range *prec_used;

  /** Look for any preceding range */
  prec_used = get_closest_preceding_kmem_range(the_list,
                                               a_range->base_vaddr);

  /** insert a_range /after/ this prec_used */
  if (prec_used != NULL)
    list_insert_after(the_list, prec_used, a_range);
  else /* Insert at the beginning of the list */
    list_add_head(the_list, a_range);

  return the_list;
}

/**
 * Helper function to retrieve the range owning the given vaddr, by
 * scanning the physical memory first if vaddr is mapped in RAM
 *
 * @return The owning range, or NULL when no range covers vaddr
 */
static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range;

  /* First: try to retrieve the physical page mapped at this
     address */
  sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));
  if (ppage_paddr)
    {
      range = sos_physmem_get_kmem_range(ppage_paddr);

      /* If a page is mapped at this address, it is EXPECTED that it
         is really associated with a range */
      SOS_ASSERT_FATAL(range != NULL);
    }

  /* Otherwise scan the list of used ranges, looking for the range
     owning the address */
  else
    {
      range = get_closest_preceding_kmem_range(kmem_used_range_list,
                                               vaddr);
      /* Not found */
      if (! range)
        return NULL;

      /* vaddr not covered by this range */
      if ( (vaddr < range->base_vaddr)
           || (vaddr >= (range->base_vaddr
                         + range->nb_pages*SOS_PAGE_SIZE)) )
        return NULL;
    }

  return range;
}

/**
 * Helper function for sos_kmem_vmm_setup() to initialize a new range
 * that maps a given area as free or as already used.
 * This function either succeeds or halts the whole system.
 *
 * @param is_free         TRUE to register the area in the free list,
 *                        FALSE to register it in the used list
 * @param base_vaddr      Page-aligned start of the area
 * @param top_vaddr       Page-aligned end of the area (exclusive)
 * @param associated_slab Owning slab for a used range (ignored when free)
 * @return The new range, or NULL when the area is smaller than one page
 */
static struct sos_kmem_range *create_range(sos_bool_t is_free,
                                           sos_vaddr_t base_vaddr,
                                           sos_vaddr_t top_vaddr,
                                           struct sos_kslab *associated_slab)
{
  struct sos_kmem_range *range;

  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));

  /* Areas smaller than one page are not tracked by any range */
  if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
    return NULL;

  /* Atomic allocation: this runs during early setup, before normal
     (possibly blocking) allocation is available */
  range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
                                                       SOS_KSLAB_ALLOC_ATOMIC);
  SOS_ASSERT_FATAL(range != NULL);

  range->base_vaddr = base_vaddr;
  range->nb_pages = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;

  if (is_free)
    {
      list_add_tail(kmem_free_range_list,
                    range);
    }
  else
    {
      sos_vaddr_t vaddr;
      range->slab = associated_slab;
      list_add_tail(kmem_used_range_list,
                    range);

      /* Ok, set the range owner for the pages in this page */
      for (vaddr = base_vaddr ;
           vaddr < top_vaddr ;
           vaddr += SOS_PAGE_SIZE)
        {
          sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
          /* A used range is expected to be backed by mapped RAM */
          SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
          sos_physmem_set_kmem_range(ppage_paddr, range);
        }
    }

  return range;
}


/**
 * Set up the kernel VMM subsystem: bootstrap the range cache, then
 * register the initial free/used ranges covering the whole kernel
 * virtual address space.
 */
sos_ret_t
sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
                             sos_vaddr_t kernel_core_top,
                             sos_vaddr_t bootstrap_stack_bottom_vaddr,
                             sos_vaddr_t bootstrap_stack_top_vaddr)
{
  struct sos_kslab *first_struct_slab_of_caches,
    *first_struct_slab_of_ranges;
  sos_vaddr_t first_slab_of_caches_base,
    first_slab_of_caches_nb_pages,
    first_slab_of_ranges_base,
    first_slab_of_ranges_nb_pages;
  struct sos_kmem_range *first_range_of_caches,
    *first_range_of_ranges;

  list_init(kmem_free_range_list);
  list_init(kmem_used_range_list);

  /* Bootstrap the slab allocator and retrieve the location of its two
     initial slabs, so that the address space they occupy can be
     registered as "used" ranges below */
  kmem_range_cache
    = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
                                             kernel_core_top,
                                             sizeof(struct sos_kmem_range),
                                             & first_struct_slab_of_caches,
                                             & first_slab_of_caches_base,
                                             & first_slab_of_caches_nb_pages,
                                             & first_struct_slab_of_ranges,
                                             & first_slab_of_ranges_base,
                                             & first_slab_of_ranges_nb_pages);
  SOS_ASSERT_FATAL(kmem_range_cache != NULL);

  /* Mark virtual addresses 16kB - Video as FREE */
  create_range(TRUE,
               SOS_KMEM_VMM_BASE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               NULL);

  /* Mark virtual addresses in Video hardware mapping as NOT FREE */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               NULL);

  /* Mark virtual addresses Video - Kernel as FREE */
  create_range(TRUE,
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               NULL);

  /* Mark virtual addresses in Kernel code/data up to the bootstrap stack
     as NOT FREE */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               bootstrap_stack_bottom_vaddr,
               NULL);

  /* Mark virtual addresses in the bootstrap stack as NOT FREE too,
     but in another vmm region in order to be un-allocated later */
  create_range(FALSE,
               bootstrap_stack_bottom_vaddr,
               bootstrap_stack_top_vaddr,
               NULL);

  /* Mark the remaining virtual addresses in Kernel code/data after
     the bootstrap stack as NOT FREE */
  create_range(FALSE,
               bootstrap_stack_top_vaddr,
               SOS_PAGE_ALIGN_SUP(kernel_core_top),
               NULL);

  /* Mark virtual addresses in the first slab of the cache of caches
     as NOT FREE */
  SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
                   == first_slab_of_caches_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
  first_range_of_caches
    = create_range(FALSE,
                   first_slab_of_caches_base,
                   first_slab_of_caches_base
                   + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_caches);

  /* Mark virtual addresses in the first slab of the cache of ranges
     as NOT FREE */
  SOS_ASSERT_FATAL((first_slab_of_caches_base
                    + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
                   == first_slab_of_ranges_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
  first_range_of_ranges
    = create_range(FALSE,
                   first_slab_of_ranges_base,
                   first_slab_of_ranges_base
                   + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_ranges);

  /* Mark virtual addresses after these slabs as FREE */
  /* NOTE(review): SOURCE is truncated here, mid-call -- the remainder
     of this function (and of the file) is missing from the visible
     chunk; recover it from the original before building */
  create_range(TRUE,
               first_slab_of_ranges_base
               + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -