📄 kmem_vmm.c
                                      SOS_KMEM_VMM_TOP, NULL);

  /* Update the cache subsystem so that the artificially-created
     caches of caches and ranges really behave like *normal* caches
     (ie those allocated by the normal slab API) */
  sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
                                        first_range_of_caches,
                                        first_struct_slab_of_ranges,
                                        first_range_of_ranges);

  return SOS_OK;
}


/**
 * Allocate a new kernel area spanning one or multiple pages.
 *
 * @return a new range structure
 */
struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
                                              sos_ui32_t  flags,
                                              sos_vaddr_t * range_start)
{
  struct sos_kmem_range *free_range, *new_range;

  if (nb_pages <= 0)
    return NULL;

  /* Find a suitable free range to hold the requested number of pages */
  free_range = find_suitable_free_range(nb_pages);
  if (free_range == NULL)
    return NULL;

  /* If the range has exactly the requested size, just move it to the
     "used" list */
  if (free_range->nb_pages == nb_pages)
    {
      list_delete(kmem_free_range_list, free_range);
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          free_range);
      /* The new_range is exactly the free_range */
      new_range = free_range;
    }

  /* Otherwise the range is bigger than the requested size: split it.
     This involves reducing its size and allocating a new range, which
     is going to be added to the "used" list */
  else
    {
      /* free_range split in { new_range | free_range } */
      new_range = (struct sos_kmem_range*)
        sos_kmem_cache_alloc(kmem_range_cache,
                             (flags & SOS_KMEM_VMM_ATOMIC)?
                             SOS_KSLAB_ALLOC_ATOMIC:0);
      if (! new_range)
        return NULL;

      new_range->base_vaddr   = free_range->base_vaddr;
      new_range->nb_pages     = nb_pages;
      free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
      free_range->nb_pages   -= nb_pages;

      /* free_range is still at the same place in the list */
      /* insert new_range in the used list */
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          new_range);
    }

  /* By default, the range is not associated with any slab */
  new_range->slab = NULL;

  /* If mapping of physical pages is needed, map them now */
  if (flags & SOS_KMEM_VMM_MAP)
    {
      int i;
      for (i = 0 ; i < nb_pages ; i ++)
        {
          /* Get a new physical page */
          sos_paddr_t ppage_paddr
            = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));

          /* Map the page in kernel space */
          if (ppage_paddr)
            {
              if (sos_paging_map(ppage_paddr,
                                 new_range->base_vaddr + i * SOS_PAGE_SIZE,
                                 FALSE /* Not a user page */,
                                 ((flags & SOS_KMEM_VMM_ATOMIC)?
                                  SOS_VM_MAP_ATOMIC:0)
                                 | SOS_VM_MAP_PROT_READ
                                 | SOS_VM_MAP_PROT_WRITE))
                {
                  /* Failed => force unallocation, see below */
                  sos_physmem_unref_physpage(ppage_paddr);
                  ppage_paddr = (sos_paddr_t)NULL;
                }
              else
                {
                  /* Success: the page can be unreferenced since it is
                     now mapped */
                  sos_physmem_unref_physpage(ppage_paddr);
                }
            }

          /* Undo the allocation if we failed to allocate or map a new
             page */
          if (! ppage_paddr)
            {
              sos_kmem_vmm_del_range(new_range);
              return NULL;
            }

          /* Ok, set the range owner for this page */
          sos_physmem_set_kmem_range(ppage_paddr, new_range);
        }
    }
  /* ... Otherwise: Demand Paging will do the job */

  if (range_start)
    *range_start = new_range->base_vaddr;

  return new_range;
}
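
/*
 * Illustrative sketch (NOT part of the original SOS sources): the split
 * performed in sos_kmem_vmm_new_range() above, reduced to its address
 * arithmetic. Assuming a 4 kB SOS_PAGE_SIZE, carving 3 pages out of an
 * 8-page free range based at 0xC0400000 yields a used range
 * [0xC0400000, 0xC0403000) and leaves a 5-page free range based at
 * 0xC0403000. The helper name below is hypothetical.
 */
static void example_split_free_range(sos_vaddr_t free_base,
                                     sos_count_t free_pages,
                                     sos_count_t wanted_pages,
                                     sos_vaddr_t *new_free_base,
                                     sos_count_t *new_free_pages)
{
  /* The used range keeps the old base and spans wanted_pages pages;
     the remaining free range starts right after it */
  *new_free_base  = free_base  + wanted_pages * SOS_PAGE_SIZE;
  *new_free_pages = free_pages - wanted_pages;
}
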

sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
{
  int i;
  struct sos_kmem_range *ranges_to_free;
  list_init(ranges_to_free);

  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(range->slab == NULL);

  /* Remove the range from the 'USED' list now */
  list_delete(kmem_used_range_list, range);

  /*
   * The following do..while() loop is here to avoid an indirect
   * recursion: if we called kmem_cache_free() directly from inside the
   * current function, we would run the risk of re-entering the current
   * function (sos_kmem_vmm_del_range()) again, which may cause problems
   * if it in turn calls kmem_slab again and sos_kmem_vmm_del_range
   * again, and again and again. This may happen while freeing ranges of
   * struct sos_kslab...
   *
   * To avoid this, we choose to call a special function of kmem_slab
   * doing almost the same as sos_kmem_cache_free(), but which does
   * NOT call us (ie sos_kmem_vmm_del_range()): instead WE add the
   * range that is to be freed to a list, and the do..while() loop is
   * here to process this list! The recursion is replaced by
   * classical iterations.
   */
  do
    {
      /* Ok, we got the range. Now, insert this range in the free list */
      kmem_free_range_list = insert_range(kmem_free_range_list, range);

      /* Unmap the physical pages */
      for (i = 0 ; i < range->nb_pages ; i ++)
        {
          /* This will work even if no page is mapped at this address */
          sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
        }

      /* If possible, coalesce it with the prev/next free ranges (there
         is always a valid prev/next link since the list is circular).
         Note: the tests below will lead to correct behaviour even if
         the list is limited to the 'range' singleton, at least as long
         as the range is not zero-sized */

      /* Merge with the preceding one? */
      if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
          == range->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *prec_free = range->prev;

          /* Merge them */
          prec_free->nb_pages += range->nb_pages;
          list_delete(kmem_free_range_list, range);

          /* Mark the range as free. This may cause the slab owning
             the range to become empty */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(range);

          /* If this causes the slab owning the range to become empty,
             add the range corresponding to the slab at the end of the
             list of the ranges to be freed: it will actually be freed
             in one of the next iterations of the do{} loop. */
          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }

          /* Set range to the beginning of this coalesced area */
          range = prec_free;
        }

      /* Merge with the next one? [NO 'else' since range may be the
         result of the merge above] */
      if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
          == range->next->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *next_range = range->next;

          /* Merge them */
          range->nb_pages += next_range->nb_pages;
          list_delete(kmem_free_range_list, next_range);

          /* Mark the next_range as free. This may cause the slab owning
             the next_range to become empty */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(next_range);

          /* If this causes the slab owning the next_range to become
             empty, add the range corresponding to the slab at the end
             of the list of the ranges to be freed: it will actually be
             freed in one of the next iterations of the do{} loop. */
          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }
        }

      /* If deleting the range(s) caused one or more range(s) to be
         freed, get the next one to free */
      if (list_is_empty(ranges_to_free))
        range = NULL; /* No range left to free */
      else
        range = list_pop_head(ranges_to_free);

    }
  /* Stop when there is no range left to be freed for now */
  while (range != NULL);

  return SOS_OK;
}
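
/*
 * Illustrative sketch (NOT part of the original SOS sources): the
 * coalescing condition tested twice in sos_kmem_vmm_del_range() above.
 * Two ranges can be merged exactly when the first one ends where the
 * second one begins. The helper name below is hypothetical.
 */
static sos_bool_t example_ranges_are_adjacent(const struct sos_kmem_range *a,
                                              const struct sos_kmem_range *b)
{
  /* a covers [a->base_vaddr, a->base_vaddr + a->nb_pages*SOS_PAGE_SIZE) */
  return (a->base_vaddr + a->nb_pages * SOS_PAGE_SIZE) == b->base_vaddr;
}
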

sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
                               sos_ui32_t  flags)
{
  struct sos_kmem_range *range
    = sos_kmem_vmm_new_range(nb_pages, flags, NULL);
  if (! range)
    return (sos_vaddr_t)NULL;

  return range->base_vaddr;
}


sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);

  /* We expect that the given address is the base address of the
     range */
  if (!range || (range->base_vaddr != vaddr))
    return -SOS_EINVAL;

  /* We expect that this range is not held by any cache */
  if (range->slab != NULL)
    return -SOS_EBUSY;

  return sos_kmem_vmm_del_range(range);
}


sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
                                struct sos_kslab *slab)
{
  if (! range)
    return -SOS_EINVAL;

  range->slab = slab;
  return SOS_OK;
}


struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);
  if (! range)
    return NULL;

  return range->slab;
}


sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);
  return (range != NULL);
}
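
/*
 * Illustrative usage sketch (NOT part of the original SOS sources):
 * how a kernel client would use the public interface above once the
 * VMM subsystem has been set up. The function name is hypothetical.
 */
static void example_kmem_vmm_usage(void)
{
  /* Reserve 2 pages of kernel virtual space, with physical pages
     allocated and mapped immediately (SOS_KMEM_VMM_MAP) */
  sos_vaddr_t vaddr = sos_kmem_vmm_alloc(2, SOS_KMEM_VMM_MAP);
  if (! vaddr)
    return; /* Out of kernel virtual space or physical memory */

  /* The area is mapped read/write in kernel space */
  ((char*)vaddr)[0] = 42;

  /* Release it: the address passed back must be the base address
     returned by sos_kmem_vmm_alloc() */
  sos_kmem_vmm_free(vaddr);
}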