paging.c
sos_ret_t sos_paging_unmap_interval(sos_vaddr_t vaddr,
                                    sos_size_t size)
{
  sos_ret_t retval = 0;

  if (! SOS_IS_PAGE_ALIGNED(vaddr))
    return -SOS_EINVAL;
  if (! SOS_IS_PAGE_ALIGNED(size))
    return -SOS_EINVAL;

  /* Unmap the interval page by page; the return value is the number
     of bytes actually unmapped */
  for ( ;
        size >= SOS_PAGE_SIZE ;
        vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE)
    if (SOS_OK == sos_paging_unmap(vaddr))
      retval += SOS_PAGE_SIZE;

  return retval;
}


sos_ui32_t sos_paging_get_prot(sos_vaddr_t vaddr)
{
  sos_ui32_t retval;

  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte *pt = (struct x86_pte*)
    (SOS_PAGING_MIRROR_VADDR + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address? */
  if (! pd[index_in_pd].present)
    return SOS_VM_MAP_PROT_NONE;
  if (! pt[index_in_pt].present)
    return SOS_VM_MAP_PROT_NONE;

  /* The default access right of an available page is "read" on x86 */
  retval = SOS_VM_MAP_PROT_READ;
  if (pd[index_in_pd].write && pt[index_in_pt].write)
    retval |= SOS_VM_MAP_PROT_WRITE;

  return retval;
}


sos_ret_t sos_paging_set_prot(sos_vaddr_t vaddr,
                              sos_ui32_t  new_prot)
{
  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte *pt = (struct x86_pte*)
    (SOS_PAGING_MIRROR_VADDR + SOS_PAGE_SIZE*index_in_pd);

  /* The EXEC permission is ignored on x86 */
  new_prot &= ~SOS_VM_MAP_PROT_EXEC;

  /* Check flags */
  if (new_prot & ~(SOS_VM_MAP_PROT_READ | SOS_VM_MAP_PROT_WRITE))
    return -SOS_EINVAL;
  if (! (new_prot & SOS_VM_MAP_PROT_READ))
    /* The x86 READ flag is always set by default */
    return -SOS_ENOSUP;

  /* No page mapped at this address? */
  if (! pd[index_in_pd].present)
    return -SOS_EINVAL;
  if (! pt[index_in_pt].present)
    return -SOS_EINVAL;

  /* Update the access rights */
  pt[index_in_pt].write = ((new_prot & SOS_VM_MAP_PROT_WRITE) != 0);
  invlpg(vaddr);

  return SOS_OK;
}


sos_ret_t sos_paging_set_prot_of_interval(sos_vaddr_t vaddr,
                                          sos_size_t  size,
                                          sos_ui32_t  new_prot)
{
  if (! SOS_IS_PAGE_ALIGNED(vaddr))
    return -SOS_EINVAL;
  if (! SOS_IS_PAGE_ALIGNED(size))
    return -SOS_EINVAL;

  for ( ;
        size >= SOS_PAGE_SIZE ;
        vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE)
    sos_paging_set_prot(vaddr, new_prot);

  return SOS_OK;
}
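

/* Illustrative sketch (not in the original file): a minimal, hedged
   example of how the protection helpers above compose. The function
   name and the region parameters are hypothetical, and this assumes
   it runs in kernel mode with the current context's mirroring
   active. */
static void example_write_protect_region(sos_vaddr_t region,
                                         sos_size_t  nbytes)
{
  /* Downgrade every mapped page of the interval to read-only */
  sos_paging_set_prot_of_interval(region, nbytes, SOS_VM_MAP_PROT_READ);

  /* The first page should now report READ but not WRITE */
  SOS_ASSERT_FATAL(! (sos_paging_get_prot(region)
                      & SOS_VM_MAP_PROT_WRITE));
}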
sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
{
  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd    = virt_to_pd_index(vaddr);
  unsigned index_in_pt    = virt_to_pt_index(vaddr);
  unsigned offset_in_page = virt_to_page_offset(vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte *pt = (struct x86_pte*)
    (SOS_PAGING_MIRROR_VADDR + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address? */
  if (! pd[index_in_pd].present)
    return (sos_paddr_t)NULL;
  if (! pt[index_in_pt].present)
    return (sos_paddr_t)NULL;

  return (pt[index_in_pt].paddr << 12) + offset_in_page;
}


/* *************************************************
 * Functions restricted to the mm_context module
 */

sos_paddr_t sos_paging_get_current_PD_paddr(void)
{
  struct x86_pdbr pdbr;
  asm volatile("movl %%cr3, %0\n" : "=r"(pdbr));
  return (pdbr.pd_paddr << 12);
}


sos_ret_t sos_paging_set_current_PD_paddr(sos_paddr_t paddr_PD)
{
  struct x86_pdbr pdbr;

  SOS_ASSERT_FATAL(paddr_PD != 0);
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(paddr_PD));

  /* Setup the value of the PDBR */
  memset(& pdbr, 0x0, sizeof(struct x86_pdbr)); /* Reset the PDBR */
  pdbr.pd_paddr = (paddr_PD >> 12);

  /* Configure the MMU according to the PDBR */
  asm volatile ("movl %0,%%cr3\n" :: "r"(pdbr));

  return SOS_OK;
}


sos_ret_t sos_paging_dispose(sos_vaddr_t vaddr_PD)
{
  x86_pde_val_t *pd = (x86_pde_val_t*) vaddr_PD;
  x86_pte_val_t *pt;
  int index_in_pd;

  /* Allocate 1 page in kernel space to map the PTs, in order to
     unreference the physical pages they reference */
  pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0);
  if (! pt)
    return -SOS_ENOMEM;

  /* (Nothing to do in kernel space) */

  /* Reset all the PTs in user space */
  for (index_in_pd = (SOS_PAGING_BASE_USER_ADDRESS >> 22) ;
       index_in_pd < 1024 ; /* 1 PDE = 1 PT = 1024 pages = 4MB */
       index_in_pd ++)
    {
      sos_paddr_t paddr_pt = (pd[index_in_pd].pde.pt_paddr << 12);
      int index_in_pt;

      /* Nothing to do if there is no PT */
      if (! pd[index_in_pd].pde.present)
        {
          pd[index_in_pd].ui32 = 0;
          continue;
        }

      /* Map this PT inside the kernel */
      SOS_ASSERT_FATAL(SOS_OK
                       == sos_paging_map(paddr_pt,
                                         (sos_vaddr_t)pt, FALSE,
                                         SOS_VM_MAP_PROT_READ
                                         | SOS_VM_MAP_PROT_WRITE));

      /* Reset all the mappings in this PT */
      for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++)
        {
          /* Ignore unmapped PTEs */
          if (! pt[index_in_pt].pte.present)
            {
              pt[index_in_pt].ui32 = 0;
              continue;
            }

          /* Unreference the associated page */
          sos_physmem_unref_physpage(pt[index_in_pt].pte.paddr << 12);

          /* Decrease the occupation count of the PT */
          sos_physmem_dec_physpage_occupation(paddr_pt);

          /* Reset the PTE */
          pt[index_in_pt].ui32 = 0;
        }

      /* Unmap the PT */
      SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)pt));

      /* Reset the PDE */
      pd[index_in_pd].ui32 = 0;

      /* Unreference the PT */
      sos_physmem_unref_physpage(paddr_pt);
    }

  /* Free the kernel space used for the temporary PT mapping */
  SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)pt));

  return SOS_OK;
}
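

/* Illustrative sketch (not in the original file; the real declaration
   lives in the SOS paging headers): the PTE bit-field layout assumed
   by the "paddr << 12" conversions above, following the Intel 32-bit
   (non-PAE) page-table entry format. Field names below are guesses
   patterned on the code in this file. */
struct x86_pte_sketch
{
  sos_ui32_t present        :1;  /* Bit 0: page mapped in RAM? */
  sos_ui32_t write          :1;  /* Bit 1: writable? */
  sos_ui32_t user           :1;  /* Bit 2: user-mode accessible? */
  sos_ui32_t write_through  :1;  /* Bit 3 */
  sos_ui32_t cache_disabled :1;  /* Bit 4 */
  sos_ui32_t accessed       :1;  /* Bit 5: set by the MMU on access */
  sos_ui32_t dirty          :1;  /* Bit 6: set by the MMU on write */
  sos_ui32_t zero           :1;  /* Bit 7 */
  sos_ui32_t global_page    :1;  /* Bit 8 */
  sos_ui32_t custom         :3;  /* Bits 9-11: free for OS use */
  sos_ui32_t paddr          :20; /* Bits 12-31: physical frame number,
                                    hence the "<< 12" conversions */
};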
sos_ret_t sos_paging_copy_kernel_space(sos_vaddr_t dest_vaddr_PD,
                                       sos_vaddr_t src_vaddr_PD)
{
  x86_pde_val_t *src_pd       = (x86_pde_val_t*) src_vaddr_PD;
  x86_pde_val_t *dest_pd      = (x86_pde_val_t*) dest_vaddr_PD;
  sos_paddr_t   dest_paddr_PD = sos_paging_get_paddr(dest_vaddr_PD);
  x86_pde_val_t mirror_pde;
  int index_in_pd;

  /* Fill the destination PD with zeros */
  memset((void*)dest_vaddr_PD, 0x0, SOS_PAGE_SIZE);

  /* Synchronize it with the master kernel MMU context. Stop just
     before the mirroring! */
  for (index_in_pd = 0 ;
       index_in_pd < (SOS_PAGING_MIRROR_VADDR >> 22) ; /* 1 PDE = 1 PT
                                                          = 1024 pages
                                                          = 4MB */
       index_in_pd ++)
    {
      /* Copy the master's configuration */
      dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32;

      /* We DON'T mark the underlying PT and pages as referenced,
         because all the PDs are equivalent in kernel space: as soon
         as a page is mapped in the kernel, it is mapped in X address
         spaces, and as soon as it is unmapped by one address space,
         it is unmapped in all the others. So, for X address spaces,
         the reference counter will always be either 0 or X, never
         anything in between: maintaining it correctly would be of no
         use and would only cost time to update. */
    }

  /* Set up the mirroring for the new address space */
  mirror_pde.ui32 = 0;
  mirror_pde.pde.present  = TRUE;
  mirror_pde.pde.write    = 1;
  mirror_pde.pde.user     = 0; /* This is a KERNEL PDE */
  mirror_pde.pde.pt_paddr = (dest_paddr_PD >> 12);
  dest_pd[SOS_PAGING_MIRROR_VADDR >> 22].ui32 = mirror_pde.ui32;

  return SOS_OK;
}


sos_ret_t sos_paging_copy_user_space(sos_vaddr_t dest_vaddr_PD,
                                     sos_vaddr_t src_vaddr_PD)
{
  x86_pde_val_t *src_pd  = (x86_pde_val_t*) src_vaddr_PD;
  x86_pde_val_t *dest_pd = (x86_pde_val_t*) dest_vaddr_PD;
  x86_pte_val_t *tmp_src_pt, *tmp_dest_pt;
  int index_in_pd;

  /* Allocate two single pages in kernel space to map the PTs, in
     order to copy them from source to destination */
  tmp_src_pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0);
  if (! tmp_src_pt)
    return -SOS_ENOMEM;

  tmp_dest_pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0);
  if (! tmp_dest_pt)
    {
      /* Release the page that was successfully allocated above */
      sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt);
      return -SOS_ENOMEM;
    }

  /* Copy each used PT from source to destination */
  for (index_in_pd = (SOS_PAGING_BASE_USER_ADDRESS >> 22) ;
       index_in_pd < 1024 ; /* 1 PDE = 1 PT = 1024 pages = 4MB */
       index_in_pd ++)
    {
      sos_paddr_t paddr_dest_pt;
      int index_in_pt;

      /* We first literally copy the source PDE into the destination
         PDE. However, bear in mind that, in the end, both won't
         reference the same physical PT: the destination PDE is
         updated (below) to point to its own new PT */
      dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32;

      /* Ignore unused PTs */
      if (! src_pd[index_in_pd].pde.present)
        continue;

      /* Allocate the destination PT */
      paddr_dest_pt = sos_physmem_ref_physpage_new(TRUE);
      if (NULL == (void*)paddr_dest_pt)
        {
          sos_paging_dispose((sos_vaddr_t)dest_vaddr_PD);

          /* Release the temporary kernel space used for the copy */
          sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt);
          sos_kmem_vmm_free((sos_vaddr_t)tmp_dest_pt);

          return -SOS_ENOMEM;
        }

      /* Map the source and destination PTs */
      SOS_ASSERT_FATAL(SOS_OK
                       == sos_paging_map(src_pd[index_in_pd].pde.pt_paddr << 12,
                                         (sos_vaddr_t)tmp_src_pt, FALSE,
                                         SOS_VM_MAP_PROT_READ));
      SOS_ASSERT_FATAL(SOS_OK
                       == sos_paging_map(paddr_dest_pt,
                                         (sos_vaddr_t)tmp_dest_pt, FALSE,
                                         SOS_VM_MAP_PROT_READ
                                         | SOS_VM_MAP_PROT_WRITE));

      /* Copy the contents of the source PT into the destination PT,
         updating the reference counts of the pages */
      for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++)
        {
          /* Copy the source PTE */
          tmp_dest_pt[index_in_pt].ui32 = tmp_src_pt[index_in_pt].ui32;

          /* Ignore non-present pages */
          if (! tmp_dest_pt[index_in_pt].pte.present)
            continue;

          /* Reset the dirty/accessed flags */
          tmp_dest_pt[index_in_pt].pte.accessed = 0;
          tmp_dest_pt[index_in_pt].pte.dirty    = 0;

          /* Increase the reference count of the shared page */
          sos_physmem_ref_physpage_at(tmp_src_pt[index_in_pt].pte.paddr << 12);

          /* Increase the occupation count of the destination PT */
          sos_physmem_inc_physpage_occupation(paddr_dest_pt);
        }

      /* Unmap the temporary PTs */
      SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)tmp_src_pt));
      SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)tmp_dest_pt));

      /* Update the destination PDE */
      dest_pd[index_in_pd].pde.pt_paddr = (paddr_dest_pt >> 12);

      /* Reset the dirty/accessed flags */
      dest_pd[index_in_pd].pde.accessed = 0;
    }

  /* Release the temporary kernel space used for the copy */
  SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt));
  SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)tmp_dest_pt));

  return SOS_OK;
}
sos_ret_t sos_paging_prepare_COW(sos_uaddr_t base_address,
                                 sos_size_t  length)
{
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_address));
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(length));
  SOS_ASSERT_FATAL(SOS_PAGING_BASE_USER_ADDRESS <= base_address);

  /* Mark read-only all the pages of the interval that are already
     mapped in physical memory */
  for ( ;
        length > 0 ;
        length -= SOS_PAGE_SIZE, base_address += SOS_PAGE_SIZE)
    {
      sos_paging_set_prot(base_address, SOS_VM_MAP_PROT_READ);
    }

  return SOS_OK;
}
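

/* Illustrative sketch (not in the original file) of how the routines
   above might chain at fork time. The function name and the
   writable-region parameters are hypothetical, and error unwinding is
   simplified. Assumes the caller runs in the parent context, so that
   sos_paging_prepare_COW() write-protects the parent's pages. */
static sos_ret_t example_clone_address_space(sos_vaddr_t new_PD,
                                             sos_vaddr_t old_PD,
                                             sos_uaddr_t wr_base,
                                             sos_size_t  wr_len)
{
  sos_ret_t retval;

  /* Share the kernel mappings and install the new PD's mirroring */
  retval = sos_paging_copy_kernel_space(new_PD, old_PD);
  if (SOS_OK != retval)
    return retval;

  /* Duplicate the user-space PTs; the pages themselves stay shared */
  retval = sos_paging_copy_user_space(new_PD, old_PD);
  if (SOS_OK != retval)
    return retval;

  /* Write-protect the writable region so that the first write from
     either side faults into sos_paging_try_resolve_COW() */
  return sos_paging_prepare_COW(wr_base, wr_len);
}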
sos_ret_t sos_paging_try_resolve_COW(sos_uaddr_t uaddr)
{
  sos_ret_t refcnt;

  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(uaddr);
  unsigned index_in_pt = virt_to_pt_index(uaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte *pt = (struct x86_pte*)
    (SOS_PAGING_MIRROR_VADDR + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address? */
  if (! pd[index_in_pd].present)
    return -SOS_EFAULT;
  if (! pt[index_in_pt].present)
    return -SOS_EFAULT;

  /* A read-only PT is not supported by the kernel! */
  if (! pd[index_in_pd].write)
    return -SOS_EFAULT;

  /* A COW request makes no sense if the page is already read/write */
  SOS_ASSERT_FATAL(! pt[index_in_pt].write);

  /* We make a private copy of the page only if the currently mapped
     page is shared by more than one process */
  refcnt = sos_physmem_get_physpage_refcount(pt[index_in_pt].paddr << 12);
  SOS_ASSERT_FATAL(refcnt > 0);

  if (refcnt == 1)
    {
      /* We are the only address space referencing this page: we can
         safely turn it read/write now. Note that invlpg expects the
         VIRTUAL address of the page to invalidate */
      pt[index_in_pt].write = 1;
      invlpg(SOS_PAGE_ALIGN_INF(uaddr));
    }

  /* Otherwise we need to make a private copy of the page */
  else
    {
      sos_paddr_t new_ppage;
      sos_vaddr_t vpage_src, tmp_dest;

      /* For that, we allocate the destination page inside kernel
         space to perform the copy. We will transfer it to its final
         user-space address later */
      tmp_dest = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
      if (! tmp_dest)
        return -SOS_ENOMEM;

      /* Copy the contents of the page */
      vpage_src = SOS_PAGE_ALIGN_INF(uaddr);
      memcpy((void*)tmp_dest, (void*)vpage_src, SOS_PAGE_SIZE);

      /* Replace the original (read-only) mapping with a (read/write)
         mapping to the new page. This automatically unreferences the
         original page */
      new_ppage = sos_paging_get_paddr(tmp_dest);
      SOS_ASSERT_FATAL(new_ppage != (sos_paddr_t)NULL);
      if (SOS_OK != sos_paging_map(new_ppage, vpage_src, TRUE,
                                   SOS_VM_MAP_PROT_READ
                                   | SOS_VM_MAP_PROT_WRITE))
        {
          sos_kmem_vmm_free(tmp_dest);
          return -SOS_ENOMEM;
        }

      /* We can now unmap the page from inside the kernel and free
         the kernel VM range reserved for it */
      SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free(tmp_dest));
    }

  /* That's all, folks! */
  return SOS_OK;
}
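

/* Illustrative sketch (not the actual SOS fault handler): on a write
   fault, the CPU loads CR2 with the faulting linear address; a
   handler could first try to resolve the fault as copy-on-write
   before reporting a genuine protection error. */
static sos_ret_t example_page_fault_write(void)
{
  sos_uaddr_t faulting_uaddr;

  /* CR2 holds the linear address that caused the fault */
  asm volatile("movl %%cr2, %0" : "=r"(faulting_uaddr));

  if (SOS_OK == sos_paging_try_resolve_COW(faulting_uaddr))
    return SOS_OK; /* Resolved: retry the faulting instruction */

  /* Not a COW situation: a real fault to be handled elsewhere */
  return -SOS_EFAULT;
}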