paging.c
  pt[index_in_pt].present = TRUE;
  pt[index_in_pt].write   = (flags & SOS_VM_MAP_PROT_WRITE) ? 1 : 0;
  pt[index_in_pt].user    = (is_user_page) ? 1 : 0;
  pt[index_in_pt].paddr   = ppage_paddr >> 12;
  sos_physmem_ref_physpage_at(ppage_paddr);

  /*
   * The page is now mapped in the current address space
   */

  /* Invalidate the TLB entry for the page we just added */
  invlpg(vpage_vaddr);

  return SOS_OK;
}


sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
{
  sos_ret_t pt_dec_occupation_retval;

  /* Get the page directory and page table entry indexes for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte *pt = (struct x86_pte*)
    (SOS_PAGING_MIRROR_VADDR + SOS_PAGE_SIZE*index_in_pd);

  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_vaddr));

  /* No page mapped at this address ? */
  if (! pd[index_in_pd].present)
    return -SOS_EINVAL;
  if (! pt[index_in_pt].present)
    return -SOS_EINVAL;

  /* Unmapping anywhere in the PD mirroring is FORBIDDEN ;) */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Reclaim the physical page */
  sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

  /* Unmap the page in the page table */
  memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));

  /* Invalidate the TLB entry for the page we just unmapped */
  invlpg(vpage_vaddr);

  /* Reclaim this entry in the PT, which may free the PT */
  pt_dec_occupation_retval
    = sos_physmem_dec_physpage_occupation(pd[index_in_pd].pt_paddr << 12);
  SOS_ASSERT_FATAL(pt_dec_occupation_retval >= 0);
  if (pt_dec_occupation_retval > 0)
    /* If the PT is now completely unused... */
    {
      x86_pde_val_t u;

      /*
       * The PT is not referenced by this PD anymore
       */
      sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);

      /*
       * Reset the PDE
       */

      /* Mark the PDE as unavailable */
      u.ui32 = 0;

      /* Is it a PDE concerning the kernel space ? */
      if (vpage_vaddr < SOS_PAGING_MIRROR_VADDR)
        {
          /* Yes: synchronize the PDs of all the address spaces */
          SOS_ASSERT_FATAL(SOS_OK
                           == sos_mm_context_synch_kernel_PDE(index_in_pd,
                                                              u.ui32));
        }
      else /* We should have written "else if (vpage_vaddr >=
              SOS_PAGING_BASE_USER_ADDRESS)", but this is not needed
              because the beginning of the function already detects
              and rejects unmapping requests inside the mirroring */
        {
          /* No: the request concerns the user space, so only the
             current MMU context is affected */
          pd[index_in_pd] = u.pde;
        }

      /* Update the TLB */
      invlpg((sos_vaddr_t) pt);
    }

  return SOS_OK;
}
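
/*
 * Aside (not part of the original file): a hedged usage sketch of the
 * map/unmap pair above. It assumes the sos_paging_map() prototype
 * used later in this file (paddr, vaddr, is_user_page, flags);
 * `some_ppage` and `some_vpage` are hypothetical page-aligned
 * addresses the caller obtained elsewhere (e.g. from the physical
 * memory allocator and the kernel VMM).
 */
static sos_ret_t example_map_then_unmap(sos_paddr_t some_ppage,
                                        sos_vaddr_t some_vpage)
{
  /* Map one physical page read/write into kernel space */
  sos_ret_t retval = sos_paging_map(some_ppage, some_vpage,
                                    FALSE /* kernel page */,
                                    SOS_VM_MAP_PROT_READ
                                    | SOS_VM_MAP_PROT_WRITE);
  if (SOS_OK != retval)
    return retval;

  /* ... access the frame through some_vpage ... */

  /* Undo the mapping: this drops the reference taken at map time and
     may reclaim the PT if it becomes completely empty */
  return sos_paging_unmap(some_vpage);
}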
sos_ret_t sos_paging_unmap_interval(sos_vaddr_t vaddr, sos_size_t size)
{
  sos_ret_t retval = 0;

  if (! SOS_IS_PAGE_ALIGNED(vaddr))
    return -SOS_EINVAL;
  if (! SOS_IS_PAGE_ALIGNED(size))
    return -SOS_EINVAL;

  for ( ;
        size >= SOS_PAGE_SIZE ;
        vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE)
    if (SOS_OK == sos_paging_unmap(vaddr))
      retval += SOS_PAGE_SIZE;

  return retval;
}


sos_ui32_t sos_paging_get_prot(sos_vaddr_t vaddr)
{
  sos_ui32_t retval;

  /* Get the page directory and page table entry indexes for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte *pt = (struct x86_pte*)
    (SOS_PAGING_MIRROR_VADDR + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address ? */
  if (! pd[index_in_pd].present)
    return SOS_VM_MAP_PROT_NONE;
  if (! pt[index_in_pt].present)
    return SOS_VM_MAP_PROT_NONE;

  /* The default access right of a present page is "read" on x86 */
  retval = SOS_VM_MAP_PROT_READ;
  if (pd[index_in_pd].write && pt[index_in_pt].write)
    retval |= SOS_VM_MAP_PROT_WRITE;

  return retval;
}


sos_ret_t sos_paging_set_prot(sos_vaddr_t vaddr, sos_ui32_t new_prot)
{
  /* Get the page directory and page table entry indexes for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte *pt = (struct x86_pte*)
    (SOS_PAGING_MIRROR_VADDR + SOS_PAGE_SIZE*index_in_pd);

  /* The EXEC permission is ignored on x86 */
  new_prot &= ~SOS_VM_MAP_PROT_EXEC;

  /* Check the flags */
  if (new_prot & ~(SOS_VM_MAP_PROT_READ | SOS_VM_MAP_PROT_WRITE))
    return -SOS_EINVAL;
  if (! (new_prot & SOS_VM_MAP_PROT_READ))
    /* The x86 READ flag is always set by default */
    return -SOS_ENOSUP;

  /* No page mapped at this address ? */
  if (! pd[index_in_pd].present)
    return -SOS_EINVAL;
  if (! pt[index_in_pt].present)
    return -SOS_EINVAL;

  /* Update the access rights */
  pt[index_in_pt].write = ((new_prot & SOS_VM_MAP_PROT_WRITE) != 0);
  invlpg(vaddr);

  return SOS_OK;
}


sos_ret_t sos_paging_set_prot_of_interval(sos_vaddr_t vaddr,
                                          sos_size_t  size,
                                          sos_ui32_t  new_prot)
{
  if (! SOS_IS_PAGE_ALIGNED(vaddr))
    return -SOS_EINVAL;
  if (! SOS_IS_PAGE_ALIGNED(size))
    return -SOS_EINVAL;

  for ( ;
        size >= SOS_PAGE_SIZE ;
        vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE)
    sos_paging_set_prot(vaddr, new_prot);

  return SOS_OK;
}
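
/*
 * Aside (not part of the original file): a sketch of how the
 * protection accessors above combine, e.g. to write-protect a single
 * mapped page, the kind of operation a copy-on-write implementation
 * would perform. The function name is hypothetical; the
 * SOS_VM_MAP_PROT_* flags and return conventions are those of this
 * module.
 */
static sos_ret_t example_write_protect(sos_vaddr_t vaddr)
{
  sos_ui32_t prot = sos_paging_get_prot(vaddr);

  /* Nothing mapped here: nothing to protect */
  if (SOS_VM_MAP_PROT_NONE == prot)
    return -SOS_EINVAL;

  /* Keep READ (mandatory on x86, see sos_paging_set_prot above) and
     drop WRITE */
  return sos_paging_set_prot(vaddr, prot & ~SOS_VM_MAP_PROT_WRITE);
}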
sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
{
  /* Get the page directory and page table entry indexes, and the
     offset in the page, for this address */
  unsigned index_in_pd    = virt_to_pd_index(vaddr);
  unsigned index_in_pt    = virt_to_pt_index(vaddr);
  unsigned offset_in_page = virt_to_page_offset(vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte *pt = (struct x86_pte*)
    (SOS_PAGING_MIRROR_VADDR + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address ? */
  if (! pd[index_in_pd].present)
    return (sos_paddr_t)NULL;
  if (! pt[index_in_pt].present)
    return (sos_paddr_t)NULL;

  return (pt[index_in_pt].paddr << 12) + offset_in_page;
}


/* *************************************************
 * Functions restricted to the mm_context module
 */

sos_paddr_t sos_paging_get_current_PD_paddr(void)
{
  struct x86_pdbr pdbr;
  asm volatile("movl %%cr3, %0\n" : "=r"(pdbr));
  return (pdbr.pd_paddr << 12);
}


sos_ret_t sos_paging_set_current_PD_paddr(sos_paddr_t paddr_PD)
{
  struct x86_pdbr pdbr;

  SOS_ASSERT_FATAL(paddr_PD != 0);
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(paddr_PD));

  /* Set up the value of the PDBR */
  memset(& pdbr, 0x0, sizeof(struct x86_pdbr)); /* Reset the PDBR */
  pdbr.pd_paddr = (paddr_PD >> 12);

  /* Configure the MMU according to the PDBR */
  asm volatile("movl %0, %%cr3\n" :: "r"(pdbr));

  return SOS_OK;
}


sos_ret_t sos_paging_dispose(sos_vaddr_t vaddr_PD)
{
  x86_pde_val_t *pd = (x86_pde_val_t*) vaddr_PD;
  x86_pte_val_t *pt;
  int index_in_pd;

  /* Allocate 1 page in kernel space to map the PTs, so that we can
     unreference the physical pages they reference */
  pt = (x86_pte_val_t*) sos_kmem_vmm_alloc(1, 0);
  if (! pt)
    return -SOS_ENOMEM;

  /* (Nothing to do in kernel space) */

  /* Reset all the PTs in user space */
  for (index_in_pd = (SOS_PAGING_BASE_USER_ADDRESS >> 22) ;
       index_in_pd < 1024 ; /* 1 PDE = 1 PT = 1024 pages = 4MB */
       index_in_pd ++)
    {
      sos_paddr_t paddr_pt = (pd[index_in_pd].pde.pt_paddr << 12);
      int index_in_pt;

      /* Nothing to do if there is no PT */
      if (! pd[index_in_pd].pde.present)
        {
          pd[index_in_pd].ui32 = 0;
          continue;
        }

      /* Map this PT inside the kernel */
      SOS_ASSERT_FATAL(SOS_OK
                       == sos_paging_map(paddr_pt,
                                         (sos_vaddr_t) pt, FALSE,
                                         SOS_VM_MAP_PROT_READ
                                         | SOS_VM_MAP_PROT_WRITE));

      /* Reset all the mappings in this PT */
      for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++)
        {
          /* Ignore unmapped PTEs */
          if (! pt[index_in_pt].pte.present)
            {
              pt[index_in_pt].ui32 = 0;
              continue;
            }

          /* Unreference the associated page */
          sos_physmem_unref_physpage(pt[index_in_pt].pte.paddr << 12);

          /* Decrease the occupation count of the PT */
          sos_physmem_dec_physpage_occupation(paddr_pt);

          /* Reset the PTE */
          pt[index_in_pt].ui32 = 0;
        }

      /* Unmap the PT */
      SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t) pt));

      /* Reset the PDE */
      pd[index_in_pd].ui32 = 0;

      /* Unreference the PT */
      sos_physmem_unref_physpage(paddr_pt);
    }

  /* Release the kernel space used for the temporary PT mapping */
  SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t) pt));

  return SOS_OK;
}
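
/*
 * Aside (not part of the original file): a sketch of how the
 * mm_context module might use the two CR3 accessors above when
 * switching address spaces. The function name and `new_PD_paddr`
 * parameter are hypothetical. Reloading CR3 flushes all non-global
 * TLB entries, so the comparison avoids a needless (and costly) TLB
 * flush when the destination PD is already the current one.
 */
static void example_switch_address_space(sos_paddr_t new_PD_paddr)
{
  if (sos_paging_get_current_PD_paddr() != new_PD_paddr)
    sos_paging_set_current_PD_paddr(new_PD_paddr);
}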
sos_ret_t sos_paging_copy_kernel_space(sos_vaddr_t dest_vaddr_PD,
                                       sos_vaddr_t src_vaddr_PD)
{
  x86_pde_val_t *src_pd        = (x86_pde_val_t*) src_vaddr_PD;
  x86_pde_val_t *dest_pd       = (x86_pde_val_t*) dest_vaddr_PD;
  sos_paddr_t    dest_paddr_PD = sos_paging_get_paddr(dest_vaddr_PD);
  x86_pde_val_t  mirror_pde;
  int index_in_pd;

  /* Fill the destination PD with zeros */
  memset((void*) dest_vaddr_PD, 0x0, SOS_PAGE_SIZE);

  /* Synchronize it with the master kernel MMU context. Stop just
     before the mirroring ! */
  for (index_in_pd = 0 ;
       index_in_pd < (SOS_PAGING_MIRROR_VADDR >> 22) ;
       /* 1 PDE = 1 PT = 1024 pages = 4MB */
       index_in_pd ++)
    {
      /* Copy the master's configuration */
      dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32;

      /* We DON'T mark the underlying PT and pages as referenced,
         because all the PDs are equivalent in kernel space: as soon
         as a page is mapped in the kernel, it is mapped in all X
         address spaces, and as soon as it is unmapped in 1 address
         space, it is unmapped in all the others. Hence, with X
         address spaces, the reference counter would always be either
         0 or X, never anything in between: keeping it up to date
         would be useless and would only waste time. */
    }

  /* Set up the mirroring for the new address space */
  mirror_pde.ui32 = 0;
  mirror_pde.pde.present  = TRUE;
  mirror_pde.pde.write    = 1;
  mirror_pde.pde.user     = 0; /* This is a KERNEL PDE */
  mirror_pde.pde.pt_paddr = (dest_paddr_PD >> 12);
  dest_pd[SOS_PAGING_MIRROR_VADDR >> 22].ui32 = mirror_pde.ui32;

  return SOS_OK;
}
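
/*
 * Aside (not part of the original file): the functions above rely on
 * virt_to_pd_index(), virt_to_pt_index() and virt_to_page_offset(),
 * which are defined earlier in paging.c. A plausible reconstruction
 * is sketched here (names prefixed to mark them as assumptions),
 * based on the classic x86 decomposition of a 32-bit virtual address
 * with 4kB pages: 10 bits of PD index, 10 bits of PT index, 12 bits
 * of offset inside the page.
 */
#define example_virt_to_pd_index(vaddr)    (((unsigned)(vaddr)) >> 22)
#define example_virt_to_pt_index(vaddr)    ((((unsigned)(vaddr)) >> 12) & 0x3ff)
#define example_virt_to_page_offset(vaddr) (((unsigned)(vaddr)) & (SOS_PAGE_SIZE - 1))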