/* umem_vmm.c — SOS userland virtual-memory manager (VR/address-space handling).
   (Web-scraper banner removed.) */
next_vr = vr->prev_in_as; if (next_vr->start <= vr->start) next_vr = NULL; /* * Compute new offset inside the mapped resource, if any */ /* Don't allow to resize if the uaddr goes beyond the 'offset 0' of the resource */ if ( (*new_uaddr < vr->start) && (vr->start - *new_uaddr > vr->offset_in_resource) ) return -SOS_EINVAL; /* Compute new offset in the resource (overflow-safe) */ if (vr->start > *new_uaddr) new_offset_in_resource = vr->offset_in_resource - (vr->start - *new_uaddr); else new_offset_in_resource = vr->offset_in_resource + (*new_uaddr - vr->start); /* If other VRs would be affected by this resizing, then the VR must be moved */ if (prev_vr && (prev_vr->start + prev_vr->size > *new_uaddr)) must_move_vr |= TRUE; if (next_vr && (next_vr->start < *new_uaddr + new_size)) must_move_vr |= TRUE; /* If VR would be out-of-user-space, it must be moved */ if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS) must_move_vr |= TRUE; if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size) must_move_vr |= TRUE; /* The VR must be moved but the user forbids it */ if ( must_move_vr && !(flags & SOS_VR_REMAP_MAYMOVE) ) return -SOS_EINVAL; /* If the VR must be moved, we simply map the resource elsewhere and unmap the current VR */ if (must_move_vr) { sos_uaddr_t uaddr, result_uaddr; sos_ret_t retval; result_uaddr = *new_uaddr; retval = sos_umem_vmm_map(as, & result_uaddr, new_size, vr->access_rights, vr->flags | INTERNAL_MAP_CALLED_FROM_MREMAP, vr->mapped_resource, new_offset_in_resource); if (SOS_OK != retval) return retval; /* Remap the physical pages at their new address */ for (uaddr = vr->start ; uaddr < vr->start + vr->size ; uaddr += SOS_PAGE_SIZE) { sos_paddr_t paddr; sos_ui32_t prot; sos_uaddr_t vaddr; if (uaddr < *new_uaddr) continue; if (uaddr > *new_uaddr + new_size) continue; /* Compute destination virtual address (should be overflow-safe) */ if (vr->start >= *new_uaddr) vaddr = result_uaddr + (uaddr - vr->start) + (vr->start - *new_uaddr); else vaddr = result_uaddr 
+ (uaddr - vr->start) - (*new_uaddr - vr->start); paddr = sos_paging_get_paddr(uaddr); if (! paddr) /* No physical page mapped at this address yet */ continue; prot = sos_paging_get_prot(uaddr); SOS_ASSERT_FATAL(prot); /* Remap it at its destination address */ retval = sos_paging_map(paddr, vaddr, TRUE, prot); if (SOS_OK != retval) { sos_umem_vmm_unmap(as, result_uaddr, new_size); return retval; } } retval = sos_umem_vmm_unmap(as, vr->start, vr->size); if (SOS_OK != retval) { sos_umem_vmm_unmap(as, result_uaddr, new_size); return retval; } *new_uaddr = result_uaddr; return retval; } /* Otherwise we simply resize the VR, taking care of unmapping what's been unmapped */ if (*new_uaddr + new_size < vr->start + vr->size) sos_umem_vmm_unmap(as, *new_uaddr + new_size, vr->start + vr->size - (*new_uaddr + new_size)); else { as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, *new_uaddr + new_size - (vr->start + vr->size), 0, vr->access_rights); vr->size += *new_uaddr + new_size - (vr->start + vr->size); } if (*new_uaddr > vr->start) sos_umem_vmm_unmap(as, vr->start, *new_uaddr - vr->start); else { as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, vr->start - *new_uaddr, 0, vr->access_rights); vr->size += vr->start - *new_uaddr; vr->start = *new_uaddr; vr->offset_in_resource = new_offset_in_resource; } SOS_ASSERT_FATAL(vr->start == *new_uaddr); SOS_ASSERT_FATAL(vr->size == new_size); SOS_ASSERT_FATAL(vr->offset_in_resource == new_offset_in_resource); return SOS_OK;}sos_ret_t sos_umem_vmm_try_resolve_page_fault(sos_uaddr_t uaddr, sos_bool_t write_access, sos_bool_t user_access){ struct sos_process *process = sos_thread_get_current()->process; struct sos_umem_vmm_as *as; struct sos_umem_vmm_vr *vr; if (! process) return -SOS_EFAULT; as = sos_process_get_address_space(process); if (! as) return -SOS_EFAULT; vr = find_first_intersecting_vr(as, uaddr, 1); if (! 
vr)
    return -SOS_EFAULT; /* No VR maps the faulting address */

  /* Write on a read-only VR */
  if (write_access && !(vr->access_rights & SOS_VM_MAP_PROT_WRITE))
    return -SOS_EFAULT;

  /* Write on a COW VR */
  if (write_access && !(vr->flags & SOS_VR_MAP_SHARED))
    {
      /* Try to resolve it as a copy-on-write fault first */
      if (SOS_OK == sos_paging_try_resolve_COW(uaddr))
        {
          as->pgflt_cow ++;
          return SOS_OK;
        }
    }

  /* Ask the underlying resource to resolve the page fault */
  if (SOS_OK != vr->ops->page_in(vr, uaddr, write_access))
    {
      as->pgflt_invalid ++;
      return -SOS_EFAULT;
    }

  /* One more physical page is now owned by this address space */
  as->phys_total += SOS_PAGE_SIZE;
  as->pgflt_page_in ++;

  /* For a private mapping, keep the mapping read-only (so that a
     later write can be caught and resolved as COW — see
     sos_paging_prepare_COW) */
  if (!(vr->flags & SOS_VR_MAP_SHARED))
    {
      sos_paging_prepare_COW(SOS_PAGE_ALIGN_INF(uaddr),
                             SOS_PAGE_SIZE);
    }

  return SOS_OK;
}


/**
 * Record the start address of the process heap in the address space.
 * Must be called exactly once per address space (heap_start must
 * still be 0), before any call to sos_umem_vmm_brk().
 */
sos_ret_t
sos_umem_vmm_init_heap(struct sos_umem_vmm_as * as,
                       sos_uaddr_t heap_start)
{
  SOS_ASSERT_FATAL(! as->heap_start);

  as->heap_start = heap_start;
  as->heap_size = 0;
  return SOS_OK;
}


/**
 * Move the top of the heap (Unix brk() semantics).  Returns the new
 * top of the heap; with new_top_uaddr == 0, returns the current top
 * unchanged; returns NULL on error.
 */
sos_uaddr_t
sos_umem_vmm_brk(struct sos_umem_vmm_as * as,
                 sos_uaddr_t new_top_uaddr)
{
  sos_uaddr_t new_start;
  sos_size_t new_size;
  SOS_ASSERT_FATAL(as->heap_start);

  /* brk(0): query the current top of the heap */
  if (! new_top_uaddr)
    return as->heap_start + as->heap_size;

  /* No change requested */
  if (new_top_uaddr == as->heap_start + as->heap_size)
    return as->heap_start + as->heap_size;

  /* The heap cannot end before it starts */
  if (new_top_uaddr < as->heap_start)
    return (sos_uaddr_t)NULL;

  /* Round the requested top up to a page boundary */
  new_top_uaddr = SOS_PAGE_ALIGN_SUP(new_top_uaddr);
  new_start = as->heap_start;
  new_size = new_top_uaddr - as->heap_start;

  /* First call to brk: we must map /dev/zero */
  if (!
as->heap_size)
    {
      if (SOS_OK != sos_dev_zero_map(as, & as->heap_start, new_size,
                                     SOS_VM_MAP_PROT_READ
                                     | SOS_VM_MAP_PROT_WRITE,
                                     0 /* private non-fixed */))
        return (sos_uaddr_t)NULL;

      as->heap_size = new_size;
      return as->heap_start + as->heap_size;
    }

  /* Otherwise we just have to unmap or resize the region */
  if (new_size <= 0)
    {
      if (SOS_OK != sos_umem_vmm_unmap(as, as->heap_start,
                                       as->heap_size))
        return (sos_uaddr_t)NULL;
    }
  else
    {
      if (SOS_OK != sos_umem_vmm_resize(as, as->heap_start,
                                        as->heap_size,
                                        & new_start, new_size, 0))
        return (sos_uaddr_t)NULL;
    }

  /* The heap region is not expected to move */
  SOS_ASSERT_FATAL(new_start == as->heap_start);
  as->heap_size = new_size;
  return new_top_uaddr;
}


/**
 * Return the VR enclosing uaddr or, failing that, the first VR
 * located after uaddr (the VR list appears to be kept sorted by start
 * address).  Return NULL when uaddr is outside user space or no VR
 * lies at or after it.
 */
static struct sos_umem_vmm_vr *
find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
                          sos_uaddr_t uaddr)
{
  struct sos_umem_vmm_vr *vr;
  int nb_vr;

  if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    return NULL;
  if (uaddr > SOS_PAGING_TOP_USER_ADDRESS)
    return NULL;

  list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
    {
      /* Equivalent to "if (uaddr < vr->start + vr->size)" but more
         robust (resilient to integer overflows) */
      if (uaddr <= vr->start + (vr->size - 1))
        return vr;
    }

  return NULL;
}


/**
 * Return the first VR intersecting [start_uaddr .. start_uaddr+size),
 * or NULL when no VR does.
 */
static struct sos_umem_vmm_vr *
find_first_intersecting_vr(struct sos_umem_vmm_as * as,
                           sos_uaddr_t start_uaddr, sos_size_t size)
{
  struct sos_umem_vmm_vr * vr;
  vr = find_enclosing_or_next_vr(as, start_uaddr);
  if (! vr)
    return NULL;

  /* The candidate VR starts entirely after the interval: no overlap */
  if (start_uaddr + size <= vr->start)
    return NULL;

  return vr;
}


/**
 * Find a free interval of "size" bytes in user space, starting the
 * search at hint_uaddr (clamped into user space) and wrapping around
 * once.  Return the start of the hole, or NULL when none is large
 * enough.
 */
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
                         sos_uaddr_t hint_uaddr, sos_size_t size)
{
  struct sos_umem_vmm_vr * initial_vr, * vr;

  if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;

  /* The requested size cannot fit between the hint and the top of
     user space */
  if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size + 1)
    return (sos_uaddr_t)NULL;

  initial_vr = vr = find_enclosing_or_next_vr(as, hint_uaddr);
  if (! vr)
    /* Great, there is nothing after ! */
    return hint_uaddr;

  /* Scan the remaining VRs in the list */
  do
    {
      /* Is there enough space /before/ that VR ?
*/
      if (hint_uaddr + size <= vr->start)
        /* Great ! */
        return hint_uaddr;

      /* Is there any VR /after/ this one, or do we have to wrap back
         at the begining of the user space ? */
      if (vr->next_in_as->start >= hint_uaddr)
        /* Ok, the next VR is really after us */
        hint_uaddr = vr->start + vr->size;
      else
        {
          /* No: wrapping up */

          /* Is there any space before the end of user space ? */
          if (hint_uaddr <= SOS_PAGING_TOP_USER_ADDRESS - size)
            return hint_uaddr;

          hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;
        }

      /* Prepare to look after this VR */
      vr = vr->next_in_as;
    }
  while (vr != initial_vr);

  /* Reached the end of the list and did not find anything ?... Look
     at the space after the last VR */
  return (sos_uaddr_t)NULL;
}


/**
 * Update the address-space memory-usage statistics after "size" bytes
 * change protection from prev_access_rights to new_access_rights.
 * is_shared selects whether the shared-mapping counters (vm_shrd) are
 * updated in addition to the totals (vm_total).
 */
static void
as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
                                   sos_bool_t is_shared,
                                   sos_size_t size,
                                   sos_ui32_t prev_access_rights,
                                   sos_ui32_t new_access_rights)
{
  if (prev_access_rights == new_access_rights)
    return;

  /* Add/subtract "size" to/from one statistics field; underflow is a
     fatal error.  Uses the GCC statement-expression extension.
     NOTE(review): "_UPDATE_VMSTAT" (underscore + uppercase) is a
     reserved identifier, and neither macro is #undef'd after use —
     worth cleaning up. */
#define _UPDATE_VMSTAT(field,is_increment) \
  ({ if (is_increment > 0) \
       as->field += size; \
     else \
       { SOS_ASSERT_FATAL(as->field >= size); as->field -= size; } })

  /* Update the total counter, and the shared counter too for shared
     mappings; the shared count must never exceed the total */
#define UPDATE_VMSTAT(field,is_increment) \
  ({ if (is_shared) _UPDATE_VMSTAT(vm_shrd.field, is_increment); \
     _UPDATE_VMSTAT(vm_total.field, is_increment); \
     SOS_ASSERT_FATAL(as->vm_total.field >= as->vm_shrd.field); })

  /* Transition into writable: count as rw, uncount a former ro */
  if ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
       && !(prev_access_rights & SOS_VM_MAP_PROT_WRITE))
    {
      UPDATE_VMSTAT(rw, +1);
      if (prev_access_rights & SOS_VM_MAP_PROT_READ)
        UPDATE_VMSTAT(ro, -1);
    }
  /* Transition out of writable: uncount rw, count ro if readable */
  else if ( !(new_access_rights & SOS_VM_MAP_PROT_WRITE)
            && (prev_access_rights & SOS_VM_MAP_PROT_WRITE))
    {
      if (new_access_rights & SOS_VM_MAP_PROT_READ)
        UPDATE_VMSTAT(ro, +1);
      UPDATE_VMSTAT(rw, -1);
    }
  /* Writability unchanged: track read-only transitions alone */
  else if (new_access_rights & SOS_VM_MAP_PROT_READ)
    UPDATE_VMSTAT(ro, +1);
  else if (!(new_access_rights & SOS_VM_MAP_PROT_READ))
    UPDATE_VMSTAT(ro, -1);

  /* Executable-page accounting */
  if ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
       && !(prev_access_rights & SOS_VM_MAP_PROT_EXEC))
    {
      UPDATE_VMSTAT(code, +1);
    }
  else if (
!(new_access_rights & SOS_VM_MAP_PROT_EXEC)
            && (prev_access_rights & SOS_VM_MAP_PROT_EXEC))
    {
      UPDATE_VMSTAT(code, -1);
    }

  /* Overall accounting: a region counts as "mapped" as soon as it has
     any access right at all */
  if (new_access_rights && !prev_access_rights)
    UPDATE_VMSTAT(overall, +1);
  else if (!new_access_rights && prev_access_rights)
    UPDATE_VMSTAT(overall, -1);
}
/* (Web-viewer keyboard-shortcut help removed — not part of the source.) */