📄 umem_vmm.c
struct sos_umem_vmm_vr_ops *
sos_umem_vmm_get_ops_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->ops;
}


sos_ui32_t sos_umem_vmm_get_prot_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->access_rights;
}


sos_ui32_t sos_umem_vmm_get_flags_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->flags;
}


struct sos_umem_vmm_mapped_resource *
sos_umem_vmm_get_mapped_resource_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->mapped_resource;
}


sos_uaddr_t sos_umem_vmm_get_start_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->start;
}


sos_size_t sos_umem_vmm_get_size_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->size;
}


sos_luoffset_t sos_umem_vmm_get_offset_in_resource(struct sos_umem_vmm_vr * vr)
{
  return vr->offset_in_resource;
}


sos_ret_t
sos_umem_vmm_set_ops_of_vr(struct sos_umem_vmm_vr * vr,
                           struct sos_umem_vmm_vr_ops * ops)
{
  /* Don't allow to overwrite any preceding VR ops */
  SOS_ASSERT_FATAL(NULL == vr->ops);

  vr->ops = ops;
  return SOS_OK;
}


/**
 * When resize asks to map the resource elsewhere, make sure not to
 * overwrite the offset_in_resource field
 */
#define INTERNAL_MAP_CALLED_FROM_MREMAP (1 << 8)

sos_ret_t
sos_umem_vmm_map(struct sos_umem_vmm_as * as,
                 sos_uaddr_t * /*in/out*/uaddr, sos_size_t size,
                 sos_ui32_t access_rights,
                 sos_ui32_t flags,
                 struct sos_umem_vmm_mapped_resource * resource,
                 sos_luoffset_t offset_in_resource)
{
  __label__ return_mmap;
  sos_uaddr_t hint_uaddr;
  struct sos_umem_vmm_vr *prev_vr, *next_vr, *vr, *preallocated_vr;
  sos_bool_t merge_with_preceding, merge_with_next, used_preallocated_vr;
  sos_bool_t internal_map_called_from_mremap
    = (flags & INTERNAL_MAP_CALLED_FROM_MREMAP);
  sos_ret_t retval = SOS_OK;

  used_preallocated_vr = FALSE;
  hint_uaddr = *uaddr;

  /* Default mapping address is NULL */
  *uaddr = (sos_vaddr_t)NULL;

  if (! resource)
    return -SOS_EINVAL;

  if (! resource->mmap)
    return -SOS_EPERM;

  if (! SOS_IS_PAGE_ALIGNED(hint_uaddr))
    return -SOS_EINVAL;

  if (size <= 0)
    return -SOS_EINVAL;
  size = SOS_PAGE_ALIGN_SUP(size);

  if (flags & SOS_VR_MAP_SHARED)
    {
      /* Make sure the mapped resource allows the required protection
         flags */
      if ( ( (access_rights & SOS_VM_MAP_PROT_READ)
             && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_READ) )
           || ( (access_rights & SOS_VM_MAP_PROT_WRITE)
                && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_WRITE) )
           || ( (access_rights & SOS_VM_MAP_PROT_EXEC)
                && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_EXEC)) )
        return -SOS_EPERM;
    }

  /* Sanity checks over the offset_in_resource parameter */
  if ( !internal_map_called_from_mremap
       && ( resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
    /* Initial offset ignored for anonymous mappings */
    {
      /* Nothing to check */
    }

  /* Make sure that the offset in resource won't overflow */
  else if (offset_in_resource + size <= offset_in_resource)
    return -SOS_EINVAL;

  /* Filter out unsupported flags */
  access_rights &= (SOS_VM_MAP_PROT_READ
                    | SOS_VM_MAP_PROT_WRITE
                    | SOS_VM_MAP_PROT_EXEC);
  flags &= (SOS_VR_MAP_SHARED
            | SOS_VR_MAP_FIXED);

  /* Pre-allocate a new VR: once we have found a valid slot inside the
     VR list, we don't want the list to be altered by another
     process */
  preallocated_vr
    = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
  if (! preallocated_vr)
    return -SOS_ENOMEM;

  /* Compute the user address of the new mapping */
  if (flags & SOS_VR_MAP_FIXED)
    {
      /*
       * The address is imposed
       */

      /* Make sure the hint_uaddr hint is valid */
      if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
        { retval = -SOS_EINVAL; goto return_mmap; }
      if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
        { retval = -SOS_EINVAL; goto return_mmap; }

      /* Unmap any overlapped VR */
      retval = sos_umem_vmm_unmap(as, hint_uaddr, size);
      if (SOS_OK != retval)
        { goto return_mmap; }
    }
  else
    {
      /*
       * A free range has to be determined
       */

      /* Find a suitable free VR */
      hint_uaddr = find_first_free_interval(as, hint_uaddr, size);
      if (! hint_uaddr)
        { retval = -SOS_ENOMEM; goto return_mmap; }
    }

  /* For anonymous resource mappings, set the initial
     offset_in_resource to the initial virtual start address in user
     space */
  if ( !internal_map_called_from_mremap
       && (resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS) )
    offset_in_resource = hint_uaddr;

  /* Lookup next and previous VR, if any. This will allow us to merge
     the regions, when possible */
  next_vr = find_enclosing_or_next_vr(as, hint_uaddr);
  if (next_vr)
    {
      /* Find previous VR, if any */
      prev_vr = next_vr->prev_in_as;

      /* The list is circular: it may happen that we looped over the
         tail of the list (ie the list is a singleton) */
      if (prev_vr->start > hint_uaddr)
        prev_vr = NULL; /* No preceding VR */
    }
  else
    {
      /* Otherwise we went beyond the last VR */
      prev_vr = list_get_tail_named(as->list_vr, prev_in_as, next_in_as);
    }

  /* Merge with preceding VR ? */
  merge_with_preceding
    = ( (NULL != prev_vr)
        && (prev_vr->mapped_resource == resource)
        && (prev_vr->offset_in_resource + prev_vr->size == offset_in_resource)
        && (prev_vr->start + prev_vr->size == hint_uaddr)
        && (prev_vr->flags == flags)
        && (prev_vr->access_rights == access_rights) );

  /* Merge with next VR ? */
  merge_with_next
    = ( (NULL != next_vr)
        && (next_vr->mapped_resource == resource)
        && (offset_in_resource + size == next_vr->offset_in_resource)
        && (hint_uaddr + size == next_vr->start)
        && (next_vr->flags == flags)
        && (next_vr->access_rights == access_rights) );

  if (merge_with_preceding && merge_with_next)
    {
      /* Widen the prev_vr VR to encompass both the new VR and the
         next_vr */
      vr = prev_vr;
      vr->size += size + next_vr->size;

      /* Remove the next_vr VR */
      list_delete_named(as->list_vr, next_vr, prev_in_as, next_in_as);
      list_delete_named(next_vr->mapped_resource->list_vr, next_vr,
                        prev_in_mapped_resource, next_in_mapped_resource);

      if (next_vr->ops && next_vr->ops->unref)
        next_vr->ops->unref(next_vr);

      sos_kmem_vmm_free((sos_vaddr_t) next_vr);
    }
  else if (merge_with_preceding)
    {
      /* Widen the prev_vr VR to encompass the new VR */
      vr = prev_vr;
      vr->size += size;
    }
  else if (merge_with_next)
    {
      /* Widen the next_vr VR to encompass the new VR */
      vr = next_vr;
      vr->start -= size;
      vr->size  += size;
    }
  else
    {
      /* Allocate a brand new VR and insert it into the list */

      vr = preallocated_vr;
      used_preallocated_vr = TRUE;

      vr->start              = hint_uaddr;
      vr->size               = size;
      vr->access_rights      = access_rights;
      vr->flags              = flags;
      vr->mapped_resource    = resource;
      vr->offset_in_resource = offset_in_resource;

      /* Insert VR in address space */
      vr->address_space = as;
      if (prev_vr)
        list_insert_after_named(as->list_vr, prev_vr, vr,
                                prev_in_as, next_in_as);
      else
        list_add_head_named(as->list_vr, vr, prev_in_as, next_in_as);
      list_add_tail_named(vr->mapped_resource->list_vr, vr,
                          prev_in_mapped_resource, next_in_mapped_resource);

      /* Signal the resource we are mapping it */
      if (resource && resource->mmap)
        {
          retval = resource->mmap(vr);
          if (SOS_OK != retval)
            {
              retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
              goto return_mmap;
            }

          /* The page_in method is MANDATORY for mapped resources */
          SOS_ASSERT_FATAL(vr->ops && vr->ops->page_in);
        }

      if (vr->ops && vr->ops->ref)
        vr->ops->ref(vr);
    }

  /* Ok, fine, we got it right! Return the address to the caller */
  *uaddr = hint_uaddr;
  as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
                                     size, 0, vr->access_rights);
  retval = SOS_OK;

 return_mmap:
  if (! used_preallocated_vr)
    sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);

  return retval;
}


sos_ret_t
sos_umem_vmm_unmap(struct sos_umem_vmm_as * as,
                   sos_uaddr_t uaddr, sos_size_t size)
{
  struct sos_umem_vmm_vr *vr, *preallocated_vr;
  sos_bool_t need_to_setup_mmu;
  sos_bool_t used_preallocated_vr;

  if (! SOS_IS_PAGE_ALIGNED(uaddr))
    return -SOS_EINVAL;
  if (size <= 0)
    return -SOS_EINVAL;
  size = SOS_PAGE_ALIGN_SUP(size);

  /* Make sure the uaddr is valid */
  if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    return -SOS_EINVAL;
  if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
    return -SOS_EINVAL;

  /* In some cases, the unmapping might imply that a VR be split into
     2. Allocating a new VR can be a blocking operation, and we can
     safely block now: it won't do any harm. But we must be careful
     not to block later, while altering the VR lists: that's why we
     pre-allocate now. */
  used_preallocated_vr = FALSE;
  preallocated_vr
    = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
  if (! preallocated_vr)
    return -SOS_ENOMEM;

  /* Find any VR intersecting with the given interval */
  vr = find_first_intersecting_vr(as, uaddr, size);

  /* Unmap (part of) the VR covered by [uaddr .. uaddr+size[ */
  while (NULL != vr)
    {
      /* Went past the end of the *circular* list => back at the
         beginning ? */
      if (vr->start + vr->size <= uaddr)
        /* Yes, stop now */
        break;

      /* Went beyond the region to unmap ? */
      if (uaddr + size <= vr->start)
        /* Yes, stop now */
        break;

      /* VR totally unmapped ? */
      if ((vr->start >= uaddr)
          && (vr->start + vr->size <= uaddr + size))
        {
          struct sos_umem_vmm_vr *next_vr;

          /* Yes: signal we remove it completely */
          if (vr->ops && vr->ops->unmap)
            vr->ops->unmap(vr, vr->start, vr->size);

          /* Remove it from the AS list now */
          next_vr = vr->next_in_as;
          if (next_vr == vr) /* singleton ? */
            next_vr = NULL;
          list_delete_named(as->list_vr, vr, prev_in_as, next_in_as);

          /* Remove from the list of VRs mapping the resource */
          list_delete_named(vr->mapped_resource->list_vr, vr,
                            prev_in_mapped_resource,
                            next_in_mapped_resource);

          if (vr->ops && vr->ops->unref)
            vr->ops->unref(vr);

          as_account_change_of_vr_protection(as,
                                             vr->flags & SOS_VR_MAP_SHARED,
                                             vr->size, vr->access_rights, 0);
          sos_kmem_vmm_free((sos_vaddr_t)vr);

          /* Prepare next iteration */
          vr = next_vr;
          continue;
        }

      /* Unmapped region lies completely INSIDE the VR */
      else if ( (vr->start < uaddr)
                && (vr->start + vr->size > uaddr + size) )
        {
          /* VR has to be split into 2 */

          /* Use the preallocated VR and copy the VR into it */
          used_preallocated_vr = TRUE;
          memcpy(preallocated_vr, vr, sizeof(*vr));

          /* Adjust the start/size of both VRs */
          preallocated_vr->start = uaddr + size;
          preallocated_vr->size  = vr->start + vr->size - (uaddr + size);
          preallocated_vr->offset_in_resource += uaddr + size - vr->start;
          vr->size = uaddr - vr->start;

          /* Insert the new VR into the list */
          list_insert_after_named(as->list_vr, vr, preallocated_vr,
                                  prev_in_as, next_in_as);
          list_add_tail_named(vr->mapped_resource->list_vr, preallocated_vr,
                              prev_in_mapped_resource,
                              next_in_mapped_resource);

          /* Signal the changes to the underlying resource */
          if (vr->ops && vr->ops->unmap)
            vr->ops->unmap(vr, uaddr, size);
          if (preallocated_vr->ops && preallocated_vr->ops->ref)
            preallocated_vr->ops->ref(preallocated_vr);

          /* Account for change in VRs */
          as_account_change_of_vr_protection(as,
                                             vr->flags & SOS_VR_MAP_SHARED,
                                             size, vr->access_rights, 0);

          /* No need to go further */
          break;
        }

      /* Unmapped region only affects the START address of the VR */
      else if (uaddr <= vr->start)
        {
          sos_size_t translation = uaddr + size - vr->start;

          /* Shift the VR */
          vr->size -= translation;