
📄 arch_vm_translation_map.c

📁 NewOS is a new operating system
💻 C
📖 Page 1 of 2
	} else {
//		dprintf("flush_tmap: %d pages to invalidate, doing local invalidation\n", map->arch_data->num_invalidate_pages);
		arch_cpu_invalidate_TLB_list(map->arch_data->pages_to_invalidate, map->arch_data->num_invalidate_pages);
		smp_send_broadcast_ici(SMP_MSG_INVL_PAGE_LIST, (unsigned long)map->arch_data->pages_to_invalidate,
			map->arch_data->num_invalidate_pages, 0, NULL, SMP_MSG_FLAG_SYNC);
	}
	map->arch_data->num_invalidate_pages = 0;

	int_restore_interrupts();
}

static int map_iospace_chunk(addr_t va, addr_t pa)
{
	int i;
	ptentry *pt;
	addr_t ppn;

	pa &= ~(PAGE_SIZE - 1); // make sure it's page aligned
	va &= ~(PAGE_SIZE - 1); // make sure it's page aligned
	if(va < IOSPACE_BASE || va >= (IOSPACE_BASE + IOSPACE_SIZE))
		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);

	ppn = ADDR_SHIFT(pa);
	pt = &iospace_pgtables[(va - IOSPACE_BASE)/PAGE_SIZE];
	for(i=0; i<1024; i++) {
		init_ptentry(&pt[i]);
		pt[i].addr = ppn + i;
		pt[i].user = 0;
		pt[i].rw = 1;
		pt[i].present = 1;
	}

	int_disable_interrupts();
	arch_cpu_invalidate_TLB_range(va, va + (IOSPACE_CHUNK_SIZE - PAGE_SIZE));
	smp_send_broadcast_ici(SMP_MSG_INVL_PAGE_RANGE, va, va + (IOSPACE_CHUNK_SIZE - PAGE_SIZE), 0,
		NULL, SMP_MSG_FLAG_SYNC);
	int_restore_interrupts();

	return 0;
}

static int get_physical_page_tmap(addr_t pa, addr_t *va, int flags)
{
	int index;
	paddr_chunk_desc *replaced_pchunk;

restart:
	mutex_lock(&iospace_mutex);

	// see if the page is already mapped
	index = pa / IOSPACE_CHUNK_SIZE;
	if(paddr_desc[index].va != 0) {
		if(paddr_desc[index].ref_count++ == 0) {
			// pull this descriptor out of the lru list
			queue_remove_item(&mapped_paddr_lru, &paddr_desc[index]);
		}
		*va = paddr_desc[index].va + pa % IOSPACE_CHUNK_SIZE;
		mutex_unlock(&iospace_mutex);
		return 0;
	}

	// map it
	if(first_free_vmapping < num_virtual_chunks) {
		// there's a free hole
		paddr_desc[index].va = first_free_vmapping * IOSPACE_CHUNK_SIZE + IOSPACE_BASE;
		*va = paddr_desc[index].va + pa % IOSPACE_CHUNK_SIZE;
		virtual_pmappings[first_free_vmapping] = &paddr_desc[index];
		paddr_desc[index].ref_count++;

		// push up the first_free_vmapping pointer
		for(; first_free_vmapping < num_virtual_chunks; first_free_vmapping++) {
			if(virtual_pmappings[first_free_vmapping] == NULL)
				break;
		}

		map_iospace_chunk(paddr_desc[index].va, index * IOSPACE_CHUNK_SIZE);
		mutex_unlock(&iospace_mutex);
		return 0;
	}

	// replace an earlier mapping
	if(queue_peek(&mapped_paddr_lru) == NULL) {
		// no free slots available
		if(flags == PHYSICAL_PAGE_NO_WAIT) {
			// punt back to the caller and let them handle this
			mutex_unlock(&iospace_mutex);
			return ERR_NO_MEMORY;
		} else {
			mutex_unlock(&iospace_mutex);
			sem_acquire(iospace_full_sem, 1);
			goto restart;
		}
	}

	replaced_pchunk = queue_dequeue(&mapped_paddr_lru);
	paddr_desc[index].va = replaced_pchunk->va;
	replaced_pchunk->va = 0;
	*va = paddr_desc[index].va + pa % IOSPACE_CHUNK_SIZE;
	paddr_desc[index].ref_count++;

	map_iospace_chunk(paddr_desc[index].va, index * IOSPACE_CHUNK_SIZE);

	mutex_unlock(&iospace_mutex);
	return 0;
}

static int put_physical_page_tmap(addr_t va)
{
	paddr_chunk_desc *desc;

	if(va < IOSPACE_BASE || va >= IOSPACE_BASE + IOSPACE_SIZE)
		panic("someone called put_physical_page on an invalid va 0x%lx\n", va);
	va -= IOSPACE_BASE;

	mutex_lock(&iospace_mutex);

	desc = virtual_pmappings[va / IOSPACE_CHUNK_SIZE];
	if(desc == NULL) {
		mutex_unlock(&iospace_mutex);
		panic("put_physical_page called on page at va 0x%lx which is not checked out\n", va);
		return ERR_VM_GENERAL;
	}

	if(--desc->ref_count == 0) {
		// put it on the mapped lru list
		queue_enqueue(&mapped_paddr_lru, desc);
		// no sense rescheduling on this one, there's likely a race in the waiting
		// thread to grab the iospace_mutex, which would block and eventually get back to
		// this thread. waste of time.
		sem_release_etc(iospace_full_sem, 1, SEM_FLAG_NO_RESCHED);
	}

	mutex_unlock(&iospace_mutex);

	return 0;
}
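The two routines above form a check-out/check-in pair: get_physical_page_tmap hands back a kernel virtual address for an arbitrary physical page, and put_physical_page_tmap drops the reference so the 4 MB chunk can be recycled through the LRU list. A minimal usage sketch follows; zero_phys_page_no_wait is a hypothetical caller, not part of this file, but it uses only calls and constants shown above.

// Hypothetical caller (illustration only): temporarily map a physical page
// so the kernel can zero it.
static int zero_phys_page_no_wait(addr_t pa)
{
	addr_t va;
	int err;

	// Check the page out; with PHYSICAL_PAGE_NO_WAIT the call returns
	// ERR_NO_MEMORY instead of blocking when every iospace chunk is in use.
	err = get_physical_page_tmap(pa, &va, PHYSICAL_PAGE_NO_WAIT);
	if(err < 0)
		return err;

	memset((void *)va, 0, PAGE_SIZE);

	// Check it back in; at ref_count 0 the chunk goes onto the LRU list
	// and can be stolen for the next mapping.
	return put_physical_page_tmap(va);
}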
static vm_translation_map_ops tmap_ops = {
	destroy_tmap,
	lock_tmap,
	unlock_tmap,
	map_tmap,
	unmap_tmap,
	query_tmap,
	get_mapped_size_tmap,
	protect_tmap,
	clear_flags_tmap,
	flush_tmap,
	get_physical_page_tmap,
	put_physical_page_tmap
};

int vm_translation_map_create(vm_translation_map *new_map, bool kernel)
{
	if(new_map == NULL)
		return ERR_INVALID_ARGS;

	// initialize the new object
	new_map->ops = &tmap_ops;
	new_map->map_count = 0;
	if(recursive_lock_create(&new_map->lock) < 0)
		return ERR_NO_MEMORY;

	new_map->arch_data = kmalloc(sizeof(vm_translation_map_arch_info));
	if(new_map->arch_data == NULL) {
		recursive_lock_destroy(&new_map->lock);
		return ERR_NO_MEMORY;
	}
	new_map->arch_data->num_invalidate_pages = 0;

	if(!kernel) {
		// user
		// allocate a pgdir
		new_map->arch_data->pgdir_virt = kmalloc(PAGE_SIZE);
		if(new_map->arch_data->pgdir_virt == NULL) {
			kfree(new_map->arch_data);
			recursive_lock_destroy(&new_map->lock);
			return ERR_NO_MEMORY;
		}
		if(((addr_t)new_map->arch_data->pgdir_virt % PAGE_SIZE) != 0)
			panic("vm_translation_map_create: malloced pgdir and found it wasn't aligned!\n");
		vm_get_page_mapping(vm_get_kernel_aspace_id(), (addr_t)new_map->arch_data->pgdir_virt,
			(addr_t *)&new_map->arch_data->pgdir_phys);
	} else {
		// kernel
		// we already know the kernel pgdir mapping
		new_map->arch_data->pgdir_virt = kernel_pgdir_virt;
		new_map->arch_data->pgdir_phys = kernel_pgdir_phys;
	}

	// zero out the bottom portion of the new pgdir
	memset(new_map->arch_data->pgdir_virt + FIRST_USER_PGDIR_ENT, 0, NUM_USER_PGDIR_ENTS * sizeof(pdentry));

	// insert this new map into the map list
	{
		int_disable_interrupts();
		acquire_spinlock(&tmap_list_lock);

		// copy the top portion of the pgdir from the current one
		memcpy(new_map->arch_data->pgdir_virt + FIRST_KERNEL_PGDIR_ENT, kernel_pgdir_virt + FIRST_KERNEL_PGDIR_ENT,
			NUM_KERNEL_PGDIR_ENTS * sizeof(pdentry));

		list_add_head(&tmap_list_head, &new_map->tmap_list_node);

		release_spinlock(&tmap_list_lock);
		int_restore_interrupts();
	}

	return 0;
}
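tmap_ops is what ties these static functions into the generic vm_translation_map interface: machine-independent VM code reaches the i386 implementation only through map->ops. Below is a sketch of that dispatch pattern; the caller is hypothetical, and the field names and map() signature are assumed to mirror the initializer order above rather than confirmed by this excerpt.

// Hypothetical machine-independent caller; assumes the ops struct's fields
// are named after the functions in the initializer (lock, map, unlock) and
// that map() takes (map, va, pa, attributes).
static int map_one_page(vm_translation_map *map, addr_t va, addr_t pa, unsigned int attributes)
{
	int err;

	map->ops->lock(map);
	err = map->ops->map(map, va, pa, attributes);
	map->ops->unlock(map); // queued TLB invalidations are flushed via flush_tmap

	return err;
}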
int vm_translation_map_module_init(kernel_args *ka)
{
	int i;

	dprintf("vm_translation_map_module_init: entry\n");

	// page hole set up in stage2
	page_hole = (ptentry *)ka->arch_args.page_hole;
	// calculate where the pgdir would be
	page_hole_pgdir = (pdentry *)(((unsigned int)ka->arch_args.page_hole) + (PAGE_SIZE * 1024 - PAGE_SIZE));
	// clear out the bottom 2 GB, unmap everything
	memset(page_hole_pgdir + FIRST_USER_PGDIR_ENT, 0, sizeof(pdentry) * NUM_USER_PGDIR_ENTS);

	kernel_pgdir_phys = (pdentry *)ka->arch_args.phys_pgdir;
	kernel_pgdir_virt = (pdentry *)ka->arch_args.vir_pgdir;

	tmap_list_lock = 0;
	list_initialize(&tmap_list_head);

	// allocate some space to hold physical page mapping info
	paddr_desc = (paddr_chunk_desc *)vm_alloc_from_ka_struct(ka,
		sizeof(paddr_chunk_desc) * 1024, LOCK_RW|LOCK_KERNEL);
	num_virtual_chunks = IOSPACE_SIZE / IOSPACE_CHUNK_SIZE;
	virtual_pmappings = (paddr_chunk_desc **)vm_alloc_from_ka_struct(ka,
		sizeof(paddr_chunk_desc *) * num_virtual_chunks, LOCK_RW|LOCK_KERNEL);
	iospace_pgtables = (ptentry *)vm_alloc_from_ka_struct(ka,
		PAGE_SIZE * (IOSPACE_SIZE / (PAGE_SIZE * 1024)), LOCK_RW|LOCK_KERNEL);

	dprintf("paddr_desc %p, virtual_pmappings %p, iospace_pgtables %p\n",
		paddr_desc, virtual_pmappings, iospace_pgtables);

	// initialize our data structures
	memset(paddr_desc, 0, sizeof(paddr_chunk_desc) * 1024);
	memset(virtual_pmappings, 0, sizeof(paddr_chunk_desc *) * num_virtual_chunks);
	first_free_vmapping = 0;
	queue_init(&mapped_paddr_lru);
	memset(iospace_pgtables, 0, PAGE_SIZE * (IOSPACE_SIZE / (PAGE_SIZE * 1024)));
	iospace_mutex.sem = -1;
	iospace_mutex.holder = -1;
	iospace_full_sem = -1;

	dprintf("mapping iospace_pgtables\n");

	// put the array of pgtables directly into the kernel pagedir
	// these will be wired and kept mapped into virtual space to be easy to get to
	{
		addr_t phys_pgtable;
		addr_t virt_pgtable;
		pdentry *e;

		virt_pgtable = (addr_t)iospace_pgtables;
		for(i = 0; i < (IOSPACE_SIZE / (PAGE_SIZE * 1024)); i++, virt_pgtable += PAGE_SIZE) {
			vm_translation_map_quick_query(virt_pgtable, &phys_pgtable);
			e = &page_hole_pgdir[(IOSPACE_BASE / (PAGE_SIZE * 1024)) + i];
			put_pgtable_in_pgdir(e, phys_pgtable, LOCK_RW|LOCK_KERNEL);
		}
	}

	// turn on the global bit if the cpu supports it
	if(i386_check_feature(X86_PGE, FEATURE_COMMON)) {
		uint32 cr4;

		dprintf("enabling global bit\n");
		read_cr4(cr4);
		write_cr4(cr4 | (1<<7)); // PGE bit in cr4
	}

	dprintf("vm_translation_map_module_init: done\n");

	return 0;
}

void vm_translation_map_module_init_post_sem(kernel_args *ka)
{
	mutex_init(&iospace_mutex, "iospace_mutex");
	iospace_full_sem = sem_create(1, "iospace_full_sem");
}

int vm_translation_map_module_init2(kernel_args *ka)
{
	// now that the vm is initialized, create a region that represents
	// the page hole
	void *temp;

	dprintf("vm_translation_map_module_init2: entry\n");

	// unmap the page hole hack we were using before
	kernel_pgdir_virt[1023].present = 0;
	page_hole_pgdir = NULL;
	page_hole = NULL;

	temp = (void *)kernel_pgdir_virt;
	vm_create_anonymous_region(vm_get_kernel_aspace_id(), "kernel_pgdir", &temp,
		REGION_ADDR_EXACT_ADDRESS, PAGE_SIZE, REGION_WIRING_WIRED_ALREADY, LOCK_RW|LOCK_KERNEL);

	temp = (void *)paddr_desc;
	vm_create_anonymous_region(vm_get_kernel_aspace_id(), "physical_page_mapping_descriptors", &temp,
		REGION_ADDR_EXACT_ADDRESS, ROUNDUP(sizeof(paddr_chunk_desc) * 1024, PAGE_SIZE),
		REGION_WIRING_WIRED_ALREADY, LOCK_RW|LOCK_KERNEL);

	temp = (void *)virtual_pmappings;
	vm_create_anonymous_region(vm_get_kernel_aspace_id(), "iospace_virtual_chunk_descriptors", &temp,
		REGION_ADDR_EXACT_ADDRESS, ROUNDUP(sizeof(paddr_chunk_desc *) * num_virtual_chunks, PAGE_SIZE),
		REGION_WIRING_WIRED_ALREADY, LOCK_RW|LOCK_KERNEL);

	temp = (void *)iospace_pgtables;
	vm_create_anonymous_region(vm_get_kernel_aspace_id(), "iospace_pgtables", &temp,
		REGION_ADDR_EXACT_ADDRESS, PAGE_SIZE * (IOSPACE_SIZE / (PAGE_SIZE * 1024)),
		REGION_WIRING_WIRED_ALREADY, LOCK_RW|LOCK_KERNEL);

	dprintf("vm_translation_map_module_init2: creating iospace\n");
	temp = (void *)IOSPACE_BASE;
	vm_create_null_region(vm_get_kernel_aspace_id(), "iospace", &temp,
		REGION_ADDR_EXACT_ADDRESS, IOSPACE_SIZE);

	dprintf("vm_translation_map_module_init2: done\n");

	return 0;
}
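The allocation sizes in vm_translation_map_module_init fall straight out of the iospace geometry: one 4 KB page of ptentries maps 4 MB (1024 entries * 4 KB), so the wired page-table array needs IOSPACE_SIZE / 4 MB pages. The arithmetic below spells this out; the concrete constant values are assumptions for illustration, since this excerpt doesn't define IOSPACE_SIZE or IOSPACE_CHUNK_SIZE.

// Illustrative geometry only; these values are assumed, not taken from this file.
#define EX_PAGE_SIZE           4096
#define EX_IOSPACE_SIZE        (256 * 1024 * 1024)  // assumed 256 MB iospace
#define EX_IOSPACE_CHUNK_SIZE  (4 * 1024 * 1024)    // one page table's worth of mappings

// number of chunk-sized windows => size of virtual_pmappings[]:
// 256 MB / 4 MB = 64 descriptors
enum { EX_NUM_VIRTUAL_CHUNKS = EX_IOSPACE_SIZE / EX_IOSPACE_CHUNK_SIZE };

// one page table maps PAGE_SIZE * 1024 = 4 MB, so iospace_pgtables needs
// 256 MB / 4 MB = 64 pages = 256 KB of wired memory
enum { EX_IOSPACE_PGTABLE_BYTES = EX_PAGE_SIZE * (EX_IOSPACE_SIZE / (EX_PAGE_SIZE * 1024)) };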
// XXX horrible back door to map a page quickly regardless of translation map object, etc.
// used only during VM setup.
// uses a 'page hole' set up in the stage 2 bootloader. The page hole is created by pointing one of
// the pgdir entries back at itself, effectively mapping the contents of all of the 4MB of pagetables
// into a 4 MB region. It's only used here, and is later unmapped.
int vm_translation_map_quick_map(kernel_args *ka, addr_t va, addr_t pa, unsigned int attributes, addr_t (*get_free_page)(kernel_args *))
{
	ptentry *pentry;
	int index;

#if CHATTY_TMAP
	dprintf("quick_tmap: entry pa 0x%x va 0x%x\n", pa, va);
#endif

	// check to see if a page table exists for this range
	index = VADDR_TO_PDENT(va);
	if(page_hole_pgdir[index].present == 0) {
		addr_t pgtable;
		pdentry *e;

		// we need to allocate a pgtable
		pgtable = get_free_page(ka);
		// pgtable is in pages, convert to physical address
		pgtable *= PAGE_SIZE;
#if CHATTY_TMAP
		dprintf("quick_map: asked for free page for pgtable. 0x%x\n", pgtable);
#endif

		// put it in the pgdir
		e = &page_hole_pgdir[index];
		put_pgtable_in_pgdir(e, pgtable, attributes);

		// zero it out in its new mapping
		memset((unsigned int *)((unsigned int)page_hole + (va / PAGE_SIZE / 1024) * PAGE_SIZE), 0, PAGE_SIZE);
	}

	// now, fill in the pentry
	pentry = page_hole + va / PAGE_SIZE;
	init_ptentry(pentry);
	pentry->addr = ADDR_SHIFT(pa);
	pentry->user = !(attributes & LOCK_KERNEL);
	pentry->rw = attributes & LOCK_RW;
	pentry->present = 1;
	if(is_kernel_address(va))
		pentry->global = 1; // global bit set for all kernel addresses

	arch_cpu_invalidate_TLB_range(va, va);

	return 0;
}

// XXX currently assumes this translation map is active
static int vm_translation_map_quick_query(addr_t va, addr_t *out_physical)
{
	ptentry *pentry;

	if(page_hole_pgdir[VADDR_TO_PDENT(va)].present == 0) {
		// no pagetable here
		return ERR_VM_PAGE_NOT_PRESENT;
	}

	pentry = page_hole + va / PAGE_SIZE;
	if(pentry->present == 0) {
		// page mapping not valid
		return ERR_VM_PAGE_NOT_PRESENT;
	}

	*out_physical = pentry->addr << 12;

	return 0;
}

addr_t vm_translation_map_get_pgdir(vm_translation_map *map)
{
	return (addr_t)map->arch_data->pgdir_phys;
}
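For reference, the page-hole arithmetic used by quick_map and quick_query works because one pgdir slot points back at the pgdir itself, so the 4 MB window at that slot exposes every page table, with the pgdir appearing as the window's last page (hence page_hole_pgdir = page_hole + PAGE_SIZE * 1024 - PAGE_SIZE). Below is a sketch with concrete numbers; the 0xFFC00000 base is an assumption, since the real value arrives from stage 2 in ka->arch_args.page_hole.

// Assumed base: pgdir slot 1023 mapped back onto the pgdir, giving a hole
// at 0xFFC00000. The real base comes from the stage 2 bootloader.
#define EX_PAGE_HOLE 0xFFC00000u

// Address of the ptentry that maps 'va', exactly as quick_map computes it:
// one 4-byte entry per page, indexed by va / PAGE_SIZE.
static ptentry *example_pte_for(addr_t va)
{
	return (ptentry *)EX_PAGE_HOLE + va / PAGE_SIZE;
}

// The pgdir itself shows up as the hole's last page:
// 0xFFC00000 + 4 MB - 4 KB = 0xFFFFF000.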
