arch_vm_translation_map.c

newos is a new operating system
Language: C
Page 1 of 2
/*
** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/arch/vm_translation_map.h>
#include <kernel/heap.h>
#include <kernel/int.h>
#include <kernel/smp.h>
#include <kernel/vm.h>
#include <kernel/vm_page.h>
#include <kernel/vm_priv.h>
#include <kernel/arch/cpu.h>
#include <kernel/debug.h>
#include <kernel/lock.h>
#include <kernel/sem.h>
#include <kernel/queue.h>
#include <kernel/list.h>
#include <string.h>
#include <boot/stage2.h>
#include <newos/errors.h>

// 256 MB of iospace
#define IOSPACE_SIZE (256*1024*1024)
// put it 256 MB into kernel space
#define IOSPACE_BASE (KERNEL_BASE + IOSPACE_SIZE)
// 4 MB chunks, to optimize for 4 MB pages
#define IOSPACE_CHUNK_SIZE (4*1024*1024)

// data and structures used to represent physical pages mapped into iospace
typedef struct paddr_chunk_descriptor {
	struct paddr_chunk_descriptor *next_q; // must remain first in structure, queue code uses it
	int ref_count;
	addr_t va;
} paddr_chunk_desc;

static paddr_chunk_desc *paddr_desc;         // will be one per physical chunk
static paddr_chunk_desc **virtual_pmappings; // will be one ptr per virtual chunk in iospace
static int first_free_vmapping;
static int num_virtual_chunks;
static queue mapped_paddr_lru;
static ptentry *iospace_pgtables = NULL;
static mutex iospace_mutex;
static sem_id iospace_full_sem;

#define PAGE_INVALIDATE_CACHE_SIZE 64

// vm_translation object stuff
typedef struct vm_translation_map_arch_info_struct {
	pdentry *pgdir_virt;
	pdentry *pgdir_phys;
	int num_invalidate_pages;
	addr_t pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
} vm_translation_map_arch_info;

static ptentry *page_hole = NULL;
static pdentry *page_hole_pgdir = NULL;
static pdentry *kernel_pgdir_phys = NULL;
static pdentry *kernel_pgdir_virt = NULL;

static struct list_node tmap_list_head;
static spinlock_t tmap_list_lock;

#define CHATTY_TMAP 0

#define ADDR_SHIFT(x) ((x)>>12)
#define ADDR_REVERSE_SHIFT(x) ((x)<<12)

#define VADDR_TO_PDENT(va) (((va) / PAGE_SIZE) / 1024)
#define VADDR_TO_PTENT(va) (((va) / PAGE_SIZE) % 1024)

#define FIRST_USER_PGDIR_ENT    (VADDR_TO_PDENT(USER_BASE))
#define NUM_USER_PGDIR_ENTS     (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, PAGE_SIZE * 1024)))
#define FIRST_KERNEL_PGDIR_ENT  (VADDR_TO_PDENT(KERNEL_BASE))
#define NUM_KERNEL_PGDIR_ENTS   (VADDR_TO_PDENT(KERNEL_SIZE))

static int vm_translation_map_quick_query(addr_t va, addr_t *out_physical);
static int get_physical_page_tmap(addr_t pa, addr_t *va, int flags);
static int put_physical_page_tmap(addr_t va);
static void flush_tmap(vm_translation_map *map);

static void init_pdentry(pdentry *e)
{
	*(int *)e = 0;
}

static void init_ptentry(ptentry *e)
{
	*(int *)e = 0;
}

static void _update_all_pgdirs(int index, pdentry e)
{
	vm_translation_map *entry;

	int_disable_interrupts();
	acquire_spinlock(&tmap_list_lock);

	list_for_every_entry(&tmap_list_head, entry, vm_translation_map, tmap_list_node)
		entry->arch_data->pgdir_virt[index] = e;

	release_spinlock(&tmap_list_lock);
	int_restore_interrupts();
}

static int lock_tmap(vm_translation_map *map)
{
//	dprintf("lock_tmap: map 0x%x\n", map);
	if(recursive_lock_lock(&map->lock) == true) {
		// we were the first one to grab the lock
//		dprintf("clearing invalidated page count\n");
		map->arch_data->num_invalidate_pages = 0;
	}

	return 0;
}

static int unlock_tmap(vm_translation_map *map)
{
//	dprintf("unlock_tmap: map 0x%x\n", map);
	if(recursive_lock_get_recursion(&map->lock) == 1) {
		// we're about to release it for the last time
		flush_tmap(map);
	}
	recursive_lock_unlock(&map->lock);
	return 0;
}

static void destroy_tmap(vm_translation_map *map)
{
	unsigned int i;

	if(map == NULL)
		return;

	// remove it from the tmap list
	int_disable_interrupts();
	acquire_spinlock(&tmap_list_lock);

	list_delete(&map->tmap_list_node);

	release_spinlock(&tmap_list_lock);
	int_restore_interrupts();

	if(map->arch_data->pgdir_virt != NULL) {
		// cycle through and free all of the user space pgtables
		for(i = VADDR_TO_PDENT(USER_BASE); i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			addr_t pgtable_addr;
			vm_page *page;

			if(map->arch_data->pgdir_virt[i].present == 1) {
				pgtable_addr = map->arch_data->pgdir_virt[i].addr;
				page = vm_lookup_page(pgtable_addr);
				if(!page)
					panic("destroy_tmap: didn't find pgtable page\n");
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
		kfree(map->arch_data->pgdir_virt);
	}

	kfree(map->arch_data);
	recursive_lock_destroy(&map->lock);
}

static void put_pgtable_in_pgdir(pdentry *e, addr_t pgtable_phys, int attributes)
{
	// put it in the pgdir
	init_pdentry(e);
	e->addr = ADDR_SHIFT(pgtable_phys);
	// mark it user and read/write.
	// The pgtable entries will override it
	e->user = 1;
	e->rw = 1;
	e->present = 1;
}

static int map_tmap(vm_translation_map *map, addr_t va, addr_t pa, unsigned int attributes)
{
	pdentry *pd;
	ptentry *pt;
	unsigned int index;
	int err;

#if CHATTY_TMAP
	dprintf("map_tmap: entry pa 0x%x va 0x%x\n", pa, va);
#endif

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / PAGE_SIZE / 1024].present);
	dprintf("addr_t is %d\n", pgdir[va / PAGE_SIZE / 1024].addr);
*/
	pd = map->arch_data->pgdir_virt;

	// check to see if a page table exists for this range
	index = VADDR_TO_PDENT(va);
	if(pd[index].present == 0) {
		addr_t pgtable;
		vm_page *page;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(PAGE_STATE_CLEAR);

		// mark the page WIRED
		vm_page_set_state(page, PAGE_STATE_WIRED);

		pgtable = page->ppn * PAGE_SIZE;

#if CHATTY_TMAP
		dprintf("map_tmap: asked for free page for pgtable. 0x%x\n", pgtable);
#endif

		// put it in the pgdir
		put_pgtable_in_pgdir(&pd[index], pgtable, attributes | LOCK_RW);

		// update any other page directories, if it maps kernel space
		if(index >= FIRST_KERNEL_PGDIR_ENT && index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
			_update_all_pgdirs(index, pd[index]);

		map->map_count++;
	}

	// now, fill in the pentry
	do {
		err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
	} while(err < 0);

	index = VADDR_TO_PTENT(va);

	init_ptentry(&pt[index]);
	pt[index].addr = ADDR_SHIFT(pa);
	pt[index].user = !(attributes & LOCK_KERNEL);
	pt[index].rw = attributes & LOCK_RW;
	pt[index].present = 1;
	if(is_kernel_address(va))
		pt[index].global = 1; // global bit set for all kernel addresses

	put_physical_page_tmap((addr_t)pt);

	if(map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) {
		map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
	}
	map->arch_data->num_invalidate_pages++;

	map->map_count++;

	return 0;
}

static int unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
{
	ptentry *pt;
	pdentry *pd = map->arch_data->pgdir_virt;
	int index;
	int err;

	start = ROUNDOWN(start, PAGE_SIZE);
	end = ROUNDUP(end, PAGE_SIZE);

#if CHATTY_TMAP
	dprintf("unmap_tmap: asked to free pages 0x%x to 0x%x\n", start, end);
#endif

restart:
	if(start >= end)
		return 0;

	index = VADDR_TO_PDENT(start);
	if(pd[index].present == 0) {
		// no pagetable here, move the start up to access the next page table
		start = ROUNDUP(start + 1, PAGE_SIZE);
		goto restart;
	}

	do {
		err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
	} while(err < 0);

	for(index = VADDR_TO_PTENT(start); (index < 1024) && (start < end); index++, start += PAGE_SIZE) {
		if(pt[index].present == 0) {
			// page mapping not valid
			continue;
		}

#if CHATTY_TMAP
		dprintf("unmap_tmap: removing page 0x%x\n", start);
#endif

		pt[index].present = 0;
		map->map_count--;

		if(map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) {
			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;
		}
		map->arch_data->num_invalidate_pages++;
	}

	put_physical_page_tmap((addr_t)pt);

	goto restart;
}

static int query_tmap(vm_translation_map *map, addr_t va, addr_t *out_physical, unsigned int *out_flags)
{
	ptentry *pt;
	pdentry *pd = map->arch_data->pgdir_virt;
	int index;
	int err;

	// default the flags to not present
	*out_flags = 0;
	*out_physical = 0;

	index = VADDR_TO_PDENT(va);
	if(pd[index].present == 0) {
		// no pagetable here
		return NO_ERROR;
	}

	do {
		err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
	} while(err < 0);

	index = VADDR_TO_PTENT(va);

	*out_physical = ADDR_REVERSE_SHIFT(pt[index].addr) | (va & 0xfff);

	// read in the page state flags, clearing the modified and accessed flags in the process
	*out_flags = 0;
	*out_flags |= pt[index].rw ? LOCK_RW : LOCK_RO;
	*out_flags |= pt[index].user ? 0 : LOCK_KERNEL;
	*out_flags |= pt[index].dirty ? PAGE_MODIFIED : 0;
	*out_flags |= pt[index].accessed ? PAGE_ACCESSED : 0;
	*out_flags |= pt[index].present ? PAGE_PRESENT : 0;

	put_physical_page_tmap((addr_t)pt);

//	dprintf("query_tmap: returning pa 0x%x for va 0x%x\n", *out_physical, va);

	return 0;
}

static addr_t get_mapped_size_tmap(vm_translation_map *map)
{
	return map->map_count;
}

static int protect_tmap(vm_translation_map *map, addr_t base, addr_t top, unsigned int attributes)
{
	// XXX finish
	panic("protect_tmap called, not implemented\n");
	return ERR_UNIMPLEMENTED;
}

static int clear_flags_tmap(vm_translation_map *map, addr_t va, unsigned int flags)
{
	ptentry *pt;
	pdentry *pd = map->arch_data->pgdir_virt;
	int index;
	int err;
	int tlb_flush = false;

	index = VADDR_TO_PDENT(va);
	if(pd[index].present == 0) {
		// no pagetable here
		return NO_ERROR;
	}

	do {
		err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
	} while(err < 0);

	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	if(flags & PAGE_MODIFIED) {
		pt[index].dirty = 0;
		tlb_flush = true;
	}
	if(flags & PAGE_ACCESSED) {
		pt[index].accessed = 0;
		tlb_flush = true;
	}

	put_physical_page_tmap((addr_t)pt);

	if(tlb_flush) {
		if(map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) {
			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
		}
		map->arch_data->num_invalidate_pages++;
	}

//	dprintf("query_tmap: returning pa 0x%x for va 0x%x\n", *out_physical, va);

	return 0;
}

static void flush_tmap(vm_translation_map *map)
{
	if(map->arch_data->num_invalidate_pages <= 0)
		return;

	int_disable_interrupts();

	if(map->arch_data->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
//		dprintf("flush_tmap: %d pages to invalidate, doing global invalidation\n", map->arch_data->num_invalidate_pages);
		arch_cpu_global_TLB_invalidate();
		smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
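
The VADDR_TO_PDENT and VADDR_TO_PTENT macros in the listing split a 32-bit virtual address into its two-level x86 page-table indices: the top 10 bits select the page-directory entry, the next 10 bits select the page-table entry, and the low 12 bits are the offset within the page. Below is a minimal standalone sketch of that math, assuming the usual x86 PAGE_SIZE of 4096; the example address and the program itself are illustrative and not part of the kernel source.

/* Standalone demo of the address-split math used by the listing above. */
#include <stdio.h>

#define PAGE_SIZE 4096
#define VADDR_TO_PDENT(va) (((va) / PAGE_SIZE) / 1024)  /* top 10 bits */
#define VADDR_TO_PTENT(va) (((va) / PAGE_SIZE) % 1024)  /* next 10 bits */

int main(void)
{
	unsigned long va = 0x80321000UL; /* arbitrary example address */

	/* pgdir index 512, pgtable index 801, offset 0 for this address */
	printf("va 0x%lx -> pgdir index %lu, pgtable index %lu, offset 0x%lx\n",
	       va, VADDR_TO_PDENT(va), VADDR_TO_PTENT(va), va & 0xfffUL);
	return 0;
}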
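Note also the batching pattern running through map_tmap, unmap_tmap and clear_flags_tmap: rather than flushing the TLB on every change, each touched virtual address is recorded in the per-map pages_to_invalidate cache, and flush_tmap (invoked by unlock_tmap when the last recursive lock is released) either invalidates just the recorded pages or, when the counter has overrun PAGE_INVALIDATE_CACHE_SIZE, falls back to a global TLB invalidation. The following standalone sketch isolates that logic; the _demo names are hypothetical and printf stands in for the real invalidation primitives (arch_cpu_global_TLB_invalidate and per-page INVLPG on x86).

/* Standalone sketch of the batched TLB-invalidation pattern above. */
#include <stdio.h>

#define PAGE_INVALIDATE_CACHE_SIZE 64

typedef unsigned long addr_t;

struct arch_info_demo {
	int num_invalidate_pages;
	addr_t pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
};

/* Record a page for later invalidation. The counter keeps growing even
 * once the cache is full, so the flush can detect the overflow. */
static void queue_invalidate_demo(struct arch_info_demo *info, addr_t va)
{
	if(info->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
		info->pages_to_invalidate[info->num_invalidate_pages] = va;
	info->num_invalidate_pages++;
}

static void flush_demo(struct arch_info_demo *info)
{
	int i;

	if(info->num_invalidate_pages <= 0)
		return;

	if(info->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
		/* cache overflowed: cheaper to blow away the whole TLB */
		printf("global TLB invalidation (%d pages queued)\n",
		       info->num_invalidate_pages);
	} else {
		/* invalidate only the recorded pages */
		for(i = 0; i < info->num_invalidate_pages; i++)
			printf("invalidate page at 0x%lx\n",
			       info->pages_to_invalidate[i]);
	}
	info->num_invalidate_pages = 0;
}

int main(void)
{
	struct arch_info_demo info = { 0 };

	queue_invalidate_demo(&info, 0x80321000UL);
	queue_invalidate_demo(&info, 0x80322000UL);
	flush_demo(&info); /* two pages queued: per-page invalidation path */
	return 0;
}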
