host-linux.c
}

/************************************************************************/
/* Status reporting: /proc code                                         */
/************************************************************************/

int
plex86_read_procmem(char *buf, char **start, off_t offset,
#if LINUX_VERSION_CODE >= VERSION_CODE(2,4,0)
                    int len
#else
                    int len, int unused
#endif
                    )
{
  unsigned i;

  len = 0;
  len += sprintf(buf, "monitor-->host interrupt reflection counts\n");
  for (i=0; i<256; i++) {
    int count;
    count = atomic_read( &interruptRedirCount[i] );
    if (count)
      len += sprintf(buf+len, "  0x%2x:%10u\n", i, count);
    }
  return(len);
}
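/* Illustrative registration sketch for the /proc handler above; this is
 * not part of the original file. On a 2.4-era kernel the signature of
 * plex86_read_procmem() matches get_info_t, so it can be hooked up with
 * create_proc_info_entry(). The entry name "driver/plex86" and the
 * plex86Proc* helper names are hypothetical choices for this sketch.
 */
#if 0
#include <linux/proc_fs.h>

static struct proc_dir_entry *plex86ProcEntry;

static int
plex86ProcInit(void)
{
  /* Create /proc/driver/plex86; reads are served by plex86_read_procmem. */
  plex86ProcEntry = create_proc_info_entry("driver/plex86", 0, NULL,
                                           plex86_read_procmem);
  return( plex86ProcEntry ? 0 : -ENOMEM );
}

static void
plex86ProcCleanup(void)
{
  remove_proc_entry("driver/plex86", NULL);
}
#endif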
int
retrieveKernelModulePages(void)
{
  /*
   * Retrieve the start address and size of this module.
   *
   * Note that with old kernels, we cannot access the module info (size),
   * hence we rely on the fact that Linux leaves at least one page of
   * virtual address space unused after the end of the module.
   */
#ifdef THIS_MODULE
  Bit32u driverStartAddr = (Bit32u) THIS_MODULE;
  unsigned size = THIS_MODULE->size;
#else
  Bit32u driverStartAddr = (Bit32u) &mod_use_count_;
  unsigned size = 0; /* Actual size determined below. */
#endif
  Bit32u driverStartAddrPageAligned = driverStartAddr & ~0xfff;
  int nPages;

  if (driverStartAddr != driverStartAddrPageAligned) {
    /* Pretend this kernel module starts at the beginning of the page.
     * If the size is known, we have to add the extra offset from the
     * beginning of the page.
     */
    if (size)
      size += (driverStartAddr & 0xfff);
    }

  nPages = retrievePhyPages(kernelModulePages.ppi, Plex86MaxKernelModulePages,
                            (void *) driverStartAddrPageAligned, size);
  if (nPages == 0) {
    printk(KERN_ERR "plex86: retrieveKernelModulePages: retrieve returned error.\n");
    return( 0 ); /* Error. */
    }
  printk(KERN_WARNING "plex86: %u monitor pages located\n", nPages);

  kernelModulePages.startOffset = driverStartAddr;
  kernelModulePages.startOffsetPageAligned = driverStartAddrPageAligned;
  kernelModulePages.nPages = nPages;
  return( 1 ); /* OK. */
}

unsigned
retrievePhyPages(Bit32u *page, int max_pages, void *addr_v, unsigned size)
{
  /*
   * Grrr. There doesn't seem to be an exported mechanism to retrieve
   * the physical pages underlying a vmalloc()'ed area. We do it the
   * hard way ...
   */
  pageEntry_t *host_pgd;
  Bit32u host_cr3;
  Bit32u addr; // start_addr;
  unsigned n_pages;
  int i;

  addr = (Bit32u) addr_v;
  if ( addr & 0xfff ) {
    printk(KERN_ERR "plex86: retrievePhyPages: not page aligned!\n");
    return 0;
    }
  if (!addr) {
    printk(KERN_ERR "plex86: retrievePhyPages: addr NULL!\n");
    return 0;
    }

  if (size == 0) {
    /* Size unknown. Determine it by cycling through the page tables until
     * we find an entry which is not present; we assume that means the end
     * of the data structure. Set the number of pages to cycle through to
     * one more than the maximum requested, so we look through enough pages.
     */
    n_pages = max_pages + 1;
    }
  else {
    n_pages = BytesToPages(size);
    if ( n_pages > max_pages ) {
      printk(KERN_ERR "plex86: retrievePhyPages: n=%u > max=%u\n",
             n_pages, max_pages);
      return 0;
      }
    }

  asm volatile ("movl %%cr3, %0" : "=r" (host_cr3));
  host_pgd = (pageEntry_t *)(phys_to_virt(host_cr3 & ~0xfff));

  for (i = 0; i < n_pages; i++) {
    Bit32u laddr;
    unsigned long lpage;
    pgd_t *pgdPtr;
    pmd_t *pmdPtr;
    pte_t *ptePtr;
    pgd_t pgdVal;
    pmd_t pmdVal;
    pte_t pteVal;

    laddr = KERNEL_OFFSET + ((Bit32u) addr);
    lpage = VMALLOC_VMADDR(laddr);

    /* About to traverse the page tables. We need to lock others out of
     * them briefly. Newer Linux versions can take a fine-grained lock on
     * the page tables themselves; older ones have to take the "big kernel
     * lock".
     */
#if LINUX_VERSION_CODE >= VERSION_CODE(2,3,10)
    spin_lock(&init_mm.page_table_lock);
#else
    lock_kernel(); /* Big kernel lock. */
#endif
    pgdPtr = pgd_offset(&init_mm, lpage);
    pmdPtr = pmd_offset(pgdPtr, lpage);
    ptePtr = pte_offset(pmdPtr, lpage);
    pgdVal = *pgdPtr;
    pmdVal = *pmdPtr;
    pteVal = *ptePtr;
#if LINUX_VERSION_CODE >= VERSION_CODE(2,3,10)
    spin_unlock(&init_mm.page_table_lock);
#else
    unlock_kernel(); /* Big kernel unlock. */
#endif

    if ( !(pgdVal.pgd & 1) || !(pmdVal.pmd & 1) || !(pteVal.pte_low & 1) ) {
      if (size == 0)
        return i; /* Report the number of pages until the area ended. */
      printk(KERN_ERR "plex86: retrievePhyPages: "
             "PDE.P==0: i=%u, n=%u laddr=0x%x\n", i, n_pages, laddr);
      return 0; /* Error: ran into an unmapped page in the memory range. */
      }

    /* Abort if our page list is too small. */
    if (i >= max_pages) {
      printk(KERN_WARNING "plex86: page list is too small!\n");
      printk(KERN_WARNING "plex86: n_pages=%u, max_pages=%u\n",
             n_pages, max_pages);
      return 0;
      }

    /* Get the physical page address for this virtual page address. */
    page[i] = pte_val(pteVal) >> 12;

    /* Increment to the next virtual page address. */
    addr += 4096;
    }
  return(n_pages);
}
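/* Illustrative self-test, not in the original source: vmalloc() a small
 * area, touch it so the pages are demand-mapped, then ask
 * retrievePhyPages() for the backing physical page numbers. The helper
 * name, the two-page size, and the printk are arbitrary choices here.
 */
#if 0
static void
retrievePhyPagesSelfTest(void)
{
  Bit32u ppi[2];
  unsigned n;
  void *area = vmalloc(2*4096);

  if (!area)
    return;
  mon_memzero(area, 2*4096); /* Fault the pages in before the table walk. */
  n = retrievePhyPages(ppi, 2, area, 2*4096);
  if (n == 2)
    printk(KERN_WARNING "plex86: test ppi[0]=0x%x, ppi[1]=0x%x\n",
           ppi[0], ppi[1]);
  vfree(area);
}
#endif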
/************************************************************************
 * The requisite host-specific functions. An implementation of each of
 * these functions needs to be offered for each host-XYZ.c file.
 ************************************************************************/

unsigned
hostOSIdle(void)
{
  if (NEED_RESCHED)
    schedule();
  /* return !current_got_fatal_signal(); */
  return( ! signal_pending(current) );
}

void *
hostOSAllocZeroedMem(unsigned long size)
{
  void *ptr;

  ptr = vmalloc(size);
  if ( ((Bit32u) ptr) & 0x00000fff )
    return( 0 ); /* Error. */
  /* Zero the pages. This also demand-maps the pages in, which we need
   * since we'll cycle through all the pages to get the physical
   * address mappings.
   */
  mon_memzero(ptr, size);
  return( ptr );
}

void
hostOSFreeMem(void *ptr)
{
  vfree(ptr);
}

void *
hostOSAllocZeroedPage(void)
{
  return( (void *) get_zeroed_page(GFP_KERNEL) );
}

void
hostOSFreePage(void *ptr)
{
  free_page( (Bit32u)ptr );
}

unsigned
hostOSGetAllocedMemPhyPages(Bit32u *page, int max_pages, void *ptr,
                            unsigned size)
{
  return( retrievePhyPages(page, max_pages, ptr, size) );
}

Bit32u
hostOSGetAllocedPagePhyPage(void *ptr)
{
  if (!ptr)
    return 0;
  /* return MAP_NR(ptr); */
  return(__pa(ptr) >> PAGE_SHIFT);
}

void
hostOSPrint(char *fmt, ...)
{
#warning "Fix hostPrint"
#if 0
  va_list args;
  int ret;
  unsigned char buffer[256];

  va_start(args, fmt);
  ret = mon_vsnprintf(buffer, 256, fmt, args);
  if (ret == -1) {
    printk(KERN_ERR "plex86: hostPrint: vsnprintf returns error.\n");
    }
  else {
    printk(KERN_WARNING "plex86: %s\n", buffer);
    }
#endif
}

int
hostOSConvertPlex86Errno(unsigned ret)
{
  switch (ret) {
    case 0:                 return(0);
    case Plex86ErrnoEBUSY:  return(EBUSY);
    case Plex86ErrnoENOMEM: return(ENOMEM);
    case Plex86ErrnoEFAULT: return(EFAULT);
    case Plex86ErrnoEINVAL: return(EINVAL);
    case Plex86ErrnoEACCES: return(EACCES);
    case Plex86ErrnoEAGAIN: return(EAGAIN);
    default:
      printk(KERN_ERR "plex86: ioctlAllocVPhys: case %u\n", ret);
      return(EINVAL);
    }
}

Bit32u
hostOSKernelOffset(void)
{
  return( KERNEL_OFFSET );
}

void
hostOSModuleCountReset(vm_t *vm, void *inode, void *filp)
{
#if LINUX_VERSION_CODE < VERSION_CODE(2,4,0)
  while (MOD_IN_USE) {
    MOD_DEC_USE_COUNT;
    }
  MOD_INC_USE_COUNT; /* Bump back to 1 so release can decrement. */
#endif
}

unsigned long
hostOSCopyFromUser(void *to, void *from, unsigned long len)
{
  return( copy_from_user(to, from, len) );
}

unsigned long
hostOSCopyToUser(void *to, void *from, unsigned long len)
{
  return( copy_to_user(to, from, len) );
}

Bit32u
hostOSGetAndPinUserPage(vm_t *vm, Bit32u userAddr, void **osSpecificPtr,
                        Bit32u *ppi, Bit32u *kernelAddr)
{
  int ret;
  struct page **pagePtr;
  struct page *page;

  pagePtr = (struct page **) osSpecificPtr;

  ret = get_user_pages(current, current->mm,
                       (unsigned long) userAddr,
                       1, /* 1 page. */
                       1, /* 'write': intent to write. */
                       0, /* 'force': ? */
                       pagePtr,
                       NULL /* struct vm_area_struct *[] */
                       );
  if (ret != 1) {
    printk(KERN_ERR "plex86: hostGetAndPinUserPages: failed.\n");
    return(0); /* Error. */
    }
  page = *pagePtr; /* The returned "struct page *" value. */

  /* Now that we have a list of "struct page *", one for each physical
   * page of memory of the user space process's requested area, we can
   * calculate the physical page address by simple pointer arithmetic
   * based on "mem_map".
   */
  *ppi = page - mem_map;

  if (kernelAddr) {
    /* The caller wants a kernel address returned which maps to this
     * physical address.
     */
    *kernelAddr = (Bit32u) kmap( page );
#warning "FIXME: Check return value here."
#warning "Also, conditionally compile for version and high memory support."
    }
  return(1); /* OK. */
}
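/* Added commentary, not original code: the "page - mem_map" arithmetic
 * above assumes the classic flat-memory layout, where mem_map[] holds one
 * struct page per physical page frame, so the pointer difference is the
 * physical page index. For a page that is not in high memory, the same
 * index could also be derived from its kernel virtual address, mirroring
 * hostOSGetAllocedPagePhyPage() above:
 */
#if 0
  *ppi = (Bit32u) (__pa(page_address(page)) >> PAGE_SHIFT);
#endif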
void
hostOSUnpinUserPage(vm_t *vm, Bit32u userAddr, void *osSpecificPtr,
                    Bit32u ppi, Bit32u *kernelAddr, unsigned dirty)
{
#if 0
  /* Here is some sample code from Linux 2.4.18, mm/memory.c:__free_pte(). */
  struct page *page = pte_page(pte);
  if ((!VALID_PAGE(page)) || PageReserved(page))
    return;
  if (pte_dirty(pte))
    set_page_dirty(page);
  free_page_and_swap_cache(page);
#endif

  struct page *page;

  page = (struct page *) osSpecificPtr;

  /* If a kernel address is passed, that means that we previously created
   * a mapping for this physical page in the kernel address space, and we
   * should unmap it. This is only really useful for pages allocated from
   * high memory.
   */
  if (kernelAddr)
    kunmap(page);

  /* If the page was dirtied due to the guest running in the VM, we need
   * to tell the kernel about that, since it is not aware of the VM page
   * tables.
   */
  if (dirty)
    set_page_dirty(page);

  /* Release/unpin the page. */
  put_page(page);
}

void
hostOSInstrumentIntRedirCount(unsigned interruptVector)
{
  atomic_inc( &interruptRedirCount[interruptVector] );
}
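/* Usage sketch for the pin/unpin pair above; not from the original file,
 * and the helper name and the 'dirty' choice are hypothetical. Pin one
 * user page, scribble on it through the kmap()'ed kernel address, then
 * release it with the dirty flag set so the kernel knows to write it back.
 */
#if 0
static void
pinUnpinExample(vm_t *vm, Bit32u userAddr)
{
  void  *osPtr;
  Bit32u ppi, kernelAddr;

  if ( !hostOSGetAndPinUserPage(vm, userAddr, &osPtr, &ppi, &kernelAddr) )
    return;
  mon_memzero((void *) kernelAddr, 4096); /* Touch the page via the mapping. */
  hostOSUnpinUserPage(vm, userAddr, osPtr, ppi, &kernelAddr, 1 /* dirty */);
}
#endif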