⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 monitor-host.c

📁 bochs : one pc simulator.
💻 C
📖 第 1 页 / 共 4 页
字号:
  /* NOTE(review): the opening of this function (its signature and the
   * declarations of 'vm', 'pg', 'ad', and 'where') lies before this chunk.
   * From the visible tail: 'where' counts allocation steps, and on any
   * failure the error exit frees everything via hostUnallocVmPages() and
   * returns 'where' so the caller can identify which step failed.
   * A return of 0 means success. */
  where++;
  if (!(pg->page_dir = hostOSGetAllocedPagePhyPage(ad->page_dir))) {
    goto error;
    }
  where++;

  /* Monitor page tables */
  if ( !(ad->page_tbl = hostOSAllocZeroedMem(4096 * MON_PAGE_TABLES)) ) {
    goto error;
    }
  where++;
  if (!hostOSGetAllocedMemPhyPages(pg->page_tbl, MON_PAGE_TABLES,
            ad->page_tbl, 4096 * MON_PAGE_TABLES)) {
    goto error;
    }
  where++;

  /* Map of the linear addresses of page tables currently */
  /* mapped into the monitor space. */
  if ( !(ad->page_tbl_laddr_map = (unsigned *) hostOSAllocZeroedPage()) ) {
    goto error;
    }
  where++;
  if ( !(pg->page_tbl_laddr_map =
         hostOSGetAllocedPagePhyPage(ad->page_tbl_laddr_map)) ) {
    goto error;
    }
  where++;

  /* Nexus page table */
  if ( !(ad->nexus_page_tbl = (page_t *) hostOSAllocZeroedPage()) ) {
    goto error;
    }
  where++;
  if ( !(pg->nexus_page_tbl = hostOSGetAllocedPagePhyPage(ad->nexus_page_tbl)) ) {
    goto error;
    }
  where++;

  /* Transition page table */
  if ( !(ad->transition_PT = (page_t *) hostOSAllocZeroedPage()) ) {
    goto error;
    }
  where++;
  if ( !(pg->transition_PT = hostOSGetAllocedPagePhyPage(ad->transition_PT)) ) {
    goto error;
    }
  where++;

  /* Nexus page */
  if ( !(ad->nexus = (nexus_t *) hostOSAllocZeroedPage()) ) {
    goto error;
    }
  where++;
  if ( !(pg->nexus = hostOSGetAllocedPagePhyPage(ad->nexus)) ) {
    goto error;
    }
  where++;

  /* Monitor IDT */
  if ( !(ad->idt = hostOSAllocZeroedMem(MON_IDT_PAGES*4096)) ) {
    goto error;
    }
  where++;
  if (!hostOSGetAllocedMemPhyPages(pg->idt, MON_IDT_PAGES, ad->idt, MON_IDT_SIZE)) {
    goto error;
    }
  where++;

  /* Monitor GDT */
  if ( !(ad->gdt = hostOSAllocZeroedMem(MON_GDT_PAGES*4096)) ) {
    goto error;
    }
  where++;
  if (!hostOSGetAllocedMemPhyPages(pg->gdt, MON_GDT_PAGES, ad->gdt, MON_GDT_SIZE)) {
    goto error;
    }
  where++;

  /* Monitor LDT */
  if ( !(ad->ldt = hostOSAllocZeroedMem(MON_LDT_PAGES*4096)) ) {
    goto error;
    }
  where++;
  if (!hostOSGetAllocedMemPhyPages(pg->ldt, MON_LDT_PAGES, ad->ldt, MON_LDT_SIZE)) {
    goto error;
    }
  where++;

  /* Monitor TSS */
  if ( !(ad->tss = hostOSAllocZeroedMem(MON_TSS_PAGES*4096)) ) {
    goto error;
    }
  where++;
  if (!hostOSGetAllocedMemPhyPages(pg->tss, MON_TSS_PAGES, ad->tss, MON_TSS_SIZE)) {
    goto error;
    }
  where++;

  /* Monitor IDT stubs */
  if ( !(ad->idt_stubs = hostOSAllocZeroedMem(MON_IDT_STUBS_PAGES*4096)) ) {
    goto error;
    }
  where++;
  if (!hostOSGetAllocedMemPhyPages(pg->idt_stubs, MON_IDT_STUBS_PAGES,
            ad->idt_stubs, MON_IDT_STUBS_SIZE)) {
    goto error;
    }
  where++;

  /* Get the physical pages associated with the vm_t structure. */
  if (!hostOSGetAllocedMemPhyPages(pg->vm, MAX_VM_STRUCT_PAGES, vm, sizeof(*vm))) {
    goto error;
    }
  where++;

  vm->vmState |= VMStateMemAllocated;
  return 0; /* OK. */

error:
  /* Partial allocation: release whatever was allocated so far and report
   * the failing step number to the caller. */
  hostUnallocVmPages( vm );
  return( where );
}


/* */
/* Unallocate pages/memory used by monitor */
/* */

/* Free every monitor-side allocation attached to 'vm' and zero the
 * bookkeeping structures.  Safe to call on a partially-allocated VM:
 * every pointer is checked before being freed, which is why the
 * allocation routine above uses this as its single error-exit cleanup.
 * Pinned guest physical pages are released first, via
 * hostReleasePinnedUserPages(). */
  void
hostUnallocVmPages( vm_t *vm )
{
  vm_pages_t *pg = &vm->pages;
  vm_addr_t  *ad = &vm->host.addr;

  /* Guest physical memory pages */
  if (vm->guestPhyMemAddr) {
    hostReleasePinnedUserPages(vm);
    vm->guestPhyMemAddr = 0;
    }
  vm->vmState &= ~VMStateRegisteredPhyMem; /* Bogus for now. */

  /* Monitor page directory */
  if (ad->page_dir) hostOSFreePage(ad->page_dir);
  /* Monitor page tables */
  if (ad->page_tbl) hostOSFreeMem(ad->page_tbl);
  /* Map of linear addresses of page tables mapped into monitor. */
  if (ad->page_tbl_laddr_map) hostOSFreePage(ad->page_tbl_laddr_map);
  /* Nexus page table */
  if (ad->nexus_page_tbl) hostOSFreePage(ad->nexus_page_tbl);
  /* Guest CPU state. */
  if (ad->guest_cpu) hostOSFreePage(ad->guest_cpu);
  /* Transition page table */
  if (ad->transition_PT) hostOSFreePage(ad->transition_PT);
  if (ad->log_buffer) hostOSFreeMem(ad->log_buffer);
  /* Nexus page */
  if (ad->nexus) hostOSFreePage(ad->nexus);
  /* Monitor IDT */
  if (ad->idt) hostOSFreeMem(ad->idt);
  /* Monitor GDT */
  if (ad->gdt) hostOSFreeMem(ad->gdt);
  /* Monitor LDT */
  if (ad->ldt) hostOSFreeMem(ad->ldt);
  /* Monitor TSS */
  if (ad->tss) hostOSFreeMem(ad->tss);
  /* Monitor IDT stubs */
  if (ad->idt_stubs) hostOSFreeMem(ad->idt_stubs);

  /* clear out allocated pages lists */
  mon_memzero(pg, sizeof(*pg));
  mon_memzero(ad, sizeof(*ad));
}


/* Probe the host CPU with CPUID and record vendor string, processor
 * signature and feature flags into the global hostCpuIDInfo.
 * Returns 1 if the CPU is usable; 0 if the maximum CPUID level is
 * below 1 or the TSC feature bit is absent (plex86 requires TSC). */
  unsigned
hostGetCpuCapabilities(void)
{
  Bit32u eax, ebx, ecx, edx;

  /* Get the highest allowed cpuid level */
  asm volatile (
    "xorl %%eax,%%eax\n\t"
    "cpuid"
    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
    :
    : "cc"
    );
  if (eax < 1)
    return(0); /* not enough capabilities */

  /* Copy vendor string. */
  hostCpuIDInfo.vendorDWord0 = ebx;
  hostCpuIDInfo.vendorDWord1 = edx;
  hostCpuIDInfo.vendorDWord2 = ecx;

  /* CPUID w/ EAX==1: Processor Signature & Feature Flags */
  asm volatile (
    "movl $1,%%eax\n\t"
    "cpuid"
    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
    :
    : "cc"
    );
  hostCpuIDInfo.procSignature.raw = eax;
  hostCpuIDInfo.featureFlags.raw = edx;

  /* Plex86 needs TSC */
  if (hostCpuIDInfo.featureFlags.fields.tsc==0)
    return(0);

  return(1);
}


/* Map the monitor and guest into the VM. */
/* Installs the monitor's CS/SS/TSS descriptors at fixed GDT slots 1-3,
 * maps the nexus page table at the (currently hardwired) linear address
 * 0x70000000 via a single PDE, points every IDT entry's selector at the
 * monitor CS, and fills in the nexus GDT/IDT/TSS loading info and ring-0
 * stack.  Always returns 1. */
  unsigned
hostMapMonitor(vm_t *vm)
{
  selector_t monCsSel, monSsSel, monTssSel;
  Bit32u laddr, base;
  unsigned slot;
  guest_context_t *guestContext;
  nexus_t *nexus;
  descriptor_t *gdt;

  /* For convenience, some pointers. */
  /* NOTE(review): guestContext is assigned but never read in this
   * function - possibly left over from an earlier revision. */
  guestContext = vm->host.addr.guest_context;
  nexus        = vm->host.addr.nexus;
  gdt          = vm->host.addr.gdt;

#warning "Is the GDT being cleared of old values?"
/* +++ should zero out GDT, so prev entries do not remain */

  /* =========================
   * Map in Monitor structures
   * =========================
   */

  /* CS/SS/TSS selectors:
   * For now, hardcode in monitor descriptors at slots 1,2,3.  As we
   * are only running user code in the VM, these are likely safe slots
   * as they are often used guest OSes for kernel descriptors.
   */
  monCsSel.raw     = Selector(1, 0, RPL0);
  monSsSel.raw     = Selector(2, 0, RPL0);
  monTssSel.raw    = Selector(3, 0, RPL0);

  /* Search for unused PDE for nexus PT  (fixed for now) */
  laddr = 0x70000000;
  vm->mon_pde_mask = laddr & 0xffc00000;
  vm->mon_pdi      = laddr >> 22;
  base = MON_BASE_FROM_LADDR(laddr);

  /* Map nexus into monitor/guest address space */
  vm->host.addr.page_dir[laddr >> 22] = vm->host.nexus_pde;

  /* CS/SS/TSS descriptors: Put at fixed GDT location for now.
   * CS/SS are flat 4GB (limit 0xfffff, page granularity) based at
   * 'base'; the TSS descriptor uses byte granularity and the exact
   * tss_t size. */
  SET_DESCRIPTOR(gdt[monCsSel.fields.index], base, 0xfffff,
                  D_PG, D_D32, D_AVL0, D_PRESENT, D_DPL0, D_CODE | D_READ)
  SET_DESCRIPTOR(gdt[monSsSel.fields.index], base, 0xfffff,
                  D_PG, D_D32, D_AVL0, D_PRESENT, D_DPL0, D_DATA | D_WRITE)
  SET_DESCRIPTOR(gdt[monTssSel.fields.index],
                 base + (Bit32u) vm->guest.addr.tss,
                 sizeof(tss_t)-1,
                 D_BG, 0, D_AVL0, D_PRESENT, D_DPL0, D_TSS)

  /* Fix up the selectors of all IDT entries. */
  for ( slot = 0; slot < 256; slot++ )
      vm->host.addr.idt[slot].selector = monCsSel;

  /* The monitor GDT/IDT loading info. */
  /* NOTE(review): descriptor-table limits are conventionally size-1;
   * these store MON_GDT_SIZE / MON_IDT_SIZE directly - confirm intent. */
  nexus->mon_gdt_info.base  = base + (Bit32u) vm->guest.addr.gdt;
  nexus->mon_gdt_info.limit = MON_GDT_SIZE;
  nexus->mon_idt_info.base  = base + (Bit32u) vm->guest.addr.idt;
  nexus->mon_idt_info.limit = MON_IDT_SIZE;

  /* We don't have a monitor LDT for now. */
  nexus->mon_ldt_sel = 0;

  /* The monitor TSS.  Ring-0 stack grows down from the end of the
   * nexus page. */
  nexus->mon_tss_sel = monTssSel.raw;
  vm->host.addr.tss->esp0 = ((Bit32u)vm->guest.addr.nexus) + PAGESIZE;
  vm->host.addr.tss->ss0  = monSsSel.raw;

  /* Monitor code and stack segments. */
  nexus->mon_jmp_info.selector   = monCsSel.raw;
  nexus->mon_stack_info.selector = monSsSel.raw;

  /* Monitor code/data segment base. */
  nexus->mon_base = base;

  vm->vmState |= VMStateMapMonitor;
  return(1);
}


/* Reset the shadow-paging state for 'vm': rewinds the page-table heap
 * index, clears every monitor page-directory entry except the one PDE
 * reserved for the monitor itself (vm->mon_pdi), and refreshes the
 * virtual-paging timestamp. */
  void
hostInitShadowPaging(vm_t *vm)
{
  pageEntry_t *monPDir;
  Bit32u pdi;
/*Bit32u cr3_page_index;*/
/*phy_page_usage_t *pusage;*/

#if 0
  cr3_page_index = A20Addr(vm, vm->guest_cpu.cr3) >> 12;
  if ( cr3_page_index >= vm->pages.guest_n_pages)
    xxxpanic(vm, "monPagingRemap: CR3 conflicts with monitor space\n");
#endif

  /* Reset page table heap */
  vm->ptbl_laddr_map_i = 0;

  /* Clear monitor PD except 4Meg range used by monitor */
  monPDir = vm->host.addr.page_dir;
  for (pdi=0; pdi<1024; pdi++) {
#if ANAL_CHECKS
    vm->host.addr.page_tbl_laddr_map[pdi] = -1; /* max unsigned */
#endif
    if (pdi != vm->mon_pdi)
      monPDir[pdi].raw = 0;
    }

  /* Update vpaging timestamp. */
  vm->vpaging_tsc = vm_rdtsc();

#if 0
  /* When we remap the monitor page tables, IF guest paging is
   * enabled, then mark the page containing the guest page directory
   * as such.  In non-paged mode, there is no page directory.
   */
  if (vm->guest_cpu.cr0.fields.pg) {
    pusage = &vm->pageInfo[cr3_page_index];
    pusage->tsc = vm->vpaging_tsc;
    pusage->attr.raw &= PageUsageSticky;
    pusage->attr.raw |= PageUsagePDir;
    pusage->attr.fields.access_perm = PagePermNA;
    if (pusage->attr.raw & PageBadUsage4PDir)
      xxxpanic(vm, "monPagingRemap: BadUsage4PDir\n");
    }
#endif
}


/* Release every user page pinned on behalf of this VM: all guest
 * physical-memory pages whose 'pinned' attribute is set, then the
 * guest_cpu page and the first log-buffer page.  All pages are
 * currently released as dirty (see the FIXMEs below). */
  void
hostReleasePinnedUserPages(vm_t *vm)
{
  unsigned ppi;
  unsigned dirty;
  unsigned nPages;
  Bit32u kernelAddr;

  /* Unpin the pages associate with the guest physical memory. */
  nPages = vm->pages.guest_n_pages;
  for (ppi=0; ppi<nPages; ppi++) {
    if ( vm->pageInfo[ppi].attr.fields.pinned ) {
      void *osSpecificPtr;

      osSpecificPtr = (void *) vm->hostStructPagePtr[ppi];
#warning "Conditionalize page dirtying before page release."
      dirty = 1; /* FIXME: 1 for now. */
      hostOSUnpinUserPage(vm,
          vm->guestPhyMemAddr + (ppi<<12),
          osSpecificPtr,
          ppi,
          0 /* There was no host kernel addr mapped for this page. */,
          dirty);
      vm->pageInfo[ppi].attr.fields.pinned = 0;
      }
    }

  /* Unpin the pages associated with the guest_cpu area. */
  kernelAddr = (Bit32u) vm->host.addr.guest_cpu;
  hostOSUnpinUserPage(vm,
      0, /* User space address. */
      vm->pages.guest_cpu_hostOSPtr,
      vm->pages.guest_cpu,
      &kernelAddr,
      1 /* Dirty. */);

  /* Unpin the pages associated with the log buffer area. */
  kernelAddr = (Bit32u) vm->host.addr.log_buffer;
  hostOSUnpinUserPage(vm,
      0, /* User space address. */
      vm->pages.log_buffer_hostOSPtr[0],
      vm->pages.log_buffer[0],
      &kernelAddr,
      1 /* Dirty. */);
#warning "User space address is passed as 0 for now..."
}


/* Pin the guest physical page 'reqGuestPPI' into host memory, evicting
 * the oldest pinned page (FIFO via guestPhyPagePinQueue) once the
 * MaxPhyPagesPinned cap is reached.  On success the page is marked
 * pinned, its host physical page index is recorded, and the queue is
 * advanced; returns 1.  Returns 0 if the host OS refuses to pin. */
  unsigned
hostHandlePagePinRequest(vm_t *vm, Bit32u reqGuestPPI)
{
  Bit32u hostPPI;
  unsigned qIndex;

#warning "We must not unpin open pages (for page walking) here."

  if (vm->guestPhyPagePinQueue.nEntries < MaxPhyPagesPinned) {
    /* There is room in the Q for another entry - we have not reached
     * the upper limit of allowable number of pinned pages.
     */
    qIndex = vm->guestPhyPagePinQueue.nEntries;
    }
  else {
    unsigned dirty;
    Bit32u unpinGuestPPI;

    /* There is no room in the Q for another entry - we have reached
     * the upper limit of allowable number of pinned pages.  We must
     * first unpin a page to free up the limit, then we can pin the
     * requested page.  This keeps plex86 from pinning an unconstrained
     * number of pages at one time.
     */
    qIndex = vm->guestPhyPagePinQueue.tail;
    dirty = 1; /* FIXME: 1 for now. */
    unpinGuestPPI = vm->guestPhyPagePinQueue.ppi[qIndex];
    hostOSUnpinUserPage(vm,
        vm->guestPhyMemAddr + (unpinGuestPPI<<12),
        vm->hostStructPagePtr[unpinGuestPPI],
        unpinGuestPPI,
        0 /* There was no host kernel addr mapped for this page. */,
        dirty);
    vm->pageInfo[unpinGuestPPI].attr.fields.pinned = 0;
    }

  /* Pin the requested guest physical page in the host OS. */
  if ( !hostOSGetAndPinUserPage(vm,
            vm->guestPhyMemAddr + (reqGuestPPI<<12),
            &vm->hostStructPagePtr[reqGuestPPI],
            &hostPPI,
            0 /* Don't need a host kernel address. */
            ) ) {
    hostOSPrint("handlePagePinReq: request to pin failed.\n");
    return(0); /* Fail. */
    }

  /* Pinning activities have succeeded.  Mark this physical page as being
   * pinnned, and store it's physical address.
   */
  vm->pageInfo[reqGuestPPI].attr.fields.pinned = 1;
  vm->pageInfo[reqGuestPPI].hostPPI = hostPPI;

  /* Now add this entry to the Q. */
  vm->guestPhyPagePinQueue.ppi[qIndex] = reqGuestPPI;
  if (vm->guestPhyPagePinQueue.nEntries < MaxPhyPagesPinned) {
    vm->guestPhyPagePinQueue.nEntries++;
    vm->guestPhyPagePinQueue.tail =
        vm->guestPhyPagePinQueue.nEntries % MaxPhyPagesPinned;
    }
  else {
    /* Leave .nEntries at the maximum value - Q is full. */
    vm->guestPhyPagePinQueue.tail =
        (vm->guestPhyPagePinQueue.tail + 1) % MaxPhyPagesPinned;
    }

  return(1); /* OK. */
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -