📄 machine.cxx
extern kva_t heap_start;
extern kva_t heap_end;
extern kva_t heap_defined;
extern kva_t heap_bound;

  printf("Last physpage at 0x%08x\n", EROS_PAGE_SIZE * physPages);

  heap_start = PTOV(EROS_PAGE_SIZE * heap_first_page);
  heap_end = heap_start;
  heap_defined = heap_start;

  /* BELOW THIS POINT we do not bother to duplicate mappings, since by
   * the time any of these are needed we have already loaded the
   * segment structures. */

  /* We do not need to allocate the PAGES for the heap content, but we
   * *do* need to allocate the page tables that will map those pages.
   * To do this, perform a conservative computation of the size of the
   * heap that will be needed.  This relies on the fact that PhysMem
   * has already been initialized, so we know at this point how many
   * total physical pages are present.
   *
   * We compute below a generously conservative approximation to the
   * largest likely heap so that we can preallocate page tables for
   * the heap. */
  {
    kpsize_t usablePages = physPages;	/* well, close */

    heap_bound = 0;

    /* We will allocate one node per page: */
    heap_bound += usablePages * sizeof(Node);

    /* We will allocate four depend entries per node: */
    heap_bound += usablePages * (4 * sizeof(KeyDependEntry));

    /* Allow (conservatively) four ObjectHeader structures per page: */
    heap_bound += usablePages * (4 * sizeof(ObjectHeader));

    /* Oink oink oink: */
    heap_bound += 1024 * 1024;

    /* Finally, allow for the object headers we will need for card
     * memory: */
    heap_bound += ((KTUNE_MAX_CARDMEM * 1024)/4096) * sizeof(ObjectHeader);

    /* Round up to the nearest page: */
    heap_bound += (EROS_PAGE_SIZE - 1);
    heap_bound -= (heap_bound % EROS_PAGE_SIZE);

    assert((heap_bound % EROS_PAGE_SIZE) == 0);
    assert((heap_start % EROS_PAGE_SIZE) == 0);

    unsigned nPages = (heap_bound / EROS_PAGE_SIZE);
    printf("Heap contains %d pages\n", nPages);

    heap_bound += heap_start;

    /* Place the heap right at the end of the physical page map.  We
     * build this mapping for the side effect of ensuring that mapping
     * entries for the heap are actually present. */
    for (unsigned va = heap_start; va < heap_bound; va += EROS_PAGE_SIZE)
      MapPageAt(KVTOL(va), 0, PTE_W|globalPage);
  }
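  /* To get a feel for the sizing above: on a hypothetical 128 MiB
   * machine (32768 usable pages), and assuming purely illustrative
   * structure sizes of sizeof(Node) == 256, sizeof(KeyDependEntry) == 16
   * and sizeof(ObjectHeader) == 32, the bound works out to roughly
   *
   *     32768*256 + 32768*4*16 + 32768*4*32 + 1 MiB of slack
   *       == 8 MiB + 2 MiB + 4 MiB + 1 MiB == 15 MiB,
   *
   * i.e. about 3840 heap pages get page-table coverage preallocated
   * here, before the card-memory headers are added in. */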
  /* Now set up the console frame buffer, if any.  Do this only if the
   * start of the frame buffer falls above physPages, as we may be
   * using a crippleware(tm) video card, and if so the VGA region was
   * already mapped as part of the physical page mappings. */
  if (BootInfoPtr->consInfo) {
    ConsoleInfo *ci = BootInfoPtr->consInfo;

    kpa_t pbase = ci->frameBuffer;
    kpa_t ptop = ((kpa_t)ci->frameBuffer) + ci->bytesPerScanLine * ci->Ylimit;
    ptop = align_up(ptop, EROS_PAGE_SIZE);

    kpa_t paddr = align_down(pbase, EROS_PAGE_SIZE);
    kpa_t fb_len = ptop - paddr;	/* length of bracketing page range */

    assert((fb_len % EROS_PAGE_SIZE) == 0);

    kva_t fb_start = heap_bound;
    kva_t fb_bound = fb_start + fb_len;

    /* Place the frame buffer at the end of the heap: */
    for (unsigned va = fb_start; va < fb_bound;
         va += EROS_PAGE_SIZE, paddr += EROS_PAGE_SIZE) {
      unsigned vaddr = KVTOL(va);

      assert((paddr & EROS_PAGE_MASK) == 0);

      uint32_t mode = PTE_W|PTE_V|PTE_WT;

      MapPageAt(vaddr, paddr, mode | globalPage);

      /* Make the one at the physically congruent address non-global,
       * just in case: */
      MapPageAt(paddr, paddr, mode);

      Machine::mappedFrameBuffer = fb_start + (pbase & EROS_PAGE_MASK);
    }

    printf("Framebuffer (ci = 0x%08x pa = 0x%08x) mapped at "
           "0x%08x (linear 0x%08x), len 0x%08x\n",
           ci, (unsigned long) ci->frameBuffer,
           Machine::mappedFrameBuffer,
           KVTOL(Machine::mappedFrameBuffer), fb_len);
  }

  /* BELOW THIS POINT we do not use the /globalPage/ bit, as these
   * mappings are per-IPC and MUST get flushed when a context switch
   * occurs. */

  /* Reserve directory entries for the fast send buffer: */
  PTE::kern_fstbuf = &pageDir[KVTOL(KVA_FSTBUF) >> 22];

  /* Set up mapping slots for the receive buffer page: */
  pageTab = KPAtoP(PTE *, PhysMem::Alloc(EROS_PAGE_SIZE, &PhysMem::pages));
  assert(((uint32_t)pageTab & EROS_PAGE_MASK) == 0);
  bzero(pageTab, EROS_PAGE_SIZE);

  /* I AM NO LONGER CONVINCED THAT THIS IS NECESSARY in the contiguous
   * string case.  Copying the user PDEs should be sufficient in that
   * situation, and if we really needed to copy the PTEs we weren't
   * going to see any TLB locality in any case. */

  PTE_SET(pageDir[KVTOL(KVA_PTEBUF) >> 22], (VTOP(pageTab) & PTE_FRAMEBITS));
  PTE_SET(pageDir[KVTOL(KVA_PTEBUF) >> 22], PTE_W|PTE_V);
  /* Following is harmless on pre-pentium: */
  PTE_CLR(pageDir[KVTOL(KVA_PTEBUF) >> 22], PTE_PGSZ);

  PTE *pte = &pageTab[(KVTOL(KVA_PTEBUF) >> 12) & 0x3ffu];
  PTE::kern_ptebuf = pte;

  /* 64K message limit = 16 pages + 1 for unaligned. */
  for (int j = 0; j < 17; j++) {
    PTE_SET(*pte, PTE_W|PTE_V|PTE_DRTY|PTE_ACC);
#ifdef WRITE_THROUGH
    if (cpuType >= 5)
      PTE_SET(*pte, PTE_WT);
#endif
    pte++;
  }

#ifdef OPTION_SMALL_SPACES
  MakeSmallSpaces();
#endif

#if 0
  printf("Built Kernel Page Map!\n");
#endif
}

void
Machine::MapHeapPage(kva_t va, kpa_t paddr)
{
  assert((paddr & EROS_PAGE_MASK) == 0);

  uint32_t mode = PTE_V|PTE_W;

#ifndef NO_GLOBAL_PAGES
  if (CpuIdHi > 1 && (CpuFeatures & CPUFEAT_PGE))
    mode |= PTE_GLBL;
#endif
#ifdef WRITE_THROUGH
  mode |= PTE_WT;
#endif

  MapPageAt(KVTOL(va), paddr, mode);
}

static inline uint32_t
BcdToBin(uint32_t val)
{
  return (val & 15) + (val >> 4) * 10;
}

inline bool
IsLeapYear(uint32_t yr)
{
  if (yr % 400 == 0)
    return true;
  if (yr % 100 == 0)
    return false;
  if (yr % 4 == 0)
    return true;
  return false;
}
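#if 0
/* Quick sanity examples for the two helpers above (illustrative only,
 * not part of the build; the name CheckClockHelpers is hypothetical): */
static void
CheckClockHelpers()
{
  assert(BcdToBin(0x59) == 59);	/* BCD 0x59 -> decimal 59 */
  assert(BcdToBin(0x07) == 7);
  assert(IsLeapYear(2000));	/* divisible by 400 */
  assert(!IsLeapYear(1900));	/* divisible by 100 but not 400 */
  assert(IsLeapYear(1996));	/* divisible by 4 */
}
#endif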
inline static uint32_t
yeartoday(unsigned year)
{
  return (IsLeapYear(year) ? 366 : 365);
}

void
Machine::GetHardwareTimeOfDay(TimeOfDay& tod)
{
  static uint32_t month_length[12] = {
    31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
  };

  tod.sec = CMOS::cmosByte(0x0);
  tod.min = CMOS::cmosByte(0x2);
  tod.hr = CMOS::cmosByte(0x4);
  tod.dayOfWeek = CMOS::cmosByte(0x6);
  tod.dayOfMonth = CMOS::cmosByte(0x7);
  tod.month = CMOS::cmosByte(0x8);
  tod.year = CMOS::cmosByte(0x9);

  tod.sec = BcdToBin(tod.sec);
  tod.min = BcdToBin(tod.min);
  tod.hr = BcdToBin(tod.hr);
  tod.dayOfMonth = BcdToBin(tod.dayOfMonth);
  tod.dayOfWeek = BcdToBin(tod.dayOfWeek);
  tod.month = BcdToBin(tod.month);
  tod.year = BcdToBin(tod.year);

  if (tod.year < 70)		/* correct for y2k rollover */
    tod.year += 100;
  tod.year += 1900;		/* correct for century. */

  /* CMOS months are 1-based; count only the months that have already
   * completed. */
  tod.dayOfYear = 0;
  for (uint32_t i = 1; i < tod.month; i++)
    tod.dayOfYear += month_length[i-1];
  tod.dayOfYear += tod.dayOfMonth;

  if (tod.month > 2 && IsLeapYear(tod.year))
    tod.dayOfYear++;

  /* Compute coordinated universal time: */
  tod.utcDay = 0;
  for (uint32_t yr = 1970; yr < tod.year; yr++)
    tod.utcDay += yeartoday(yr);
  tod.utcDay += tod.dayOfYear;
}

/* This cannot be run until the kernel has its own map -- the gift
 * map we get from the bootstrap code at the moment is too small. */
uint32_t
Machine::BusArchitecture()
{
  static uint32_t busType = bt_Unknown;

  if (busType != bt_Unknown)
    return busType;

  busType = bt_ISA;

  if (memcmp((char*)0x0FFFD9, "EISA", 4) == 0)
    busType = bt_EISA;

  if (PciBios::Present())
    busType = bt_PCI;

  return busType;
}

void
Machine::SpinWaitMs(uint32_t ms)
{
  uint64_t ticks = MillisecondsToTicks(ms);
  uint64_t start = SysTimer::Now();
  uint64_t end = start + ticks + 1;

  while (SysTimer::Now() < end)
    ;
}

#if 0
/* Map the passed physical pages starting at the designated kernel
 * virtual address: */
void
Machine::MapBuffer(kva_t va, kpa_t p0, kpa_t p1)
{
  const uint32_t ndx0 = (KVTOL(va) >> 22) & 0x3ffu;
  const uint32_t ndx1 = (KVTOL(va) >> 12) & 0x3ffu;

  kpa_t maptbl_paddr;

  __asm__ __volatile__("movl %%cr3, %0"
		       : "=r" (maptbl_paddr)
		       : /* no inputs */);

  PTE *pageTbl = (PTE*) PTOV(maptbl_paddr);
  pageTbl = (PTE*) PTOV( (pageTbl[ndx0].AsWord() & ~EROS_PAGE_MASK) );

#ifdef MSGDEBUG
  printf("MapBuffer: pg tb = 0x%08x p0=0x%08x\n", pageTbl, p0);
#endif

  PTE *pte = pageTbl + ndx1;

  /* These PTE's are already marked present, writable, etc. by
   * construction in the kernel mapping table - just update the
   * frames. */
  (*pte) = p0;
  pte++;

  if (p1) {
    (*pte) = p1;
    PTE_SET(*pte, PTE_V);
  }
  else
    PTE_CLR(*pte, PTE_V);

  if (CpuType > 3) {
    Machine::FlushTLB(KVTOL(va));
    Machine::FlushTLB(KVTOL(va + 4096));
  }
  else
    Machine::FlushTLB();
}
#endif

/* non-static because compiler cannot detect ASM usage and complains */
uint32_t BogusIDTDescriptor[2] = { 0, 0 };

void
Machine::HardReset()
{
  /* Load an IDT with no valid entries, which will force a triple
   * fault (and therefore a processor reset) when the next interrupt
   * occurs: */
  __asm__ __volatile__("lidt BogusIDTDescriptor"
		       : /* no output */
		       : /* no input */
		       : "memory");

  /* now force an interrupt: */
  __asm__ ("int $0x30");
}

/* Note that the RETURN from this procedure flushes the I prefetch
 * cache, which is why it is not inline. */
void
Machine::SetMappingTable(kpa_t pAddr)
{
  __asm__ __volatile__("movl %0,%%cr3"
		       : /* no outputs */
		       : "r" (pAddr));
}

kpa_t
Machine::GetMappingTable()
{
  kpa_t result;

  __asm__ __volatile__("movl %%cr3,%0"
		       : "=r" (result));

  return result;
}
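#if 0
/* Illustrative use of the two CR3 accessors above (not part of the
 * build; the name FlushAllMappings_sketch is hypothetical): rewriting
 * the current value back into CR3 is the classic pre-global-page way
 * to flush the entire TLB. */
static void
FlushAllMappings_sketch()
{
  kpa_t curMap = Machine::GetMappingTable();	/* read CR3 */
  Machine::SetMappingTable(curMap);		/* reload it; TLB is flushed */
}
#endif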
void
Machine::EnableVirtualMapping()
{
  __asm__ __volatile__("movl %%cr0,%%eax\n\t"
		       "orl $0x80010000,%%eax\n\t"
		       "movl %%eax,%%cr0\n\t"	/* Turn on PG,WP bits. */
		       : /* no outputs */
		       : /* no inputs */
		       : "ax" /* eax smashed */);
}

#ifdef EROS_HAVE_FPU
void
Machine::InitializeFPU()
{
  __asm__ __volatile__("fninit\n\t"
		       "smsw %%ax\n\t"
		       "orw $0x1a,%%ax\n\t"
		       "lmsw %%ax\n\t"	/* Turn on TS,MP bits. */
		       : /* no outputs */
		       : /* no inputs */
		       : "ax" /* eax smashed */);
}

void
Machine::DisableFPU()
{
  __asm__ __volatile__("smsw %%ax\n\t"
		       "orw $0xa,%%ax\n\t"
		       "lmsw %%ax\n\t"	/* Turn on TS,MP bits. */
		       : /* no outputs */
		       : /* no inputs */
		       : "ax" /* eax smashed */);
}

void
Machine::EnableFPU()
{
  __asm__ __volatile__("smsw %%ax\n\t"
		       "andl $0xfff1,%%eax\n\t"
		       "lmsw %%ax\n\t"	/* Turn off TS,EM,MP bits. */
		       : /* no outputs */
		       : /* no inputs */
		       : "ax" /* eax smashed */);
}
#endif

bool
Machine::IsDebugBoot()
{
#ifdef DBG_WILD_PTR
  return true;
#endif
  if (BootInfoPtr->volFlags & VolHdr::VF_DEBUG)
    return true;

  return false;
}

extern "C" {
  extern uint32_t CpuType;
  extern const char CpuVendor[];
}

const char *
Machine::GetCpuVendor()
{
  return CpuVendor;
}

uint32_t
Machine::GetCpuType()
{
  return CpuType;
}

/* The following would probably be easier to do in assembler, but
 * updating the mode table is a lot easier to do in C++.  The second
 * argument says whether we wish to count cycles (1) or events (0).
 * Generally we will want events. */
extern "C" {
  extern void Pentium_SetCounterMode(uint32_t mode, uint32_t wantCy);
  extern void PentiumPro_SetCounterMode(uint32_t mode, uint32_t wantCy);
}

static const char * ModeNames[SysTrace_Mode_NumCommonMode] = {
  "Cycles",
  "Instrs",
  "DTLB",
  "ITLB",
  "Dmiss",
  "Imiss",
  "Dwrtbk",
  "Dfetch",
  "Ifetch",
  "Branch",
  "TkBrnch",
};

static uint32_t PentiumModes[SysTrace_Mode_NumCommonMode] = {
  0x0,		/* SysTrace_Mode_Cycles */
  0x16,		/* SysTrace_Mode_Instrs */
  0x02,		/* SysTrace_Mode_DTLB */
  0x0d,		/* SysTrace_Mode_ITLB */
  0x29,		/* SysTrace_Mode_Dmiss */
  0x0e,		/* SysTrace_Mode_Imiss */
  0x06,		/* SysTrace_Mode_Dwrtbk */
  0x28,		/* SysTrace_Mode_Dfetch */
  0x0c,		/* SysTrace_Mode_Ifetch */
  0x12,		/* SysTrace_Mode_Branches */
  0x14,		/* SysTrace_Mode_BrTaken */
};

static uint32_t PentiumProModes[SysTrace_Mode_NumCommonMode] = {
  0x79,		/* SysTrace_Mode_Cycles */
  0xc0,		/* SysTrace_Mode_Instrs */
  0x0,		/* no analog */	/* SysTrace_Mode_DTLB */
  0x85,		/* SysTrace_Mode_ITLB */
  0x45,		/* SysTrace_Mode_Dmiss */
  0x81,		/* SysTrace_Mode_Imiss */
  0x0,		/* no analog */	/* SysTrace_Mode_Dwrtbk */
  0x43,		/* SysTrace_Mode_Dfetch */
  0x80,		/* SysTrace_Mode_Ifetch */
  0xc4,		/* SysTrace_Mode_Branches */
  0xc9,		/* SysTrace_Mode_BrTaken */
};

/* Other possible modes of interest:
 *
 *   Pentium  Ppro   What
 *   0x17     0x0    V-pipe instrs
 *   0x19     0x04   WB-full stalls
 *   0x1a     ??     mem read stalls
 *   0x13     0xca   btb hits (ppro: retired taken mispredicted branches)
 *   0x0      0x65   Dcache reads (ppro: burst read transactions)
 *   0x1f     ??     Agen interlocks
 */

bool
Machine::SetCounterMode(uint32_t mode)
{
  uint32_t wantcy = (mode == SysTrace_Mode_Cycles) ? 1 : 0;

  if (mode >= SysTrace_Mode_NumCommonMode)
    return false;

  if (CpuType == 5) {
    Pentium_SetCounterMode(PentiumModes[mode], wantcy);
  }
  else if (CpuType == 6) {
    PentiumPro_SetCounterMode(PentiumProModes[mode], wantcy);
  }

  return true;
}

const char *
Machine::ModeName(uint32_t mode)
{
  if (mode >= SysTrace_Mode_NumCommonMode)
    return "???";

  return ModeNames[mode];
}
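#if 0
/* Illustrative use of the counter interface above (not part of the
 * build; the name TraceSetup_sketch is hypothetical): select a
 * common-mode event and report the name that will appear in the
 * trace output. */
static void
TraceSetup_sketch(uint32_t mode)
{
  if (Machine::SetCounterMode(mode))
    printf("Counting: %s\n", Machine::ModeName(mode));
  else
    printf("Unknown trace mode %d\n", mode);
}
#endif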