⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 pmap_bootstrap.c

📁 早期freebsd实现
💻 C
📖 第 1 页 / 共 2 页
字号:
	}
	/*
	 * Invalidate all but the last remaining entries in both.
	 */
	epte = &((u_int *)kptmpa)[NPTEPG-1];
	while (pte < epte) {
		*ste++ = SG_NV;
		*pte++ = PG_NV;
	}
	/*
	 * Initialize the last to point to the page
	 * table page allocated earlier.
	 */
	*ste = lkptpa | SG_RW | SG_V;
	*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG-1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO)
	 */
	pte = &((u_int *)kptpa)[hp300_btop(KERNBASE)];
	epte = &pte[hp300_btop(hp300_trunc_page(&etext))];
#ifdef KGDB
	protopte = firstpa | PG_RW | PG_V;	/* XXX RW for now */
#else
	protopte = firstpa | PG_RO | PG_V;
#endif
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[hp300_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 * (PG_CCB is a 68040-only PTE cache-mode bit).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */
	pte = (u_int *)iiopa;
	epte = (u_int *)eiopa;
	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Calculate important exported kernel virtual addresses.
	 *
	 * NOTE(review): RELOC() presumably accesses each global at its
	 * load-time (physical) address because the MMU is not yet turned
	 * on at this point -- confirm against the macro definition.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, struct ste *) =
		(struct ste *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, struct pte *) =
		(struct pte *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	RELOC(Sysmap, struct pte *) =
		(struct pte *)hp300_ptob(nptpages * NPTEPG);
	/*
	 * Umap: first of UPAGES PTEs (in Sysmap) for fixed-address u-area.
	 * HIGHPAGES PTEs from the end of Sysmap.
	 */
	RELOC(Umap, vm_offset_t) =
		(vm_offset_t)RELOC(Sysmap, struct pte *) +
			(HP_MAX_PTSIZE - HIGHPAGES * sizeof(struct pte));
	/*
	 * intiobase, intiolimit: base and end of internal (DIO) IO space.
	 * IIOMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(intiobase, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - (IIOMAPSIZE+EIOMAPSIZE));
	RELOC(intiolimit, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * extiobase: base of external (DIO-II) IO space.
	 * EIOMAPSIZE pages at the end of the static kernel page table.
	 * (Note extiobase == intiolimit: external IO starts exactly where
	 * internal IO ends.)
	 */
	RELOC(extiobase, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vm_offset_t) =
		(vm_offset_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vm_offset_t) =
		(vm_offset_t)RELOC(intiobase, char *) + MMUBASE;
	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Validate PTEs in Sysmap corresponding to the u-area (Umap)
	 * which are HIGHPAGES from the end of the last kernel PT page
	 * allocated earlier.
	 */
	pte = &((u_int *)lkptpa)[NPTEPG - HIGHPAGES];
	epte = &pte[UPAGES];
	protopte = p0upa | PG_RW | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here -- they are reused as
	 * plain word pointers walking the u-area's physical pages.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + UPAGES*NBPG);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);
	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 */
	RELOC(avail_start, vm_offset_t) = nextpa;
	RELOC(avail_end, vm_offset_t) =
		hp300_ptob(RELOC(maxmem, int))
			/* XXX allow for msgbuf */
			- hp300_round_page(sizeof(struct msgbuf));
	RELOC(mem_size, vm_size_t) = hp300_ptob(RELOC(physmem, int));
	RELOC(virtual_avail, vm_offset_t) =
		VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
	RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;
#ifdef HAVEVAC
	/*
	 * Determine VA aliasing distance if any
	 * (only the virtually-addressed external caches of the 320/350
	 * need an alias mask).
	 */
	if (RELOC(ectype, int) == EC_VIRT)
		if (RELOC(machineid, int) == HP_320)
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
		else if (RELOC(machineid, int) == HP_350)
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
#endif
	/*
	 * Initialize protection array: maps each VM_PROT_* combination
	 * (used as an index) to the m68k PTE protection bits.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		register int *kp;

		kp = &RELOC(protection_codes, int);
		/* any combination including WRITE gets PG_RW; any other
		 * non-empty combination gets PG_RO. */
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}
	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, struct ste *);
		kpm->pm_ptab = RELOC(Sysmap, struct pte *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (struct ste *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			register int num;

			/* mark everything free, then clear the used slots */
			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			/* slots beyond MAXKL2SIZE don't exist; mark them
			 * used so they are never handed out */
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}
	/*
	 * Allocate some fixed, special purpose kernel virtual addresses:
	 * two scratch-map windows (CADDR1/CADDR2), the general-purpose
	 * vmmap window, the LED page, and the message buffer.
	 */
	{
		vm_offset_t va = RELOC(virtual_avail, vm_offset_t);

		RELOC(CADDR1, caddr_t) = (caddr_t)va;
		va += HP_PAGE_SIZE;
		RELOC(CADDR2, caddr_t) = (caddr_t)va;
		va += HP_PAGE_SIZE;
		RELOC(vmmap, caddr_t) = (caddr_t)va;
		va += HP_PAGE_SIZE;
		RELOC(ledbase, caddr_t) = (caddr_t)va;
		va += HP_PAGE_SIZE;
		RELOC(msgbufp, struct msgbuf *) = (struct msgbuf *)va;
		va += HP_PAGE_SIZE;
		RELOC(virtual_avail, vm_offset_t) = va;
	}
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -