
mmu.c

This operating system, which also came out of Bell Labs and shares a lineage with UNIX, has a clean design and implementation that make it easy to study and understand.
Language: C
/*
 * Device mappings are shared by all procs and processors and
 * live in the virtual range VMAP to VMAP+VMAPSIZE.  The master
 * copy of the mappings is stored in mach0->pdb, and they are
 * paged in from there as necessary by vmapsync during faults.
 */
static Lock vmaplock;

static int findhole(ulong *a, int n, int count);
static ulong vmapalloc(ulong size);
static void pdbunmap(ulong*, ulong, int);

/*
 * Add a device mapping to the vmap range.
 */
void*
vmap(ulong pa, int size)
{
	int osize;
	ulong o, va;

	/*
	 * might be asking for less than a page.
	 */
	osize = size;
	o = pa & (BY2PG-1);
	pa -= o;
	size += o;
	size = ROUND(size, BY2PG);

	if(pa == 0){
		print("vmap pa=0 pc=%#.8lux\n", getcallerpc(&pa));
		return nil;
	}
	ilock(&vmaplock);
	if((va = vmapalloc(size)) == 0
	|| pdbmap(MACHP(0)->pdb, pa|PTEUNCACHED|PTEWRITE, va, size) < 0){
		iunlock(&vmaplock);
		return 0;
	}
	iunlock(&vmaplock);
	/* avoid trap on local processor
	for(i=0; i<size; i+=4*MB)
		vmapsync(va+i);
	*/
	USED(osize);
//	print("  vmap %#.8lux %d => %#.8lux\n", pa+o, osize, va+o);
	return (void*)(va + o);
}

static int
findhole(ulong *a, int n, int count)
{
	int have, i;

	have = 0;
	for(i=0; i<n; i++){
		if(a[i] == 0)
			have++;
		else
			have = 0;
		if(have >= count)
			return i+1 - have;
	}
	return -1;
}

/*
 * Look for free space in the vmap.
 */
static ulong
vmapalloc(ulong size)
{
	int i, n, o;
	ulong *vpdb;
	int vpdbsize;

	vpdb = &MACHP(0)->pdb[PDX(VMAP)];
	vpdbsize = VMAPSIZE/(4*MB);

	if(size >= 4*MB){
		n = (size+4*MB-1) / (4*MB);
		if((o = findhole(vpdb, vpdbsize, n)) != -1)
			return VMAP + o*4*MB;
		return 0;
	}
	n = (size+BY2PG-1) / BY2PG;
	for(i=0; i<vpdbsize; i++)
		if((vpdb[i]&PTEVALID) && !(vpdb[i]&PTESIZE))
			if((o = findhole(KADDR(PPN(vpdb[i])), WD2PG, n)) != -1)
				return VMAP + i*4*MB + o*BY2PG;
	if((o = findhole(vpdb, vpdbsize, 1)) != -1)
		return VMAP + o*4*MB;

	/*
	 * could span page directory entries, but not worth the trouble.
	 * not going to be very much contention.
	 */
	return 0;
}

/*
 * Remove a device mapping from the vmap range.
 * Since pdbunmap does not remove page tables, just entries,
 * the call need not be interlocked with vmap.
 */
void
vunmap(void *v, int size)
{
	int i;
	ulong va, o;
	Mach *nm;
	Proc *p;

	/*
	 * might not be aligned
	 */
	va = (ulong)v;
	o = va&(BY2PG-1);
	va -= o;
	size += o;
	size = ROUND(size, BY2PG);

	if(size < 0 || va < VMAP || va+size > VMAP+VMAPSIZE)
		panic("vunmap va=%#.8lux size=%#x pc=%#.8lux\n",
			va, size, getcallerpc(&va));

	pdbunmap(MACHP(0)->pdb, va, size);

	/*
	 * Flush mapping from all the tlbs and copied pdbs.
	 * This can be (and is) slow, since it is called only rarely.
	 */
	for(i=0; i<conf.nproc; i++){
		p = proctab(i);
		if(p->state == Dead)
			continue;
		if(p != up)
			p->newtlb = 1;
	}
	for(i=0; i<conf.nmach; i++){
		nm = MACHP(i);
		if(nm != m)
			nm->flushmmu = 1;
	}
	flushmmu();
	for(i=0; i<conf.nmach; i++){
		nm = MACHP(i);
		if(nm != m)
			while((active.machs&(1<<nm->machno)) && nm->flushmmu)
				;
	}
}

/*
 * Add kernel mappings for pa -> va for a section of size bytes.
 */
int
pdbmap(ulong *pdb, ulong pa, ulong va, int size)
{
	int pse;
	ulong pgsz, *pte, *table;
	ulong flag, off;

	flag = pa&0xFFF;
	pa &= ~0xFFF;

	if((MACHP(0)->cpuiddx & 0x08) && (getcr4() & 0x10))
		pse = 1;
	else
		pse = 0;

	for(off=0; off<size; off+=pgsz){
		table = &pdb[PDX(va+off)];
		if((*table&PTEVALID) && (*table&PTESIZE))
			panic("vmap: va=%#.8lux pa=%#.8lux pde=%#.8lux",
				va+off, pa+off, *table);

		/*
		 * Check if it can be mapped using a 4MB page:
		 * va, pa aligned and size >= 4MB and processor can do it.
		 */
		if(pse && (pa+off)%(4*MB) == 0 && (va+off)%(4*MB) == 0 && (size-off) >= 4*MB){
			*table = (pa+off)|flag|PTESIZE|PTEVALID;
			pgsz = 4*MB;
		}else{
			pte = mmuwalk(pdb, va+off, 2, 1);
			if(*pte&PTEVALID)
				panic("vmap: va=%#.8lux pa=%#.8lux pte=%#.8lux",
					va+off, pa+off, *pte);
			*pte = (pa+off)|flag|PTEVALID;
			pgsz = BY2PG;
		}
	}
	return 0;
}

/*
 * Remove mappings.  Must already exist, for sanity.
 * Only used for kernel mappings, so okay to use KADDR.
 */
static void
pdbunmap(ulong *pdb, ulong va, int size)
{
	ulong vae;
	ulong *table;

	vae = va+size;
	while(va < vae){
		table = &pdb[PDX(va)];
		if(!(*table & PTEVALID)){
			panic("vunmap: not mapped");
			/*
			va = (va+4*MB-1) & ~(4*MB-1);
			continue;
			*/
		}
		if(*table & PTESIZE){
			*table = 0;
			va = (va+4*MB-1) & ~(4*MB-1);
			continue;
		}
		table = KADDR(PPN(*table));
		if(!(table[PTX(va)] & PTEVALID))
			panic("vunmap: not mapped");
		table[PTX(va)] = 0;
		va += BY2PG;
	}
}

/*
 * Handle a fault by bringing vmap up to date.
 * Only copy pdb entries and they never go away,
 * so no locking needed.
 */
int
vmapsync(ulong va)
{
	ulong entry, *table;

	if(va < VMAP || va >= VMAP+VMAPSIZE)
		return 0;

	entry = MACHP(0)->pdb[PDX(va)];
	if(!(entry&PTEVALID))
		return 0;
	if(!(entry&PTESIZE)){
		/* make sure entry will help the fault */
		table = KADDR(PPN(entry));
		if(!(table[PTX(va)]&PTEVALID))
			return 0;
	}
	vpd[PDX(va)] = entry;
	/*
	 * TLB doesn't cache negative results, so no flush needed.
	 */
	return 1;
}

/*
 * KMap is used to map individual pages into virtual memory.
 * It is rare to have more than a few KMaps at a time (in the
 * absence of interrupts, only two at a time are ever used,
 * but interrupts can stack).  The mappings are local to a process,
 * so we can use the same range of virtual address space for
 * all processes without any coordination.
 */
#define kpt (vpt+VPTX(KMAP))
#define NKPT (KMAPSIZE/BY2PG)

KMap*
kmap(Page *page)
{
	int i, o, s;

	if(up == nil)
		panic("kmap: up=0 pc=%#.8lux", getcallerpc(&page));
	if(up->mmupdb == nil)
		upallocpdb();
	if(up->nkmap < 0)
		panic("kmap %lud %s: nkmap=%d", up->pid, up->text, up->nkmap);

	/*
	 * Splhi shouldn't be necessary here, but paranoia reigns.
	 * See comment in putmmu above.
	 */
	s = splhi();
	up->nkmap++;
	if(!(vpd[PDX(KMAP)]&PTEVALID)){
		/* allocate page directory */
		if(KMAPSIZE > BY2XPG)
			panic("bad kmapsize");
		if(up->kmaptable != nil)
			panic("kmaptable");
		spllo();
		up->kmaptable = newpage(0, 0, 0);
		splhi();
		vpd[PDX(KMAP)] = up->kmaptable->pa|PTEWRITE|PTEVALID;
		flushpg((ulong)kpt);
		memset(kpt, 0, BY2PG);
		kpt[0] = page->pa|PTEWRITE|PTEVALID;
		up->lastkmap = 0;
		splx(s);
		return (KMap*)KMAP;
	}
	if(up->kmaptable == nil)
		panic("no kmaptable");
	o = up->lastkmap+1;
	for(i=0; i<NKPT; i++){
		if(kpt[(i+o)%NKPT] == 0){
			o = (i+o)%NKPT;
			kpt[o] = page->pa|PTEWRITE|PTEVALID;
			up->lastkmap = o;
			splx(s);
			return (KMap*)(KMAP+o*BY2PG);
		}
	}
	panic("out of kmap");
	return nil;
}

void
kunmap(KMap *k)
{
	ulong va;

	va = (ulong)k;
	if(up->mmupdb == nil || !(vpd[PDX(KMAP)]&PTEVALID))
		panic("kunmap: no kmaps");
	if(va < KMAP || va >= KMAP+KMAPSIZE)
		panic("kunmap: bad address %#.8lux pc=%#.8lux", va, getcallerpc(&k));
	if(!(vpt[VPTX(va)]&PTEVALID))
		panic("kunmap: not mapped %#.8lux pc=%#.8lux", va, getcallerpc(&k));
	up->nkmap--;
	if(up->nkmap < 0)
		panic("kunmap %lud %s: nkmap=%d", up->pid, up->text, up->nkmap);
	vpt[VPTX(va)] = 0;
	flushpg(va);
}

/*
 * Temporary one-page mapping used to edit page directories.
 *
 * The fasttmp #define controls whether the code optimizes
 * the case where the page is already mapped in the physical
 * memory window.
 */
#define fasttmp 1

void*
tmpmap(Page *p)
{
	ulong i;
	ulong *entry;

	if(islo())
		panic("tmpaddr: islo");

	if(fasttmp && p->pa < -KZERO)
		return KADDR(p->pa);

	/*
	 * PDX(TMPADDR) == PDX(MACHADDR), so this
	 * entry is private to the processor and shared
	 * between up->mmupdb (if any) and m->pdb.
	 */
	entry = &vpt[VPTX(TMPADDR)];
	if(!(*entry&PTEVALID)){
		for(i=KZERO; i<=CPU0MACH; i+=BY2PG)
			print("%.8lux: *%.8lux=%.8lux (vpt=%.8lux index=%.8lux)\n", i, &vpt[VPTX(i)], vpt[VPTX(i)], vpt, VPTX(i));
		panic("tmpmap: no entry");
	}
	if(PPN(*entry) != PPN(TMPADDR-KZERO))
		panic("tmpmap: already mapped entry=%#.8lux", *entry);
	*entry = p->pa|PTEWRITE|PTEVALID;
	flushpg(TMPADDR);
	return (void*)TMPADDR;
}

void
tmpunmap(void *v)
{
	ulong *entry;

	if(islo())
		panic("tmpaddr: islo");
	if(fasttmp && (ulong)v >= KZERO && v != (void*)TMPADDR)
		return;
	if(v != (void*)TMPADDR)
		panic("tmpunmap: bad address");
	entry = &vpt[VPTX(TMPADDR)];
	if(!(*entry&PTEVALID) || PPN(*entry) == PPN(PADDR(TMPADDR)))
		panic("tmpmap: not mapped entry=%#.8lux", *entry);
	*entry = PPN(TMPADDR-KZERO)|PTEWRITE|PTEVALID;
	flushpg(TMPADDR);
}

/*
 * These could go back to being macros once the kernel is debugged,
 * but the extra checking is nice to have.
 */
void*
kaddr(ulong pa)
{
	if(pa > (ulong)-KZERO)
		panic("kaddr: pa=%#.8lux", pa);
	return (void*)(pa+KZERO);
}

ulong
paddr(void *v)
{
	ulong va;

	va = (ulong)v;
	if(va < KZERO)
		panic("paddr: va=%#.8lux pc=%#.8lux", va, getcallerpc(&va));
	return va-KZERO;
}

/*
 * More debugging.
 */
void
countpagerefs(ulong *ref, int print)
{
	int i, n;
	Mach *mm;
	Page *pg;
	Proc *p;

	n = 0;
	for(i=0; i<conf.nproc; i++){
		p = proctab(i);
		if(p->mmupdb){
			if(print){
				if(ref[pagenumber(p->mmupdb)])
					iprint("page %#.8lux is proc %d (pid %lud) pdb\n",
						p->mmupdb->pa, i, p->pid);
				continue;
			}
			if(ref[pagenumber(p->mmupdb)]++ == 0)
				n++;
			else
				iprint("page %#.8lux is proc %d (pid %lud) pdb but has other refs!\n",
					p->mmupdb->pa, i, p->pid);
		}
		if(p->kmaptable){
			if(print){
				if(ref[pagenumber(p->kmaptable)])
					iprint("page %#.8lux is proc %d (pid %lud) kmaptable\n",
						p->kmaptable->pa, i, p->pid);
				continue;
			}
			if(ref[pagenumber(p->kmaptable)]++ == 0)
				n++;
			else
				iprint("page %#.8lux is proc %d (pid %lud) kmaptable but has other refs!\n",
					p->kmaptable->pa, i, p->pid);
		}
		for(pg=p->mmuused; pg; pg=pg->next){
			if(print){
				if(ref[pagenumber(pg)])
					iprint("page %#.8lux is on proc %d (pid %lud) mmuused\n",
						pg->pa, i, p->pid);
				continue;
			}
			if(ref[pagenumber(pg)]++ == 0)
				n++;
			else
				iprint("page %#.8lux is on proc %d (pid %lud) mmuused but has other refs!\n",
					pg->pa, i, p->pid);
		}
		for(pg=p->mmufree; pg; pg=pg->next){
			if(print){
				if(ref[pagenumber(pg)])
					iprint("page %#.8lux is on proc %d (pid %lud) mmufree\n",
						pg->pa, i, p->pid);
				continue;
			}
			if(ref[pagenumber(pg)]++ == 0)
				n++;
			else
				iprint("page %#.8lux is on proc %d (pid %lud) mmufree but has other refs!\n",
					pg->pa, i, p->pid);
		}
	}
	if(!print)
		iprint("%d pages in proc mmu\n", n);
	n = 0;
	for(i=0; i<conf.nmach; i++){
		mm = MACHP(i);
		for(pg=mm->pdbpool; pg; pg=pg->next){
			if(print){
				if(ref[pagenumber(pg)])
					iprint("page %#.8lux is in cpu%d pdbpool\n",
						pg->pa, i);
				continue;
			}
			if(ref[pagenumber(pg)]++ == 0)
				n++;
			else
				iprint("page %#.8lux is in cpu%d pdbpool but has other refs!\n",
					pg->pa, i);
		}
	}
	if(!print){
		iprint("%d pages in mach pdbpools\n", n);
		for(i=0; i<conf.nmach; i++)
			iprint("cpu%d: %d pdballoc, %d pdbfree\n",
				i, MACHP(i)->pdballoc, MACHP(i)->pdbfree);
	}
}

void
checkfault(ulong, ulong)
{
}
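
The routines above are the device-mapping (vmap/vunmap), kernel page-table (pdbmap/pdbunmap, vmapsync) and per-process page-mapping (kmap/kunmap, tmpmap/tmpunmap) half of the pc kernel's mmu.c. The following is only a rough sketch of how a driver would typically call them; the physical address and the function names are invented for illustration and do not appear in this file.

/*
 * Hypothetical usage sketch only: shows the usual call pattern for
 * vmap/vunmap and kmap/kunmap from the listing above.  The register
 * address and the example* names are made up.
 */
static ulong *exregs;	/* assumed: mapped device register window */

static void
exampleinit(void)
{
	/* map one page of uncached device registers into the VMAP window */
	exregs = vmap(0xFE000000, BY2PG);
	if(exregs == nil)
		panic("exampleinit: vmap failed");
}

static void
examplecopy(Page *pg, uchar *buf, int n)
{
	KMap *k;
	uchar *va;

	/* temporarily map a Page into the per-process KMAP range */
	k = kmap(pg);
	va = (uchar*)k;		/* the KMap* is itself the virtual address */
	memmove(buf, va, n);
	kunmap(k);
}

static void
exampleshutdown(void)
{
	/* size must match the original vmap call */
	vunmap(exregs, BY2PG);
}

The pairing matters: vunmap takes the same size that was passed to vmap, and every kmap must be balanced by a kunmap, since kmap panics once the per-process KMAP window is exhausted.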
