
📄 vm_mem.c

📁 Digital's Unix operating system, VAX 4.2 source code
💻 C
#ifdef mips
	XPRINTF(XPR_VM,"enter mpurge",0,0,0,0);
#endif mips
	for (i = 0; i < CMHSIZ; i++) {
more:
		c1 = &cmap[cmhash[i]];
		if (c1 == ecmap)
			continue;
		if (c1->c_mdev == mdev)
			cmhash[i] = c1->c_hlink;
		else {
			for (;;) {
				c2 = c1;
				c1 = &cmap[c1->c_hlink];
				if (c1 == ecmap)
					goto cont;
				if (c1->c_mdev == mdev)
					break;
			}
			c2->c_hlink = c1->c_hlink;
		}
		xp = &text[c1->c_ndx];
		if (X_DO_RHASH(xp))
			G_RST_HCMAP(xp,xp->x_gptr,c1);
		c1->c_mdev = (u_char) NODEV;
		c1->c_blkno = 0;
		c1->c_hlink = 0;
		goto more;
cont:
		;
	}
	splx(si);
}

/*
 * Initialize core map
 */
meminit(first, last)
	int first, last;
{
	register int i;
	register struct cmap *c;

#ifdef mips
	XPRINTF(XPR_VM,"enter meminit",0,0,0,0);
#endif mips
	firstfree = clrnd(first);
	maxfree = clrnd(last - (CLSIZE - 1));
	freemem = maxfree - firstfree;
	ecmx = ecmap - cmap;
#ifdef mips
	if (ecmx < freemem / CLSIZE + 1)
		freemem = (ecmx-1) * CLSIZE;
#endif mips
#ifdef vax
	if (ecmx < freemem / CLSIZE)
		freemem = ecmx * CLSIZE;
#endif vax
	for (i = 1; i <= freemem / CLSIZE; i++) {
		cmap[i-1].c_next = i;
		c = &cmap[i];
		c->c_prev = i-1;
		c->c_free = 1;
		c->c_gone = 1;
		c->c_type = CSYS;
		c->c_mdev = (u_char) NODEV;
		c->c_blkno = 0;
	}
	cmap[freemem / CLSIZE].c_next = CMHEAD;
	for (i = 0; i < CMHSIZ; i++)
		cmhash[i] = ecmx;
	cmap[CMHEAD].c_prev = freemem / CLSIZE;
	cmap[CMHEAD].c_type = CSYS;
	avefree = freemem;
	hand = 0;
	ucmap = eucmap = -1;
	nucmap = 0;
#ifdef mips
	flush_cache();
#endif mips
}

/*
 * Lock a virtual segment.
 *
 * For each cluster of pages, if the cluster is not valid,
 * touch it to fault it in, otherwise just lock page frame.
 * Called from physio to ensure that the pages
 * participating in raw i/o are valid and locked.
 */
vslock(base, count)
	caddr_t base;
{
	register unsigned v;
	register int npf;
	register struct pte *pte;
	register struct cmap *c;
#ifdef mips
	register unsigned pf;
#endif mips

#ifdef mips
	XPRINTF(XPR_VM,"enter vslock",0,0,0,0);
#endif mips
	v = btop(base);
	pte = vtopte(u.u_procp, v);
	npf = btoc(count + ((int)base & CLOFSET));
	while (npf > 0) {
		if (pte->pg_v) {
			c = &cmap[pgtocm(pte->pg_pfnum)];
#ifdef mips
			if (c->c_lock) {
#endif mips
			MLOCK(c);
#ifdef mips
				MUNLOCK(c);
				continue;
#endif mips
		}
#ifdef mips
			MLOCK(c);
		}
#endif mips
		else
			pagein(ctob(v), 1);	/* return it locked */
#ifdef mips
#ifdef CACHETRICKS
		if (pte->pg_n == 0) {
			pf = pte->pg_pfnum;
			c = &cmap[pgtocm(pf)];
			c->c_icachecnt = icachecnt[pf & icachemask];
			c->c_dcachecnt = dcachecnt[pf & dcachemask];
		}
#endif CACHETRICKS
#endif mips
		pte += CLSIZE;
		v += CLSIZE;
		npf -= CLSIZE;
	}
}

/*
 * Unlock a virtual segment.
 */
vsunlock(base, count, rw)
	caddr_t base;
{
	register struct pte *pte;
	register int npf;
	register struct cmap *c;

#ifdef mips
	XPRINTF(XPR_VM,"enter vsunlock",0,0,0,0);
#endif mips
	pte = vtopte(u.u_procp, btop(base));
	npf = btoc(count + ((int)base & CLOFSET));
	while (npf > 0) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		MUNLOCK(c);
		if (rw == B_READ)	/* Reading from device writes memory */
			pte->pg_m = 1;
		pte += CLSIZE;
		npf -= CLSIZE;
	}
}

struct kmembuckets bucket[NBUCKET];
struct kmemusage *kmemusage;
int kmemu[KM_LAST];
#ifdef KMEMSTATS
struct kmemstats kmemstats[KM_LAST];
struct M_request {
	int size;
	int type;
	int flags;
} M_requests[KM_REQSIZ];	/* burn 12 KB */
int M_request_debug = 1;
int M_request_location = 0;
#endif KMEMSTATS

/* allocate system virtual and pte space */
caddr_t get_sys_ptes(npg, pte)
int npg;
struct pte **pte;
{
	register long alloc = 0;
	register int fraction;
	register int s = splimp();

#ifdef mips
	XPRINTF(XPR_VM,"enter get_sys_ptes",0,0,0,0);
#endif mips
	npg += (fraction = npg % CLSIZE) ? (CLSIZE - fraction) : 0 ;
	alloc = rmalloc(kmemmap, npg);
	splx(s);
	if (alloc <= 0) {
#ifdef KMEMSTATS
		if(km_debug)
			cprintf("get_sys_ptes: rmalloc: npg (%d)\n", npg);
#endif KMEMSTATS
		return((caddr_t)NULL);
	}
	*pte  = &kmempt[alloc];
	return((caddr_t)kmemxtob(alloc));
}

/*
 * Duplicate a malloced area
 */
km_memdup(va)
register caddr_t va;
{
	register struct kmemusage *kup = (struct kmemusage *)NULL;
	register int s = splimp();

/* SMP: just have to lock usage struct (kup) */
#ifdef mips
	XPRINTF(XPR_VM,"enter km_memdup",0,0,0,0);
#endif mips
	kup = btokup(va);
	if (kup->ku_indx >= KMBUCKET) {
		kup->ku_refcnt++;
	} else {
		cprintf("km_memdup: va = 0x%x index = %d\n",
		va, kup->ku_indx);
		panic("km_memdup not a cluster");
	}
	splx(s);
}

/*
 * Allocate a block of memory
 */
caddr_t
km_alloc(size, type, flags)
	unsigned long size;
	long type, flags;
{
	register caddr_t va = (caddr_t)NULL;
	register long alloc = 0;
	register struct kmembuckets *kbp = (struct kmembuckets *)NULL;
	register struct kmemusage *kup = (struct kmemusage *)NULL;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = (struct kmemstats *)NULL;
#endif KMEMSTATS
	int (*mem)(), memall(), vmemall();
	long indx, npg, allocsize;
	int s = splimp();

	mem = (flags & KM_NOWAIT) ? memall : vmemall;
#ifdef KMEMSTATS
	if(size <= 0)
		panic("km_alloc: bad size");
#ifdef vax
	/* make sure sleep option is not set if on interrupt stack */
	if( !(flags & KM_NOWAIT) && (movpsl() & PSL_IS) )
		panic("km_alloc: SLEEP on interrupt stack");
#endif vax
	ksp = &kmemstats[type];
	if(ksp->ks_inuse >= ksp->ks_limit)
		goto bad;
	if(M_request_debug) {
		if(M_request_location >= KM_REQSIZ)
			M_request_location = 0;
		M_requests[M_request_location].size = size;
		M_requests[M_request_location].type = type;
		M_requests[M_request_location++].flags = flags;
	}
#endif KMEMSTATS
	indx = BUCKETINDX(size);
#ifdef KMEMSTATS
	if(indx < MINBUCKET || indx > NBUCKET)
		panic("km_alloc: bad index");
#endif KMEMSTATS
	kbp = &bucket[indx];
/* SMP: need to lock bucket chain (kbp->kb_next) */
	if(kbp->kb_next == (caddr_t)NULL || (flags & KM_CONTIG) ) {
		register struct pte *pte = (struct pte *)NULL;
		register int i = 0;

		if(size <= MAXALLOCSAVE)
			allocsize = 1 << indx;
		else
			allocsize = roundup(size, CLBYTES);
		npg = clrnd(btoc(allocsize));
		if((flags & KM_NOWAIT) && freemem < npg)
			goto bad;
		/* get the GUARD ptes allocated too */
		while((alloc = rmalloc(kmemmap, npg+GUARDPAGES)) == 0) {
			if(flags & KM_NOWAIT)
				goto bad;
			sleep((caddr_t)&kmemmap, PSWP+2);
		}
		pte = (struct pte *) &kmempt[alloc];
		if(flags & KM_CONTIG) {
			register unsigned pfn = 0;
			if((pfn = pfalloc(CSYS, npg/CLSIZE)) == 0)
				goto bad;
			for (i = npg; i-- ; pte++, pfn++) {
#ifdef KMEMSTATS
			/* pte's are cleared in km_free */
				if(*(int *)pte != 0)
					panic("km_alloc: bad pte1");
#endif KMEMSTATS
#ifdef mips
				pte->pg_pfnum = pfn;
#endif mips
#ifdef vax
				*(int *) pte |=  pfn;
#endif vax
			}
		} else {
#ifdef KMEMSTATS
			/* pte's are cleared in km_free */
			for (i = 0; i < npg+GUARDPAGES; i++, pte++) {
				if(*(int *)pte != 0)
					panic("km_alloc: bad pte2");
			}
#endif KMEMSTATS
			if(mem(&kmempt[alloc], npg, &proc[0], CSYS) == 0)
				goto bad;
		}
		va = (caddr_t) kmemxtob(alloc);
#ifdef mips
		vmaccess(&kmempt[alloc], va, npg, DO_CACHE);
#endif mips
#ifdef vax
		vmaccess(&kmempt[alloc], va, npg);
#endif vax
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif KMEMSTATS
/* SMP: kup needs to be locked (usage structure) */
		kup = btokup(va);
		kup->ku_indx = indx;
		kup->ku_refcnt = 0;
		if(allocsize > MAXALLOCSAVE) {
			kup->ku_pagecnt = npg;
			goto done;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif KMEMSTATS
		if(kbp->kb_next == NULL) {
			register caddr_t cp = (caddr_t) NULL;
			kbp->kb_next = va + (npg * NBPG) - allocsize;
			for(cp = kbp->kb_next; cp > va; cp -= allocsize)
				*(caddr_t *)cp = cp - allocsize;
			*(caddr_t *)cp = NULL;
		} else {
			*((caddr_t *)va) = kbp->kb_next;
			kbp->kb_next = va;
		}
	}
	va = kbp->kb_next;
	if( !IS_KMEM_VA(va))
		panic("km_alloc: bucket corruption");
	kbp->kb_next = *(caddr_t *)va;
	kup = btokup(va);
#ifdef KMEMSTATS
	if(kup->ku_indx != indx)
		panic("km_alloc: wrong bucket");
	if(kup->ku_freecnt == 0)
		panic("km_alloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
done:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if(ksp->ks_inuse > ksp->ks_maxused) {
		ksp->ks_maxused = ksp->ks_inuse;
	}
#else KMEMSTATS
done:
#endif KMEMSTATS
	kup->ku_refcnt++;
	kmemu[type]++;
	splx(s);
	if(flags & KM_CLEAR)
		blkclr(va, size);
	return (va);
bad:
	if(alloc > 0) {
		/* remember to put back the GUARDPAGE ptes */
		rmfree(kmemmap, npg+GUARDPAGES, alloc);
		wakeup((caddr_t)&kmemmap);
	}
	splx(s);
	return (NULL);
}

/*
 * Free a block of memory allocated by malloc.
 */
void
km_free(addr, type)
	caddr_t addr;
	long type;
{
	register struct kmemusage *kup = (struct kmemusage *)NULL;
	register int s = splimp();

#ifdef mips
	XPRINTF(XPR_VM,"enter km_free",0,0,0,0);
#endif mips
	if( !IS_KMEM_VA(addr))
		panic("km_free: bad addr\n");
	kup = btokup(addr);
	if(kup->ku_indx < MINBUCKET || kup->ku_indx > NBUCKET)
		panic("km_free: bad index");
/* SMP: lock usage (kup) struct and bucket */
#ifdef KMEMSTATS
	if(--kup->ku_refcnt < 0)
		panic("km_free: multiple frees");
#endif KMEMSTATS
	if(kup->ku_indx < KMBUCKET || kup->ku_refcnt == 0) {
		if(kup->ku_indx > MAXBUCKETSAVE) {
			register long alloc = btokmemx(addr);
			register int i = 0;

			(void) memfree(&kmempt[alloc], kup->ku_pagecnt, 0);
			/*
			 * set pte's (and GUARD's) to NOACCESS
			 * and invalidate tb
			 */
			for (i = 0; i < kup->ku_pagecnt+GUARDPAGES; i++) {
				*((int *)&kmempt[alloc+i]) = 0;
#ifdef vax
				mtpr(TBIS, addr);
#endif vax
				addr += NBPG;
			}
			/* remember to put back the GUARDPAGE ptes */
			(void) rmfree(kmemmap, (long)kup->ku_pagecnt+GUARDPAGES, alloc);
			wakeup((caddr_t)&kmemmap);
			kup->ku_indx = kup->ku_pagecnt = 0;
		} else {
			register struct kmembuckets *kbp = (struct kmembuckets *)NULL;

			kbp = &bucket[kup->ku_indx];
#ifdef KMEMSTATS
			kup->ku_freecnt++;
			if(kup->ku_freecnt >= kbp->kb_elmpercl) {
				if(kup->ku_freecnt > kbp->kb_elmpercl) {
					panic("km_free: multiple frees");
				} else if(kbp->kb_totalfree > kbp->kb_highwat) {
					kbp->kb_couldfree++;
				}
			}
			kbp->kb_totalfree++;
#endif KMEMSTATS
			*(caddr_t *)addr = kbp->kb_next;
			kbp->kb_next = addr;
		}
#ifdef KMEMSTATS
		kmemstats[type].ks_inuse--;
#endif KMEMSTATS
		kmemu[type]--;
	}
	splx(s);
}

#ifdef KMEM_SCRUB
int kmem_pages_freed = 0;	/* how many freed over life of system */
/* This array gives the number of free entries */
/* needed before we try to free some - this needs tuning! */
int kmem_pages_left[NBUCKET] = {
	 0, 0, 0, 0,
	64,32,16, 8,	/*  16b,  32b,  64b, 128b  */
	 8, 8, 8, 8,	/* 256b, 512b,   1K,   2K  */
	 8, 8, 8, 1,	/*   4K,   8K,  16K,  32K  */
	 1, 1, 1, 1	/*  64K, 128K, 256K, 512K+ */
};
int kmem_scrub_time = 60;	/* in seconds - tunable */

void
kmem_scrub()
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register caddr_t va;
	register int pages;
	register int *nfree;
	register int pieces;
	int s = splimp();

#ifdef mips
	XPRINTF(XPR_VM,"enter kmem_scrub",0,0,0,0);
#endif mips
	/* do the cluster size and up buckets */
	for (nfree = &kmem_pages_left[KMBUCKET], kbp = &bucket[KMBUCKET];
	kbp <= &bucket[MAXBUCKETSAVE]; kbp++, nfree++) {
		for(pieces=0,va = kbp->kb_next;va != NULL;va = *(caddr_t *)va) {
			pieces++;	/* actually just chunks on bucket */
		}
		while (pieces >= *nfree && kbp->kb_next) {
			va = (caddr_t) kbp->kb_next;
			kbp->kb_next = *(caddr_t *)va;
			kup = btokup(va);
			pages = (1<<kup->ku_indx)/NBPG;	/* number to free */
			kmem_pages_freed += pages;	/* count them */
			(void) memfree(&kmempt[btokmemx(va)],pages,0);
			/* remember to put back the GUARDPAGE ptes */
			rmfree(kmemmap, pages+GUARDPAGES,btokmemx(va));
			wakeup((caddr_t)&kmemmap);
			/* zero out usage struct */
			kup->ku_indx = 0;
			kup->ku_refcnt = 0;
			kup->ku_pagecnt = 0;
			pieces--;
		}
	}
#ifdef SMALL_SCRUBBER
	/* do the small buckets (< cluster size) */
	for (nfree = &kmem_pages_left[MINBUCKET], kbp = &bucket[MINBUCKET];
	kbp < &bucket[KMBUCKET]; kbp++, nfree++) {
		for(pieces=0,va = kbp->kb_next;va != NULL;va = *(caddr_t *)va) {
			if((btokup(va))->ku_refcnt == 0)
				pieces++;	/* actually just chunks from cluster */
		}
		if(pieces < *nfree ||
		    pieces < CLBYTES/(1<<(btokup(kbp->kb_next)->ku_indx))) {
			continue;
		}
#ifdef notdef
		va = kbp->kb_next;
		cprintf(
	"There are %d chunks free of %d bytes each (total = %d)\n",
	pieces,1<<(btokup(va)->ku_indx),pieces*(1<<(btokup(va)->ku_indx)));
		while (va) {
			if((btokup(va))->ku_refcnt == 0) {
			cprintf("Cluster 0x%x refcnt %d is freeable.\n",
				va, btokup(va)->ku_refcnt);
			}
			va = *(caddr_t *)va;
		}
#endif notdef
	}
#endif SMALL_SCRUBBER
	splx(s);
	timeout(kmem_scrub,(caddr_t)0,kmem_scrub_time*hz);
}
#endif KMEM_SCRUB

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit()
{
#ifdef KMEMSTATS
	register long indx;

#ifdef mips
	XPRINTF(XPR_VM,"enter kmeminit",0,0,0,0);
#endif mips
	if(!powerof2(MAXALLOCSAVE))
		panic("kmeminit: MAXALLOCSAVE not power of 2");
	if(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		panic("kmeminit: MAXALLOCSAVE too big");
	if(MAXALLOCSAVE < CLBYTES)
		panic("kmeminit: MAXALLOCSAVE too small");
#endif KMEMSTATS
	rminit(kmemmap, ((ekmempt - kmempt) - (long)2), (long)2,
		"malloc map", (ekmempt - kmempt));
#ifdef KMEMSTATS
	if(km_debug) {
		cprintf("m_limit = 0x%x; m_name %s; m_size = %d; m_addr 0x%x\n",
		((struct map *)kmemmap)->m_limit,
		((struct map *)kmemmap)->m_name,
		((struct mapent *)kmemmap)->m_size,
		((struct mapent *)kmemmap)->m_addr);
	}
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if(indx >= KMBUCKET)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < KM_LAST; indx++)
		kmemstats[indx].ks_limit = 0x7fffffff;
#endif KMEMSTATS
}
