
diskmap.c

T-kernel extension source code
Language: C
Page 1 of 3
				pfe->err = TRUE;
			}
		}
	}
}

/*
 * Page in as many pages as possible between *start and limit in MapEntry.
 * As *start, return the address of the page that follows the last paged-in page.
 * Therefore, when paged in up to the limit, "*start = limit" is returned.
 */
LOCAL ER pageinMapPage( ME *me, VB **start, VB *limit )
{
	PFE_Q		pfe_q;
	IndQueTop	rd_pfe;
	PTH		pth1, pth2;
	PTE		pte;
	PFE		*pfe, *ppfe;
	DBN		dbn;
	PFS		stat;
	PFE_ETC		mapblk;
	DE		*de = toDiskEntry(me->did);
	ER		err, error = E_OK;

	InitPTH(&pth1, *start, me->mode.space);

	/* Skip the pages that have already been paged in. */
	for ( ; *start < limit; *start += PAGESIZE ) {
		err = NextPTE(&pth1);
		if ( err < E_OK ) {
			break;
		}
		pte.w = GetPTE(&pth1);

		/* After copy-on-write processing, skip valid pages. */
		if ( CopyOnWriteP_done(pte.w) ) {
			continue;
		}
		if ( isPresentP(pte.w) || isValidP(pte.w) ) {
			continue;
		}
		break;
	}
	EndPTH(&pth1, FALSE);

	InitPFE_Q(&pfe_q);
	InitIndQue(&rd_pfe);
	InitPTH(&pth1, *start, me->mode.space);
	InitPTH(&pth2, *start, me->mode.space);
	dbn.id = me->did;
	ppfe = NULL;

	/* Select pages to be paged in. */
	for ( ; *start < limit; *start += PAGESIZE ) {
		if ( rd_pfe.num >= (W)MaxPageIO ) {
			break;
		}
		err = NextPTE(&pth1);
		if ( err < E_OK ) {
			break;
		}
		pte.w = GetPTE(&pth1);

		/* Stop at pages that have completed copy-on-write
		   processing or that are already valid. */
		if ( CopyOnWriteP_done(pte.w) ) {
			break;
		}
		if ( isPresentP(pte.w) || isValidP(pte.w) ) {
			break;
		}

		/* Number of the disk block to be processed */
		dbn.no = mappingBlock(me, *start, &mapblk);

		/* If disk blocks are not arranged consecutively, page frames
		   do not have to be arranged consecutively either. */
		if (( ppfe != NULL )
		  &&( (ppfe->dbn_no + de->blkcnt) != dbn.no )) {
			ppfe = NULL;
		}

		/* Obtain page frame */
		err = GetPageFrame(&pfe, dbn, ppfe, me->mode.rank);
		if ( err < E_OK ) {
			break;
		}
		pfe->etc = ( isClearP(pte.w) != 0 )? mapblk.w: 0;

		/* It is not necessary to load a page that has already been
		   loaded or whose whole content is to be cleared. */
		if ( !((err > 0 )
			|| ((isClearP(pte.w) && (mapblk.c.len == de->blkcnt)))) ) {
			/* Necessary to load from disk. */
			err = InsertIndQue(&rd_pfe, pfe, FALSE);
			if ( err < E_OK ) {
				goto err_brk;
			}
		}

		/* Connect to MEL. */
		err = appendMapLink(me, pfe);
		if ( err < E_OK ) {
err_brk:
			/* Release page frames. */
			RemoveIndQue(&rd_pfe, pfe);
			UngetPageFrame(pfe);
			break;
		}
		InsertPFE_Q(&pfe_q, pfe);
		ppfe = pfe;
	}
	EndPTH(&pth1, FALSE);

	/* Load disk. */
	err = ReadWritePFE_IndQue(&rd_pfe, TDC_READ);
	if ( err < E_OK ) {
		/* If errors are notified to the error handler,
		   ignore the errors here. */
		if ( SearchErrHdr(me->tid) == NULL ) {
			error = err;
		}
	}
	DeleteIndQue(&rd_pfe);

	if ( isLocalSpace(me->mode.addr) != 0 ) {
		/* Update process statistical information */
		PINFO *pinfo = GetPINFO_lsid(me->mode.space);
		if ( pinfo != NULL ) {
			pinfo->allocpage += (UH)pfe_q.diskmap;
		}
	}

	if ( me->mode.real != 0 ) {
		pte.w = PT_Present;
		stat  = PFS_lock;
	} else {
		pte.w = PT_Present|PT_Valid;
		stat  = PFS_use;
	}

	/* Set page table and register page frame */
	while ( (pfe = RemoveNextPFE_Q(&pfe_q)) != NULL ) {
		(void)NextPTE(&pth2);

		if ( pfe->etc != 0 ) {
			VP	adr;
			size_t	len;

			mapblk.w = pfe->etc;
			adr = (VB*)PFEtoLADR(pfe)
				+ (mapblk.c.ofs * de->info.blocksize);
			len = mapblk.c.len * (UW)de->info.blocksize;

			/* Clear page frame */
			bzero(adr, len);
			WriteBackDCachePage(adr, 0);
			pfe->upd = TRUE;
		}

		/* Register page frame */
		RegistPageFrame(pfe,
			( pfe->stat == PFS_lock )? PFS_lock: stat);

		/* Set page table */
		pte.c.pfa = PFEtoPFA(pfe);
		(void)ChgPTE(&pth2, pte.w, ~(UW)(PT_Address|PT_Present|PT_Valid
						|PT_Clear|PT_Update), TRUE);
	}
	EndPTH(&pth2, FALSE);

#ifdef DEBUG
	if ( error < E_OK ) {
		DEBUG_PRINT(("pageinMapPage err = %d\n", error));
	}
#endif
	return error;
}
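pageinMapPage above first skips pages that are already present, then collects at most MaxPageIO page frames and issues a single read request for the whole batch. The following standalone sketch, which is not part of diskmap.c, shows that two-phase pattern in isolation; the names page_valid, read_batch, and MAX_PAGE_IO are hypothetical, assuming a simple per-page valid flag instead of real page-table entries.

/* Minimal sketch of the skip-then-batch pattern used by pageinMapPage. */
#include <stdio.h>
#include <stdbool.h>

#define NPAGES      16
#define MAX_PAGE_IO 4	/* hypothetical stand-in for MaxPageIO */

static bool page_valid[NPAGES] = { true, true, false, false, false,
                                   false, true, false, false, false };

/* Pretend to issue one disk read covering `count` pages starting at `first`. */
static void read_batch(int first, int count)
{
	printf("read pages %d..%d in one I/O\n", first, first + count - 1);
}

int main(void)
{
	int start = 0;

	while (start < NPAGES) {
		/* Phase 1: skip pages that are already valid (paged in). */
		while (start < NPAGES && page_valid[start]) {
			start++;
		}
		/* Phase 2: collect up to MAX_PAGE_IO consecutive invalid pages. */
		int count = 0;
		while (start + count < NPAGES
		       && count < MAX_PAGE_IO
		       && !page_valid[start + count]) {
			count++;
		}
		if (count > 0) {
			read_batch(start, count);
			start += count;
		}
	}
	return 0;
}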
/*
 * Page-in of MapEntry
 *	laddr == INVADR	Page in all mapped pages.
 *			All pages are paged in at least once,
 *			but they are sometimes paged out immediately.
 *	laddr != INVADR	Always page in pages that contain laddr.
 *			Include several of their preceding and succeeding pages if possible.
 *
 *	If real I/O is specified in map mode, make them resident (lock them) as well.
 */
LOCAL ER pageinMapEntry( ME *me, VP laddr )
{
	VB	*topaddr, *start, *limit, *stopaddr;
	UW	n, max;
	ER	err, error = E_OK;

	topaddr = PageAlignL(me->mode.addr);

	/* Decide on the range of addresses to be paged in. */
	max = (UW)MaxPreLoad();
	if (( me->npage <= max )||( laddr == INVADR )) {
		/* Entire map */
		start = topaddr;
		limit = start + (me->npage * (UW)PAGESIZE);
	} else {
		/* Up to max pages around laddr */
		n = (UW)(((VB*)laddr - topaddr) / PAGESIZE);
		if ( (me->npage - n) >= max ) {
			start = PageAlignL(laddr);
			limit = start + (max * (UW)PAGESIZE);
		} else {
			n = me->npage - max;
			start = topaddr + (n * (UW)PAGESIZE);
			limit = topaddr + (me->npage * (UW)PAGESIZE);
		}
	}
	stopaddr = ( laddr == INVADR )? limit: NextPage(PageAlignL(laddr));

	while ( start < stopaddr ) {
		topaddr = start;

		/* Page in a possible range at a time. */
		err = pageinMapPage(me, &start, limit);
		if ( err < E_OK ) {
			error = err;
		}
		if ( start == topaddr ) {
			/* Could not be paged in at all. */
			error = E_NOMEM;
			break;
		}
	}

#ifdef DEBUG
	if ( error < E_OK ) {
		DEBUG_PRINT(("pageinMapEntry err = %d\n", error));
	}
#endif
	return error;
}

/*
 * Map disk memory to logical spaces.
 */
LOCAL ER mapDiskMemory( DE *de, ME *me, VP laddr )
{
	PTE		pte;
	UW		clr   = me->mode.clear;
	W		npage = (W)me->npage;
	VB		*la   = laddr;
	VB		*top  = de->memadr;
	UW		blksz = (UW)de->info.blocksize;
	W		i, n, ct, ce;
	PFE_ETC		mapblk;
	VP		padr;
	ER		err;

	pte   = PTE_MemDiskMap(me->mode.level);

	/* Number of pages corresponding to a logical block */
	n = (W)RoundPage(de->lbsz);
	if ( n <= 0 ) {
		n = 1;
	}
	if ( n > npage ) {
		n = npage;
	}
	ct = ( (clr & MapNoClrTop) == MapNoClrTop )? n: 0;
	ce = ( (clr & MapNoClrEnd) == MapNoClrEnd )? (npage - n): 0;

	for ( i = 0; i < npage; ++i ) {
		/* Determine page frame addresses of disk blocks to be processed. */
		n = mappingBlock(me, la, &mapblk);
		err = CnvPhysicalAddr(top + ((UW)n * blksz), PAGESIZE, &padr);
		if ( err < E_OK ) {
			goto err_ret;
		}
		pte.c.pfa = PADRtoPFA(padr);

		if (( clr != MapNoClr )&&( (i >= ct) && (i < ce) )) {
			VP	adr;
			size_t	len;

			adr = (VB*)toLogicalAddress(padr)
				+ (mapblk.c.ofs * de->info.blocksize);
			len = mapblk.c.len * (UW)de->info.blocksize;

			/* Clear page. */
			bzero(adr, len);
			WriteBackDCachePage(adr, 0);
		}

		/* Map to logical address. */
		err = __MakeSpace(la, 1, me->mode.space, pte.w);
		if ( err < E_OK ) {
			goto err_ret;
		}
		la += PAGESIZE;
	}
	return E_OK;

err_ret:
	DEBUG_PRINT(("mapDiskMemory err = %d\n", err));
	return err;
}
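pageinMapEntry above limits pre-loading to a window of at most MaxPreLoad() pages: the whole map if it is small enough, otherwise a window starting at the faulting page, slid back when the fault lies near the end of the map. The standalone sketch below, not taken from diskmap.c, mirrors only that range selection; choose_window, fault_page, and the concrete numbers are hypothetical.

/* Minimal sketch of the preload-window selection in pageinMapEntry. */
#include <stdio.h>

#define PAGESIZE 4096UL

static void choose_window(unsigned long npage, unsigned long max,
                          unsigned long fault_page,
                          unsigned long *first, unsigned long *count)
{
	if (npage <= max) {
		/* Entire map fits in one preload window. */
		*first = 0;
		*count = npage;
	} else if (npage - fault_page >= max) {
		/* Enough room after the fault: window starts at the fault page. */
		*first = fault_page;
		*count = max;
	} else {
		/* Near the end of the map: slide the window back so it still
		   contains the fault page and ends at the last page. */
		*first = npage - max;
		*count = max;
	}
}

int main(void)
{
	unsigned long first, count;

	choose_window(64, 8, 60, &first, &count);	/* fault near the end */
	printf("preload pages %lu..%lu (offset 0x%lx)\n",
	       first, first + count - 1, first * PAGESIZE);
	return 0;
}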
/*
 * Map disk blocks to logical spaces.
 */
LOCAL ER mapDiskSpace( DE *de, ME *me, VP laddr )
{
	PTE	pte;
	UW	clr   = me->mode.clear;
	UW	lsid  = me->mode.space;
	UW	npage = me->npage;
	VB	*la   = laddr;
	UW	n;
	ER	err;

	pte   = PTE_DiskMap(toMapID(me), me->mode.level);

	if ( LimitMapRank(me->mode.rank) ) {
		MapCount++;
	}

	/* Number of pages corresponding to a logical block */
	n = RoundPage(de->lbsz);
	if ( n <= 0U ) {
		n = 1U;
	}
	if ( n > npage ) {
		n = npage;
	}

	if (( npage > 0U )&&( (clr & MapNoClrTop) == MapNoClrTop )) {
		/* Do not clear a page that corresponds to the first logical block. */
		err = __MakeSpace(la, (W)n, lsid, pte.w);
		if ( err < E_OK ) {
			goto err_ret;
		}
		la += n * (UW)PAGESIZE;
		npage -= n;
		if ( n > npage ) {
			n = npage;
		}
	}
	if (( npage > 0U )&&( (clr & MapNoClrEnd) == MapNoClrEnd )) {
		/* Do not clear a page that corresponds to the last logical block. */
		err = __MakeSpace(la + ((npage - n) * (UW)PAGESIZE), (W)n, lsid, pte.w);
		if ( err < E_OK ) {
			goto err_ret;
		}
		npage -= n;
	}
	if ( npage > 0U ) {
		/* The rest of the pages must be set to be cleared. */
		if ( clr != MapNoClr ) {
			pte.w |= PT_Clear;
		}
		err = __MakeSpace(la, (W)npage, lsid, pte.w);
		if ( err < E_OK ) {
			goto err_ret;
		}
	}

	/* As for pages set to be cleared, perform page-in here to clear them.
	 * Even if pages are set to be cleared, the disk is not cleared
	 * without page-in.
	 * If real I/O (real == 1) is specified, perform page-in here to make them resident.
	 */
	if (( clr != MapNoClr )||( me->mode.real != 0 )) {
		err = pageinMapEntry(me, INVADR);
		if ( err < E_OK ) {
			goto err_ret;
		}
	}
	return E_OK;

err_ret:
	DEBUG_PRINT(("mapDiskSpace err = %d\n", err));
	return err;
}

/*
 * Write pages that correspond to MapEntry.
 *	opt = MARKUPD
 *		Place an update mark in all page frames. Do not write them to the disk.
 *	opt = SYNCONLY
 *		Write updated pages only.
 *	opt = WRITEALL
 *		Write all pages regardless of whether updated or not.
 */
LOCAL ER writeMapEntry( ME *me, SyncOpt wopt )
{
	IndQueTop	wr_pfe;
	MEL		*mel;
	PFE		*pfe;
	ER		err, error = E_OK;

	/* When mapped directly to memory disk, nothing has to be done. */
	if ( me->memmap != 0 ) {
		return error;
	}

	InitIndQue(&wr_pfe);

	for ( mel = me->mel; mel != NULL; mel = mel->next_pfe ) {
		pfe = mel->pfe;

		/* Check update state. */
		checkUpdatePFE(pfe);

		if ( wopt != SYNCONLY ) {
			pfe->upd = TRUE;
			if ( wopt == MARKUPD ) {
				continue;
			}
		}
		if ( !pfe->upd || pfe->err ) {
			continue;
		}

		/* Write and register in queue. */
		err = InsertIndQue(&wr_pfe, pfe, TRUE);
		if ( err < E_OK ) {
			/* Write only pfe that could not be registered in queue. */
			ER err = ReadWritePFE(pfe, TDC_WRITE);
			if ( err < E_OK ) {
				error = err;
			}
		}
		if (( wr_pfe.num >= (W)MaxPageIO )||( err < E_OK )) {
			/* Write pages located up to here. */
			err = ReadWritePFE_IndQue(&wr_pfe, TDC_WRITE);
			if ( err < E_OK ) {
				error = err;
			}
			DeleteIndQue(&wr_pfe);
		}
	}

	/* Write */
	err = ReadWritePFE_IndQue(&wr_pfe, TDC_WRITE);
	if ( err < E_OK ) {
		error = err;
	}
	DeleteIndQue(&wr_pfe);

#ifdef DEBUG
	if ( error < E_OK ) {
		DEBUG_PRINT(("writeMapEntry err = %d\n", error));
	}
#endif
	return error;
}
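Both pageinMapPage and mapDiskMemory above clear only the part of a page that the mapping actually covers, computed from the block offset and block count stored in mapblk. The following self-contained sketch, not part of diskmap.c, shows that arithmetic with illustrative values for blocksize, ofs, and len, and uses standard memset in place of the kernel's bzero.

/* Minimal sketch of the partial-page clear used in pageinMapPage/mapDiskMemory. */
#include <stdio.h>
#include <string.h>

#define PAGESIZE 4096

int main(void)
{
	static unsigned char page[PAGESIZE];
	unsigned blocksize = 512;	/* stands in for de->info.blocksize */
	unsigned ofs = 3;		/* first mapped block inside the page */
	unsigned len = 4;		/* number of mapped blocks in the page */

	memset(page, 0xff, sizeof page);		/* pretend stale data */
	memset(page + ofs * blocksize, 0,		/* clear only the mapped part */
	       (size_t)len * blocksize);

	printf("cleared bytes %u..%u of the page\n",
	       ofs * blocksize, ofs * blocksize + len * blocksize - 1);
	return 0;
}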
/*
 * Check if page frames indicated by mel are mapped from other MapEntry.
 * If there is another map that has the same information as that of mel->me
 * or that is mapped with 0, return TRUE.
 */
LOCAL BOOL checkDuplicateMapEntry( MEL *mel )
{
	VW	info = mel->me->mode.info;
	VW	i;
	MEL	*p;

	for ( p = mel->pfe->md.mel; p != NULL; p = p->next_me ) {
		if ( p == mel ) {
			continue;
		}
		i = p->me->mode.info;
		if (( i == 0 )||( i == info )) {
			return TRUE;
		}
	}
	return FALSE;
}

/*
 * Synchronize MapEntry
 *	opt = NOSYNC
 *		Do not synchronize (write) page frames.
 *	opt = MARKUPD
 *		Place an update mark in all page frames.
 *		Do not perform synchronization (writing).
 *	opt = SYNCONLY
 *		Perform synchronization (writing).
 *	opt = WRITEALL
 *		Write all pages regardless of whether updated or not.
 *	opt = DISCARD or FORCEDEL
 *		After synchronizing page frames, cancel these frames.
 *		However, do not cancel page frames if they are mapped from
 *		other MapEntry.
 *
 *	If there are no yet-to-be-written pages (anymore) in page frames that
 *	correspond to MapEntry, release MapEntry me.
 */
LOCAL ER syncMapEntry( ME *me, SyncOpt opt )
{
	MEL	*mel, *nmel;
	PFE	*pfe;
	ER	err, error = E_OK;

	if ( opt != NOSYNC ) {
		SyncOpt	wopt;

		if ( opt < DISCARD ) {
			wopt = opt;
		} else {
#if PT_Update == 0
			/* If page updates cannot be checked on hardware:
			 * if the pages are writable, they may have been updated,
			 * so write all page frames to the disk.
			 */
			wopt = ( (me->mode.level & MapWrite) != 0 )?
						WRITEALL: SYNCONLY;
#else
			wopt = SYNCONLY;
#endif
		}

		/* Write page frame */
		err = writeMapEntry(me, wopt);
		if ( err < E_OK ) {
			error = err;
		}
	}

	if ( me->mapped != 0 ) {
		goto skip_ret;	/* When in the process of mapping, synchronization only. */
	}

	/* If page frames become unnecessary, separate them from me. */
	nmel = me->mel;
	while ( (mel = nmel) != NULL ) {
		pfe = mel->pfe;
		nmel = mel->next_pfe;

		/* If pages have not been updated or errors are recorded,
		   delete MEL without writing pages to the disk.
		   If page frames are mapped from other MapEntry, also delete MEL. */
		if ( !pfe->upd || pfe->err || checkDuplicateMapEntry(mel) ) {
			deleteMapLink(mel);
		}

		/* If DISCARD was specified or errors are recorded, and the
		   page frames are not mapped from any other MapEntry,
		   cancel these page frames. */
		if ( ( (opt >= DISCARD) || pfe->err )
		  &&( pfe->md.mel == NULL )) {
			DiscardPageFrame(pfe);
		} else {
			/* Link again to an appropriate page frame queue. */
			CheckStateDiskMap(pfe);
		}
	}

	/* If there are no page frames left in me, release me. */
	if ( me->mel == NULL ) {
		releaseMapEntry(me);
	}

skip_ret:
#ifdef DEBUG
	if ( error < E_OK ) {
		DEBUG_PRINT(("syncMapEntry err = %d\n", error));
	}
#endif
	return error;
}

/*
 * Reset disk blocks mapped to logical spaces.
 *	opt = NOSYNC
 *		Reset only.
 *	opt = MARKUPD
 *		Reset mapping and place an update mark in all page frames.
 *	opt = SYNCONLY
 *		Reset mapping and synchronize (write) page frames.
 *	opt = WRITEALL
 *		As in the case of SYNCONLY, write all pages regardless of
 *		whether updated or not.
 *	opt = DISCARD or FORCEDEL
 *		Reset mapping and synchronize/cancel page frames.
 *		However, do not cancel page frames if they are mapped from
 *		other MapEntry.
 *
 *	MapEntry me may be released due to map resetting.
 *
 *	(*) UnlockSEG may occur temporarily.
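The release rule applied by syncMapEntry above, with the help of checkDuplicateMapEntry, is that a page frame may only be discarded once no map link (MEL) from any other MapEntry still references it. The standalone sketch below, which is not taken from diskmap.c, illustrates that rule with invented struct and function names (map_link, page_frame, unlink_owner).

/* Minimal sketch of the "discard only when unreferenced" rule in syncMapEntry. */
#include <stdio.h>
#include <stdlib.h>

struct map_link {
	int              owner;		/* id of the map that created this link */
	struct map_link *next;
};

struct page_frame {
	struct map_link *links;		/* all maps currently sharing this frame */
};

/* Remove every link owned by `owner`; return nonzero if the frame is now unreferenced. */
static int unlink_owner(struct page_frame *pf, int owner)
{
	struct map_link **pp = &pf->links;

	while (*pp != NULL) {
		if ((*pp)->owner == owner) {
			struct map_link *dead = *pp;
			*pp = dead->next;
			free(dead);
		} else {
			pp = &(*pp)->next;
		}
	}
	return pf->links == NULL;
}

int main(void)
{
	struct page_frame pf = { NULL };
	int owners[] = { 1, 2 };

	for (int i = 0; i < 2; i++) {
		struct map_link *l = malloc(sizeof *l);
		l->owner = owners[i];
		l->next  = pf.links;
		pf.links = l;
	}

	printf("after unmapping 1: %s\n",
	       unlink_owner(&pf, 1) ? "discard frame" : "keep frame");
	printf("after unmapping 2: %s\n",
	       unlink_owner(&pf, 2) ? "discard frame" : "keep frame");
	return 0;
}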
