/*
 *----------------------------------------------------------------------
 *    T-Kernel / Standard Extension
 *
 *    Copyright (C) 2006 by Ken Sakamura. All rights reserved.
 *    T-Kernel / Standard Extension is distributed
 *    under the T-License for T-Kernel / Standard Extension.
 *----------------------------------------------------------------------
 *
 *    Version: 1.00.00
 *    Released by T-Engine Forum(http://www.t-engine.org) at 2006/8/11.
 *
 *----------------------------------------------------------------------
 */

/*
 *	diskmap.c (memory)
 *
 *	Segment management (virtual storage version)
 *	Disk map
 */

#include "segmgr.h"

#define TSD_CBP_VAL_2	2
#define TSD_DPM_VAL_3	3
#define TSD_IDM_ARR_5	5
#define TSD_IDM_VAL_7	7U
#define TSD_IDM_DIV_10	10U
#define TSD_CSD_RAN_7	7
#define TSD_MET_VAL_M1	(-1)
#define TSD_MPB_RTN_M1	(-1)
#define TSD_MNT_VAL_M1	(-1)

LOCAL ER syncMapEntry( ME *me, SyncOpt opt );
LOCAL ER syncUnmappedMapEntry( void );
LOCAL W registMapEntry( ME *me, ME **old_me );
LOCAL void releaseMapEntry( ME *me );
LOCAL ME* checkMapID( ID mapid );
LOCAL ME* checkFreeMapID( ID mapid );
LOCAL ER appendMapLink( ME *me, PFE *pfe );
LOCAL void deleteMapLink( MEL *mel );
LOCAL W checkBlockPara( DE *de, PhyBlk *block, W *npage );
LOCAL W mappingBlock( ME *me, VP laddr, PFE_ETC *mapblk );
LOCAL void checkUpdatePFE( PFE *pfe );
LOCAL ER pageinMapPage( ME *me, VB **start, VB *limit );
LOCAL ER pageinMapEntry( ME *me, VP laddr );
LOCAL ER mapDiskMemory( DE *de, ME *me, VP laddr );
LOCAL ER mapDiskSpace( DE *de, ME *me, VP laddr );
LOCAL ER writeMapEntry( ME *me, SyncOpt wopt );
LOCAL BOOL checkDuplicateMapEntry( MEL *mel );
LOCAL ER unmapDiskSpace( ME *me, SyncOpt opt );

/*
 * Structure of MapEntryTable
 *	Unused MapEntries are managed with a one-way list.
 */
typedef struct MapEntryTable	MET;
struct MapEntryTable {
	MET	*next;	/* Next unused MET */
	ME	*me;	/* Pointer to MapEntry */
};

LOCAL UW	MaxMapEntry;		/* Maximum number of MapEntries */
LOCAL MET	*MapEntryTable;		/* MapEntry table */
LOCAL MET	*FreeMapEntryTop;	/* Start of unused MapEntry list */
LOCAL MET	*FreeMapEntryEnd;	/* End of unused MapEntry list */

/*
 * Maps of rank MapRankPROG or MapRankFM3 are used for mappings that last
 * a relatively long time. If all MapIDs were consumed for such purposes,
 * normal system operation could not be maintained, so the number of
 * MapIDs available to these ranks is restricted.
 */
LOCAL UW	MapLimit, MapCount;	/* Maximum number available */

#define LimitMapRank(rank)	( ((rank) == MapRankPROG) \
				  || ((rank) == MapRankFM3) )

/*
 * MapID <--> MapEntry interconversion
 */
#define toMapID(me)	( (me)->mapid )
#define toMapEntry(mid)	( MapEntryTable[(mid) - 1].me )

/* ------------------------------------------------------------------------ */
/*
 *	MapEntry-related
 */

/*
 * Perform synchronous processing of an unmapped MapEntry
 * to free up space for a new MapEntry.
 */
LOCAL ER syncUnmappedMapEntry( void )
{
static W	pos = 0;
	ME	*me;
	W	i;

	i = pos;
	do {
		if ( --i < 0 ) {
			i = (W)(MaxMapEntry - 1U);
		}
		/* Skip free entries, if any, although they are unlikely to exist. */
		if ( MapEntryTable[i].next != (VP)TSD_MET_VAL_M1 ) {
			continue;
		}
		me = MapEntryTable[i].me;
		if ( me->mapped != 0 ) {
			continue;	/* In the process of mapping */
		}

		(void)syncMapEntry(me, SYNCONLY);
		pos = i;
		return E_OK;
	} while ( i != pos );

	return E_LIMIT;
}
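/*
 * The free list above is threaded through MapEntryTable itself: a free
 * entry's next points to the next free MET (NULL at the end), while an
 * in-use entry's next holds the -1 sentinel. The following is a minimal
 * sketch of how that list could be built at initialization time. It is an
 * illustration only: the hypothetical initMapEntryTable() is not part of
 * this file, and Imalloc() is assumed here as the allocator paired with
 * the Ifree() referred to elsewhere in this file.
 */
#if 0	/* illustration only */
LOCAL ER initMapEntryTable( UW max_entry )
{
	UW	i;

	MaxMapEntry = max_entry;
	MapEntryTable = Imalloc(max_entry * sizeof(MET));
	if ( MapEntryTable == NULL ) {
		return E_NOMEM;
	}

	/* Chain every entry into the one-way free list. */
	for ( i = 0; i < max_entry; i++ ) {
		MapEntryTable[i].next =
			( i + 1 < max_entry )? &MapEntryTable[i + 1]: NULL;
		MapEntryTable[i].me = NULL;	/* No stale ME yet */
	}
	FreeMapEntryTop = &MapEntryTable[0];
	FreeMapEntryEnd = &MapEntryTable[max_entry - 1];

	return E_OK;
}
#endif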
/*
 * Register MapEntry.
 *	If a non-NULL value is returned in *old_me,
 *	use Ifree() to free *old_me after UnlockSEG().
 */
LOCAL W registMapEntry( ME *me, ME **old_me )
{
	MET	*met;
	ER	err;

	while ( FreeMapEntryTop == NULL ) {
		/* Free up space for map IDs. */
		err = syncUnmappedMapEntry();
		if ( err < E_OK ) {
			goto err_ret;
		}
	}

	/* Fetch from the free list. */
	met = FreeMapEntryTop;
	FreeMapEntryTop = FreeMapEntryTop->next;
	met->next = (VP)TSD_MNT_VAL_M1;	/* In use */

	*old_me = met->me;
	met->me = me;

	return me->mapid = met - MapEntryTable + 1;

err_ret:
	DEBUG_PRINT(("registMapEntry err = %d\n", err));
	return err;
}

/*
 * Release MapEntry.
 */
LOCAL void releaseMapEntry( ME *me )
{
	MET	*met = &MapEntryTable[me->mapid - 1];

	if ( FreeMapEntryTop == NULL ) {
		/* The free list is empty. */
		FreeMapEntryTop = met;
	} else {
		/* Append to the free list. */
		FreeMapEntryEnd->next = met;
	}
	FreeMapEntryEnd = met;
	met->next = NULL;	/* End */
}

/*
 * Check MapID.
 */
LOCAL ME* checkMapID( ID mapid )
{
	MET	*met;

	if (( mapid < 1 )||( mapid > (W)MaxMapEntry )) {
		return NULL;	/* Out of range */
	}
	met = &MapEntryTable[mapid - 1];
	if ( met->next != (VP)TSD_MNT_VAL_M1 ) {
		return NULL;	/* Unused entry */
	}
	if ( !met->me->mapped ) {
		return NULL;	/* Already unmapped */
	}
	return met->me;
}

/*
 * Check if MapID is free; if so, return its ME.
 *	After UnlockSEG(), use Ifree() to free the returned ME.
 */
LOCAL ME* checkFreeMapID( ID mapid )
{
	MET	*met;
	ME	*old_me;

	met = &MapEntryTable[mapid - 1];
	if ( met->next == (VP)TSD_MNT_VAL_M1 ) {
		return NULL;	/* In use */
	}

	old_me = met->me;
	met->me = NULL;

	return old_me;
}

/* ------------------------------------------------------------------------ */
/*
 *	MapEntryLink-related
 */

LOCAL FMB_Pool	MapEntryLinkPool;	/* Memory pool for MEL */

/*
 * Obtain a MapEntryLink and connect it.
 */
LOCAL ER appendMapLink( ME *me, PFE *pfe )
{
	MEL	top, *cmel, *pmel, *mel;
	ER	err;

	/* Obtain MEL. */
	mel = AllocFMB(&MapEntryLinkPool, FALSE);
	if ( mel == NULL ) {
		err = E_NOMEM;
		goto err_ret;
	}

	mel->me = me;
	mel->pfe = pfe;
	mel->lkcnt = me->mode.real;

	/* Connect to ME (keep disk blocks in numerical order). */
	top.next_pfe = me->mel;
	pmel = &top;
	for ( ;; ) {
		cmel = pmel->next_pfe;
		if (( cmel == NULL )||( cmel->pfe->dbn_no > pfe->dbn_no )) {
			mel->next_pfe = cmel;
			pmel->next_pfe = mel;
			break;
		}
		pmel = cmel;
	}
	me->mel = top.next_pfe;

	/* Connect to PFE (blocks may be in any order). */
	mel->next_me = pfe->md.mel;
	pfe->md.mel = mel;

	return E_OK;

err_ret:
	DEBUG_PRINT(("appendMapLink err = %d\n", err));
	return err;
}

/*
 * Disconnect and delete a MapEntryLink.
 */
LOCAL void deleteMapLink( MEL *mel )
{
	MEL	top, *cmel, *pmel;

	/* Disconnect from ME. */
	top.next_pfe = mel->me->mel;
	pmel = &top;
	while ( (cmel = pmel->next_pfe) != NULL ) {
		if ( cmel == mel ) {
			pmel->next_pfe = mel->next_pfe;
			break;
		}
		pmel = cmel;
	}
	mel->me->mel = top.next_pfe;

	/* Disconnect from PFE. */
	top.next_me = mel->pfe->md.mel;
	pmel = &top;
	while ( (cmel = pmel->next_me) != NULL ) {
		if ( cmel == mel ) {
			pmel->next_me = mel->next_me;
			break;
		}
		pmel = cmel;
	}
	mel->pfe->md.mel = top.next_me;

	/* Delete MEL. */
	FreeFMB(&MapEntryLinkPool, mel);
}

/* ------------------------------------------------------------------------ */

/*
 * Return the ID of the disk to which mid is mapped.
 */
EXPORT ID GetMapDid( ID mid )
{
	ME	*me;
	ER	err;

	me = checkMapID(mid);
	if ( me == NULL ) {
		err = E_ID;
		goto err_ret;
	}

	return me->did;

err_ret:
	DEBUG_PRINT(("GetMapDid err = %d\n", err));
	return err;
}
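/*
 * Usage sketch for the *old_me protocol of registMapEntry() above
 * (illustration only). The point of the protocol is that the stale ME
 * left in the table slot must be freed with Ifree() only after the
 * segment manager lock is released. LockSEG() is assumed here as the
 * counterpart of the UnlockSEG() referred to in the comments; the real
 * callers live elsewhere in the segment manager.
 */
#if 0	/* illustration only */
LOCAL ER exampleRegister( ME *me )
{
	ME	*old_me;
	W	mid;

	LockSEG();
	mid = registMapEntry(me, &old_me);
	UnlockSEG();

	if ( mid < E_OK ) {
		return mid;	/* Error propagated from syncUnmappedMapEntry() */
	}
	if ( old_me != NULL ) {
		Ifree(old_me);	/* Free the stale ME outside the lock */
	}
	return E_OK;
}
#endif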
/*
 * Check the map block specification.
 *	If the blocks are mappable, return the number of pages in *npage
 *	and the number of block entries as the return value.
 *	If they are not mappable, return an error.
 */
LOCAL W checkBlockPara( DE *de, PhyBlk *block, W *npage )
{
	W	n, nblk, top;
	UW	total = (UW)de->info.blockcont;
	ER	err;

	if (( block->len <= 0U )
	 ||( block->blk >= total )
	 ||( block->len > (total - block->blk) )) {
		err = E_PAR;
		goto err_ret;
	}

	top = BLKTOP(de, (W)block->blk);
	nblk = (W)block->blk - top + (W)block->len;
	n = 1;
	while ( (++block)->len > 0U ) {
		if (( block->blk >= total )
		 ||( block->len > (total - block->blk) )) {
			err = E_PAR;
			goto err_ret;
		}
		/* A boundary between non-consecutive blocks must be a page boundary. */
		if (( (nblk % de->blkcnt) != 0 )
		 ||( BLKOFS(de, (W)block->blk) != 0 )) {
			err = E_NOSPT;
			goto err_ret;
		}
		nblk += (W)block->len;
		n++;
	}
	n++;	/* For the terminating entry with len = 0 */

	if ( n > TSD_CBP_VAL_2 ) {
		/* If the first and the last blocks fall on the same page,
		   mapping is not possible. */
		block--;
		if ( BLKTOP(de, (W)(block->blk + block->len - 1U)) == top ) {
			err = E_NOSPT;
			goto err_ret;
		}
	}

	*npage = (nblk + de->blkcnt - 1) / de->blkcnt;
	return n;

err_ret:
	DEBUG_PRINT(("checkBlockPara err = %d\n", err));
	return err;
}

/*
 * Determine the logical address of the pfe page frame mapped by me.
 *	The returned address is the first logical address of the mapped
 *	page and is not necessarily located on a page boundary.
 *	When the entry is already unmapped, return INVADR.
 */
EXPORT VP mappingAddress( ME *me, PFE *pfe )
{
	PhyBlk	*pb = me->pb;
	DE	*de = toDiskEntry(pfe->dbn_id);
	W	no = pfe->dbn_no;
	W	nblk = 0;

	if ( !me->mapped ) {
		return INVADR;	/* Already unmapped */
	}

	if ( BLKTOP(de, (W)pb->blk) == no ) {
		return me->mode.addr;
	}

	while ( pb->len > 0U ) {
		if (( no >= (W)pb->blk )&&( no < (W)(pb->blk + pb->len) )) {
			nblk += no - (W)pb->blk;
			return (VB*)me->mode.addr + (nblk * de->info.blocksize);
		}
		nblk += (W)pb->len;
		pb++;
	}

	DEBUG_PRINT(("mappingAddress error\n"));
	return INVADR;	/* Unlikely */
}

/*
 * Determine the disk block number corresponding to the logical address
 * laddr mapped by me.
 *
 *	laddr is assumed to be located on a page boundary.
 *	laddr indicates a page within the range mapped by me; it may
 *	therefore precede the address of the first block of me->pb.
 *	In this case, return a block number that precedes the first
 *	block of me->pb.
 *
 *	The part of the laddr page that is mapped by me is returned
 *	in mapblk:
 *		mapblk->c.ofs	Offset within the page, in blocks
 *		mapblk->c.len	Number of blocks
 */
LOCAL W mappingBlock( ME *me, VP laddr, PFE_ETC *mapblk )
{
	DE	*de = toDiskEntry(me->did);
	PhyBlk	*pb = me->pb;
	W	n, i;

	n = ((VB*)laddr - (VB*)me->mode.addr) / de->info.blocksize;
	if ( n <= 0 ) {
		mapblk->c.ofs = -n;
		i = de->blkcnt + n;
		mapblk->c.len = ( (W)pb->len < i )? (W)pb->len: i;
		return (W)pb->blk + n;
	}

	while ( pb->len > 0U ) {
		if ( (W)pb->len > n ) {
			mapblk->c.ofs = 0;
			i = (W)pb->len - n;
			mapblk->c.len = ( i <= de->blkcnt )? i: de->blkcnt;
			return (W)pb->blk + n;
		}
		n -= (W)pb->len;
		pb++;
	}

	DEBUG_PRINT(("mappingBlock error\n"));
	return TSD_MPB_RTN_M1;
}
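/*
 * The two translations above walk the same PhyBlk run list in opposite
 * directions: mappingAddress() goes from a disk block number to a byte
 * offset, mappingBlock() from a byte offset back to a block number.
 * The following hypothetical helper is a minimal sketch of the forward
 * walk in isolation (illustration only; it is not part of this file).
 */
#if 0	/* illustration only */
/*
 * Translate disk block number no into its byte offset within the mapped
 * region described by the pb run list (terminated by an entry with
 * len = 0). Returns -1 when no is not covered by any run.
 */
LOCAL W blockToOffset( PhyBlk *pb, W no, W blocksize )
{
	W	nblk = 0;	/* Blocks preceding the current run */

	for ( ; pb->len > 0U; pb++ ) {
		if (( no >= (W)pb->blk )&&( no < (W)(pb->blk + pb->len) )) {
			return (nblk + (no - (W)pb->blk)) * blocksize;
		}
		nblk += (W)pb->len;
	}
	return -1;	/* Not mapped */
}
#endif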
/*
 * Check the state of access from the MapEntries to which the pfe (PFS_use)
 * page frame is mapped; if there was no access, link pfe to the unused
 * (PFS_free) page frame queue again.
 *	PT_Accessed in the page table is cleared.
 */
EXPORT void CheckAccessDiskMap( PFE *pfe )
{
	PFS	stat = PFS_free;
	MEL	*mel;
	ME	*me;
	VP	laddr;
	PTE	pte;

	for ( mel = pfe->md.mel; mel != NULL; mel = mel->next_me ) {
		me = mel->me;
		laddr = mappingAddress(me, pfe);
		if ( laddr == INVADR ) {
			continue;	/* Already unmapped */
		}

		pte.w = GET_PTE(laddr, me->mode.space);

		/* Not treated as a disk map after copy-on-write processing. */
		if ( CopyOnWriteP_done(pte.w) ) {
			continue;
		}
		if ( !isPresentP(pte.w) ) {
			continue;	/* The page is already suspended. */
		}

		if ( isAccessedP(pte.w) ) {
			/* There was access. */
			stat = PFS_use;
			pte.w = ~(UW)PT_Accessed;
		} else {
			/* Since there was no access, suspend the page. */
			pte.w = ~(UW)(PT_Accessed|PT_Present);
		}
		(void)CHG_PTE(laddr, me->mode.space, 0, pte.w, TRUE);
	}

	if ( pfe->stat != stat ) {
		MovePageFrame(pfe, stat);
	}
}

/*
 * Check the state of access from the MapEntries to which the pfe page
 * frame is mapped, and link pfe to the appropriate page frame queue again.
 *
 *	Check the ranks of the mappings in progress and set pfe to the
 *	highest rank found. For locked page frames, however, the rank is
 *	not meaningful, so the value set may be inaccurate.
 */
EXPORT void CheckStateDiskMap( PFE *pfe )
{
	PFS	stat = PFS_free;
	UW	rank = TSD_CSD_RAN_7;
	MEL	*mel;
	ME	*me;
	VP	laddr;
	PTE	pte;

	for ( mel = pfe->md.mel; mel != NULL; mel = mel->next_me ) {
		me = mel->me;
		laddr = mappingAddress(me, pfe);
		if ( laddr == INVADR ) {
			continue;	/* Already unmapped */
		}

		pte.w = GET_PTE(laddr, me->mode.space);

		/* Not treated as a disk map after copy-on-write processing. */
		if ( CopyOnWriteP_done(pte.w) ) {
			continue;
		}

		if ( me->mode.rank < rank ) {
			rank = me->mode.rank;
		}
		if ( isPresentP(pte.w) ) {
			stat = PFS_use;
		}
		if ( mel->lkcnt > 0 ) {
			stat = PFS_lock;
			break;
		}
	}

	if ( pfe->stat != stat ) {
		MovePageFrame(pfe, stat);
	}
	pfe->rank = rank;
}

/*
 * Check the update state of pfe.
 *	PT_Update in the page table is cleared.
 */
LOCAL void checkUpdatePFE( PFE *pfe )
{
	MEL	*mel;
	ME	*me;
	VP	laddr;
	PTE	pte;

	for ( mel = pfe->md.mel; mel != NULL; mel = mel->next_me ) {
		me = mel->me;
		laddr = mappingAddress(me, pfe);
		if ( laddr == INVADR ) {
			continue;	/* Already unmapped */
		}

		pte.w = GET_PTE(laddr, me->mode.space);

		/* Not treated as a disk map after copy-on-write processing. */
		if ( CopyOnWriteP_done(pte.w) ) {
			continue;
		}

		pte.w = CHG_PTE(laddr, me->mode.space, 0, ~(UW)PT_Update, TRUE);

		/* If a write-protected page has been updated, return an
		 * error without writing it to the disk. Write-protected
		 * pages are sometimes updated by BMS; in that case, do not
		 * write them to the disk, and return an error so that such
		 * pages do not remain in the disk cache. */
		if ( isUpdateP(pte.w) ) {
			if ( isWritableP(pte.w) ) {
				pfe->upd = TRUE;
			} else {