📄 jfs_txnmgr.c

📁 jfs-2.4-1.1.7.tar.gz (jfs 2.4-1.1.7 source)
💻 C
📖 Page 1 of 5
void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
{
	struct tlock *tlck = NULL;
	struct pxd_lock *maplock = NULL, *pxdlock = NULL;

	/*
	 * format maplock for alloc of new EA extent
	 */
	if (newea) {
		/* Since the newea could be a completely zeroed entry we need to
		 * check for the two flags which indicate we should actually
		 * commit new EA data
		 */
		if (newea->flag & DXD_EXTENT) {
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			pxdlock->flag = mlckALLOCPXD;
			PXDaddress(&pxdlock->pxd, addressDXD(newea));
			PXDlength(&pxdlock->pxd, lengthDXD(newea));
			pxdlock++;
			maplock->index = 1;
		} else if (newea->flag & DXD_INLINE) {
			tlck = NULL;
			set_cflag(COMMIT_Inlineea, ip);
		}
	}

	/*
	 * format maplock for free of old EA extent
	 */
	if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
		if (tlck == NULL) {
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			maplock->index = 0;
		}
		pxdlock->flag = mlckFREEPXD;
		PXDaddress(&pxdlock->pxd, addressDXD(oldea));
		PXDlength(&pxdlock->pxd, lengthDXD(oldea));
		maplock->index++;
	}
}

/*
 *      txForce()
 *
 * function: synchronously write pages locked by transaction
 *              after txLog() but before txUpdateMap();
 */
void txForce(struct tblock * tblk)
{
	struct tlock *tlck;
	lid_t lid, next;
	struct metapage *mp;

	/*
	 * reverse the order of transaction tlocks in
	 * careful update order of address index pages
	 * (right to left, bottom up)
	 */
	tlck = lid_to_tlock(tblk->next);
	lid = tlck->next;
	tlck->next = 0;
	while (lid) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;
		tlck->next = tblk->next;
		tblk->next = lid;
		lid = next;
	}

	/*
	 * synchronously write the page, and
	 * hold the page for txUpdateMap();
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;

		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			if (tlck->flag & tlckWRITEPAGE) {
				tlck->flag &= ~tlckWRITEPAGE;

				/* do not release page to freelist */

				/*
				 * The "right" thing to do here is to
				 * synchronously write the metadata.
				 * With the current implementation this
				 * is hard since write_metapage requires
				 * us to kunmap & remap the page.  If we
				 * have tlocks pointing into the metadata
				 * pages, we don't want to do this.  I think
				 * we can get by with synchronously writing
				 * the pages when they are released.
				 */
				assert(atomic_read(&mp->nohomeok));
				set_bit(META_dirty, &mp->flag);
				set_bit(META_sync, &mp->flag);
			}
		}
	}
}

/*
 *      txUpdateMap()
 *
 * function:    update persistent allocation map (and working map
 *              if appropriate);
 *
 * parameter:
 */
static void txUpdateMap(struct tblock * tblk)
{
	struct inode *ip;
	struct inode *ipimap;
	lid_t lid;
	struct tlock *tlck;
	struct maplock *maplock;
	struct pxd_lock pxdlock;
	int maptype;
	int k, nlock;
	struct metapage *mp = 0;

	ipimap = JFS_SBI(tblk->sb)->ipimap;

	maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;

	/*
	 *      update block allocation map
	 *
	 * update allocation state in pmap (and wmap) and
	 * update lsn of the pmap page;
	 */
	/*
	 * scan each tlock/page of transaction for block allocation/free:
	 *
	 * for each tlock/page of transaction, update map.
	 *  ? are there tlock for pmap and pwmap at the same time ?
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);

		if ((tlck->flag & tlckUPDATEMAP) == 0)
			continue;

		if (tlck->flag & tlckFREEPAGE) {
			/*
			 * Another thread may attempt to reuse freed space
			 * immediately, so we want to get rid of the metapage
			 * before anyone else has a chance to get it.
			 * Lock metapage, update maps, then invalidate
			 * the metapage.
			 */
			mp = tlck->mp;
			ASSERT(mp->xflag & COMMIT_PAGE);
			hold_metapage(mp, 0);
		}

		/*
		 * extent list:
		 * . in-line PXD list:
		 * . out-of-line XAD list:
		 */
		maplock = (struct maplock *) & tlck->lock;
		nlock = maplock->index;

		for (k = 0; k < nlock; k++, maplock++) {
			/*
			 * allocate blocks in persistent map:
			 *
			 * blocks have been allocated from wmap at alloc time;
			 */
			if (maplock->flag & mlckALLOC) {
				txAllocPMap(ipimap, maplock, tblk);
			}
			/*
			 * free blocks in persistent and working map:
			 * blocks will be freed in pmap and then in wmap;
			 *
			 * ? tblock specifies the PMAP/PWMAP based upon
			 * transaction
			 *
			 * free blocks in persistent map:
			 * blocks will be freed from wmap at last reference
			 * release of the object for regular files;
			 *
			 * Always free blocks from both persistent & working
			 * maps for directories
			 */
			else {	/* (maplock->flag & mlckFREE) */

				if (S_ISDIR(tlck->ip->i_mode))
					txFreeMap(ipimap, maplock,
						  tblk, COMMIT_PWMAP);
				else
					txFreeMap(ipimap, maplock,
						  tblk, maptype);
			}
		}
		if (tlck->flag & tlckFREEPAGE) {
			if (!(tblk->flag & tblkGC_LAZY)) {
				/* This is equivalent to txRelease */
				ASSERT(mp->lid == lid);
				tlck->mp->lid = 0;
			}
			assert(atomic_read(&mp->nohomeok) == 1);
			atomic_dec(&mp->nohomeok);
			discard_metapage(mp);
			tlck->mp = 0;
		}
	}
	/*
	 *      update inode allocation map
	 *
	 * update allocation state in pmap and
	 * update lsn of the pmap page;
	 * update in-memory inode flag/state
	 *
	 * unlock mapper/write lock
	 */
	if (tblk->xflag & COMMIT_CREATE) {
		ip = tblk->ip;
		ASSERT(test_cflag(COMMIT_New, ip));
		clear_cflag(COMMIT_New, ip);
		diUpdatePMap(ipimap, ip->i_ino, FALSE, tblk);
		ipimap->i_state |= I_DIRTY;
		/* update persistent block allocation map
		 * for the allocation of inode extent;
		 */
		pxdlock.flag = mlckALLOCPXD;
		pxdlock.pxd = JFS_IP(ip)->ixpxd;
		pxdlock.index = 1;
		txAllocPMap(ip, (struct maplock *) & pxdlock, tblk);
		iput(ip);
	} else if (tblk->xflag & COMMIT_DELETE) {
		ip = tblk->ip;
		diUpdatePMap(ipimap, ip->i_ino, TRUE, tblk);
		ipimap->i_state |= I_DIRTY;
		iput(ip);
	}
}

/*
 *      txAllocPMap()
 *
 * function: allocate from persistent map;
 *
 * parameter:
 *      ipbmap  -
 *      maplock -
 *              xad list:
 *              pxd:
 *
 *      maptype -
 *              allocate from persistent map;
 *              free from persistent map;
 *              (e.g., tmp file - free from working map at release
 *               of last reference);
 *              free from persistent and working map;
 *
 *      lsn     - log sequence number;
 */
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
			struct tblock * tblk)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	/*
	 * allocate from persistent map;
	 */
	if (maplock->flag & mlckALLOCXADLIST) {
		xadlistlock = (struct xdlistlock *) maplock;
		xad = xadlistlock->xdlist;
		for (n = 0; n < xadlistlock->count; n++, xad++) {
			if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbUpdatePMap(ipbmap, FALSE, xaddr,
					     (s64) xlen, tblk);
				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
				jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	} else if (maplock->flag & mlckALLOCPXD) {
		pxdlock = (struct pxd_lock *) maplock;
		xaddr = addressPXD(&pxdlock->pxd);
		xlen = lengthPXD(&pxdlock->pxd);
		dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen, tblk);
		jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
	} else {
		/* (maplock->flag & mlckALLOCPXDLIST) */

		pxdlistlock = (struct xdlistlock *) maplock;
		pxd = pxdlistlock->xdlist;
		for (n = 0; n < pxdlistlock->count; n++, pxd++) {
			xaddr = addressPXD(pxd);
			xlen = lengthPXD(pxd);
			dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen,
				     tblk);
			jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		}
	}
}

/*
 *      txFreeMap()
 *
 * function:    free from persistent and/or working map;
 *
 * todo: optimization
 */
void txFreeMap(struct inode *ip,
	       struct maplock * maplock, struct tblock * tblk, int maptype)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
		 tblk, maplock, maptype);

	/*
	 * free from persistent map;
	 */
	if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
		if (maplock->flag & mlckFREEXADLIST) {
			xadlistlock = (struct xdlistlock *) maplock;
			xad = xadlistlock->xdlist;
			for (n = 0; n < xadlistlock->count; n++, xad++) {
				if (!(xad->flag & XAD_NEW)) {
					xaddr = addressXAD(xad);
					xlen = lengthXAD(xad);
					dbUpdatePMap(ipbmap, TRUE, xaddr,
						     (s64) xlen, tblk);
					jfs_info("freePMap: xaddr:0x%lx "
						 "xlen:%d",
						 (ulong) xaddr, xlen);
				}
			}
		} else if (maplock->flag & mlckFREEPXD) {
			pxdlock = (struct pxd_lock *) maplock;
			xaddr = addressPXD(&pxdlock->pxd);
			xlen = lengthPXD(&pxdlock->pxd);
			dbUpdatePMap(ipbmap, TRUE, xaddr, (s64) xlen,
				     tblk);
			jfs_info("freePMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		} else {	/* (maplock->flag & mlckALLOCPXDLIST) */

			pxdlistlock = (struct xdlistlock *) maplock;
			pxd = pxdlistlock->xdlist;
			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
				xaddr = addressPXD(pxd);
				xlen = lengthPXD(pxd);
				dbUpdatePMap(ipbmap, TRUE, xaddr,
					     (s64) xlen, tblk);
				jfs_info("freePMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	}

	/*
	 * free from working map;
	 */
	if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
		if (maplock->flag & mlckFREEXADLIST) {
			xadlistlock = (struct xdlistlock *) maplock;
			xad = xadlistlock->xdlist;
			for (n = 0; n < xadlistlock->count; n++, xad++) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbFree(ip, xaddr, (s64) xlen);
				xad->flag = 0;
				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		} else if (maplock->flag & mlckFREEPXD) {
			pxdlock = (struct pxd_lock *) maplock;
			xaddr = addressPXD(&pxdlock->pxd);
			xlen = lengthPXD(&pxdlock->pxd);
			dbFree(ip, xaddr, (s64) xlen);
			jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		} else {	/* (maplock->flag & mlckFREEPXDLIST) */

			pxdlistlock = (struct xdlistlock *) maplock;
			pxd = pxdlistlock->xdlist;
			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
				xaddr = addressPXD(pxd);
				xlen = lengthPXD(pxd);
				dbFree(ip, xaddr, (s64) xlen);
				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	}
}

/*
 *      txFreelock()
 *
 * function:    remove tlock from inode anonymous locklist
 */
void txFreelock(struct inode *ip)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	struct tlock *xtlck, *tlck;
	lid_t xlid = 0, lid;

	if (!jfs_ip->atlhead)
		return;

	xtlck = (struct tlock *) &jfs_ip->atlhead;

	while ((lid = xtlck->next)) {
		tlck = lid_to_tlock(lid);
		if (tlck->flag & tlckFREELOCK) {
			xtlck->next = tlck->next;
			txLockFree(lid);
		} else {
			xtlck = tlck;
			xlid = lid;
		}
	}

	if (jfs_ip->atlhead)
		jfs_ip->atltail = xlid;
	else {
		jfs_ip->atltail = 0;
		/*
		 * If inode was on anon_list, remove it
		 */
		TXN_LOCK();
		list_del_init(&jfs_ip->anon_inode_list);
		TXN_UNLOCK();
	}
}

/*
 *      txAbort()
 *
 * function: abort tx before commit;
 *
 * frees line-locks and segment locks for all
 * segments in comdata structure.
 * Optionally sets state of file-system to FM_DIRTY in super-block.
 * log ages of page-frames in memory held by the caller
 * are reset to 0 (to avoid logwrap).
 */
void txAbort(tid_t tid, int dirty)
{
	lid_t lid, next;
	struct metapage *mp;
	struct tblock *tblk = tid_to_tblock(tid);
	struct tlock *tlck;

	jfs_warn("txAbort: tid:%d dirty:0x%x", tid, dirty);

	/*
	 * free tlocks of the transaction
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;
		mp = tlck->mp;
		JFS_IP(tlck->ip)->xtlid = 0;

		if (mp) {
			mp->lid = 0;

			/*
			 * reset lsn of page to avoid logwrap:
			 *
			 * (page may have been previously committed by another
			 * transaction(s) but has not been paged, i.e.,
			 * it may be on logsync list even though it has not
			 * been logged for the current tx.)
			 */
			if (mp->xflag & COMMIT_PAGE && mp->lsn)
				LogSyncRelease(mp);
		}
		/* insert tlock at head of freelist */
		TXN_LOCK();
		txLockFree(lid);
		TXN_UNLOCK();
	}

	/* caller will free the transaction block */

	tblk->next = tblk->last = 0;

	/*
	 * mark
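
The list reversal at the top of txForce() above works on lid-indexed tlocks, which can obscure the underlying idiom. The following is a minimal, self-contained C sketch of the same in-place reversal of a singly linked list using plain pointers; the node type and the main() driver are hypothetical stand-ins for illustration only, not JFS's actual lid/tlock machinery.

#include <stdio.h>

/* hypothetical stand-in for a tlock: only the link matters here */
struct node {
	struct node *next;
	int val;
};

/* reverse a singly linked list in place, as txForce() does with
 * tblk->next / tlck->next so that pages are later written
 * right to left, bottom up */
static struct node *reverse(struct node *head)
{
	struct node *prev = NULL;

	while (head) {
		struct node *next = head->next;	/* save the rest of the list */
		head->next = prev;		/* point current node backwards */
		prev = head;			/* grow the reversed prefix */
		head = next;			/* advance into the old list */
	}
	return prev;				/* new head = old tail */
}

int main(void)
{
	struct node n[3] = { { &n[1], 1 }, { &n[2], 2 }, { NULL, 3 } };
	struct node *p;

	for (p = reverse(&n[0]); p; p = p->next)
		printf("%d ", p->val);		/* prints: 3 2 1 */
	printf("\n");
	return 0;
}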
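
txFreeMap() above decides whether to touch the persistent map, the working map, or both, based on its maptype argument. As a reading aid, here is a hedged sketch that condenses that dispatch into a tiny harness; the enum values and the free_pmap()/free_wmap() stubs are made up for illustration and do not correspond to the real JFS constants or to dbUpdatePMap()/dbFree().

#include <stdio.h>

/* illustrative stand-ins for the JFS commit-map modes (values are made up) */
enum maptype { COMMIT_PMAP, COMMIT_PWMAP, COMMIT_WMAP };

static void free_pmap(void) { printf("free from persistent map\n"); }
static void free_wmap(void) { printf("free from working map\n"); }

/* mirrors the two top-level tests in txFreeMap():
 * PMAP or PWMAP -> persistent map; PWMAP or WMAP -> working map */
static void free_map(enum maptype t)
{
	if (t == COMMIT_PMAP || t == COMMIT_PWMAP)
		free_pmap();
	if (t == COMMIT_PWMAP || t == COMMIT_WMAP)
		free_wmap();
}

int main(void)
{
	free_map(COMMIT_PMAP);	/* persistent map only */
	free_map(COMMIT_PWMAP);	/* both maps */
	free_map(COMMIT_WMAP);	/* working map only */
	return 0;
}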
