⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 jfs_txnmgr.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
 * function:	log from maplock of freed data extents;
 */
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		   struct tlock * tlck)
{
	struct pxd_lock *pxdlock;
	int i, nlock;
	pxd_t *pxd;

	/*
	 *	page relocation: free the source page extent
	 *
	 * a maplock for txUpdateMap() for free of the page
	 * has been formatted at txLock() time saving the src
	 * relocated page address;
	 */
	if (tlck->type & tlckRELOCATE) {
		/* log LOG_NOREDOPAGE of the old relocated page
		 * for logredo() to start NoRedoPage filter;
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxd = &lrd->log.redopage.pxd;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* (N.B. currently, logredo() does NOT update bmap
		 * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
		 * if page free from relocation, LOG_UPDATEMAP log is
		 * specifically generated now for logredo()
		 * to update bmap for free of src relocated page;
		 * (new flag LOG_RELOCATE may be introduced which will
		 * inform logredo() to start NORedoPage filter and also
		 * update block allocation map at the same time, thus
		 * avoiding an extra log write);
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
		lrd->log.updatemap.nxd = cpu_to_le16(1);
		lrd->log.updatemap.pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* a maplock for txUpdateMap() for free of the page
		 * has been formatted at txLock() time;
		 */
		tlck->flag |= tlckUPDATEMAP;
		return;
	}

	/*
	 * Otherwise it's not a relocate request
	 *
	 */
	else {
		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * free of truncated/relocated delta extent of the data;
		 * e.g.: external EA extent, relocated/truncated extent
		 * from xtTailgate();
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		/* one LOG_UPDATEMAP record is emitted per pxd_lock entry;
		 * each record carries exactly one pxd (nxd == 1);
		 */
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
			jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
				 (ulong) addressPXD(&pxdlock->pxd),
				 lengthPXD(&pxdlock->pxd));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
}

/*
 *	txEA()
 *
 * function:	acquire maplock for EA/ACL extents or
 *		set COMMIT_INLINE flag;
 */
void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
{
	struct tlock *tlck = NULL;
	struct pxd_lock *maplock = NULL, *pxdlock = NULL;

	/*
	 * format maplock for alloc of new EA extent
	 */
	if (newea) {
		/* Since the newea could be a completely zeroed entry we need to
		 * check for the two flags which indicate we should actually
		 * commit new EA data
		 */
		if (newea->flag & DXD_EXTENT) {
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			pxdlock->flag = mlckALLOCPXD;
			PXDaddress(&pxdlock->pxd, addressDXD(newea));
			PXDlength(&pxdlock->pxd, lengthDXD(newea));
			/* advance to the second pxd_lock slot so a free of
			 * the old EA extent (below) lands after this entry;
			 */
			pxdlock++;
			maplock->index = 1;
		} else if (newea->flag & DXD_INLINE) {
			tlck = NULL;

			set_cflag(COMMIT_Inlineea, ip);
		}
	}

	/*
	 * format maplock for free of old EA extent
	 */
	if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
		if (tlck == NULL) {
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			maplock->index = 0;
		}
		pxdlock->flag = mlckFREEPXD;
		PXDaddress(&pxdlock->pxd, addressDXD(oldea));
		PXDlength(&pxdlock->pxd, lengthDXD(oldea));
		maplock->index++;
	}
}

/*
 *	txForce()
 *
 * function: synchronously write pages locked by transaction
 *	     after txLog() but before txUpdateMap();
 */
static void txForce(struct tblock * tblk)
{
	struct tlock *tlck;
	lid_t lid, next;
	struct metapage *mp;

	/*
	 * reverse the order of transaction tlocks in
	 * careful update order of address index pages
	 * (right to left, bottom up)
	 */
	/* in-place reversal of the lid-linked tlock list headed at
	 * tblk->next;
	 */
	tlck = lid_to_tlock(tblk->next);
	lid = tlck->next;
	tlck->next = 0;
	while (lid) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;
		tlck->next = tblk->next;
		tblk->next = lid;
		lid = next;
	}

	/*
	 * synchronously write the page, and
	 * hold the page for txUpdateMap();
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			if (tlck->flag & tlckWRITEPAGE) {
				tlck->flag &= ~tlckWRITEPAGE;

				/* do not release page to freelist */
				force_metapage(mp);
#if 0
				/*
				 * The "right" thing to do here is to
				 * synchronously write the metadata.
				 * With the current implementation this
				 * is hard since write_metapage requires
				 * us to kunmap & remap the page.  If we
				 * have tlocks pointing into the metadata
				 * pages, we don't want to do this.  I think
				 * we can get by with synchronously writing
				 * the pages when they are released.
				 */
				assert(mp->nohomeok);
				set_bit(META_dirty, &mp->flag);
				set_bit(META_sync, &mp->flag);
#endif
			}
		}
	}
}

/*
 *	txUpdateMap()
 *
 * function:	update persistent allocation map (and working map
 *		if appropriate);
 *
 * parameter:
 */
static void txUpdateMap(struct tblock * tblk)
{
	struct inode *ip;
	struct inode *ipimap;
	lid_t lid;
	struct tlock *tlck;
	struct maplock *maplock;
	struct pxd_lock pxdlock;
	int maptype;
	int k, nlock;
	struct metapage *mp = NULL;

	ipimap = JFS_SBI(tblk->sb)->ipimap;

	maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;

	/*
	 *	update block allocation map
	 *
	 * update allocation state in pmap (and wmap) and
	 * update lsn of the pmap page;
	 */
	/*
	 * scan each tlock/page of transaction for block allocation/free:
	 *
	 * for each tlock/page of transaction, update map.
	 *  ? are there tlock for pmap and pwmap at the same time ?
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);

		if ((tlck->flag & tlckUPDATEMAP) == 0)
			continue;

		if (tlck->flag & tlckFREEPAGE) {
			/*
			 * Another thread may attempt to reuse freed space
			 * immediately, so we want to get rid of the metapage
			 * before anyone else has a chance to get it.
			 * Lock metapage, update maps, then invalidate
			 * the metapage.
			 */
			mp = tlck->mp;
			ASSERT(mp->xflag & COMMIT_PAGE);
			grab_metapage(mp);
		}

		/*
		 * extent list:
		 * . in-line PXD list:
		 * . out-of-line XAD list:
		 */
		maplock = (struct maplock *) & tlck->lock;
		nlock = maplock->index;

		for (k = 0; k < nlock; k++, maplock++) {
			/*
			 * allocate blocks in persistent map:
			 *
			 * blocks have been allocated from wmap at alloc time;
			 */
			if (maplock->flag & mlckALLOC) {
				txAllocPMap(ipimap, maplock, tblk);
			}
			/*
			 * free blocks in persistent and working map:
			 * blocks will be freed in pmap and then in wmap;
			 *
			 * ? tblock specifies the PMAP/PWMAP based upon
			 * transaction
			 *
			 * free blocks in persistent map:
			 * blocks will be freed from wmap at last reference
			 * release of the object for regular files;
			 *
			 * Always free blocks from both persistent & working
			 * maps for directories
			 */
			else {	/* (maplock->flag & mlckFREE) */
				if (tlck->flag & tlckDIRECTORY)
					txFreeMap(ipimap, maplock,
						  tblk, COMMIT_PWMAP);
				else
					txFreeMap(ipimap, maplock,
						  tblk, maptype);
			}
		}
		if (tlck->flag & tlckFREEPAGE) {
			if (!(tblk->flag & tblkGC_LAZY)) {
				/* This is equivalent to txRelease */
				ASSERT(mp->lid == lid);
				tlck->mp->lid = 0;
			}
			assert(mp->nohomeok == 1);
			metapage_homeok(mp);
			discard_metapage(mp);
			tlck->mp = NULL;
		}
	}
	/*
	 *	update inode allocation map
	 *
	 * update allocation state in pmap and
	 * update lsn of the pmap page;
	 * update in-memory inode flag/state
	 *
	 * unlock mapper/write lock
	 */
	if (tblk->xflag & COMMIT_CREATE) {
		diUpdatePMap(ipimap, tblk->ino, false, tblk);
		/* update persistent block allocation map
		 * for the allocation of inode extent;
		 */
		pxdlock.flag = mlckALLOCPXD;
		pxdlock.pxd = tblk->u.ixpxd;
		pxdlock.index = 1;
		txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
	} else if (tblk->xflag & COMMIT_DELETE) {
		ip = tblk->u.ip;
		diUpdatePMap(ipimap, ip->i_ino, true, tblk);
		iput(ip);
	}
}

/*
 *	txAllocPMap()
 *
 * function: allocate from persistent map;
 *
 * parameter:
 *	ipbmap	-
 *	malock	-
 *		xad list:
 *		pxd:
 *
 *	maptype -
 *		allocate from persistent map;
 *		free from persistent map;
 *		(e.g., tmp file - free from working map at release
 *		 of last reference);
 *		free from persistent and working map;
 *
 *	lsn	- log sequence number;
 */
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
			struct tblock * tblk)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	/*
	 * allocate from persistent map;
	 */
	if (maplock->flag & mlckALLOCXADLIST) {
		xadlistlock = (struct xdlistlock *) maplock;
		xad = xadlistlock->xdlist;
		for (n = 0; n < xadlistlock->count; n++, xad++) {
			if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbUpdatePMap(ipbmap, false, xaddr,
					     (s64) xlen, tblk);
				/* clear new/extended state once persisted */
				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
				jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	} else if (maplock->flag & mlckALLOCPXD) {
		pxdlock = (struct pxd_lock *) maplock;
		xaddr = addressPXD(&pxdlock->pxd);
		xlen = lengthPXD(&pxdlock->pxd);
		dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen, tblk);
		jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
	} else {		/* (maplock->flag & mlckALLOCPXDLIST) */

		pxdlistlock = (struct xdlistlock *) maplock;
		pxd = pxdlistlock->xdlist;
		for (n = 0; n < pxdlistlock->count; n++, pxd++) {
			xaddr = addressPXD(pxd);
			xlen = lengthPXD(pxd);
			dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen,
				     tblk);
			jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		}
	}
}

/*
 *	txFreeMap()
 *
 * function:	free from persistent and/or working map;
 *
 * todo: optimization
 */
void txFreeMap(struct inode *ip,
	       struct maplock * maplock, struct tblock * tblk, int maptype)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
		 tblk, maplock, maptype);

	/*
	 * free from persistent map;
	 */
	if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
		if (maplock->flag & mlckFREEXADLIST) {
			xadlistlock = (struct xdlistlock *) maplock;
			xad = xadlistlock->xdlist;
			for (n = 0; n < xadlistlock->count; n++, xad++) {
				if (!(xad->flag & XAD_NEW)) {
					xaddr = addressXAD(xad);
					xlen = lengthXAD(xad);
					dbUpdatePMap(ipbmap, true, xaddr,
						     (s64) xlen, tblk);
					jfs_info("freePMap: xaddr:0x%lx "
						 "xlen:%d",
						 (ulong) xaddr, xlen);
				}
			}
		} else if (maplock->flag & mlckFREEPXD) {
			pxdlock = (struct pxd_lock *) maplock;
			xaddr = addressPXD(&pxdlock->pxd);
			xlen = lengthPXD(&pxdlock->pxd);
			dbUpdatePMap(ipbmap, true, xaddr, (s64) xlen,
				     tblk);
			jfs_info("freePMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		} else {	/* (maplock->flag & mlckALLOCPXDLIST) */

			pxdlistlock = (struct xdlistlock *) maplock;
			pxd = pxdlistlock->xdlist;
			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
				xaddr = addressPXD(pxd);
				xlen = lengthPXD(pxd);
				dbUpdatePMap(ipbmap, true, xaddr,
					     (s64) xlen, tblk);
				jfs_info("freePMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	}

	/*
	 * free from working map;
	 */
	if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
		if (maplock->flag & mlckFREEXADLIST) {
			xadlistlock = (struct xdlistlock *) maplock;
			xad = xadlistlock->xdlist;
			for (n = 0; n < xadlistlock->count; n++, xad++) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbFree(ip, xaddr, (s64) xlen);
				xad->flag = 0;
				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		} else if (maplock->flag & mlckFREEPXD) {
			pxdlock = (struct pxd_lock *) maplock;
			xaddr = addressPXD(&pxdlock->pxd);
			xlen = lengthPXD(&pxdlock->pxd);
			dbFree(ip, xaddr, (s64) xlen);
			jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		} else {	/* (maplock->flag & mlckFREEPXDLIST) */

			pxdlistlock = (struct xdlistlock *) maplock;
			pxd = pxdlistlock->xdlist;
			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
				xaddr = addressPXD(pxd);
				xlen = lengthPXD(pxd);
				dbFree(ip, xaddr, (s64) xlen);
				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	}
}

/*
 *	txFreelock()
 *
 * function:	remove tlock from inode anonymous locklist
 */
void txFreelock(struct inode *ip)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	struct tlock *xtlck, *tlck;
	lid_t xlid = 0, lid;

	if (!jfs_ip->atlhead)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -