⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 jfs_logmgr.c

📁 jfs-2.4-1.1.7.tar.gz jfs 2.4-1.1.7 源码
💻 C
📖 第 1 页 / 共 4 页
字号:
			 * of the pages since log pages will be added
			 * continuously
			 */
			if (bp->l_wqnext == NULL)
				lbmWrite(log, bp, 0, 0);
		} else {
			/*
			 * No current GC leader, initiate group commit
			 */
			log->cflag |= logGC_PAGEOUT;
			lmGCwrite(log, 0);
		}
	}
	/* page is not bound with outstanding tblk:
	 * init write or mark it to be redriven (lbmWRITE)
	 */
	else {
		/* finalize the page */
		bp->l_ceor = bp->l_eor;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 0);
	}
	LOGGC_UNLOCK(log);

	/*
	 *      allocate/initialize next page
	 */
	/* if log wraps, the first data page of log is 2
	 * (0 never used, 1 is superblock).
	 */
	log->page = (pn == log->size - 1) ? 2 : pn + 1;
	log->eor = LOGPHDRSIZE;	/* ? valid page empty/full at logRedo() */

	/* allocate/initialize next log page buffer */
	nextbp = lbmAllocate(log, log->page);
	nextbp->l_eor = log->eor;
	log->bp = nextbp;

	/* initialize next log page */
	lp = (struct logpage *) nextbp->l_ldata;
	lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);

	return 0;
}


/*
 * NAME:	lmGroupCommit()
 *
 * FUNCTION:	group commit
 *	initiate pageout of the pages with COMMIT in the order of
 *	page number - redrive pageout of the page at the head of
 *	pageout queue until full page has been written.
 *
 * RETURN:	0 on success, -EIO if the group commit saw an I/O error
 *	(tblkGC_ERROR set on the transaction block).
 *
 * NOTE:
 *	LOGGC_LOCK serializes log group commit queue, and
 *	transaction blocks on the commit queue.
 *	N.B. LOG_LOCK is NOT held during lmGroupCommit().
 */
int lmGroupCommit(struct jfs_log * log, struct tblock * tblk)
{
	int rc = 0;

	LOGGC_LOCK(log);

	/* group committed already ? 
 */
	if (tblk->flag & tblkGC_COMMITTED) {
		if (tblk->flag & tblkGC_ERROR)
			rc = -EIO;

		LOGGC_UNLOCK(log);
		return rc;
	}
	jfs_info("lmGroup Commit: tblk = 0x%p, gcrtc = %d", tblk, log->gcrtc);

	if (tblk->xflag & COMMIT_LAZY)
		tblk->flag |= tblkGC_LAZY;

	/* become group leader only when no pageout is already in flight
	 * and there is work queued; lazy commits only lead when a log
	 * flush has been requested (log_FLUSH).
	 */
	if ((!(log->cflag & logGC_PAGEOUT)) && log->cqueue.head &&
	    (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag))) {
		/*
		 * No pageout in progress
		 *
		 * start group commit as its group leader.
		 */
		log->cflag |= logGC_PAGEOUT;

		lmGCwrite(log, 0);
	}

	if (tblk->xflag & COMMIT_LAZY) {
		/*
		 * Lazy transactions can leave now
		 */
		LOGGC_UNLOCK(log);
		return 0;
	}

	/* lmGCwrite gives up LOGGC_LOCK, check again */

	if (tblk->flag & tblkGC_COMMITTED) {
		if (tblk->flag & tblkGC_ERROR)
			rc = -EIO;

		LOGGC_UNLOCK(log);
		return rc;
	}

	/* upcount transaction waiting for completion
	 */
	log->gcrtc++;
	tblk->flag |= tblkGC_READY;

	/* sleep (dropping/retaking LOGGC_LOCK) until lmPostGC() marks
	 * this tblk COMMITTED and wakes us via LOGGC_WAKEUP.
	 */
	__SLEEP_COND(tblk->gcwait, (tblk->flag & tblkGC_COMMITTED),
		     LOGGC_LOCK(log), LOGGC_UNLOCK(log));

	/* removed from commit queue */
	if (tblk->flag & tblkGC_ERROR)
		rc = -EIO;

	LOGGC_UNLOCK(log);
	return rc;
}

/*
 * NAME:	lmGCwrite()
 *
 * FUNCTION:	group commit write
 *	initiate write of log page, building a group of all transactions
 *	with commit records on that page.
 *
 * RETURN:	None
 *
 * NOTE:
 *	LOGGC_LOCK must be held by caller.
 *	N.B. LOG_LOCK is NOT held during lmGroupCommit().
 */
static void lmGCwrite(struct jfs_log * log, int cant_write)
{
	struct lbuf *bp;
	struct logpage *lp;
	int gcpn;		/* group commit page number */
	struct tblock *tblk;
	struct tblock *xtblk;

	/*
	 * build the commit group of a log page
	 *
	 * scan commit queue and make a commit group of all
	 * transactions with COMMIT records on the same log page.
	 
 */

	/* get the head tblk on the commit queue */
	tblk = xtblk = log->cqueue.head;
	gcpn = tblk->pn;

	/* mark every queued transaction whose commit record lives on
	 * page gcpn; xtblk tracks the last one seen.
	 */
	while (tblk && tblk->pn == gcpn) {
		xtblk = tblk;

		/* state transition: (QUEUE, READY) -> COMMIT */
		tblk->flag |= tblkGC_COMMIT;
		tblk = tblk->cqnext;
	}
	tblk = xtblk;		/* last tblk of the page */

	/*
	 * pageout to commit transactions on the log page.
	 */
	bp = (struct lbuf *) tblk->bp;
	lp = (struct logpage *) bp->l_ldata;
	/* is page already full ? */
	if (tblk->flag & tblkGC_EOP) {
		/* mark page to free at end of group commit of the page */
		tblk->flag &= ~tblkGC_EOP;
		tblk->flag |= tblkGC_FREE;
		bp->l_ceor = bp->l_eor;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmGC,
			 cant_write);
		INCREMENT(lmStat.full_page);
	}
	/* page is not yet full */
	else {
		bp->l_ceor = tblk->eor;	/* ? bp->l_ceor = bp->l_eor; */
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmGC, cant_write);
		INCREMENT(lmStat.partial_page);
	}
}

/*
 * NAME:	lmPostGC()
 *
 * FUNCTION:	group commit post-processing
 *	Processes transactions after their commit records have been written
 *	to disk, redriving log I/O if necessary.
 *
 * RETURN:	None
 *
 * NOTE:
 *	This routine is called at interrupt time by lbmIODone
 */
static void lmPostGC(struct lbuf * bp)
{
	unsigned long flags;
	struct jfs_log *log = bp->l_log;
	struct logpage *lp;
	struct tblock *tblk;

	/* interrupt context: take the gc spinlock with IRQs saved
	 * rather than the sleeping LOGGC_LOCK.
	 */
	//LOGGC_LOCK(log);
	spin_lock_irqsave(&log->gclock, flags);
	/*
	 * current pageout of group commit completed.
	 *
	 * remove/wakeup transactions from commit queue who were
	 * group committed with the current log page
	 */
	while ((tblk = log->cqueue.head) && (tblk->flag & tblkGC_COMMIT)) {
		/* if transaction was marked GC_COMMIT then
		 * it has been shipped in the current pageout
		 * and made it to disk - it is committed.
		 
 */

		/* propagate any buffer-level write error to the tblk */
		if (bp->l_flag & lbmERROR)
			tblk->flag |= tblkGC_ERROR;

		/* remove it from the commit queue */
		log->cqueue.head = tblk->cqnext;
		if (log->cqueue.head == NULL)
			log->cqueue.tail = NULL;
		tblk->flag &= ~tblkGC_QUEUE;
		tblk->cqnext = 0;

		if (tblk == log->flush_tblk) {
			/* we can stop flushing the log now */
			clear_bit(log_FLUSH, &log->flag);
			log->flush_tblk = NULL;
		}

		jfs_info("lmPostGC: tblk = 0x%p, flag = 0x%x", tblk,
			 tblk->flag);

		if (!(tblk->xflag & COMMIT_FORCE))
			/*
			 * Hand tblk over to lazy commit thread
			 */
			txLazyUnlock(tblk);
		else {
			/* state transition: COMMIT -> COMMITTED */
			tblk->flag |= tblkGC_COMMITTED;

			if (tblk->flag & tblkGC_READY)
				log->gcrtc--;

			LOGGC_WAKEUP(tblk);
		}

		/* was page full before pageout ?
		 * (and this is the last tblk bound with the page)
		 */
		if (tblk->flag & tblkGC_FREE)
			lbmFree(bp);
		/* did page become full after pageout ?
		 * (and this is the last tblk bound with the page)
		 */
		else if (tblk->flag & tblkGC_EOP) {
			/* finalize the page */
			lp = (struct logpage *) bp->l_ldata;
			bp->l_ceor = bp->l_eor;
			lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);

			jfs_info("lmPostGC: calling lbmWrite");
			lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE,
				 1);
		}
	}

	/* are there any transactions who have entered lmGroupCommit()
	 * (whose COMMITs are after that of the last log page written.
	 * They are waiting for new group commit (above at (SLEEP 1))
	 * or lazy transactions are on a full (queued) log page,
	 * select the latest ready transaction as new group leader and
	 * wake her up to lead her group.
	 */
	if ((tblk = log->cqueue.head) &&
	    ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) ||
	     test_bit(log_FLUSH, &log->flag)))
		/*
		 * Call lmGCwrite with new group leader
		 */
		lmGCwrite(log, 1);

	/* no transaction are ready yet (transactions are only just
	 * queued (GC_QUEUE) and not entered for group commit yet).
	 * the first transaction entering group commit
	 * will elect herself as new group leader.
	
 */
	else
		log->cflag &= ~logGC_PAGEOUT;

	//LOGGC_UNLOCK(log);
	spin_unlock_irqrestore(&log->gclock, flags);

	return;
}

/*
 * NAME:	lmLogSync()
 *
 * FUNCTION:	write log SYNCPT record for specified log
 *	if new sync address is available
 *	(normally the case if sync() is executed by back-ground
 *	process).
 *	if not, explicitly run jfs_blogsync() to initiate
 *	getting of new sync address.
 *	calculate new value of i_nextsync which determines when
 *	this code is called again.
 *
 *	this is called only from lmLog().
 *
 * PARAMETER:	ip	- pointer to logs inode.
 *
 * RETURN:	lsn of the written (or current) SYNCPT record.
 *			
 * serialization: LOG_LOCK() held on entry/exit
 */
static int lmLogSync(struct jfs_log * log, int nosyncwait)
{
	int logsize;
	int written;		/* written since last syncpt */
	int free;		/* free space left available */
	int delta;		/* additional delta to write normally */
	int more;		/* additional write granted */
	struct lrd lrd;
	int lsn;
	struct logsyncblk *lp;

	/*
	 *      forward syncpt
	 */
	/* if last sync is same as last syncpt,
	 * invoke sync point forward processing to update sync.
	 */

	if (log->sync == log->syncpt) {
		LOGSYNC_LOCK(log);
		/* ToDo: push dirty metapages out to disk */
//              bmLogSync(log);

		/* sync advances to the oldest entry still on the
		 * synclist, or all the way to lsn if the list is empty.
		 */
		if (list_empty(&log->synclist))
			log->sync = log->lsn;
		else {
			lp = list_entry(log->synclist.next,
					struct logsyncblk, synclist);
			log->sync = lp->lsn;
		}
		LOGSYNC_UNLOCK(log);

	}

	/* if sync is different from last syncpt,
	 * write a SYNCPT record with syncpt = sync.
	 
 * reset syncpt = sync
	 */
	if (log->sync != log->syncpt) {
		struct super_block *sb = log->sb;
		struct jfs_sb_info *sbi = JFS_SBI(sb);

		/*
		 * We need to make sure all of the "written" metapages
		 * actually make it to disk
		 */
		fsync_inode_data_buffers(sbi->ipbmap);
		fsync_inode_data_buffers(sbi->ipimap);
		fsync_inode_data_buffers(sb->s_bdev->bd_inode);

		lrd.logtid = 0;
		lrd.backchain = 0;
		lrd.type = cpu_to_le16(LOG_SYNCPT);
		lrd.length = 0;
		lrd.log.syncpt.sync = cpu_to_le32(log->sync);
		lsn = lmWriteRecord(log, NULL, &lrd, NULL);

		log->syncpt = log->sync;
	} else
		lsn = log->lsn;

	/*
	 *      setup next syncpt trigger (SWAG)
	 */
	logsize = log->logsize;

	logdiff(written, lsn, log);
	free = logsize - written;
	delta = LOGSYNC_DELTA(logsize);
	more = min(free / 2, delta);
	if (more < 2 * LOGPSIZE) {
		jfs_warn("\n ... Log Wrap ... Log Wrap ... Log Wrap ...\n");
		/*
		 *      log wrapping
		 *
		 * option 1 - panic ? No.!
		 * option 2 - shutdown file systems
		 *            associated with log ?
		 * option 3 - extend log ?
		 */
		/*
		 * option 4 - second chance
		 *
		 * mark log wrapped, and continue.
		 * when all active transactions are completed,
		 * mark log valid for recovery.
		 * if crashed during invalid state, log state
		 * implies invalid log, forcing fsck().
		 */
		/* mark log state log wrap in log superblock */
		/* log->state = LOGWRAP; */

		/* reset sync point computation */
		log->syncpt = log->sync = lsn;
		log->nextsync = delta;
	} else
		/* next syncpt trigger = written + more */
		log->nextsync = written + more;

	/* return if lmLogSync() from outside of transaction, e.g., sync() */
	if (nosyncwait)
		return lsn;

	/* if number of bytes written from last sync point is more
	 * than 1/4 of the log size, stop new transactions from
	 * starting until all current transactions are completed
	 * by setting syncbarrier flag.
	 
 */
	if (written > LOGSYNC_BARRIER(logsize) && logsize > 32 * LOGPSIZE) {
		set_bit(log_SYNCBARRIER, &log->flag);
		jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn,
			 log->syncpt);

		/*
		 * We may have to initiate group commit
		 */
		jfs_flush_journal(log, 0);
	}

	return lsn;
}


/*
 * NAME:	lmLogOpen()
 *
 * FUNCTION:    open the log on first open;
 *	insert filesystem in the active list of the log.
 *
 * PARAMETER:	ipmnt	- file system mount inode
 *		iplog 	- log inode (out)
 *
 * RETURN:	0 on success; -ENOMEM, -ENODEV, or an error from
 *	blkdev_get()/lmLogInit()/lmLogFileSystem() on failure.
 *
 * serialization:
 */
int lmLogOpen(struct super_block *sb, struct jfs_log ** logptr)
{
	int rc;
	struct block_device *bdev;
	struct jfs_log *log;

	if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL)))
		return -ENOMEM;
	memset(log, 0, sizeof(struct jfs_log));
	init_waitqueue_head(&log->syncwait);

	log->sb = sb;		/* This should be a list */

	if (!(JFS_SBI(sb)->mntflag & JFS_INLINELOG))
		goto externalLog;

	/*
	 *      in-line log in host file system
	 *
	 * file system to log have 1-to-1 relationship;
	 */

	set_bit(log_INLINELOG, &log->flag);
	log->bdev = sb->s_bdev;
	log->base = addressPXD(&JFS_SBI(sb)->logpxd);
	log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
	    (L2LOGPSIZE - sb->s_blocksize_bits);
	log->l2bsize = sb->s_blocksize_bits;
	ASSERT(L2LOGPSIZE >= sb->s_blocksize_bits);

	/*
	 * initialize log.
	 
 */
	if ((rc = lmLogInit(log)))
		goto free;
	goto out;

	/*
	 *      external log as separate logical volume
	 *
	 * file systems to log may have n-to-1 relationship;
	 */
      externalLog:

	/*
	 * TODO: Check for already opened log devices
	 */

	if (!(bdev = bdget(kdev_t_to_nr(JFS_SBI(sb)->logdev)))) {
		rc = -ENODEV;
		goto free;
	}

	if ((rc = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_FS)))
		goto free;

	log->bdev = bdev;
	memcpy(log->uuid, JFS_SBI(sb)->loguuid, sizeof(log->uuid));
	
	/*
	 * initialize log:
	 */
	if ((rc = lmLogInit(log)))
		goto close;

	/*
	 * add file system to log active file system list
	 */
	if ((rc = lmLogFileSystem(log, JFS_SBI(sb)->uuid, 1)))
		goto shutdown;

      out:
	*logptr = log;
	return 0;

	/*
	 *      unwind on error
	 *
	 * labels unwind in reverse order of acquisition:
	 * shutdown -> close -> free.
	 */
      shutdown:		/* unwind lbmLogInit() */
	lbmLogShutdown(log);

      close:		/* close external log device */
	blkdev_put(bdev, BDEV_FS);

      free:		/* free log descriptor */
	kfree(log);

	jfs_warn("lmLogOpen: exit(%d)", rc);
	return rc;
}

/*
 * NAME:	lmLogInit()
 *
 * FUNCTION:	log initialization at first log open.
 *
 *	logredo() (or logformat()) should have been run previously.
 *	initialize the log inode from log superblock.
 *	set the log state in the superblock to LOGMOUNT and
 *	write SYNCPT log record.
 *		
 * PARAMETER:	log	- log structure
 *
 * RETURN:	0	- if ok
 *		-EINVAL	- bad log magic number or superblock dirty
 *		error returned from logwait()
 *			
 * serialization: single first open thread
 */
int lmLogInit(struct jfs_log * log)
{
	int rc = 0;
	struct lrd lrd;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	struct lbuf *bp;
	struct logpage *lp;
	int lsn;

	jfs_info("lmLogInit: log:0x%p", log);

	/*
	 * log inode is overlaid on generic inode where
	 * dinode have been zeroed out by iRead();
	 */

	/*
	 * initialize log i/o
	 */
	if ((rc = lbmLogInit(log)))
		return rc;

	/*
	 * validate log superblock
	 */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -