
📄 jfs_logmgr.c

📁 jfs-2.4-1.1.7.tar.gz (jfs 2.4-1.1.7 source code)
💻 C
📖 Page 1 of 4
	if (!test_bit(log_INLINELOG, &log->flag))
		log->l2bsize = 12;	/* XXX kludge alert XXX */
	if ((rc = lbmRead(log, 1, &bpsuper)))
		goto errout10;

	logsuper = (struct logsuper *) bpsuper->l_ldata;

	if (logsuper->magic != cpu_to_le32(LOGMAGIC)) {
		jfs_warn("*** Log Format Error ! ***");
		rc = -EINVAL;
		goto errout20;
	}

	/* logredo() should have been run successfully. */
	if (logsuper->state != cpu_to_le32(LOGREDONE)) {
		jfs_warn("*** Log Is Dirty ! ***");
		rc = -EINVAL;
		goto errout20;
	}

	/* initialize log inode from log superblock */
	if (test_bit(log_INLINELOG,&log->flag)) {
		if (log->size != le32_to_cpu(logsuper->size)) {
			rc = -EINVAL;
			goto errout20;
		}
		jfs_info("lmLogInit: inline log:0x%p base:0x%Lx size:0x%x",
			log, (unsigned long long) log->base, log->size);
	} else {
		if (memcmp(logsuper->uuid, log->uuid, 16)) {
			jfs_warn("wrong uuid on JFS log device");
			goto errout20;
		}
		log->size = le32_to_cpu(logsuper->size);
		log->l2bsize = le32_to_cpu(logsuper->l2bsize);
		jfs_info("lmLogInit: external log:0x%p base:0x%Lx size:0x%x",
			log, (unsigned long long) log->base, log->size);
	}

	log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
	log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page);

	/* check for disabled journaling to disk */
	if (JFS_SBI(log->sb)->flag & JFS_NOINTEGRITY) {
		log->no_integrity = 1;
		log->ni_page = log->page;
		log->ni_eor = log->eor;
	}
	else
		log->no_integrity = 0;

	/*
	 * initialize for log append write mode
	 */
	/* establish current/end-of-log page/buffer */
	if ((rc = lbmRead(log, log->page, &bp)))
		goto errout20;

	lp = (struct logpage *) bp->l_ldata;

	jfs_info("lmLogInit: lsn:0x%x page:%d eor:%d:%d",
		 le32_to_cpu(logsuper->end), log->page, log->eor,
		 le16_to_cpu(lp->h.eor));

//      ASSERT(log->eor == lp->h.eor);

	log->bp = bp;
	bp->l_pn = log->page;
	bp->l_eor = log->eor;

	/* initialize the group commit serialization lock */
	LOGGC_LOCK_INIT(log);

	/* if current page is full, move on to next page */
	if (log->eor >= LOGPSIZE - LOGPTLRSIZE)
		lmNextPage(log);

	/* allocate/initialize the log write serialization lock */
	LOG_LOCK_INIT(log);

	/*
	 * initialize log syncpoint
	 */
	/*
	 * write the first SYNCPT record with syncpoint = 0
	 * (i.e., log redo up to HERE !);
	 * remove current page from lbm write queue at end of pageout
	 * (to write log superblock update), but do not release to freelist;
	 */
	lrd.logtid = 0;
	lrd.backchain = 0;
	lrd.type = cpu_to_le16(LOG_SYNCPT);
	lrd.length = 0;
	lrd.log.syncpt.sync = 0;
	lsn = lmWriteRecord(log, NULL, &lrd, NULL);
	bp = log->bp;
	bp->l_ceor = bp->l_eor;
	lp = (struct logpage *) bp->l_ldata;
	lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
	lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0);
	if ((rc = lbmIOWait(bp, 0)))
		goto errout30;

	/* initialize logsync parameters */
	log->logsize = (log->size - 2) << L2LOGPSIZE;
	log->lsn = lsn;
	log->syncpt = lsn;
	log->sync = log->syncpt;
	log->nextsync = LOGSYNC_DELTA(log->logsize);

	jfs_info("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x",
		 log->lsn, log->syncpt, log->sync);

	LOGSYNC_LOCK_INIT(log);
	INIT_LIST_HEAD(&log->synclist);

	log->cqueue.head = log->cqueue.tail = NULL;
	log->flush_tblk = NULL;

	log->count = 0;

	/*
	 * initialize for lazy/group commit
	 */
	log->clsn = lsn;

	/*
	 * update/write superblock
	 */
	logsuper->state = cpu_to_le32(LOGMOUNT);
	log->serial = le32_to_cpu(logsuper->serial) + 1;
	logsuper->serial = cpu_to_le32(log->serial);
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	if ((rc = lbmIOWait(bpsuper, lbmFREE)))
		goto errout30;

	return 0;

	/*
	 *      unwind on error
	 */
      errout30:		/* release log page */
	lbmFree(bp);

      errout20:		/* release log superblock */
	lbmFree(bpsuper);

      errout10:		/* unwind lbmLogInit() */
	lbmLogShutdown(log);

	jfs_warn("lmLogInit: exit(%d)", rc);
	return rc;
}


/*
 * NAME:	lmLogClose()
 *
 * FUNCTION:	remove file system <ipmnt> from active list of log <iplog>
 *		and close it on last close.
 *
 * PARAMETER:	sb	- superblock
 *		log	- log inode
 *
 * RETURN:	errors from subroutines
 *
 * serialization:
 */
int lmLogClose(struct super_block *sb, struct jfs_log * log)
{
	int rc;

	jfs_info("lmLogClose: log:0x%p", log);

	if (!test_bit(log_INLINELOG, &log->flag))
		goto externalLog;

	/*
	 *      in-line log in host file system
	 */
	rc = lmLogShutdown(log);
	goto out;

	/*
	 *      external log as separate logical volume
	 */
      externalLog:
	lmLogFileSystem(log, JFS_SBI(sb)->uuid, 0);
	rc = lmLogShutdown(log);
	blkdev_put(log->bdev, BDEV_FS);

      out:
	kfree(log);
	jfs_info("lmLogClose: exit(%d)", rc);
	return rc;
}


/*
 * NAME:	jfs_flush_journal()
 *
 * FUNCTION:	initiate write of any outstanding transactions to the journal
 *		and optionally wait until they are all written to disk
 *
 *		wait == 0  flush until latest txn is committed, don't wait
 *		wait == 1  flush until latest txn is committed, wait
 *		wait > 1   flush until all txn's are complete, wait
 */
void jfs_flush_journal(struct jfs_log *log, int wait)
{
	int i;
	struct tblock *target;

	if (!log)
		/* jfs_write_inode may call us during read-only mount */
		return;

	jfs_info("jfs_flush_journal: log:0x%p wait=%d", log, wait);

	LOGGC_LOCK(log);

	target = log->cqueue.head;

	if (target) {
		/*
		 * This ensures that we will keep writing to the journal as long
		 * as there are unwritten commit records
		 */
		if (test_bit(log_FLUSH, &log->flag)) {
			/*
			 * We're already flushing.
			 * if flush_tblk is NULL, we are flushing everything,
			 * so leave it that way.  Otherwise, update it to the
			 * latest transaction
			 */
			if (log->flush_tblk)
				log->flush_tblk = target;
		} else {
			/* Only flush until latest transaction is committed */
			log->flush_tblk = target;
			set_bit(log_FLUSH, &log->flag);

			/*
			 * Initiate I/O on outstanding transactions
			 */
			if (!(log->cflag & logGC_PAGEOUT)) {
				log->cflag |= logGC_PAGEOUT;
				lmGCwrite(log, 0);
			}
		}
	}
	if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) {
		/* Flush until all activity complete */
		set_bit(log_FLUSH, &log->flag);
		log->flush_tblk = NULL;
	}

	if (wait && target && !(target->flag & tblkGC_COMMITTED)) {
		DECLARE_WAITQUEUE(__wait, current);

		add_wait_queue(&target->gcwait, &__wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		LOGGC_UNLOCK(log);
		schedule();
		current->state = TASK_RUNNING;
		LOGGC_LOCK(log);
		remove_wait_queue(&target->gcwait, &__wait);
	}
	LOGGC_UNLOCK(log);

	if (wait < 2)
		return;

	/*
	 * If there was recent activity, we may need to wait
	 * for the lazycommit thread to catch up
	 */
	if (log->cqueue.head || !list_empty(&log->synclist)) {
		for (i = 0; i < 800; i++) {	/* Too much? */
			current->state = TASK_INTERRUPTIBLE;
			schedule_timeout(HZ / 4);
			if ((log->cqueue.head == NULL) &&
			    list_empty(&log->synclist))
				break;
		}
	}
	assert(log->cqueue.head == NULL);
	assert(list_empty(&log->synclist));
	clear_bit(log_FLUSH, &log->flag);
}


/*
 * NAME:	lmLogShutdown()
 *
 * FUNCTION:	log shutdown at last LogClose().
 *
 *		write log syncpt record.
 *		update super block to set redone flag to 0.
 *
 * PARAMETER:	log	- log inode
 *
 * RETURN:	0	- success
 *
 * serialization: single last close thread
 */
int lmLogShutdown(struct jfs_log * log)
{
	int rc;
	struct lrd lrd;
	int lsn;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	struct lbuf *bp;
	struct logpage *lp;

	jfs_info("lmLogShutdown: log:0x%p", log);

	jfs_flush_journal(log, 2);

	/*
	 * We need to make sure all of the "written" metapages
	 * actually make it to disk
	 */
	fsync_no_super(log->sb->s_dev);

	/*
	 * write the last SYNCPT record with syncpoint = 0
	 * (i.e., log redo up to HERE !)
	 */
	lrd.logtid = 0;
	lrd.backchain = 0;
	lrd.type = cpu_to_le16(LOG_SYNCPT);
	lrd.length = 0;
	lrd.log.syncpt.sync = 0;

	/* check for disabled journaling to disk */
	if (JFS_SBI(log->sb)->flag & JFS_NOINTEGRITY) {
		log->no_integrity = 0;
		log->page = log->ni_page;
		log->eor = log->ni_eor;
	}

	lsn = lmWriteRecord(log, NULL, &lrd, NULL);
	bp = log->bp;
	lp = (struct logpage *) bp->l_ldata;
	lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
	lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0);
	lbmIOWait(log->bp, lbmFREE);

	/*
	 * synchronous update log superblock
	 * mark log state as shutdown cleanly
	 * (i.e., Log does not need to be replayed).
	 */
	if ((rc = lbmRead(log, 1, &bpsuper)))
		goto out;

	logsuper = (struct logsuper *) bpsuper->l_ldata;
	logsuper->state = cpu_to_le32(LOGREDONE);
	logsuper->end = cpu_to_le32(lsn);
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	rc = lbmIOWait(bpsuper, lbmFREE);

	jfs_info("lmLogShutdown: lsn:0x%x page:%d eor:%d",
		 lsn, log->page, log->eor);

      out:
	/*
	 * shutdown per log i/o
	 */
	lbmLogShutdown(log);

	if (rc) {
		jfs_warn("lmLogShutdown: exit(%d)", rc);
	}
	return rc;
}


/*
 * NAME:	lmLogFileSystem()
 *
 * FUNCTION:	insert (<activate> = true)/remove (<activate> = false)
 *	file system into/from log active file system list.
 *
 * PARAMETER:	log	- pointer to logs inode.
 *		fsdev	- kdev_t of filesystem.
 *		serial  - pointer to returned log serial number
 *		activate - insert/remove device from active list.
 *
 * RETURN:	0	- success
 *		errors returned by vms_iowait().
 */
static int lmLogFileSystem(struct jfs_log * log, char *uuid, int activate)
{
	int rc = 0;
	int i;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;

	/*
	 * insert/remove file system device to log active file system list.
	 */
	if ((rc = lbmRead(log, 1, &bpsuper)))
		return rc;

	logsuper = (struct logsuper *) bpsuper->l_ldata;
	if (activate) {
		for (i = 0; i < MAX_ACTIVE; i++)
			if (!memcmp(logsuper->active[i].uuid, NULL_UUID, 16)) {
				memcpy(logsuper->active[i].uuid, uuid, 16);
				break;
			}
		if (i == MAX_ACTIVE) {
			jfs_warn("Too many file systems sharing journal!");
			lbmFree(bpsuper);
			return -EMFILE;	/* Is there a better rc? */
		}
	} else {
		for (i = 0; i < MAX_ACTIVE; i++)
			if (!memcmp(logsuper->active[i].uuid, uuid, 16)) {
				memcpy(logsuper->active[i].uuid, NULL_UUID, 16);
				break;
			}
		if (i == MAX_ACTIVE) {
			jfs_warn("Somebody stomped on the journal!");
			lbmFree(bpsuper);
			return -EIO;
		}
	}

	/*
	 * synchronous write log superblock:
	 *
	 * write sidestream bypassing write queue:
	 * at file system mount, log super block is updated for
	 * activation of the file system before any log record
	 * (MOUNT record) of the file system, and at file system
	 * unmount, all meta data for the file system has been
	 * flushed before log super block is updated for deactivation
	 * of the file system.
	 */
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	rc = lbmIOWait(bpsuper, lbmFREE);

	return rc;
}


/*
 *		log buffer manager (lbm)
 *		------------------------
 *
 * special purpose buffer manager supporting log i/o requirements.
 *
 * per log write queue:
 * log pageout occurs in serial order by fifo write queue and
 * restricting to a single i/o in progress at any one time.
 * a circular singly-linked list
 * (log->wrqueue points to the tail, and buffers are linked via
 * bp->wrqueue field), and
 * maintains log page in pageout or waiting for pageout in serial pageout.
 */

/*
 *	lbmLogInit()
 *
 * initialize per log I/O setup at lmLogInit()
 */
static int lbmLogInit(struct jfs_log * log)
{				/* log inode */
	int i;
	struct lbuf *lbuf;

	jfs_info("lbmLogInit: log:0x%p", log);

	/* initialize current buffer cursor */
	log->bp = NULL;

	/* initialize log device write queue */
	log->wqueue = NULL;

	/*
	 * Each log has its own buffer pages allocated to it.  These are
	 * not managed by the page cache.  This ensures that a transaction
	 * writing to the log does not block trying to allocate a page from
	 * the page cache (for the log).  This would be bad, since page
	 * allocation waits on the kswapd thread that may be committing inodes
	 * which would cause log activity.  Was that clear?  I'm trying to
	 * avoid deadlock here.
	 */
	init_waitqueue_head(&log->free_wait);

	log->lbuf_free = NULL;

	for (i = 0; i < LOGPAGES; i++) {
		lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);
		if (lbuf == 0)
			goto error;
		lbuf->l_bh.b_data = lbuf->l_ldata =
		    (char *) get_zeroed_page(GFP_KERNEL);
		if (lbuf->l_ldata == 0) {
			kfree(lbuf);
			goto error;
		}
		lbuf->l_log = log;
		init_waitqueue_head(&lbuf->l_ioevent);

		lbuf->l_bh.b_size = LOGPSIZE;
		lbuf->l_bh.b_dev = to_kdev_t(log->bdev->bd_dev);
		lbuf->l_bh.b_end_io = lbmIODone;
		lbuf->l_bh.b_private = lbuf;
		lbuf->l_bh.b_page = virt_to_page(lbuf->l_ldata);
		lbuf->l_bh.b_state = 0;
		init_waitqueue_head(&lbuf->l_bh.b_wait);

		lbuf->l_freelist = log->lbuf_free;
		log->lbuf_free = lbuf;
	}

	return (0);

      error:
	lbmLogShutdown(log);
	return -ENOMEM;
}


/*
 *	lbmLogShutdown()
 *
 * finalize per log I/O setup at lmLogShutdown()
 */
static void lbmLogShutdown(struct jfs_log * log)
{
	struct lbuf *lbuf;

	jfs_info("lbmLogShutdown: log:0x%p", log);

	lbuf = log->lbuf_free;
	while (lbuf) {
		struct lbuf *next = lbuf->l_freelist;
		free_page((unsigned long) lbuf->l_ldata);
		kfree(lbuf);
		lbuf = next;
	}

	log->bp = NULL;
}


/*
 *	lbmAllocate()
 *
 * allocate an empty log buffer
 */
static struct lbuf *lbmAllocate(struct jfs_log * log, int pn)
{
	struct lbuf *bp;
	unsigned long flags;

	/*
	 * recycle from log buffer freelist if any
	 */
	LCACHE_LOCK(flags);
	LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags);
	log->lbuf_free = bp->l_freelist;
	LCACHE_UNLOCK(flags);

	bp->l_flag = 0;

	bp->l_wqnext = NULL;
	bp->l_freelist = NULL;

	bp->l_pn = pn;
	bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize));
	bp->l_bh.b_blocknr = bp->l_blkno;
	bp->l_ceor = 0;

	return bp;
}


/*
 *	lbmFree()
 *
 * release a log buffer to freelist
 */
static void lbmFree(struct lbuf * bp)
{
	unsigned long flags;

	LCACHE_LOCK(flags);
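
The "log buffer manager" block comment above describes the per-log write queue as a circular singly-linked list: the log keeps a single pointer to the tail buffer (log->wqueue in the visible code), each buffer links to the next one through its queue-next field (l_wqnext), and the head is therefore always reachable as tail->next. The code that actually manipulates this queue is in the lbmWrite pageout path, which is not on this page of the listing. As a standalone illustration of that data structure only, here is a minimal sketch in plain C; the names lbuf_sketch, wq_enqueue and wq_dequeue are invented for this example and are not part of jfs_logmgr.c.

/*
 * Illustrative sketch, not kernel code: a tail-pointer circular
 * singly-linked FIFO like the one the lbm comment describes
 * (one pointer to the tail, buffers chained via a next-in-queue field).
 */
#include <stddef.h>

struct lbuf_sketch {
	int pn;				/* log page number carried by the buffer */
	struct lbuf_sketch *wqnext;	/* next buffer in the circular write queue */
};

/* enqueue at the tail; *tailp always points at the most recently queued buffer */
static void wq_enqueue(struct lbuf_sketch **tailp, struct lbuf_sketch *bp)
{
	struct lbuf_sketch *tail = *tailp;

	if (tail == NULL)
		bp->wqnext = bp;		/* single element points at itself */
	else {
		bp->wqnext = tail->wqnext;	/* new tail points at old head */
		tail->wqnext = bp;
	}
	*tailp = bp;
}

/* dequeue from the head (tail->wqnext); returns NULL when the queue is empty */
static struct lbuf_sketch *wq_dequeue(struct lbuf_sketch **tailp)
{
	struct lbuf_sketch *tail = *tailp;
	struct lbuf_sketch *head;

	if (tail == NULL)
		return NULL;
	head = tail->wqnext;
	if (head == tail)
		*tailp = NULL;			/* last element removed, queue empty */
	else
		tail->wqnext = head->wqnext;	/* unlink the head, keep the ring closed */
	head->wqnext = NULL;
	return head;
}

Keeping only a tail pointer is enough for strictly FIFO pageout: both enqueue and dequeue are O(1), and an empty queue is simply a NULL tail, which matches the log->wqueue = NULL initialization in lbmLogInit() above.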
