⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 jfs_logmgr.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 4 页
字号:
	jfs_info("lmLogInit: log:0x%p", log);

	/* initialize the group commit serialization lock */
	LOGGC_LOCK_INIT(log);

	/* allocate/initialize the log write serialization lock */
	LOG_LOCK_INIT(log);

	LOGSYNC_LOCK_INIT(log);
	INIT_LIST_HEAD(&log->synclist);

	INIT_LIST_HEAD(&log->cqueue);
	log->flush_tblk = NULL;

	log->count = 0;

	/*
	 * initialize log i/o
	 */
	if ((rc = lbmLogInit(log)))
		return rc;

	if (!test_bit(log_INLINELOG, &log->flag))
		log->l2bsize = L2LOGPSIZE;

	/* check for disabled journaling to disk */
	if (log->no_integrity) {
		/*
		 * Journal pages will still be filled.  When the time comes
		 * to actually do the I/O, the write is not done, and the
		 * endio routine is called directly.
		 */
		bp = lbmAllocate(log , 0);
		log->bp = bp;
		bp->l_pn = bp->l_eor = 0;
	} else {
		/*
		 * validate log superblock
		 */
		if ((rc = lbmRead(log, 1, &bpsuper)))
			goto errout10;
		logsuper = (struct logsuper *) bpsuper->l_ldata;

		if (logsuper->magic != cpu_to_le32(LOGMAGIC)) {
			jfs_warn("*** Log Format Error ! ***");
			rc = -EINVAL;
			goto errout20;
		}

		/* logredo() should have been run successfully. */
		if (logsuper->state != cpu_to_le32(LOGREDONE)) {
			jfs_warn("*** Log Is Dirty ! ***");
			rc = -EINVAL;
			goto errout20;
		}

		/* initialize log from log superblock */
		if (test_bit(log_INLINELOG,&log->flag)) {
			if (log->size != le32_to_cpu(logsuper->size)) {
				rc = -EINVAL;
				goto errout20;
			}
			jfs_info("lmLogInit: inline log:0x%p base:0x%Lx "
				 "size:0x%x", log,
				 (unsigned long long) log->base, log->size);
		} else {
			if (memcmp(logsuper->uuid, log->uuid, 16)) {
				jfs_warn("wrong uuid on JFS log device");
				/*
				 * NOTE(review): rc still holds 0 from the
				 * successful lbmRead() above, so this error
				 * path returns success to the caller; later
				 * upstream kernels set rc = -EINVAL here —
				 * confirm against the tree this came from.
				 */
				goto errout20;
			}
			log->size = le32_to_cpu(logsuper->size);
			log->l2bsize = le32_to_cpu(logsuper->l2bsize);
			jfs_info("lmLogInit: external log:0x%p base:0x%Lx "
				 "size:0x%x", log,
				 (unsigned long long) log->base, log->size);
		}

		log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
		log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page);

		/*
		 * initialize for log append write mode
		 */
		/* establish current/end-of-log page/buffer */
		if ((rc = lbmRead(log, log->page, &bp)))
			goto errout20;

		lp = (struct logpage *) bp->l_ldata;

		jfs_info("lmLogInit: lsn:0x%x page:%d eor:%d:%d",
			 le32_to_cpu(logsuper->end), log->page, log->eor,
			 le16_to_cpu(lp->h.eor));

		log->bp = bp;
		bp->l_pn = log->page;
		bp->l_eor = log->eor;

		/* if current page is full, move on to next page */
		if (log->eor >= LOGPSIZE - LOGPTLRSIZE)
			lmNextPage(log);

		/*
		 * initialize log syncpoint
		 */
		/*
		 * write the first SYNCPT record with syncpoint = 0
		 * (i.e., log redo up to HERE !);
		 * remove current page from lbm write queue at end of pageout
		 * (to write log superblock update), but do not release to
		 * freelist;
		 */
		lrd.logtid = 0;
		lrd.backchain = 0;
		lrd.type = cpu_to_le16(LOG_SYNCPT);
		lrd.length = 0;
		lrd.log.syncpt.sync = 0;
		lsn = lmWriteRecord(log, NULL, &lrd, NULL);
		bp = log->bp;
		bp->l_ceor = bp->l_eor;
		lp = (struct logpage *) bp->l_ldata;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
		lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0);
		if ((rc = lbmIOWait(bp, 0)))
			goto errout30;

		/*
		 * update/write superblock
		 */
		logsuper->state = cpu_to_le32(LOGMOUNT);
		log->serial = le32_to_cpu(logsuper->serial) + 1;
		logsuper->serial = cpu_to_le32(log->serial);
		lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
		if ((rc = lbmIOWait(bpsuper, lbmFREE)))
			goto errout30;
	}

	/* initialize logsync parameters */
	log->logsize = (log->size - 2) << L2LOGPSIZE;
	log->lsn = lsn;
	log->syncpt = lsn;
	log->sync = log->syncpt;
	log->nextsync = LOGSYNC_DELTA(log->logsize);

	jfs_info("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x",
		 log->lsn, log->syncpt, log->sync);

	/*
	 * initialize for lazy/group commit
	 */
	log->clsn = lsn;

	return 0;

	/*
	 *	unwind on error
	 */
      errout30:		/* release log page */
	log->wqueue = NULL;
	bp->l_wqnext = NULL;
	lbmFree(bp);

      errout20:		/* release log superblock */
	lbmFree(bpsuper);

      errout10:		/* unwind lbmLogInit() */
	lbmLogShutdown(log);

	jfs_warn("lmLogInit: exit(%d)", rc);
	return rc;
}


/*
 * NAME:	lmLogClose()
 *
 * FUNCTION:	remove file system <ipmnt> from active list of log <iplog>
 *		and close it on last close.
 *
 * PARAMETER:	sb	- superblock
 *
 * RETURN:	errors from subroutines
 *
 * serialization:
 */
int lmLogClose(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	struct block_device *bdev;
	int rc = 0;

	jfs_info("lmLogClose: log:0x%p", log);

	mutex_lock(&jfs_log_mutex);
	LOG_LOCK(log);
	/* detach this superblock from the log before anyone else sees it */
	list_del(&sbi->log_list);
	LOG_UNLOCK(log);
	sbi->log = NULL;

	/*
	 * We need to make sure all of the "written" metapages
	 * actually make it to disk
	 */
	sync_blockdev(sb->s_bdev);

	if (test_bit(log_INLINELOG, &log->flag)) {
		/*
		 *	in-line log in host file system
		 */
		rc = lmLogShutdown(log);
		kfree(log);
		goto out;
	}

	/* deactivate this fs's slot in the shared external journal */
	if (!log->no_integrity)
		lmLogFileSystem(log, sbi, 0);

	/* other file systems still share this log: keep it open */
	if (!list_empty(&log->sb_list))
		goto out;

	/*
	 * TODO: ensure that the dummy_log is in a state to allow
	 * lbmLogShutdown to deallocate all the buffers and call
	 * kfree against dummy_log.  For now, leave dummy_log & its
	 * buffers in memory, and reuse if another no-integrity mount
	 * is requested.
	 */
	if (log->no_integrity)
		goto out;

	/*
	 *	external log as separate logical volume
	 */
	list_del(&log->journal_list);
	bdev = log->bdev;
	rc = lmLogShutdown(log);

	bd_release(bdev);
	blkdev_put(bdev);

	kfree(log);

      out:
	mutex_unlock(&jfs_log_mutex);
	jfs_info("lmLogClose: exit(%d)", rc);
	return rc;
}


/*
 * NAME:	jfs_flush_journal()
 *
 * FUNCTION:	initiate write of any outstanding transactions to the journal
 *		and optionally wait until they are all written to disk
 *
 *		wait == 0  flush until latest txn is committed, don't wait
 *		wait == 1  flush until latest txn is committed, wait
 *		wait > 1   flush until all txn's are complete, wait
 */
void jfs_flush_journal(struct jfs_log *log, int wait)
{
	int i;
	struct tblock *target = NULL;
	struct jfs_sb_info *sbi;

	/* jfs_write_inode may call us during read-only mount */
	if (!log)
		return;

	jfs_info("jfs_flush_journal: log:0x%p wait=%d", log, wait);

	LOGGC_LOCK(log);

	if (!list_empty(&log->cqueue)) {
		/*
		 * This ensures that we will keep writing to the journal as long
		 * as there are unwritten commit records
		 */
		target = list_entry(log->cqueue.prev, struct tblock, cqueue);

		if (test_bit(log_FLUSH, &log->flag)) {
			/*
			 * We're already flushing.
			 * if flush_tblk is NULL, we are flushing everything,
			 * so leave it that way.  Otherwise, update it to the
			 * latest transaction
			 */
			if (log->flush_tblk)
				log->flush_tblk = target;
		} else {
			/* Only flush until latest transaction is committed */
			log->flush_tblk = target;
			set_bit(log_FLUSH, &log->flag);

			/*
			 * Initiate I/O on outstanding transactions
			 */
			if (!(log->cflag & logGC_PAGEOUT)) {
				log->cflag |= logGC_PAGEOUT;
				lmGCwrite(log, 0);
			}
		}
	}
	if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) {
		/* Flush until all activity complete */
		set_bit(log_FLUSH, &log->flag);
		log->flush_tblk = NULL;
	}

	if (wait && target && !(target->flag & tblkGC_COMMITTED)) {
		DECLARE_WAITQUEUE(__wait, current);

		/* sleep until the group-commit thread commits our target */
		add_wait_queue(&target->gcwait, &__wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		LOGGC_UNLOCK(log);
		schedule();
		__set_current_state(TASK_RUNNING);
		LOGGC_LOCK(log);
		remove_wait_queue(&target->gcwait, &__wait);
	}
	LOGGC_UNLOCK(log);

	if (wait < 2)
		return;

	/* wait > 1: also push dirty metadata of every fs sharing this log */
	list_for_each_entry(sbi, &log->sb_list, log_list) {
		filemap_fdatawrite(sbi->ipbmap->i_mapping);
		filemap_fdatawrite(sbi->ipimap->i_mapping);
		filemap_fdatawrite(sbi->direct_inode->i_mapping);
	}

	/*
	 * If there was recent activity, we may need to wait
	 * for the lazycommit thread to catch up
	 */
	if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) {
		for (i = 0; i < 200; i++) {	/* Too much? */
			msleep(250);
			if (list_empty(&log->cqueue) &&
			    list_empty(&log->synclist))
				break;
		}
	}
	assert(list_empty(&log->cqueue));

#ifdef CONFIG_JFS_DEBUG
	if (!list_empty(&log->synclist)) {
		struct logsyncblk *lp;

		printk(KERN_ERR "jfs_flush_journal: synclist not empty\n");
		list_for_each_entry(lp, &log->synclist, synclist) {
			if (lp->xflag & COMMIT_PAGE) {
				struct metapage *mp = (struct metapage *)lp;
				print_hex_dump(KERN_ERR, "metapage: ",
					       DUMP_PREFIX_ADDRESS, 16, 4,
					       mp, sizeof(struct metapage), 0);
				print_hex_dump(KERN_ERR, "page: ",
					       DUMP_PREFIX_ADDRESS, 16,
					       sizeof(long), mp->page,
					       sizeof(struct page), 0);
			} else
				print_hex_dump(KERN_ERR, "tblock:",
					       DUMP_PREFIX_ADDRESS, 16, 4,
					       lp, sizeof(struct tblock), 0);
		}
	}
#else
	WARN_ON(!list_empty(&log->synclist));
#endif
	clear_bit(log_FLUSH, &log->flag);
}

/*
 * NAME:	lmLogShutdown()
 *
 * FUNCTION:	log shutdown at last LogClose().
 *
 *		write log syncpt record.
 *		update super block to set redone flag to 0.
 *
 * PARAMETER:	log	- log inode
 *
 * RETURN:	0	- success
 *
 * serialization: single last close thread
 */
int lmLogShutdown(struct jfs_log * log)
{
	int rc;
	struct lrd lrd;
	int lsn;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	struct lbuf *bp;
	struct logpage *lp;

	jfs_info("lmLogShutdown: log:0x%p", log);

	/* drain every outstanding transaction before writing the syncpt */
	jfs_flush_journal(log, 2);

	/*
	 * write the last SYNCPT record with syncpoint = 0
	 * (i.e., log redo up to HERE !)
	 */
	lrd.logtid = 0;
	lrd.backchain = 0;
	lrd.type = cpu_to_le16(LOG_SYNCPT);
	lrd.length = 0;
	lrd.log.syncpt.sync = 0;

	lsn = lmWriteRecord(log, NULL, &lrd, NULL);
	bp = log->bp;
	lp = (struct logpage *) bp->l_ldata;
	lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
	lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0);
	lbmIOWait(log->bp, lbmFREE);
	log->bp = NULL;

	/*
	 * synchronous update log superblock
	 * mark log state as shutdown cleanly
	 * (i.e., Log does not need to be replayed).
	 */
	if ((rc = lbmRead(log, 1, &bpsuper)))
		goto out;

	logsuper = (struct logsuper *) bpsuper->l_ldata;
	logsuper->state = cpu_to_le32(LOGREDONE);
	logsuper->end = cpu_to_le32(lsn);
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	rc = lbmIOWait(bpsuper, lbmFREE);

	jfs_info("lmLogShutdown: lsn:0x%x page:%d eor:%d",
		 lsn, log->page, log->eor);

      out:
	/*
	 * shutdown per log i/o
	 */
	lbmLogShutdown(log);

	if (rc) {
		jfs_warn("lmLogShutdown: exit(%d)", rc);
	}
	return rc;
}

/*
 * NAME:	lmLogFileSystem()
 *
 * FUNCTION:	insert (<activate> = true)/remove (<activate> = false)
 *	file system into/from log active file system list.
 *
 * PARAMETER:	log	- pointer to log
 *		sbi	- jfs_sb_info of the file system (supplies its uuid)
 *		activate - insert/remove device from active list.
 *
 * RETURN:	0	- success
 *		errors returned by lbmIOWait().
 */
static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi,
			   int activate)
{
	int rc = 0;
	int i;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	char *uuid = sbi->uuid;

	/*
	 * insert/remove file system device to log active file system list.
	 */
	if ((rc = lbmRead(log, 1, &bpsuper)))
		return rc;

	logsuper = (struct logsuper *) bpsuper->l_ldata;
	if (activate) {
		/* claim the first free (NULL_UUID) slot in the active table */
		for (i = 0; i < MAX_ACTIVE; i++)
			if (!memcmp(logsuper->active[i].uuid, NULL_UUID, 16)) {
				memcpy(logsuper->active[i].uuid, uuid, 16);
				sbi->aggregate = i;
				break;
			}
		if (i == MAX_ACTIVE) {
			jfs_warn("Too many file systems sharing journal!");
			lbmFree(bpsuper);
			return -EMFILE;	/* Is there a better rc? */
		}
	} else {
		/* release this fs's slot by matching its uuid */
		for (i = 0; i < MAX_ACTIVE; i++)
			if (!memcmp(logsuper->active[i].uuid, uuid, 16)) {
				memcpy(logsuper->active[i].uuid, NULL_UUID, 16);
				break;
			}
		if (i == MAX_ACTIVE) {
			jfs_warn("Somebody stomped on the journal!");
			lbmFree(bpsuper);
			return -EIO;
		}
	}

	/*
	 * synchronous write log superblock:
	 *
	 * write sidestream bypassing write queue:
	 * at file system mount, log super block is updated for
	 * activation of the file system before any log record
	 * (MOUNT record) of the file system, and at file system
	 * unmount, all meta data for the file system has been
	 * flushed before log super block is updated for deactivation
	 * of the file system.
	 */
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	rc = lbmIOWait(bpsuper, lbmFREE);

	return rc;
}

/*
 *		log buffer manager (lbm)
 *		------------------------
 *
 * special purpose buffer manager supporting log i/o requirements.
 *
 * per log write queue:
 * log pageout occurs in serial order by fifo write queue and
 * restricting to a single i/o in progress at any one time.
 * a circular singly-linked list
 * (log->wrqueue points to the tail, and buffers are linked via
 * bp->wrqueue field), and
 * maintains log page in pageout or waiting for pageout in serial pageout.
 */

/*
 *	lbmLogInit()
 *
 * initialize per log I/O setup at lmLogInit()
 */
static int lbmLogInit(struct jfs_log * log)
{				/* log inode */
	int i;
	struct lbuf *lbuf;

	jfs_info("lbmLogInit: log:0x%p", log);

	/* initialize current buffer cursor */
	log->bp = NULL;

	/* initialize log device write queue */
	log->wqueue = NULL;

	/*
	 * Each log has its own buffer pages allocated to it.  These are
	 * not managed by the page cache.  This ensures that a transaction
	 * writing to the log does not block trying to allocate a page from
	 * the page cache (for the log).  This would be bad, since page
	 * allocation waits on the kswapd thread that may be committing inodes
	 * which would cause log activity.  Was that clear?  I'm trying to
	 * avoid deadlock here.
	 */
	init_waitqueue_head(&log->free_wait);

	log->lbuf_free = NULL;

	for (i = 0; i < LOGPAGES;) {
		char *buffer;
		uint offset;
		struct page *page;

		buffer = (char *) get_zeroed_page(GFP_KERNEL);
		if (buffer == NULL)
			goto error;
		page = virt_to_page(buffer);
		/* carve each page into LOGPSIZE-sized lbufs */
		for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) {
			lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);
			if (lbuf == NULL) {
				/* no lbuf references this page yet: free it
				 * directly; otherwise lbmLogShutdown (via
				 * the error path) drops the references */
				if (offset == 0)
					free_page((unsigned long) buffer);
				goto error;
			}
			if (offset) /* we already have one reference */
				get_page(page);
			lbuf->l_offset = offset;
			lbuf->l_ldata = buffer + offset;
			lbuf->l_page = page;
			lbuf->l_log = log;
			init_waitqueue_head(&lbuf->l_ioevent);

			/* push onto the free list */
			lbuf->l_freelist = log->lbuf_free;
			log->lbuf_free = lbuf;
			i++;
		}
	}

	return (0);

      error:
	lbmLogShutdown(log);
	return -ENOMEM;
}


/*
 *	lbmLogShutdown()
 *
 * finalize per log I/O setup at lmLogShutdown()
 */
static void lbmLogShutdown(struct jfs_log * log)
{
	struct lbuf *lbuf;

	jfs_info("lbmLogShutdown: log:0x%p", log);

	lbuf = log->lbuf_free;
	while (lbuf) {
		struct lbuf *next = lbuf->l_freelist;
		/* each lbuf holds one reference on its (possibly shared)
		 * page (see lbmLogInit), so the page is actually freed
		 * when the last lbuf referencing it is released */
		__free_page(lbuf->l_page);
		kfree(lbuf);
		lbuf = next;
	}
}


/*
 *	lbmAllocate()
 *
 * allocate an empty log buffer
 */
static struct lbuf *lbmAllocate(struct jfs_log * log, int pn)
{

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -