⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 xfs_log_recover.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
			"bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
				item, bp);
			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		}

		/* Copy the logged di_next_unlinked value over the on-disk one. */
		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
					      next_unlinked_offset);
		*buffer_nextp = *logged_nextp;
	}

	return 0;
}

/*
 * Perform a 'normal' buffer recovery.  Each logged region of the
 * buffer should be copied over the corresponding region in the
 * given buffer.  The bitmap in the buf log format structure indicates
 * where to place the logged data.
 */
/*ARGSUSED*/
STATIC void
xlog_recover_do_reg_buffer(
	xlog_recover_item_t	*item,	/* log item carrying the logged regions */
	xfs_buf_t		*bp,	/* target buffer to copy regions into */
	xfs_buf_log_format_t	*buf_f)	/* buf log format with the region bitmap */
{
	int			i;
	int			bit;
	int			nbits;
	unsigned int		*data_map = NULL;
	unsigned int		map_size = 0;
	int                     error;

	switch (buf_f->blf_type) {
	case XFS_LI_BUF:
		data_map = buf_f->blf_data_map;
		map_size = buf_f->blf_map_size;
		break;
	}
	/*
	 * Walk runs of set bits in the data map; each run describes one
	 * logged region, and region N's data lives in ri_buf[N].
	 */
	bit = 0;
	i = 1;  /* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(data_map, map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(data_map, map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
		ASSERT(XFS_BUF_COUNT(bp) >=
		       ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));

		/*
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXXThis is
		 * probably a good thing to do for other buf types also.
		 */
		error = 0;
		if (buf_f->blf_flags &
		   (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
			error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
					       item->ri_buf[i].i_addr,
					       -1, 0, XFS_QMOPT_DOWARN,
					       "dquot_buf_recover");
		}
		/* Only replay the region if the dquot sanity check passed. */
		if (!error)
			memcpy(xfs_buf_offset(bp,
				(uint)bit << XFS_BLI_SHIFT),	/* dest */
				item->ri_buf[i].i_addr,		/* source */
				nbits<<XFS_BLI_SHIFT);		/* length */
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);
}

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * Returns the number of problems found (0 means the dquot looks sane).
 * If XFS_QMOPT_DQREPAIR is set in flags and errors were found, the
 * dquot block is re-initialized in place before returning.
 */
int
xfs_qm_dqcheck(
	xfs_disk_dquot_t *ddq,
	xfs_dqid_t	 id,
	uint		 type,	  /* used only when IO_dorepair is true */
	uint		 flags,
	char		 *str)
{
	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
	int		errs = 0;

	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks got
	 *    used for user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 *
	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
	 *    But the allocation will be replayed so we'll end up with an
	 *    uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	/* On-disk dquot fields are big-endian; convert before comparing. */
	if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
		if (flags & XFS_QMOPT_DOWARN)
			cmn_err(CE_ALERT,
			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
		errs++;
	}

	if (ddq->d_version != XFS_DQUOT_VERSION) {
		if (flags & XFS_QMOPT_DOWARN)
			cmn_err(CE_ALERT,
			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
			str, id, ddq->d_version, XFS_DQUOT_VERSION);
		errs++;
	}

	if (ddq->d_flags != XFS_DQ_USER &&
	    ddq->d_flags != XFS_DQ_PROJ &&
	    ddq->d_flags != XFS_DQ_GROUP) {
		if (flags & XFS_QMOPT_DOWARN)
			cmn_err(CE_ALERT,
			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
			str, id, ddq->d_flags);
		errs++;
	}

	/* id == -1 means the caller doesn't know/care which ID to expect. */
	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
		if (flags & XFS_QMOPT_DOWARN)
			cmn_err(CE_ALERT,
			"%s : ondisk-dquot 0x%p, ID mismatch: "
			"0x%x expected, found id 0x%x",
			str, ddq, id, be32_to_cpu(ddq->d_id));
		errs++;
	}

	/*
	 * If the dquot is structurally sound, check that each usage
	 * counter at or over its soft limit has its timer started.
	 */
	if (!errs && ddq->d_id) {
		if (ddq->d_blk_softlimit &&
		    be64_to_cpu(ddq->d_bcount) >=
				be64_to_cpu(ddq->d_blk_softlimit)) {
			if (!ddq->d_btimer) {
				if (flags & XFS_QMOPT_DOWARN)
					cmn_err(CE_ALERT,
					"%s : Dquot ID 0x%x (0x%p) "
					"BLK TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
		if (ddq->d_ino_softlimit &&
		    be64_to_cpu(ddq->d_icount) >=
				be64_to_cpu(ddq->d_ino_softlimit)) {
			if (!ddq->d_itimer) {
				if (flags & XFS_QMOPT_DOWARN)
					cmn_err(CE_ALERT,
					"%s : Dquot ID 0x%x (0x%p) "
					"INODE TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
		if (ddq->d_rtb_softlimit &&
		    be64_to_cpu(ddq->d_rtbcount) >=
				be64_to_cpu(ddq->d_rtb_softlimit)) {
			if (!ddq->d_rtbtimer) {
				if (flags & XFS_QMOPT_DOWARN)
					cmn_err(CE_ALERT,
					"%s : Dquot ID 0x%x (0x%p) "
					"RTBLK TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
	}

	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
		return errs;

	if (flags & XFS_QMOPT_DOWARN)
		cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);

	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	ASSERT(flags & XFS_QMOPT_DQREPAIR);
	/* Zero the whole dquot block, then rebuild the identifying fields. */
	memset(d, 0, sizeof(xfs_dqblk_t));

	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	d->dd_diskdq.d_flags = type;
	d->dd_diskdq.d_id = cpu_to_be32(id);

	return errs;
}

/*
 * Perform a dquot buffer recovery.
 * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
 */
STATIC void
xlog_recover_do_dquot_buffer(
	xfs_mount_t		*mp,
	xlog_t			*log,
	xlog_recover_item_t	*item,
	xfs_buf_t		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	uint			type;

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (mp->m_qflags == 0) {
		return;
	}

	/* Collect the dquot type(s) this buffer was logged for. */
	type = 0;
	if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
		type |= XFS_DQ_USER;
	if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
		type |= XFS_DQ_PROJ;
	if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
		type |= XFS_DQ_GROUP;
	/*
	 * This type of quotas was turned off, so ignore this buffer
	 */
	if (log->l_quotaoffs_flag & type)
		return;

	xlog_recover_do_reg_buffer(item, bp, buf_f);
}

/*
 * This routine replays a modification made to a buffer at runtime.
 * There are actually two types of buffer, regular and inode, which
 * are handled differently.  Inode buffers are handled differently
 * in that we only recover a specific set of data from them, namely
 * the inode di_next_unlinked fields.  This is because all other inode
 * data is actually logged via inode records and any data we replay
 * here which overlaps that may be stale.
 *
 * When meta-data buffers are freed at run time we log a buffer item
 * with the XFS_BLI_CANCEL bit set to indicate that previous copies
 * of the buffer in the log should not be replayed at recovery time.
 * This is so that if the blocks covered by the buffer are reused for
 * file data before we crash we don't end up replaying old, freed
 * meta-data into a user's file.
 *
 * To handle the cancellation of buffer log items, we make two passes
 * over the log during recovery.  During the first we build a table of
 * those buffers which have been cancelled, and during the second we
 * only replay those buffers which do not have corresponding cancel
 * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
 * for more details on the implementation of the table of cancel records.
 */
STATIC int
xlog_recover_do_buffer_trans(
	xlog_t			*log,	/* log being recovered */
	xlog_recover_item_t	*item,	/* buffer log item to replay */
	int			pass)	/* XLOG_RECOVER_PASS1 or pass 2 */
{
	xfs_buf_log_format_t	*buf_f;
	xfs_mount_t		*mp;
	xfs_buf_t		*bp;
	int			error;
	int			cancel;
	xfs_daddr_t		blkno;
	int			len;
	ushort			flags;

	buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;

	if (pass == XLOG_RECOVER_PASS1) {
		/*
		 * In this pass we're only looking for buf items
		 * with the XFS_BLI_CANCEL bit set.
		 */
		xlog_recover_do_buffer_pass1(log, buf_f);
		return 0;
	} else {
		/*
		 * In this pass we want to recover all the buffers
		 * which have not been cancelled and are not
		 * cancellation buffers themselves.  The routine
		 * we call here will tell us whether or not to
		 * continue with the replay of this buffer.
		 */
		cancel = xlog_recover_do_buffer_pass2(log, buf_f);
		if (cancel) {
			return 0;
		}
	}

	/* Pull the buffer's location and flags out of the log format. */
	switch (buf_f->blf_type) {
	case XFS_LI_BUF:
		blkno = buf_f->blf_blkno;
		len = buf_f->blf_len;
		flags = buf_f->blf_flags;
		break;
	default:
		xfs_fs_cmn_err(CE_ALERT, log->l_mp,
			"xfs_log_recover: unknown buffer type 0x%x, logdev %s",
			buf_f->blf_type, log->l_mp->m_logname ?
			log->l_mp->m_logname : "internal");
		XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
				 XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	mp = log->l_mp;
	if (flags & XFS_BLI_INODE_BUF) {
		bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
								XFS_BUF_LOCK);
	} else {
		bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
	}
	if (XFS_BUF_ISERROR(bp)) {
		xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
				  bp, blkno);
		error = XFS_BUF_GETERROR(bp);
		xfs_buf_relse(bp);
		return error;
	}

	/* Dispatch on buffer kind: inode, dquot, or regular replay. */
	error = 0;
	if (flags & XFS_BLI_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
	} else if (flags &
		  (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
	} else {
		xlog_recover_do_reg_buffer(item, bp, buf_f);
	}
	if (error)
		return XFS_ERROR(error);

	/*
	 * Perform delayed write on the buffer.  Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 *
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
	 * buffers in the log can be a different size if the log was generated
	 * by an older kernel using unclustered inode buffers or a newer kernel
	 * running with a different inode cluster size.  Regardless, if the
	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
	 * the buffer out of the buffer cache so that the buffer won't
	 * overlap with future reads of those inodes.
	 */
	if (XFS_DINODE_MAGIC ==
	    INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) &&
	    (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
		/* Wrong-sized inode buffer: write it out but don't cache it. */
		XFS_BUF_STALE(bp);
		error = xfs_bwrite(mp, bp);
	} else {
		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
		       XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
		XFS_BUF_SET_FSPRIVATE(bp, mp);
		XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
		xfs_bdwrite(mp, bp);
	}

	return (error);
}

/*
 * Replay an inode log item: read the inode's cluster buffer and copy
 * the logged core (and data/attr fork regions) back into place.
 * (NOTE(review): this function is truncated in this chunk of the file.)
 */
STATIC int
xlog_recover_do_inode_trans(
	xlog_t			*log,
	xlog_recover_item_t	*item,
	int			pass)
{
	xfs_inode_log_format_t	*in_f;
	xfs_mount_t		*mp;
	xfs_buf_t		*bp;
	xfs_imap_t		imap;
	xfs_dinode_t		*dip;
	xfs_ino_t		ino;
	int			len;
	xfs_caddr_t		src;
	xfs_caddr_t		dest;
	int			error;
	int			attr_index;
	uint			fields;
	xfs_icdinode_t		*dicp;
	int			need_free = 0;

	/* Inodes are only replayed in pass 2. */
	if (pass == XLOG_RECOVER_PASS1) {
		return 0;
	}

	/*
	 * If the log record is not already in the current format,
	 * allocate a scratch structure and convert it.
	 */
	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
		in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
	} else {
		in_f = (xfs_inode_log_format_t *)kmem_alloc(
			sizeof(xfs_inode_log_format_t), KM_SLEEP);
		need_free = 1;
		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
		if (error)
			goto error;
	}
	ino = in_f->ilf_ino;
	mp = log->l_mp;
	if (ITEM_TYPE(item) == XFS_LI_INODE) {
		imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno;
		imap.im_len = in_f->ilf_len;
		imap.im_boffset = in_f->ilf_boffset;
	} else {
		/*
		 * It's an old inode format record.  We don't know where
		 * its cluster is located on disk, and we can't allow
		 * xfs_imap() to figure it out because the inode btrees
		 * are not ready to be used.  Therefore do not pass the
		 * XFS_IMAP_LOOKUP flag to xfs_imap().  This will give
		 * us only the single block in which the inode lives
		 * rather than its cluster, so we must make sure to
		 * invalidate the buffer when we write it out below.
		 */
		imap.im_blkno = 0;
		xfs_imap(log->l_mp, NULL, ino, &imap, 0);
	}

	/*
	 * Inode buffers can be freed, look out for it,
	 * and do not replay the inode.
	 */
	if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0)) {
		error = 0;
		goto error;
	}

	bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
								XFS_BUF_LOCK);
	if (XFS_BUF_ISERROR(bp)) {
		xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
				  bp, imap.im_blkno);
		error = XFS_BUF_GETERROR(bp);
		xfs_buf_relse(bp);
		goto error;
	}
	error = 0;
	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);

	/*
	 * Make sure the place we're flushing out to really looks
	 * like an inode!
	 */
	if (unlikely(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC)) {

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -