
mft.c
Linux kernel source code (C)
Page 1 of 5
 * it is dirty in the inode meta data rather than the data page cache of the
 * inode, and thus there are no data pages that need writing out.  Therefore, a
 * full mark_inode_dirty() is overkill.  A mark_inode_dirty_sync(), on the
 * other hand, is not sufficient, because I_DIRTY_DATASYNC needs to be set to
 * ensure ->write_inode is called from generic_osync_inode() and this needs to
 * happen or the file data would not necessarily hit the device synchronously,
 * even though the vfs inode has the O_SYNC flag set.  Also, I_DIRTY_DATASYNC
 * simply "feels" better than just I_DIRTY_SYNC, since the file data has not
 * actually hit the block device yet, which is not what I_DIRTY_SYNC on its own
 * would suggest.
 */
void __mark_mft_record_dirty(ntfs_inode *ni)
{
	ntfs_inode *base_ni;

	ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
	BUG_ON(NInoAttr(ni));
	mark_ntfs_record_dirty(ni->page, ni->page_ofs);
	/* Determine the base vfs inode and mark it dirty, too. */
	mutex_lock(&ni->extent_lock);
	if (likely(ni->nr_extents >= 0))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	mutex_unlock(&ni->extent_lock);
	__mark_inode_dirty(VFS_I(base_ni), I_DIRTY_SYNC | I_DIRTY_DATASYNC);
}

static const char *ntfs_please_email = "Please email "
		"linux-ntfs-dev@lists.sourceforge.net and say that you saw "
		"this message.  Thank you.";

/**
 * ntfs_sync_mft_mirror_umount - synchronise an mft record to the mft mirror
 * @vol:	ntfs volume on which the mft record to synchronize resides
 * @mft_no:	mft record number of mft record to synchronize
 * @m:		mapped, mst protected (extent) mft record to synchronize
 *
 * Write the mapped, mst protected (extent) mft record @m with mft record
 * number @mft_no to the mft mirror ($MFTMirr) of the ntfs volume @vol,
 * bypassing the page cache and the $MFTMirr inode itself.
 *
 * This function is only for use at umount time when the mft mirror inode has
 * already been disposed of.  We BUG() if we are called while the mft mirror
 * inode is still attached to the volume.
 *
 * On success return 0.  On error return -errno.
 *
 * NOTE:  This function is not implemented yet as I am not convinced it can
 * actually be triggered considering the sequence of commits we do in super.c::
 * ntfs_put_super().  But just in case we provide this place holder as the
 * alternative would be either to BUG() or to get a NULL pointer dereference
 * and Oops.
 */
static int ntfs_sync_mft_mirror_umount(ntfs_volume *vol,
		const unsigned long mft_no, MFT_RECORD *m)
{
	BUG_ON(vol->mftmirr_ino);
	ntfs_error(vol->sb, "Umount time mft mirror syncing is not "
			"implemented yet.  %s", ntfs_please_email);
	return -EOPNOTSUPP;
}

/**
 * ntfs_sync_mft_mirror - synchronize an mft record to the mft mirror
 * @vol:	ntfs volume on which the mft record to synchronize resides
 * @mft_no:	mft record number of mft record to synchronize
 * @m:		mapped, mst protected (extent) mft record to synchronize
 * @sync:	if true, wait for i/o completion
 *
 * Write the mapped, mst protected (extent) mft record @m with mft record
 * number @mft_no to the mft mirror ($MFTMirr) of the ntfs volume @vol.
 *
 * On success return 0.  On error return -errno and set the volume errors flag
 * in the ntfs volume @vol.
 *
 * NOTE:  We always perform synchronous i/o and ignore the @sync parameter.
 *
 * TODO:  If @sync is false, want to do truly asynchronous i/o, i.e. just
 * schedule i/o via ->writepage or do it via kntfsd or whatever.
 */
int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
		MFT_RECORD *m, int sync)
{
	struct page *page;
	unsigned int blocksize = vol->sb->s_blocksize;
	int max_bhs = vol->mft_record_size / blocksize;
	struct buffer_head *bhs[max_bhs];
	struct buffer_head *bh, *head;
	u8 *kmirr;
	runlist_element *rl;
	unsigned int block_start, block_end, m_start, m_end, page_ofs;
	int i_bhs, nr_bhs, err = 0;
	unsigned char blocksize_bits = vol->sb->s_blocksize_bits;

	ntfs_debug("Entering for inode 0x%lx.", mft_no);
	BUG_ON(!max_bhs);
	if (unlikely(!vol->mftmirr_ino)) {
		/* This could happen during umount... */
		err = ntfs_sync_mft_mirror_umount(vol, mft_no, m);
		if (likely(!err))
			return err;
		goto err_out;
	}
	/* Get the page containing the mirror copy of the mft record @m. */
	page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
			(PAGE_CACHE_SHIFT - vol->mft_record_size_bits));
	if (IS_ERR(page)) {
		ntfs_error(vol->sb, "Failed to map mft mirror page.");
		err = PTR_ERR(page);
		goto err_out;
	}
	lock_page(page);
	BUG_ON(!PageUptodate(page));
	ClearPageUptodate(page);
	/* Offset of the mft mirror record inside the page. */
	page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
	/* The address in the page of the mirror copy of the mft record @m. */
	kmirr = page_address(page) + page_ofs;
	/* Copy the mst protected mft record to the mirror. */
	memcpy(kmirr, m, vol->mft_record_size);
	/* Create uptodate buffers if not present. */
	if (unlikely(!page_has_buffers(page))) {
		struct buffer_head *tail;

		bh = head = alloc_page_buffers(page, blocksize, 1);
		do {
			set_buffer_uptodate(bh);
			tail = bh;
			bh = bh->b_this_page;
		} while (bh);
		tail->b_this_page = head;
		attach_page_buffers(page, head);
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);
	rl = NULL;
	nr_bhs = 0;
	block_start = 0;
	m_start = kmirr - (u8*)page_address(page);
	m_end = m_start + vol->mft_record_size;
	do {
		block_end = block_start + blocksize;
		/* If the buffer is outside the mft record, skip it. */
		if (block_end <= m_start)
			continue;
		if (unlikely(block_start >= m_end))
			break;
		/* Need to map the buffer if it is not mapped already. */
		if (unlikely(!buffer_mapped(bh))) {
			VCN vcn;
			LCN lcn;
			unsigned int vcn_ofs;

			bh->b_bdev = vol->sb->s_bdev;
			/* Obtain the vcn and offset of the current block. */
			vcn = ((VCN)mft_no << vol->mft_record_size_bits) +
					(block_start - m_start);
			vcn_ofs = vcn & vol->cluster_size_mask;
			vcn >>= vol->cluster_size_bits;
			if (!rl) {
				down_read(&NTFS_I(vol->mftmirr_ino)->
						runlist.lock);
				rl = NTFS_I(vol->mftmirr_ino)->runlist.rl;
				/*
				 * $MFTMirr always has the whole of its runlist
				 * in memory.
				 */
				BUG_ON(!rl);
			}
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
			/* For $MFTMirr, only lcn >= 0 is a successful remap. */
			if (likely(lcn >= 0)) {
				/* Setup buffer head to correct block. */
				bh->b_blocknr = ((lcn <<
						vol->cluster_size_bits) +
						vcn_ofs) >> blocksize_bits;
				set_buffer_mapped(bh);
			} else {
				bh->b_blocknr = -1;
				ntfs_error(vol->sb, "Cannot write mft mirror "
						"record 0x%lx because its "
						"location on disk could not "
						"be determined (error code "
						"%lli).", mft_no,
						(long long)lcn);
				err = -EIO;
			}
		}
		BUG_ON(!buffer_uptodate(bh));
		BUG_ON(!nr_bhs && (m_start != block_start));
		BUG_ON(nr_bhs >= max_bhs);
		bhs[nr_bhs++] = bh;
		BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
	} while (block_start = block_end, (bh = bh->b_this_page) != head);
	if (unlikely(rl))
		up_read(&NTFS_I(vol->mftmirr_ino)->runlist.lock);
	if (likely(!err)) {
		/* Lock buffers and start synchronous write i/o on them. */
		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
			struct buffer_head *tbh = bhs[i_bhs];

			if (unlikely(test_set_buffer_locked(tbh)))
				BUG();
			BUG_ON(!buffer_uptodate(tbh));
			clear_buffer_dirty(tbh);
			get_bh(tbh);
			tbh->b_end_io = end_buffer_write_sync;
			submit_bh(WRITE, tbh);
		}
		/* Wait on i/o completion of buffers. */
		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
			struct buffer_head *tbh = bhs[i_bhs];

			wait_on_buffer(tbh);
			if (unlikely(!buffer_uptodate(tbh))) {
				err = -EIO;
				/*
				 * Set the buffer uptodate so the page and
				 * buffer states do not become out of sync.
				 */
				set_buffer_uptodate(tbh);
			}
		}
	} else /* if (unlikely(err)) */ {
		/* Clean the buffers. */
		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++)
			clear_buffer_dirty(bhs[i_bhs]);
	}
	/* Current state: all buffers are clean, unlocked, and uptodate. */
	/* Remove the mst protection fixups again. */
	post_write_mst_fixup((NTFS_RECORD*)kmirr);
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	ntfs_unmap_page(page);
	if (likely(!err)) {
		ntfs_debug("Done.");
	} else {
		ntfs_error(vol->sb, "I/O error while writing mft mirror "
				"record 0x%lx!", mft_no);
err_out:
		ntfs_error(vol->sb, "Failed to synchronize $MFTMirr (error "
				"code %i).  Volume will be left marked dirty "
				"on umount.  Run ntfsfix on the partition "
				"after umounting to correct this.", -err);
		NVolSetErrors(vol);
	}
	return err;
}

/**
 * write_mft_record_nolock - write out a mapped (extent) mft record
 * @ni:		ntfs inode describing the mapped (extent) mft record
 * @m:		mapped (extent) mft record to write
 * @sync:	if true, wait for i/o completion
 *
 * Write the mapped (extent) mft record @m described by the (regular or extent)
 * ntfs inode @ni to backing store.  If the mft record @m has a counterpart in
 * the mft mirror, that is also updated.
 *
 * We only write the mft record if the ntfs inode @ni is dirty and the first
 * buffer belonging to its mft record is dirty, too.  We ignore the dirty state
 * of subsequent buffers because we could have raced with
 * fs/ntfs/aops.c::mark_ntfs_record_dirty().
 *
 * On success, clean the mft record and return 0.  On error, leave the mft
 * record dirty and return -errno.
 *
 * NOTE:  We always perform synchronous i/o and ignore the @sync parameter.
 * However, if the mft record has a counterpart in the mft mirror and @sync is
 * true, we write the mft record, wait for i/o completion, and only then write
 * the mft mirror copy.  This ensures that if the system crashes either the mft
 * or the mft mirror will contain a self-consistent mft record @m.  If @sync is
 * false on the other hand, we start i/o on both and then wait for completion
 * on them.  This provides a speedup but no longer guarantees that you will end
 * up with a self-consistent mft record in the case of a crash, but if you
 * asked for asynchronous writing you probably do not care about that anyway.
 *
 * TODO:  If @sync is false, want to do truly asynchronous i/o, i.e. just
 * schedule i/o via ->writepage or do it via kntfsd or whatever.
 */
int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
{
	ntfs_volume *vol = ni->vol;
	struct page *page = ni->page;
	unsigned int blocksize = vol->sb->s_blocksize;
	unsigned char blocksize_bits = vol->sb->s_blocksize_bits;
	int max_bhs = vol->mft_record_size / blocksize;
	struct buffer_head *bhs[max_bhs];
	struct buffer_head *bh, *head;
	runlist_element *rl;
	unsigned int block_start, block_end, m_start, m_end;
	int i_bhs, nr_bhs, err = 0;

	ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
	BUG_ON(NInoAttr(ni));
	BUG_ON(!max_bhs);
	BUG_ON(!PageLocked(page));
	/*
	 * If the ntfs_inode is clean no need to do anything.  If it is dirty,
	 * mark it as clean now so that it can be redirtied later on if needed.
	 * There is no danger of races since the caller is holding the locks
	 * for the mft record @m and the page it is in.
	 */
	if (!NInoTestClearDirty(ni))
		goto done;
	bh = head = page_buffers(page);
	BUG_ON(!bh);
	rl = NULL;
	nr_bhs = 0;
	block_start = 0;
	m_start = ni->page_ofs;
	m_end = m_start + vol->mft_record_size;
	do {
		block_end = block_start + blocksize;
		/* If the buffer is outside the mft record, skip it. */
		if (block_end <= m_start)
			continue;
		if (unlikely(block_start >= m_end))
			break;
		/*
		 * If this block is not the first one in the record, we ignore
		 * the buffer's dirty state because we could have raced with a
		 * parallel mark_ntfs_record_dirty().
		 */
		if (block_start == m_start) {
			/* This block is the first one in the record. */
			if (!buffer_dirty(bh)) {
				BUG_ON(nr_bhs);
				/* Clean records are not written out. */
				break;
			}
		}
		/* Need to map the buffer if it is not mapped already. */
		if (unlikely(!buffer_mapped(bh))) {
			VCN vcn;
			LCN lcn;
			unsigned int vcn_ofs;

			bh->b_bdev = vol->sb->s_bdev;
			/* Obtain the vcn and offset of the current block. */
			vcn = ((VCN)ni->mft_no << vol->mft_record_size_bits) +
					(block_start - m_start);
			vcn_ofs = vcn & vol->cluster_size_mask;
			vcn >>= vol->cluster_size_bits;
			if (!rl) {
				down_read(&NTFS_I(vol->mft_ino)->runlist.lock);
				rl = NTFS_I(vol->mft_ino)->runlist.rl;
				BUG_ON(!rl);
			}
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
			/* For $MFT, only lcn >= 0 is a successful remap. */
			if (likely(lcn >= 0)) {
				/* Setup buffer head to correct block. */
				bh->b_blocknr = ((lcn <<
						vol->cluster_size_bits) +
						vcn_ofs) >> blocksize_bits;
				set_buffer_mapped(bh);
			} else {
				bh->b_blocknr = -1;
				ntfs_error(vol->sb, "Cannot write mft record "
						"0x%lx because its location "
						"on disk could not be "
						"determined (error code %lli).",
						ni->mft_no, (long long)lcn);
				err = -EIO;
			}
		}
		BUG_ON(!buffer_uptodate(bh));
		BUG_ON(!nr_bhs && (m_start != block_start));
		BUG_ON(nr_bhs >= max_bhs);
		bhs[nr_bhs++] = bh;
		BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
	} while (block_start = block_end, (bh = bh->b_this_page) != head);
	if (unlikely(rl))
		up_read(&NTFS_I(vol->mft_ino)->runlist.lock);
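The excerpt above continues on the following pages.  For orientation, the write paths shown here operate on records that are already mst protected and strip the protection again after i/o with post_write_mst_fixup().  Below is a minimal sketch, not part of mft.c, of how such a write is normally bracketed by the fixup helpers declared in fs/ntfs/ntfs.h (pre_write_mst_fixup() and post_write_mst_fixup()); the wrapper function itself is hypothetical and the actual block i/o is elided.

/*
 * Minimal sketch (hypothetical helper, not from mft.c): bracketing a write
 * of an mst protected NTFS record.  pre_write_mst_fixup() stamps the update
 * sequence number into the last two bytes of every sector of the record so
 * a torn write can be detected on the next read, and post_write_mst_fixup()
 * undoes that in-memory modification so the record remains usable.
 */
static int ntfs_write_mst_record_sketch(NTFS_RECORD *rec, const u32 rec_size)
{
	int err;

	/* Apply the multi sector transfer fixups before the data goes out. */
	err = pre_write_mst_fixup(rec, rec_size);
	if (unlikely(err)) {
		/* The record is corrupt; refuse to write it to disk. */
		return err;
	}

	/* ... submit the buffers covering @rec and wait for i/o here ... */

	/* Remove the fixups again; the in-memory copy must stay valid. */
	post_write_mst_fixup(rec);
	return 0;
}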
