⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 dir.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 4 页
字号:
	 * the search context and unmap the mft record before calling the
	 * filldir() callback.  We need to do this because of NFSd which calls
	 * ->lookup() from its filldir callback() and this causes NTFS to
	 * deadlock as ntfs_lookup() maps the mft record of the directory and
	 * we have got it mapped here already.  The only solution is for us to
	 * unmap the mft record here so that a call to ntfs_lookup() is able to
	 * map the mft record without deadlocking.
	 */
	rc = le32_to_cpu(ctx->attr->data.resident.value_length);
	ir = kmalloc(rc, GFP_NOFS);
	if (unlikely(!ir)) {
		err = -ENOMEM;
		goto err_out;
	}
	/* Copy the index root value (it has been verified in read_inode). */
	memcpy(ir, (u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset), rc);
	/*
	 * Drop the search context and the mapped mft record now (see the NFSd
	 * deadlock note above).  NULL the pointers so that the err_out path
	 * does not release them a second time.
	 */
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(ndir);
	ctx = NULL;
	m = NULL;
	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ir->index +
			le32_to_cpu(ir->index.entries_offset));
	/*
	 * Loop until we exceed valid memory (corruption case) or until we
	 * reach the last entry or until filldir tells us it has had enough
	 * or signals an error (both covered by the rc test).
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		ntfs_debug("In index root, offset 0x%zx.", (u8*)ie - (u8*)ir);
		/* Bounds checks. */
		if (unlikely((u8*)ie < (u8*)ir || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->key_length) >
				index_end))
			goto err_out;
		/* The last entry cannot contain a name. */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/* Skip index root entry if continuing previous readdir. */
		if (ir_pos > (u8*)ie - (u8*)ir)
			continue;
		/*
		 * Advance the position even if going to skip the entry.
		 * Note: while in the index root, fpos is simply the byte
		 * offset of the entry within the copied index root value.
		 */
		fpos = (u8*)ie - (u8*)ir;
		/* Submit the name to the filldir callback. */
		rc = ntfs_filldir(vol, fpos, ndir, NULL, ie, name, dirent,
				filldir);
		if (rc) {
			kfree(ir);
			goto abort;
		}
	}
	/* We are done with the index root and can free the buffer. */
	kfree(ir);
	ir = NULL;
	/* If there is no index allocation attribute we are finished. */
	if (!NInoIndexAllocPresent(ndir))
		goto EOD;
	/*
	 * Advance fpos to the beginning of the index allocation.  From here
	 * on, fpos values >= vol->mft_record_size denote offsets within the
	 * index allocation attribute (see the ia_pos computation below).
	 */
	fpos = vol->mft_record_size;
/* Entered directly when a previous readdir already consumed the index root. */
skip_index_root:
	kaddr = NULL;
	prev_ia_pos = -1LL;
	/* Get the offset into the index allocation attribute. */
	ia_pos = (s64)fpos - vol->mft_record_size;
	ia_mapping = vdir->i_mapping;
	ntfs_debug("Inode 0x%lx, getting index bitmap.", vdir->i_ino);
	/* The $BITMAP attribute tracks which index blocks are in use. */
	bmp_vi = ntfs_attr_iget(vdir, AT_BITMAP, I30, 4);
	if (IS_ERR(bmp_vi)) {
		ntfs_error(sb, "Failed to get bitmap attribute.");
		err = PTR_ERR(bmp_vi);
		goto err_out;
	}
	bmp_mapping = bmp_vi->i_mapping;
	/* Get the starting bitmap bit position and sanity check it. */
	bmp_pos = ia_pos >> ndir->itype.index.block_size_bits;
	if (unlikely(bmp_pos >> 3 >= i_size_read(bmp_vi))) {
		ntfs_error(sb, "Current index allocation position exceeds "
				"index bitmap size.");
		goto iput_err_out;
	}
	/* Get the starting bit position in the current bitmap page. */
	cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1);
	bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1);
get_next_bmp_page:
	ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx",
			(unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT),
			(unsigned long long)bmp_pos &
			(unsigned long long)((PAGE_CACHE_SIZE * 8) - 1));
	bmp_page = ntfs_map_page(bmp_mapping,
			bmp_pos >> (3 + PAGE_CACHE_SHIFT));
	if (IS_ERR(bmp_page)) {
		ntfs_error(sb, "Reading index bitmap failed.");
		err = PTR_ERR(bmp_page);
		bmp_page = NULL;
		goto iput_err_out;
	}
	bmp = (u8*)page_address(bmp_page);
	/* Find next index block in use (bit set means block is allocated). */
	while (!(bmp[cur_bmp_pos >> 3] & (1 << (cur_bmp_pos & 7)))) {
/*
 * Jumped to from below after an index block has been fully processed, to
 * advance to the next set bit in the $BITMAP attribute.
 */
find_next_index_buffer:
		cur_bmp_pos++;
		/*
		 * If we have reached the end of the bitmap page, get the next
		 * page, and put away the old one.
		 */
		if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) {
			ntfs_unmap_page(bmp_page);
			bmp_pos += PAGE_CACHE_SIZE * 8;
			cur_bmp_pos = 0;
			goto get_next_bmp_page;
		}
		/* If we have reached the end of the bitmap, we are done. */
		if (unlikely(((bmp_pos + cur_bmp_pos) >> 3) >= i_size))
			goto unm_EOD;
		ia_pos = (bmp_pos + cur_bmp_pos) <<
				ndir->itype.index.block_size_bits;
	}
	ntfs_debug("Handling index buffer 0x%llx.",
			(unsigned long long)bmp_pos + cur_bmp_pos);
	/* If the current index buffer is in the same page we reuse the page. */
	if ((prev_ia_pos & (s64)PAGE_CACHE_MASK) !=
			(ia_pos & (s64)PAGE_CACHE_MASK)) {
		prev_ia_pos = ia_pos;
		if (likely(ia_page != NULL)) {
			unlock_page(ia_page);
			ntfs_unmap_page(ia_page);
		}
		/*
		 * Map the page cache page containing the current ia_pos,
		 * reading it from disk if necessary.
		 */
		ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT);
		if (IS_ERR(ia_page)) {
			ntfs_error(sb, "Reading index allocation data failed.");
			err = PTR_ERR(ia_page);
			ia_page = NULL;
			goto err_out;
		}
		/* Keep the page locked while entries are handed to filldir. */
		lock_page(ia_page);
		kaddr = (u8*)page_address(ia_page);
	}
	/* Get the current index buffer. */
	ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
			~(s64)(ndir->itype.index.block_size - 1)));
	/* Bounds checks. */
	if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
				"inode 0x%lx or driver bug.", vdir->i_ino);
		goto err_out;
	}
	/* Catch multi sector transfer fixup errors. */
	if (unlikely(!ntfs_is_indx_record(ia->magic))) {
		ntfs_error(sb, "Directory index record with vcn 0x%llx is "
				"corrupt.  Corrupt inode 0x%lx.  Run chkdsk.",
				(unsigned long long)ia_pos >>
				ndir->itype.index.vcn_size_bits, vdir->i_ino);
		goto err_out;
	}
	/* The on-disk VCN must match the VCN implied by our position. */
	if (unlikely(sle64_to_cpu(ia->index_block_vcn) != (ia_pos &
			~(s64)(ndir->itype.index.block_size - 1)) >>
			ndir->itype.index.vcn_size_bits)) {
		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
				"different from expected VCN (0x%llx). "
				"Directory inode 0x%lx is corrupt or driver "
				"bug. ", (unsigned long long)
				sle64_to_cpu(ia->index_block_vcn),
				(unsigned long long)ia_pos >>
				ndir->itype.index.vcn_size_bits, vdir->i_ino);
		goto err_out;
	}
	/* 0x18 is the size of the INDEX_ALLOCATION header before the index. */
	if (unlikely(le32_to_cpu(ia->index.allocated_size) + 0x18 !=
			ndir->itype.index.block_size)) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
				"0x%lx has a size (%u) differing from the "
				"directory specified size (%u). Directory "
				"inode is corrupt or driver bug.",
				(unsigned long long)ia_pos >>
				ndir->itype.index.vcn_size_bits, vdir->i_ino,
				le32_to_cpu(ia->index.allocated_size) + 0x18,
				ndir->itype.index.block_size);
		goto err_out;
	}
	index_end = (u8*)ia + ndir->itype.index.block_size;
	if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
				"0x%lx crosses page boundary. Impossible! "
				"Cannot access! This is probably a bug in the "
				"driver.", (unsigned long long)ia_pos >>
				ndir->itype.index.vcn_size_bits, vdir->i_ino);
		goto err_out;
	}
	ia_start = ia_pos & ~(s64)(ndir->itype.index.block_size - 1);
	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
	if (unlikely(index_end > (u8*)ia + ndir->itype.index.block_size)) {
		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of directory "
				"inode 0x%lx exceeds maximum size.",
				(unsigned long long)ia_pos >>
				ndir->itype.index.vcn_size_bits, vdir->i_ino);
		goto err_out;
	}
	/* The first index entry in this index buffer. */
	ie = (INDEX_ENTRY*)((u8*)&ia->index +
			le32_to_cpu(ia->index.entries_offset));
	/*
	 * Loop until we exceed valid memory (corruption case) or until we
	 * reach the last entry or until filldir tells us it has had enough
	 * or signals an error (both covered by the rc test).
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		ntfs_debug("In index allocation, offset 0x%llx.",
				(unsigned long long)ia_start +
				(unsigned long long)((u8*)ie - (u8*)ia));
		/* Bounds checks. */
		if (unlikely((u8*)ie < (u8*)ia || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->key_length) >
				index_end))
			goto err_out;
		/* The last entry cannot contain a name. */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/* Skip index block entry if continuing previous readdir. */
		if (ia_pos - ia_start > (u8*)ie - (u8*)ia)
			continue;
		/*
		 * Advance the position even if going to skip the entry.
		 * The mft_record_size bias keeps index allocation positions
		 * disjoint from index root positions (see above).
		 */
		fpos = (u8*)ie - (u8*)ia +
				(sle64_to_cpu(ia->index_block_vcn) <<
				ndir->itype.index.vcn_size_bits) +
				vol->mft_record_size;
		/*
		 * Submit the name to the @filldir callback.  Note,
		 * ntfs_filldir() drops the lock on @ia_page but it retakes it
		 * before returning, unless a non-zero value is returned in
		 * which case the page is left unlocked.
		 */
		rc = ntfs_filldir(vol, fpos, ndir, ia_page, ie, name, dirent,
				filldir);
		if (rc) {
			/* @ia_page is already unlocked in this case. */
			ntfs_unmap_page(ia_page);
			ntfs_unmap_page(bmp_page);
			iput(bmp_vi);
			goto abort;
		}
	}
	goto find_next_index_buffer;
unm_EOD:
	if (ia_page) {
		unlock_page(ia_page);
		ntfs_unmap_page(ia_page);
	}
	ntfs_unmap_page(bmp_page);
	iput(bmp_vi);
EOD:
	/* We are finished, set fpos to EOD (past the last valid position). */
	fpos = i_size + vol->mft_record_size;
abort:
	kfree(name);
done:
#ifdef DEBUG
	if (!rc)
		ntfs_debug("EOD, fpos 0x%llx, returning 0.", fpos);
	else
		ntfs_debug("filldir returned %i, fpos 0x%llx, returning 0.",
				rc, fpos);
#endif
	filp->f_pos = fpos;
	return 0;
err_out:
	if (bmp_page) {
		ntfs_unmap_page(bmp_page);
/*
 * NOTE: this label is deliberately inside the if (bmp_page) block.  Jumps to
 * iput_err_out occur only while bmp_page is not mapped, so they must release
 * bmp_vi without unmapping a bitmap page.
 */
iput_err_out:
		iput(bmp_vi);
	}
	if (ia_page) {
		unlock_page(ia_page);
		ntfs_unmap_page(ia_page);
	}
	kfree(ir);
	kfree(name);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(ndir);
	if (!err)
		err = -EIO;
	ntfs_debug("Failed. Returning error code %i.", -err);
	filp->f_pos = fpos;
	return err;
}

/**
 * ntfs_dir_open - called when an inode is about to be opened
 * @vi:		inode to be opened
 * @filp:	file structure describing the inode
 *
 * Limit directory size to the page cache limit on architectures where unsigned
 * long is 32-bits. This is the most we can do for now without overflowing the
 * page cache page index. Doing it this way means we don't run into problems
 * because of existing too large directories. It would be better to allow the
 * user to read the accessible part of the directory but I doubt very much
 * anyone is going to hit this check on a 32-bit architecture, so there is no
 * point in adding the extra complexity required to support this.
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 */
static int ntfs_dir_open(struct inode *vi, struct file *filp)
{
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EFBIG;
	}
	return 0;
}

#ifdef NTFS_RW

/**
 * ntfs_dir_fsync - sync a directory to disk
 * @filp:	directory to be synced
 * @dentry:	dentry describing the directory to sync
 * @datasync:	if non-zero only flush user data and not metadata
 *
 * Data integrity sync of a directory to disk.  Used for fsync, fdatasync, and
 * msync system calls.  This function is based on file.c::ntfs_file_fsync().
 *
 * Write the mft record and all associated extent mft records as well as the
 * $INDEX_ALLOCATION and $BITMAP attributes and then sync the block device.
 *
 * If @datasync is true, we do not wait on the inode(s) to be written out
 * but we always wait on the page cache pages to be written out.
 *
 * Note: In the past @filp could be NULL so we ignore it as we don't need it
 * anyway.
 *
 * Locking: Caller must hold i_mutex on the inode.
 *
 * TODO: We should probably also write all attribute/index inodes associated
 * with this inode but since we have no simple way of getting to them we ignore
 * this problem for now.  We do write the $BITMAP attribute if it is present
 * which is the important one for a directory so things are not too bad.
 */
static int ntfs_dir_fsync(struct file *filp, struct dentry *dentry,
		int datasync)
{
	struct inode *bmp_vi, *vi = dentry->d_inode;
	int err, ret;
	ntfs_attr na;

	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
	BUG_ON(!S_ISDIR(vi->i_mode));
	/* If the bitmap attribute inode is in memory sync it, too. */
	na.mft_no = vi->i_ino;
	na.type = AT_BITMAP;
	na.name = I30;
	na.name_len = 4;
	/*
	 * Look up the $BITMAP attribute inode without elevating its use count
	 * if it is not already in the inode cache.  The test_t cast adapts
	 * ntfs_test_inode() to the ilookup5() callback signature.
	 */
	bmp_vi = ilookup5(vi->i_sb, vi->i_ino, (test_t)ntfs_test_inode, &na);
	if (bmp_vi) {
		write_inode_now(bmp_vi, !datasync);
		iput(bmp_vi);
	}
	/*
	 * Write the directory inode itself: first the NTFS mft record
	 * (ntfs_write_inode), then the VFS-level dirty pages/inode state.
	 * NOTE(review): the error code of ntfs_write_inode() seeds ret while
	 * write_inode_now()'s return value is ignored here — presumably
	 * intentional, matching the original driver; confirm before changing.
	 */
	ret = ntfs_write_inode(vi, 1);
	write_inode_now(vi, !datasync);
	/* Finally flush the underlying block device; keep the first error. */
	err = sync_blockdev(vi->i_sb->s_bdev);
	if (unlikely(err && !ret))
		ret = err;
	if (likely(!ret))
		ntfs_debug("Done.");
	else
		ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx.  Error "
				"%u.", datasync ? "data" : "", vi->i_ino, -ret);
	return ret;
}

#endif /* NTFS_RW */

const struct file_operations ntfs_dir_ops = {
	.llseek		= generic_file_llseek,	/* Seek inside directory. */
	.read		= generic_read_dir,	/* Return -EISDIR. */
	.readdir	= ntfs_readdir,		/* Read directory contents. */
#ifdef NTFS_RW
	.fsync		= ntfs_dir_fsync,	/* Sync a directory to disk. */
	/*.aio_fsync	= ,*/			/* Sync all outstanding async
						   i/o operations on a kiocb. */
#endif /* NTFS_RW */
	/*.ioctl	= ,*/			/* Perform function on the
						   mounted filesystem. */
	.open		= ntfs_dir_open,	/* Open directory. */
};

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -