aops.c

	if (unlikely(IS_ERR(m))) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	ctx = get_attr_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	if (unlikely(!lookup_attr(ni->type, ni->name, ni->name_len,
			IGNORE_CASE, 0, NULL, 0, ctx))) {
		err = -ENOENT;
		goto err_out;
	}

	/* Starting position of the page within the attribute value. */
	attr_pos = page->index << PAGE_CACHE_SHIFT;

	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);

	if (unlikely(vi->i_size != attr_len)) {
		ntfs_error(vi->i_sb, "BUG()! i_size (0x%Lx) doesn't match "
				"attr_len (0x%x). Aborting write.", vi->i_size,
				attr_len);
		err = -EIO;
		goto err_out;
	}
	if (unlikely(attr_pos >= attr_len)) {
		ntfs_error(vi->i_sb, "BUG()! attr_pos (0x%Lx) > attr_len (0x%x)"
				". Aborting write.", attr_pos, attr_len);
		err = -EIO;
		goto err_out;
	}

	bytes = attr_len - attr_pos;
	if (unlikely(bytes > PAGE_CACHE_SIZE))
		bytes = PAGE_CACHE_SIZE;

	/*
	 * Here, we don't need to zero the out of bounds area every time
	 * because the below memcpy() already takes care of the
	 * mmap-at-end-of-file requirements. If the file is converted to a
	 * non-resident one, the code path used is switched to the
	 * non-resident one, where the zeroing happens on each
	 * ntfs_writepage() invocation.
	 *
	 * The above also applies nicely when i_size is decreased.
	 *
	 * When i_size is increased, the memory between the old and new i_size
	 * _must_ be zeroed (or overwritten with new data). Otherwise we will
	 * expose data to userspace/disk which should never have been exposed.
	 *
	 * FIXME: Ensure that i_size increases do the zeroing/overwriting and
	 * if we cannot guarantee that, then enable the zeroing below.
	 */

	kaddr = kmap_atomic(page, KM_USER0);
	/* Copy the data from the page to the mft record. */
	memcpy((u8*)ctx->attr + le16_to_cpu(
			ctx->attr->data.resident.value_offset) + attr_pos,
			kaddr, bytes);
	flush_dcache_mft_record_page(ctx->ntfs_ino);
#if 0
	/* Zero out of bounds area. */
	if (likely(bytes < PAGE_CACHE_SIZE)) {
		memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
		flush_dcache_page(page);
	}
#endif
	kunmap_atomic(kaddr, KM_USER0);

	unlock_page(page);

	// TODO: Mark mft record dirty so it gets written back.
	ntfs_error(vi->i_sb, "Writing to resident files is not supported yet. "
			"Wrote to memory only...");

	put_attr_search_ctx(ctx);
	unmap_mft_record(base_ni);
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
				"page so we try again later.");
		/*
		 * Put the page back on mapping->dirty_pages, but leave its
		 * buffer's dirty state as-is.
		 */
		// FIXME: Once Andrew's -EAGAIN patch goes in, remove the
		// __set_page_dirty_nobuffers(page) and set err to -EAGAIN
		// instead of zero.
		__set_page_dirty_nobuffers(page);
		err = 0;
	} else {
		ntfs_error(vi->i_sb, "Resident attribute write failed with "
				"error %i. Setting page error flag.", -err);
		SetPageError(page);
	}
	unlock_page(page);
	if (ctx)
		put_attr_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}

/**
 * ntfs_prepare_nonresident_write - prepare a page for writing to a non-resident attribute
 * @page:	the page to prepare
 * @from:	offset in the page at which the write starts
 * @to:		offset in the page at which the write ends
 *
 * Return 0 on success and -errno on error.
 */
static int ntfs_prepare_nonresident_write(struct page *page,
		unsigned from, unsigned to)
{
	VCN vcn;
	LCN lcn;
	sector_t block, ablock, iblock;
	struct inode *vi;
	ntfs_inode *ni;
	ntfs_volume *vol;
	run_list_element *rl;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	unsigned int vcn_ofs, block_start, block_end, blocksize;
	int err;
	BOOL is_retry;
	unsigned char blocksize_bits;

	vi = page->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;

	ntfs_debug("Entering for inode %li, attribute type 0x%x, page index "
			"0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
			page->index, from, to);

	BUG_ON(!NInoNonResident(ni));
	BUG_ON(NInoMstProtected(ni));

	blocksize_bits = vi->i_blkbits;
	blocksize = 1 << blocksize_bits;

	/*
	 * create_empty_buffers() will create uptodate/dirty buffers if the
	 * page is uptodate/dirty.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	bh = head = page_buffers(page);
	if (unlikely(!bh))
		return -ENOMEM;

	/* The first block in the page. */
	block = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);

	/*
	 * The first out of bounds block for the allocated size. No need to
	 * round up as allocated_size is in multiples of cluster size and the
	 * minimum cluster size is 512 bytes, which is equal to the smallest
	 * blocksize.
	 */
	ablock = ni->allocated_size >> blocksize_bits;
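	/*
	 * For example, with an allocated_size of 0x2000 (8 KiB) and 512-byte
	 * blocks (blocksize_bits 9), ablock is 16, i.e. block 16 is the first
	 * block lying entirely outside the allocation.
	 */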

	/* The last (fully or partially) initialized block. */
	iblock = ni->initialized_size >> blocksize_bits;

	/* Loop through all the buffers in the page. */
	block_start = 0;
	rl = NULL;
	err = 0;
	do {
		block_end = block_start + blocksize;
		/*
		 * If buffer @bh is outside the write, just mark it uptodate
		 * if the page is uptodate and continue with the next buffer.
		 */
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		/*
		 * @bh is at least partially being written to.
		 * Make sure it is not marked as new.
		 */
		//if (buffer_new(bh))
		//	clear_buffer_new(bh);

		if (block >= ablock) {
			// TODO: block is above allocated_size, need to
			// allocate it. Best done in one go to accommodate not
			// only block but all above blocks up to and including:
			// ((page->index << PAGE_CACHE_SHIFT) + to + blocksize
			// - 1) >> blocksize_bits. Obviously will need to round
			// up to next cluster boundary, too. This should be
			// done with a helper function, so it can be reused.
			ntfs_error(vol->sb, "Writing beyond allocated size "
					"is not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			goto err_out;
			// Need to update ablock.
			// Need to set_buffer_new() on all block bhs that are
			// newly allocated.
		}
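
		/*
		 * A rough sketch of the allocation target computation the
		 * above TODO describes: round the end of the write up to the
		 * next cluster boundary. ntfs_extend_allocation() is a
		 * hypothetical helper, not part of this driver.
		 */
#if 0
		{
			s64 new_alloc;

			/* End of the write, rounded up to a full cluster. */
			new_alloc = (((s64)page->index << PAGE_CACHE_SHIFT) +
					to + vol->cluster_size - 1) &
					~(s64)vol->cluster_size_mask;
			err = ntfs_extend_allocation(ni, new_alloc);
			if (unlikely(err))
				goto err_out;
			/* Re-derive ablock from the new allocated size. */
			ablock = ni->allocated_size >> blocksize_bits;
		}
#endif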
		/*
		 * Now we have enough allocated size to fulfill the whole
		 * request, i.e. block < ablock is true.
		 */
		if (unlikely((block >= iblock) &&
				(ni->initialized_size < vi->i_size))) {
			/*
			 * If this page is fully outside initialized size, zero
			 * out all pages between the current initialized size
			 * and the current page. Just use ntfs_readpage() to do
			 * the zeroing transparently.
			 */
			if (block > iblock) {
				// TODO:
				// For each page do:
				// - read_cache_page()
				// Again for each page do:
				// - wait_on_page_locked()
				// - Check (PageUptodate(page) &&
				// 			!PageError(page))
				// Update initialized size in the attribute and
				// in the inode.
				// Again, for each page do:
				// 	__set_page_dirty_buffers();
				// page_cache_release()
				// We don't need to wait on the writes.
				// Update iblock.
			}
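
			/*
			 * A condensed sketch of the procedure in the above
			 * TODO, using the page cache calls available here;
			 * error handling and the update of the initialized
			 * size in the attribute are elided.
			 */
#if 0
			if (block > iblock) {
				struct address_space *mapping = vi->i_mapping;
				unsigned long idx;

				for (idx = ni->initialized_size >>
						PAGE_CACHE_SHIFT;
						idx < page->index; idx++) {
					struct page *zp = read_cache_page(
							mapping, idx,
							(filler_t*)mapping->
							a_ops->readpage, NULL);

					if (IS_ERR(zp))
						break;
					wait_on_page_locked(zp);
					if (PageUptodate(zp) && !PageError(zp))
						__set_page_dirty_buffers(zp);
					page_cache_release(zp);
				}
				/* Then update initialized_size and iblock. */
			}
#endif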
			/*
			 * The current page straddles initialized size. Zero
			 * all non-uptodate buffers and set them uptodate (and
			 * dirty?). Note, there aren't any non-uptodate buffers
			 * if the page is uptodate.
			 * FIXME: For an uptodate page, the buffers may need to
			 * be written out because they were not initialized on
			 * disk before.
			 */
			if (!PageUptodate(page)) {
				// TODO:
				// Zero any non-uptodate buffers up to i_size.
				// Set them uptodate and dirty.
			}
			// TODO:
			// Update initialized size in the attribute and in the
			// inode (up to i_size).
			// Update iblock.
			// FIXME: This is inefficient. Try to batch the two
			// size changes to happen in one go.
			ntfs_error(vol->sb, "Writing beyond initialized size "
					"is not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			goto err_out;
			// Do NOT set_buffer_new() BUT DO clear buffer range
			// outside write request range.
			// set_buffer_uptodate() on complete buffers as well as
			// set_buffer_dirty().
		}
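
		/*
		 * An indicative sketch of the buffer zeroing for the
		 * straddling page itself; the restriction to buffers below
		 * i_size and the matching attribute update are left out.
		 */
#if 0
		if (!PageUptodate(page)) {
			struct buffer_head *tbh = head;
			unsigned int tstart = 0;
			void *kaddr = kmap_atomic(page, KM_USER0);

			do {
				if (!buffer_uptodate(tbh)) {
					memset(kaddr + tstart, 0, blocksize);
					set_buffer_uptodate(tbh);
					set_buffer_dirty(tbh);
				}
				tstart += blocksize;
			} while ((tbh = tbh->b_this_page) != head);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
#endif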

		/* Need to map unmapped buffers. */
		if (!buffer_mapped(bh)) {
			/* Unmapped buffer. Need to map it. */
			bh->b_bdev = vol->sb->s_bdev;

			/* Convert block into corresponding vcn and offset. */
			vcn = (VCN)block << blocksize_bits >>
					vol->cluster_size_bits;
			vcn_ofs = ((VCN)block << blocksize_bits) &
					vol->cluster_size_mask;

			is_retry = FALSE;
			if (!rl) {
lock_retry_remap:
				down_read(&ni->run_list.lock);
				rl = ni->run_list.rl;
			}
			if (likely(rl != NULL)) {
				/* Seek to element containing target vcn. */
				while (rl->length && rl[1].vcn <= vcn)
					rl++;
				lcn = vcn_to_lcn(rl, vcn);
			} else
				lcn = (LCN)LCN_RL_NOT_MAPPED;
			if (unlikely(lcn < 0)) {
				/*
				 * We extended the attribute allocation above.
				 * If we hit an ENOENT here it means that the
				 * allocation was insufficient, which is a bug.
				 */
				BUG_ON(lcn == LCN_ENOENT);

				/* It is a hole, need to instantiate it. */
				if (lcn == LCN_HOLE) {
					// TODO: Instantiate the hole.
					// clear_buffer_new(bh);
					// unmap_underlying_metadata(bh->b_bdev,
					// 		bh->b_blocknr);
					// For non-uptodate buffers, need to
					// zero out the region outside the
					// request in this bh or all bhs,
					// depending on what we implemented
					// above.
					// Need to flush_dcache_page().
					// Or could use set_buffer_new()
					// instead?
					ntfs_error(vol->sb, "Writing into "
							"sparse regions is "
							"not supported yet. "
							"Sorry.");
					err = -EOPNOTSUPP;
					goto err_out;
				} else if (!is_retry &&
						lcn == LCN_RL_NOT_MAPPED) {
					is_retry = TRUE;
					/*
					 * Attempt to map run list, dropping
					 * lock for the duration.
					 */
					up_read(&ni->run_list.lock);
					err = map_run_list(ni, vcn);
					if (likely(!err))
						goto lock_retry_remap;
					rl = NULL;
				}
				/*
				 * Failed to map the buffer, even after
				 * retrying.
				 */
				bh->b_blocknr = -1UL;
				ntfs_error(vol->sb, "vcn_to_lcn(vcn = 0x%Lx) "
						"failed with error code "
						"0x%Lx%s.", (long long)vcn,
						(long long)-lcn, is_retry ?
						" even after retrying" : "");
				// FIXME: Depending on vol->on_errors, do
				// something.
				if (!err)
					err = -EIO;
				goto err_out;
			}
			/* We now have a successful remap, i.e. lcn >= 0. */

			/* Setup buffer head to correct block. */
			bh->b_blocknr = ((lcn << vol->cluster_size_bits)
					+ vcn_ofs) >> blocksize_bits;
			set_buffer_mapped(bh);

			// FIXME: Something analogous to this is needed for
			// each newly allocated block, i.e. BH_New.
			// FIXME: Might need to take this out of the
			// if (!buffer_mapped(bh)) {}, depending on how we
			// implement things during the allocated_size and
			// initialized_size extension code above.
			if (buffer_new(bh)) {
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
						bh->b_blocknr);
				if (PageUptodate(page)) {
					set_buffer_uptodate(bh);
					continue;
				}
				/*
				 * Page is _not_ uptodate, zero surrounding
				 * region. NOTE: This is how we decide whether
				 * to zero or not!
				 */
				if (block_end > to || block_start < from) {
					void *kaddr;

					kaddr = kmap_atomic(page, KM_USER0);
					if (block_end > to)
						memset(kaddr + to, 0,
								block_end - to);
					if (block_start < from)
						memset(kaddr + block_start, 0,
								from -
								block_start);
					flush_dcache_page(page);
					kunmap_atomic(kaddr, KM_USER0);
				}
				continue;
			}
		}
		/* @bh is mapped, set it uptodate if the page is uptodate. */
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		/*
		 * The page is not uptodate. The buffer is mapped. If it is not
		 * uptodate, and it is only partially being written to, we need
		 * to read the buffer in before the write, i.e. right now.
		 */
		if (!buffer_uptodate(bh) &&
				(block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++ = bh;
		}
	} while (block++, block_start = block_end,
			(bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl) {
		up_read(&ni->run_list.lock);
		rl = NULL;
	}

	/* If we issued read requests, let them complete. */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			return -EIO;
	}

	ntfs_debug("Done.");
	return 0;
err_out:
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale data.
	 * If BH_New is set, we know that the block was newly allocated in the
	 * above loop.
	 * FIXME: What about initialized_size increments? Have we done all the
	 * required zeroing above? If not this error handling is broken, and
	 * in particular the if (block_end <= from) check is completely bogus.
	 */
	bh = head;
	block_start = 0;
	is_retry = FALSE;
	do {
		block_end = block_start + blocksize;
		if (block_end <= from)
			continue;
		if (block_start >= to)
			break;
		if (buffer_new(bh)) {
			void *kaddr;

			clear_buffer_new(bh);
			if (buffer_uptodate(bh))
				buffer_error();
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr + block_start, 0, bh->b_size);
			kunmap_atomic(kaddr, KM_USER0);
