aops.c

来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 2,031 行 · 第 1/4 页

C
2,031
字号
		}	} while (block++, block_start = block_end,			(bh = bh->b_this_page) != head);	/* Release the lock if we took it. */	if (rl) {		up_read(&ni->runlist.lock);		rl = NULL;	}	/* If we issued read requests, let them complete. */	while (wait_bh > wait) {		wait_on_buffer(*--wait_bh);		if (!buffer_uptodate(*wait_bh))			return -EIO;	}	ntfs_debug("Done.");	return 0;err_out:	/*	 * Zero out any newly allocated blocks to avoid exposing stale data.	 * If BH_New is set, we know that the block was newly allocated in the	 * above loop.	 * FIXME: What about initialized_size increments? Have we done all the	 * required zeroing above? If not this error handling is broken, and	 * in particular the if (block_end <= from) check is completely bogus.	 */	bh = head;	block_start = 0;	is_retry = FALSE;	do {		block_end = block_start + blocksize;		if (block_end <= from)			continue;		if (block_start >= to)			break;		if (buffer_new(bh)) {			void *kaddr;			clear_buffer_new(bh);			kaddr = kmap_atomic(page, KM_USER0);			memset(kaddr + block_start, 0, bh->b_size);			kunmap_atomic(kaddr, KM_USER0);			set_buffer_uptodate(bh);			mark_buffer_dirty(bh);			is_retry = TRUE;		}	} while (block_start = block_end, (bh = bh->b_this_page) != head);	if (is_retry)		flush_dcache_page(page);	if (rl)		up_read(&ni->runlist.lock);	return err;}/** * ntfs_prepare_write - prepare a page for receiving data * * This is called from generic_file_write() with i_sem held on the inode * (@page->mapping->host). The @page is locked and kmap()ped so page_address() * can simply be used. The source data has not yet been copied into the @page. * * Need to extend the attribute/fill in holes if necessary, create blocks and * make partially overwritten blocks uptodate, * * i_size is not to be modified yet. * * Return 0 on success or -errno on error. * * Should be using block_prepare_write() [support for sparse files] or * cont_prepare_write() [no support for sparse files]. 
 * Can't do that due to ntfs specifics but can look at them for implementation
 * guidance.
 *
 * Note: In the range, @from is inclusive and @to is exclusive, i.e. @from is
 * the first byte in the page that will be written to and @to is the first byte
 * after the last byte that will be written to.
 */
static int ntfs_prepare_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *vi = page->mapping->host;
	ntfs_inode   *ni = NTFS_I(vi);

	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
			page->index, from, to);
	/* Caller (generic_file_write) must hold the page lock and pass a
	   sane, in-page byte range. */
	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);
	BUG_ON(from > to);
	if (NInoNonResident(ni)) {
		/*
		 * Only unnamed $DATA attributes can be compressed, encrypted,
		 * and/or sparse.
		 */
		if (ni->type == AT_DATA && !ni->name_len) {
			/* If file is encrypted, deny access, just like NT4. */
			if (NInoEncrypted(ni)) {
				ntfs_debug("Denying write access to encrypted "
						"file.");
				return -EACCES;
			}
			/* Compressed data streams are handled in compress.c. */
			if (NInoCompressed(ni)) {
				// TODO: Implement and replace this check with
				// return ntfs_write_compressed_block(page);
				ntfs_error(vi->i_sb, "Writing to compressed "
						"files is not supported yet. "
						"Sorry.");
				return -EOPNOTSUPP;
			}
			// TODO: Implement and remove this check.
			if (NInoSparse(ni)) {
				ntfs_error(vi->i_sb, "Writing to sparse files "
						"is not supported yet. Sorry.");
				return -EOPNOTSUPP;
			}
		}
		// TODO: Implement and remove this check.
		if (NInoMstProtected(ni)) {
			ntfs_error(vi->i_sb, "Writing to MST protected "
					"attributes is not supported yet. "
					"Sorry.");
			return -EOPNOTSUPP;
		}
		/* Normal data stream. */
		return ntfs_prepare_nonresident_write(page, from, to);
	}
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * mst protected.
	 */
	BUG_ON(page_has_buffers(page));
	/* Do we need to resize the attribute? */
	if (((s64)page->index << PAGE_CACHE_SHIFT) + to > vi->i_size) {
		// TODO: Implement resize...
		ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
				"not supported yet. Sorry.");
		return -EOPNOTSUPP;
	}
	/*
	 * Because resident attributes are handled by memcpy() to/from the
	 * corresponding MFT record, and because this form of i/o is byte
	 * aligned rather than block aligned, there is no need to bring the
	 * page uptodate here as in the non-resident case where we need to
	 * bring the buffers straddled by the write uptodate before
	 * generic_file_write() does the copying from userspace.
	 *
	 * We thus defer the uptodate bringing of the page region outside the
	 * region written to to ntfs_commit_write(). The reason for doing this
	 * is that we save one round of:
	 *	map_mft_record(), ntfs_attr_get_search_ctx(),
	 *	ntfs_attr_lookup(), kmap_atomic(), kunmap_atomic(),
	 *	ntfs_attr_put_search_ctx(), unmap_mft_record().
	 * Which is obviously a very worthwhile save.
	 *
	 * Thus we just return success now...
	 */
	ntfs_debug("Done.");
	return 0;
}

/*
 * NOTES: There is a disparity between the apparent need to extend the
 * attribute in prepare write but to update i_size only in commit write.
 * Need to make sure i_sem protection is sufficient. And if not will need to
 * handle this in some way or another.
 */

/**
 * ntfs_commit_nonresident_write - commit a write to a non-resident attribute
 *
 * Mark the buffers straddling the byte range @from (inclusive) to @to
 * (exclusive) in @page uptodate and dirty, and set the whole page uptodate
 * when no buffer outside the written range was left non-uptodate.  An i_size
 * extension is not supported here (prepare write already rejects it).
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_commit_nonresident_write(struct page *page,
		unsigned from, unsigned to)
{
	s64 pos = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
	struct inode *vi;
	struct buffer_head *bh, *head;
	unsigned int block_start, block_end, blocksize;
	BOOL partial;

	vi = page->mapping->host;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx, from = %u, to = %u.", vi->i_ino,
			NTFS_I(vi)->type, page->index, from, to);
	blocksize = 1 << vi->i_blkbits;

	// FIXME: We need a whole slew of special cases in here for MST
	// protected attributes for example. For compressed files, too...
	// For now, we know ntfs_prepare_write() would have failed so we can't
	// get here in any of the cases which we have to special case, so we
	// are just a ripped off unrolled generic_commit_write() at present.

	/* Walk the circular buffer list of the page once. */
	bh = head = page_buffers(page);
	block_start = 0;
	partial = FALSE;
	do {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* Buffer entirely outside the written range. */
			if (!buffer_uptodate(bh))
				partial = TRUE;
		} else {
			/* Buffer (partially) inside the written range. */
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
	} while (block_start = block_end, (bh = bh->b_this_page) != head);

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus ->readpage() for the next
	 * read(). Here we 'discover' whether the page went uptodate as a
	 * result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);

	/*
	 * Not convinced about this at all. See disparity comment above. For
	 * now we know ntfs_prepare_write() would have failed in the write
	 * exceeds i_size case, so this will never trigger which is fine.
	 */
	if (pos > vi->i_size) {
		ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
				"not supported yet. Sorry.");
		return -EOPNOTSUPP;
		// vi->i_size = pos;
		// mark_inode_dirty(vi);
	}
	ntfs_debug("Done.");
	return 0;
}

/**
 * ntfs_commit_write - commit the received data
 *
 * This is called from generic_file_write() with i_sem held on the inode
 * (@page->mapping->host). The @page is locked and kmap()ped so page_address()
 * can simply be used. The source data has already been copied into the @page.
 *
 * Need to mark modified blocks dirty so they get written out later when
 * ntfs_writepage() is invoked by the VM.
 *
 * Return 0 on success or -errno on error.
 *
 * Should be using generic_commit_write(). This marks buffers uptodate and
 * dirty, sets the page uptodate if all buffers in the page are uptodate, and
 * updates i_size if the end of io is beyond i_size. In that case, it also
 * marks the inode dirty. - We could still use this (obviously except for
 * NInoMstProtected() attributes, where we will need to duplicate the core code
 * because we need our own async_io completion handler) but we could just do
 * the i_size update in prepare write, when we resize the attribute. Then
 * we would avoid the i_size update and mark_inode_dirty() happening here.
 *
 * Can't use generic_commit_write() due to ntfs specialities but can look at
 * it for implementation guidance.
 *
 * If things have gone as outlined in ntfs_prepare_write(), then we do not
 * need to do any page content modifications here at all, except in the write
 * to resident attribute case, where we need to do the uptodate bringing here
 * which we combine with the copying into the mft record which means we only
 * need to map the mft record and find the attribute record in it only once.
*/static int ntfs_commit_write(struct file *file, struct page *page,		unsigned from, unsigned to){	s64 attr_pos;	struct inode *vi;	ntfs_inode *ni, *base_ni;	char *kaddr, *kattr;	ntfs_attr_search_ctx *ctx;	MFT_RECORD *m;	u32 attr_len, bytes;	int err;	vi = page->mapping->host;	ni = NTFS_I(vi);	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "			"0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,			page->index, from, to);	if (NInoNonResident(ni)) {		/*		 * Only unnamed $DATA attributes can be compressed, encrypted,		 * and/or sparse.		 */		if (ni->type == AT_DATA && !ni->name_len) {			/* If file is encrypted, deny access, just like NT4. */			if (NInoEncrypted(ni)) {				// Should never get here!				ntfs_debug("Denying write access to encrypted "						"file.");				return -EACCES;			}			/* Compressed data streams are handled in compress.c. */			if (NInoCompressed(ni)) {				// TODO: Implement and replace this check with				// return ntfs_write_compressed_block(page);				// Should never get here!				ntfs_error(vi->i_sb, "Writing to compressed "						"files is not supported yet. "						"Sorry.");				return -EOPNOTSUPP;			}			// TODO: Implement and remove this check.			if (NInoSparse(ni)) {				// Should never get here!				ntfs_error(vi->i_sb, "Writing to sparse files "						"is not supported yet. Sorry.");				return -EOPNOTSUPP;			}		}		// TODO: Implement and remove this check.		if (NInoMstProtected(ni)) {			// Should never get here!			ntfs_error(vi->i_sb, "Writing to MST protected "					"attributes is not supported yet. "					"Sorry.");			return -EOPNOTSUPP;		}		/* Normal data stream. */		return ntfs_commit_nonresident_write(page, from, to);	}	/*	 * Attribute is resident, implying it is not compressed, encrypted, or	 * mst protected.	 */	/* Do we need to resize the attribute? */	if (((s64)page->index << PAGE_CACHE_SHIFT) + to > vi->i_size) {		// TODO: Implement resize...		
// pos = ((s64)page->index << PAGE_CACHE_SHIFT) + to;		// vi->i_size = pos;		// mark_inode_dirty(vi);		// Should never get here!		ntfs_error(vi->i_sb, "Writing beyond the existing file size is "				"not supported yet. Sorry.");		return -EOPNOTSUPP;	}	if (!NInoAttr(ni))		base_ni = ni;	else		base_ni = ni->ext.base_ntfs_ino;	/* Map, pin, and lock the mft record. */	m = map_mft_record(base_ni);	if (IS_ERR(m)) {		err = PTR_ERR(m);		m = NULL;		ctx = NULL;		goto err_out;	}	ctx = ntfs_attr_get_search_ctx(base_ni, m);	if (unlikely(!ctx)) {		err = -ENOMEM;		goto err_out;	}	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,			CASE_SENSITIVE, 0, NULL, 0, ctx);	if (unlikely(err))		goto err_out;	/* Starting position of the page within the attribute value. */	attr_pos = page->index << PAGE_CACHE_SHIFT;	/* The total length of the attribute value. */	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);	if (unlikely(vi->i_size != attr_len)) {		ntfs_error(vi->i_sb, "BUG()! i_size (0x%llx) doesn't match "				"attr_len (0x%x). Aborting write.", vi->i_size,				attr_len);		err = -EIO;		goto err_out;	}	if (unlikely(attr_pos >= attr_len)) {		ntfs_error(vi->i_sb, "BUG()! attr_pos (0x%llx) > attr_len "				"(0x%x). Aborting write.",				(unsigned long long)attr_pos, attr_len);		err = -EIO;		goto err_out;	}	bytes = attr_len - attr_pos;	if (unlikely(bytes > PAGE_CACHE_SIZE))		bytes = PAGE_CACHE_SIZE;	/*	 * Calculate the address of the attribute value corresponding to the	 * beginning of the current data @page.	 */	kattr = (u8*)ctx->attr + le16_to_cpu(			ctx->attr->data.resident.value_offset) + attr_pos;	kaddr = kmap_atomic(page, KM_USER0);	/* Copy the received data from the page to the mft record. */	memcpy(kattr + from, kaddr + from, to - from);	flush_dcache_mft_record_page(ctx->ntfs_ino);	if (!PageUptodate(page)) {		/*		 * Bring the out of bounds area(s) uptodate by copying data		 * from the mft record to the page.		 
*/		if (from > 0)			memcpy(kaddr, kattr, from);		if (to < bytes)			memcpy(kaddr + to, kattr + to, bytes - to);		/* Zero the region outside the end of the attribute value. */		if (likely(bytes < PAGE_CACHE_SIZE))			memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);		/*		 * The probability of not having done any of the above is		 * extremely small, so we just flush unconditionally.		 */		flush_dcache_page(page);		SetPageUptodate(page);	}	kunmap_atomic(kaddr, KM_USER0);	/* Mark the mft record dirty, so it gets written back. */	mark_mft_record_dirty(ctx->ntfs_ino);	ntfs_attr_put_search_ctx(ctx);	unmap_mft_record(base_ni);	ntfs_debug("Done.");	return 0;err_out:	if (err == -ENOMEM) {		ntfs_warning(vi->i_sb, "Error allocating memory required to "				"commit the write.");		if (PageUptodate(page)) {			ntfs_warning(vi->i_sb, "Page is uptodate, setting "					"dirty so the write will be retried "					"later on by the VM.");			/*			 * Put the page on mapping->dirty_pages, but leave its			 * buffer's dirty state as-is.			 */			__set_page_dirty_nobuffers(page);			err = 0;		} else			ntfs_error(vi->i_sb, "Page is not uptodate. Written "					"data has been lost. )-:");	} else {		ntfs_error(vi->i_sb, "Resident attribute write failed with "				"error %i. Setting page error flag.", -err);		SetPageError(page);	}	if (ctx)		ntfs_attr_put_search_ctx(ctx);	if (m)		unmap_mft_record(base_ni);	return err;}#endif	/* NTFS_RW *//** * ntfs_aops - general address space operations for inodes and attributes */struct address_space_operations ntfs_aops = {	.readpage	= ntfs_readpage,	/* Fill page with data. */	.sync_page	= block_sync_page,	/* Currently, just unplugs the						   disk request queue. */#ifdef NTFS_RW	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */	.prepare_write	= ntfs_prepare_write,	/* Prepare page and buffers						   ready to receive data. */	.commit_write	= ntfs_commit_write,	/* Commit received data. 
*/#endif /* NTFS_RW */};/** * ntfs_mst_aops - general address space operations for mst protecteed inodes *		   and attributes */struct address_space_operations ntfs_mst_aops = {	.readpage	= ntfs_readpage,	/* Fill page with data. */	.sync_page	= block_sync_page,	/* Currently, just unplugs the						   disk request queue. */#ifdef NTFS_RW	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */	.set_page_dirty	= __set_page_dirty_nobuffers,	/* Set the page dirty						   without touching the buffers						   belonging to the page. */#endif /* NTFS_RW */};

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?