
file.c

Linux kernel source code (C)
Page 1 of 5

		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		addr = kmap_atomic(*pages, KM_USER0);
		left = __copy_from_user_inatomic(addr + ofs, buf, len);
		kunmap_atomic(addr, KM_USER0);
		if (unlikely(left)) {
			/* Do it the slow way. */
			addr = kmap(*pages);
			left = __copy_from_user(addr + ofs, buf, len);
			kunmap(*pages);
			if (unlikely(left))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		buf += len;
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += len - left;
	/* Zero the rest of the target like __copy_from_user(). */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		zero_user_page(*pages, 0, len, KM_USER0);
	}
	goto out;
}

static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
		const struct iovec *iov, size_t iov_ofs, size_t bytes)
{
	size_t total = 0;

	while (1) {
		const char __user *buf = iov->iov_base + iov_ofs;
		unsigned len;
		size_t left;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		left = __copy_from_user_inatomic(vaddr, buf, len);
		total += len;
		bytes -= len;
		vaddr += len;
		if (unlikely(left)) {
			total -= left;
			break;
		}
		if (!bytes)
			break;
		iov++;
		iov_ofs = 0;
	}
	return total;
}

static inline void ntfs_set_next_iovec(const struct iovec **iovp,
		size_t *iov_ofsp, size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t iov_ofs = *iov_ofsp;

	while (bytes) {
		unsigned len;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		bytes -= len;
		iov_ofs += len;
		if (iov->iov_len == iov_ofs) {
			iov++;
			iov_ofs = 0;
		}
	}
	*iovp = iov;
	*iov_ofsp = iov_ofs;
}
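/*
 * A standalone userspace sketch of the (iov, iov_ofs) cursor used above.
 * advance_iovec() mirrors ntfs_set_next_iovec(); the function name, the
 * buffers, and main() are hypothetical illustrations, not part of the
 * NTFS driver.
 */
#include <stdio.h>
#include <sys/uio.h>

static void advance_iovec(const struct iovec **iovp, size_t *iov_ofsp,
		size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t iov_ofs = *iov_ofsp;

	while (bytes) {
		size_t len = iov->iov_len - iov_ofs;

		if (len > bytes)
			len = bytes;
		bytes -= len;
		iov_ofs += len;
		if (iov->iov_len == iov_ofs) {
			/* Current segment exhausted; step to the next one. */
			iov++;
			iov_ofs = 0;
		}
	}
	*iovp = iov;
	*iov_ofsp = iov_ofs;
}

int main(void)
{
	char a[10], b[20];
	struct iovec vec[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	const struct iovec *cur = vec;
	size_t ofs = 0;

	/* Consume all 10 bytes of a[] and the first 5 bytes of b[]. */
	advance_iovec(&cur, &ofs, 15);
	printf("segment %td, offset %zu\n", cur - vec, ofs);	/* 1, 5 */
	return 0;
}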
/*
 * This has the same side-effects and return value as ntfs_copy_from_user().
 * The difference is that on a fault we need to memset the remainder of the
 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
 * single-segment behaviour.
 *
 * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
 * when atomic and when not atomic.  This is ok because
 * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
 * and it is ok to call this when non-atomic.
 * In fact, the only difference between __copy_from_user_inatomic() and
 * __copy_from_user() is that the latter calls might_sleep() and the former
 * should not zero the tail of the buffer on error.  And on many
 * architectures __copy_from_user_inatomic() is just defined to
 * __copy_from_user() so it makes no difference at all on those architectures.
 */
static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
		size_t *iov_ofs, size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *addr;
	size_t copied, len, total = 0;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		addr = kmap_atomic(*pages, KM_USER0);
		copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
				*iov, *iov_ofs, len);
		kunmap_atomic(addr, KM_USER0);
		if (unlikely(copied != len)) {
			/* Do it the slow way. */
			addr = kmap(*pages);
			copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
					*iov, *iov_ofs, len);
			/*
			 * Zero the rest of the target like __copy_from_user().
			 */
			memset(addr + ofs + copied, 0, len - copied);
			kunmap(*pages);
			if (unlikely(copied != len))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		ntfs_set_next_iovec(iov, iov_ofs, len);
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += copied;
	/* Zero the rest of the target like __copy_from_user(). */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		zero_user_page(*pages, 0, len, KM_USER0);
	}
	goto out;
}

static inline void ntfs_flush_dcache_pages(struct page **pages,
		unsigned nr_pages)
{
	BUG_ON(!nr_pages);
	/*
	 * Warning: Do not do the decrement at the same time as the call to
	 * flush_dcache_page() because it is a NULL macro on i386 and hence the
	 * decrement never happens so the loop never terminates.
	 */
	do {
		--nr_pages;
		flush_dcache_page(pages[nr_pages]);
	} while (nr_pages > 0);
}
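/*
 * The warning in ntfs_flush_dcache_pages() generalizes: when a macro can
 * expand to nothing, a side effect placed in its argument list silently
 * disappears.  A minimal sketch; NOOP_FLUSH() is a hypothetical stand-in
 * for a flush_dcache_page()-like macro that is empty on some architectures.
 */
#include <stdio.h>

#define NOOP_FLUSH(page)	/* expands to nothing, as on i386 */

int main(void)
{
	int pages[4];
	unsigned nr_pages = 4;

	(void)pages;	/* only ever named inside the empty macro */
	/*
	 * Broken: do { NOOP_FLUSH(pages[--nr_pages]); } while (nr_pages > 0);
	 * The macro drops its argument, so --nr_pages is never evaluated and
	 * the loop spins forever.  Doing the decrement as its own statement,
	 * as the driver does, works regardless of how the macro expands.
	 */
	do {
		--nr_pages;
		NOOP_FLUSH(pages[nr_pages]);
	} while (nr_pages > 0);
	printf("nr_pages = %u\n", nr_pages);	/* prints 0 */
	return 0;
}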
/**
 * ntfs_commit_pages_after_non_resident_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * See description of ntfs_commit_pages_after_write(), below.
 */
static inline int ntfs_commit_pages_after_non_resident_write(
		struct page **pages, const unsigned nr_pages,
		s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct buffer_head *bh, *head;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	unsigned long flags;
	unsigned blocksize, u;
	int err;

	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	blocksize = vi->i_sb->s_blocksize;
	end = pos + bytes;
	u = 0;
	do {
		s64 bh_pos;
		struct page *page;
		bool partial;

		page = pages[u];
		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
		bh = head = page_buffers(page);
		partial = false;
		do {
			s64 bh_end;

			bh_end = bh_pos + blocksize;
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh))
					partial = true;
			} else {
				set_buffer_uptodate(bh);
				mark_buffer_dirty(bh);
			}
		} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
		/*
		 * If all buffers are now uptodate but the page is not, set the
		 * page uptodate.
		 */
		if (!partial && !PageUptodate(page))
			SetPageUptodate(page);
	} while (++u < nr_pages);
	/*
	 * Finally, if we do not need to update initialized_size or i_size we
	 * are finished.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end <= initialized_size) {
		ntfs_debug("Done.");
		return 0;
	}
	/*
	 * Update initialized_size/i_size as appropriate, both in the inode and
	 * the mft record.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	BUG_ON(!NInoNonResident(ni));
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	write_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(end > ni->allocated_size);
	ni->initialized_size = end;
	a->data.non_resident.initialized_size = cpu_to_sle64(end);
	if (end > i_size_read(vi)) {
		i_size_write(vi, end);
		a->data.non_resident.data_size =
				a->data.non_resident.initialized_size;
	}
	write_unlock_irqrestore(&ni->size_lock, flags);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
			"code %i).", err);
	if (err != -ENOMEM)
		NVolSetErrors(ni->vol);
	return err;
}
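/*
 * The buffer walk above marks a block uptodate and dirty exactly when the
 * half-open intervals [bh_pos, bh_end) and [pos, end) overlap; the test is
 * the negation of "ends at or before the write starts, or starts at or
 * after it ends".  A runnable sketch; overlaps() and the sizes used are
 * hypothetical.
 */
#include <stdio.h>

static int overlaps(long long bh_pos, long long bh_end,
		long long pos, long long end)
{
	return !(bh_end <= pos || bh_pos >= end);
}

int main(void)
{
	long long bh_pos;

	/* 512-byte blocks against a write spanning [700, 1500). */
	for (bh_pos = 0; bh_pos < 2048; bh_pos += 512)
		printf("block [%lld, %lld): %s\n", bh_pos, bh_pos + 512,
				overlaps(bh_pos, bh_pos + 512, 700, 1500) ?
				"uptodate + dirty" : "untouched");
	return 0;
}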
/**
 * ntfs_commit_pages_after_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called from ntfs_file_buffered_write() with i_mutex held on the inode
 * (@pages[0]->mapping->host).  There are @nr_pages pages in @pages which are
 * locked but not kmap()ped.  The source data has already been copied into the
 * @page.  ntfs_prepare_pages_for_non_resident_write() has been called before
 * the data was copied (for non-resident attributes only) and it returned
 * success.
 *
 * Need to set uptodate and mark dirty all buffers within the boundary of the
 * write.  If all buffers in a page are uptodate we set the page uptodate, too.
 *
 * Setting the buffers dirty ensures that they get written out later when
 * ntfs_writepage() is invoked by the VM.
 *
 * Finally, we need to update i_size and initialized_size as appropriate both
 * in the inode and the mft record.
 *
 * This is modelled after fs/buffer.c::generic_commit_write(), which marks
 * buffers uptodate and dirty, sets the page uptodate if all buffers in the
 * page are uptodate, and updates i_size if the end of io is beyond i_size.  In
 * that case, it also marks the inode dirty.
 *
 * If things have gone as outlined in
 * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
 * content modifications here for non-resident attributes.  For resident
 * attributes we need to do the uptodate bringing here which we combine with
 * the copying into the mft record which means we save one atomic kmap.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_commit_pages_after_write(struct page **pages,
		const unsigned nr_pages, s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct page *page;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	char *kattr, *kaddr;
	unsigned long flags;
	u32 attr_len;
	int err;

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	page = pages[0];
	BUG_ON(!page);
	vi = page->mapping->host;
	ni = NTFS_I(vi);
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, page->index, nr_pages,
			(long long)pos, bytes);
	if (NInoNonResident(ni))
		return ntfs_commit_pages_after_non_resident_write(pages,
				nr_pages, pos, bytes);
	BUG_ON(nr_pages > 1);
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * sparse.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	BUG_ON(NInoNonResident(ni));
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	i_size = i_size_read(vi);
	BUG_ON(attr_len != i_size);
	BUG_ON(pos > attr_len);
	end = pos + bytes;
	BUG_ON(end > le32_to_cpu(a->length) -
			le16_to_cpu(a->data.resident.value_offset));
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	kaddr = kmap_atomic(page, KM_USER0);
	/* Copy the received data from the page to the mft record. */
	memcpy(kattr + pos, kaddr + pos, bytes);
	/* Update the attribute length if necessary. */
	if (end > attr_len) {
		attr_len = end;
		a->data.resident.value_length = cpu_to_le32(attr_len);
	}
	/*
	 * If the page is not uptodate, bring the out of bounds area(s)
	 * uptodate by copying data from the mft record to the page.
	 */
	if (!PageUptodate(page)) {
		if (pos > 0)
			memcpy(kaddr, kattr, pos);
		if (end < attr_len)
			memcpy(kaddr + end, kattr + end, attr_len - end);
		/* Zero the region outside the end of the attribute value. */
		memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	kunmap_atomic(kaddr, KM_USER0);
	/* Update initialized_size/i_size if necessary. */
	read_lock_irqsave(&ni->size_lock, flags);
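/*
 * A userspace sketch of the resident-attribute reconciliation above: the
 * written range is pushed from the page into the attribute value, and if
 * the page was not uptodate the untouched regions are pulled back from the
 * attribute.  commit_resident() and the sample buffers are hypothetical;
 * the real code also grows the attribute length and zeroes the page tail
 * beyond it.
 */
#include <stdio.h>
#include <string.h>

static void commit_resident(char *kattr, char *kaddr, size_t attr_len,
		size_t pos, size_t bytes, int page_uptodate)
{
	size_t end = pos + bytes;

	/* Copy the received data from the page to the attribute. */
	memcpy(kattr + pos, kaddr + pos, bytes);
	if (!page_uptodate) {
		/* Bring the out of bounds areas uptodate from the attribute. */
		if (pos > 0)
			memcpy(kaddr, kattr, pos);
		if (end < attr_len)
			memcpy(kaddr + end, kattr + end, attr_len - end);
	}
}

int main(void)
{
	char kattr[8] = "ABCDEFGH";	/* attribute value; no NUL stored */
	char kaddr[8] = "??cd????";	/* page: only [2, 4) holds new data */

	commit_resident(kattr, kaddr, sizeof(kattr), 2, 2, 0);
	printf("attr: %.8s\npage: %.8s\n", kattr, kaddr);	/* both ABcdEFGH */
	return 0;
}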
