
file.c
Linux kernel source code (C)
Page 1 of 5
			if (unlikely(err)) {
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			page_cache_get(*cached_page);
			if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
				__pagevec_lru_add(lru_pvec);
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	while (nr > 0) {
		unlock_page(pages[--nr]);
		page_cache_release(pages[nr]);
	}
	goto out;
}

static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(READ, bh);
}

/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_mutex held on the inode (@pages[0]->mapping->host).  There are
 * @nr_pages pages in @pages which are locked but not kmap()ped.  The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
 * the same cluster and that they are the entirety of that cluster, and that
 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
 *
 * i_size is not to be modified yet.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
		unsigned nr_pages, s64 pos, size_t bytes)
{
	VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
	LCN lcn;
	s64 bh_pos, vcn_len, end, initialized_size;
	sector_t lcn_block;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni, *base_ni = NULL;
	ntfs_volume *vol;
	runlist_element *rl, *rl2;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a = NULL;
	unsigned long flags;
	u32 attr_rec_len = 0;
	unsigned blocksize, u;
	int err, mp_size;
	bool rl_write_locked, was_hole, is_retry;
	unsigned char blocksize_bits;
	struct {
		u8 runlist_merged:1;
		u8 mft_attr_mapped:1;
		u8 mp_rebuilt:1;
		u8 attr_switched:1;
	} status = { 0, 0, 0, 0 };

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	BUG_ON(!*pages);
	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, pages[0]->index, nr_pages,
			(long long)pos, bytes);
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	u = 0;
	do {
		page = pages[u];
		BUG_ON(!page);
		/*
		 * create_empty_buffers() will create uptodate/dirty buffers if
		 * the page is uptodate/dirty.
		 */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;
		}
	} while (++u < nr_pages);
	rl_write_locked = false;
	rl = NULL;
	err = 0;
	vcn = lcn = -1;
	vcn_len = 0;
	lcn_block = -1;
	was_hole = false;
	cpos = pos >> vol->cluster_size_bits;
	end = pos + bytes;
	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
	/*
	 * Loop over each page and for each page over each buffer.  Use goto to
	 * reduce indentation.
	 */
	u = 0;
do_next_page:
	page = pages[u];
	bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
	bh = head = page_buffers(page);
	do {
		VCN cdelta;
		s64 bh_end;
		unsigned bh_cofs;

		/* Clear buffer_new on all buffers to reinitialise state. */
		if (buffer_new(bh))
			clear_buffer_new(bh);
		bh_end = bh_pos + blocksize;
		bh_cpos = bh_pos >> vol->cluster_size_bits;
		bh_cofs = bh_pos & vol->cluster_size_mask;
		if (buffer_mapped(bh)) {
			/*
			 * The buffer is already mapped.  If it is uptodate,
			 * ignore it.
			 */
			if (buffer_uptodate(bh))
				continue;
			/*
			 * The buffer is not uptodate.  If the page is uptodate
			 * set the buffer uptodate and otherwise ignore it.
			 */
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * Neither the page nor the buffer are uptodate.  If
			 * the buffer is only partially being written to, we
			 * need to read it in before the write, i.e. now.
			 */
			if ((bh_pos < pos && bh_end > pos) ||
					(bh_pos < end && bh_end > end)) {
				/*
				 * If the buffer is fully or partially within
				 * the initialized size, do an actual read.
				 * Otherwise, simply zero the buffer.
				 */
				read_lock_irqsave(&ni->size_lock, flags);
				initialized_size = ni->initialized_size;
				read_unlock_irqrestore(&ni->size_lock, flags);
				if (bh_pos < initialized_size) {
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
			}
			continue;
		}
		/* Unmapped buffer.  Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;
		/*
		 * If the current buffer is in the same clusters as the map
		 * cache, there is no need to check the runlist again.  The
		 * map cache is made up of @vcn, which is the first cached file
		 * cluster, @vcn_len which is the number of cached file
		 * clusters, @lcn is the device cluster corresponding to @vcn,
		 * and @lcn_block is the block number corresponding to @lcn.
		 */
		cdelta = bh_cpos - vcn;
		if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
			BUG_ON(lcn < 0);
			bh->b_blocknr = lcn_block +
					(cdelta << (vol->cluster_size_bits -
					blocksize_bits)) +
					(bh_cofs >> blocksize_bits);
			set_buffer_mapped(bh);
			/*
			 * If the page is uptodate so is the buffer.  If the
			 * buffer is fully outside the write, we ignore it if
			 * it was already allocated and we mark it dirty so it
			 * gets written out if we allocated it.  On the other
			 * hand, if we allocated the buffer but we are not
			 * marking it dirty we set buffer_new so we can do
			 * error recovery.
			 */
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
				if (unlikely(was_hole)) {
					/* We allocated the buffer. */
					unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
					if (bh_end <= pos || bh_pos >= end)
						mark_buffer_dirty(bh);
					else
						set_buffer_new(bh);
				}
				continue;
			}
			/* Page is _not_ uptodate. */
			if (likely(!was_hole)) {
				/*
				 * Buffer was already allocated.  If it is not
				 * uptodate and is only partially being written
				 * to, we need to read it in before the write,
				 * i.e. now.
				 */
				if (!buffer_uptodate(bh) && bh_pos < end &&
						bh_end > pos &&
						(bh_pos < pos ||
						bh_end > end)) {
					/*
					 * If the buffer is fully or partially
					 * within the initialized size, do an
					 * actual read.  Otherwise, simply zero
					 * the buffer.
					 */
					read_lock_irqsave(&ni->size_lock,
							flags);
					initialized_size = ni->initialized_size;
					read_unlock_irqrestore(&ni->size_lock,
							flags);
					if (bh_pos < initialized_size) {
						ntfs_submit_bh_for_read(bh);
						*wait_bh++ = bh;
					} else {
						zero_user_page(page,
							bh_offset(bh),
							blocksize, KM_USER0);
						set_buffer_uptodate(bh);
					}
				}
				continue;
			}
			/* We allocated the buffer. */
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			/*
			 * If the buffer is fully outside the write, zero it,
			 * set it uptodate, and mark it dirty so it gets
			 * written out.  If it is partially being written to,
			 * zero region surrounding the write but leave it to
			 * commit write to do anything else.  Finally, if the
			 * buffer is fully being overwritten, do nothing.
			 */
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh)) {
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
				mark_buffer_dirty(bh);
				continue;
			}
			set_buffer_new(bh);
			if (!buffer_uptodate(bh) &&
					(bh_pos < pos || bh_end > end)) {
				u8 *kaddr;
				unsigned pofs;

				kaddr = kmap_atomic(page, KM_USER0);
				if (bh_pos < pos) {
					pofs = bh_pos & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, pos - bh_pos);
				}
				if (bh_end > end) {
					pofs = end & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, bh_end - end);
				}
				kunmap_atomic(kaddr, KM_USER0);
				flush_dcache_page(page);
			}
			continue;
		}
		/*
		 * Slow path: this is the first buffer in the cluster.  If it
		 * is outside allocated size and is not uptodate, zero it and
		 * set it uptodate.
		 */
		read_lock_irqsave(&ni->size_lock, flags);
		initialized_size = ni->allocated_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (bh_pos > initialized_size) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			} else if (!buffer_uptodate(bh)) {
				zero_user_page(page, bh_offset(bh), blocksize,
						KM_USER0);
				set_buffer_uptodate(bh);
			}
			continue;
		}
		is_retry = false;
		if (!rl) {
			down_read(&ni->runlist.lock);
retry_remap:
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target cluster. */
			while (rl->length && rl[1].vcn <= bh_cpos)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
			if (likely(lcn >= 0)) {
				/*
				 * Successful remap, setup the map cache and
				 * use that to deal with the buffer.
				 */
				was_hole = false;
				vcn = bh_cpos;
				vcn_len = rl[1].vcn - vcn;
				lcn_block = lcn << (vol->cluster_size_bits -
						blocksize_bits);
				cdelta = 0;
				/*
				 * If the number of remaining clusters touched
				 * by the write is smaller or equal to the
				 * number of cached clusters, unlock the
				 * runlist as the map cache will be used from
				 * now on.
				 */
				if (likely(vcn + vcn_len >= cend)) {
					if (rl_write_locked) {
						up_write(&ni->runlist.lock);
						rl_write_locked = false;
					} else
						up_read(&ni->runlist.lock);
					rl = NULL;
				}
				goto map_buffer_cached;
			}
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/*
		 * If it is not a hole and not out of bounds, the runlist is
		 * probably unmapped so try to map it now.
		 */
		if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
			if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
				/* Attempt to map runlist. */
				if (!rl_write_locked) {
					/*
					 * We need the runlist locked for
					 * writing, so if it is locked for
					 * reading relock it now and retry in
					 * case it changed whilst we dropped
					 * the lock.
					 */
					up_read(&ni->runlist.lock);
					down_write(&ni->runlist.lock);
					rl_write_locked = true;
					goto retry_remap;
				}
				err = ntfs_map_runlist_nolock(ni, bh_cpos,
						NULL);
				if (likely(!err)) {
					is_retry = true;
					goto retry_remap;
				}
				/*
				 * If @vcn is out of bounds, pretend @lcn is
				 * LCN_ENOENT.  As long as the buffer is out
				 * of bounds this will work fine.
				 */
				if (err == -ENOENT) {
					lcn = LCN_ENOENT;
					err = 0;
					goto rl_not_mapped_enoent;
				}
			} else
				err = -EIO;
			/* Failed to map the buffer, even after retrying. */
			bh->b_blocknr = -1;
			ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"vcn offset 0x%x, because its "
					"location on disk could not be "
					"determined%s (error code %i).",
					ni->mft_no, ni->type,
					(unsigned long long)bh_cpos,
					(unsigned)bh_pos &
					vol->cluster_size_mask,
					is_retry ? " even after retrying" : "",
					err);
			break;
		}
rl_not_mapped_enoent:
		/*
		 * The buffer is in a hole or out of bounds.  We need to fill
		 * the hole, unless the buffer is in a cluster which is not
		 * touched by the write, in which case we just leave the buffer
		 * unmapped.  This can only happen when the cluster size is
		 * less than the page cache size.
		 */
		if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
			bh_cend = (bh_end + vol->cluster_size - 1) >>
					vol->cluster_size_bits;
			if ((bh_cend <= cpos || bh_cpos >= cend)) {
				bh->b_blocknr = -1;
				/*
				 * If the buffer is uptodate we skip it.  If it
				 * is not but the page is uptodate, we can set
				 * the buffer uptodate.  If the page is not
				 * uptodate, we can clear the buffer and set it
				 * uptodate.  Whether this is worthwhile is
				 * debatable and this could be removed.
				 */
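The listing stops here (page 1 of 5), part-way through the hole-handling path of ntfs_prepare_pages_for_non_resident_write(). The map-cache comment above explains how, once a runlist entry is known, a buffer's byte position is turned into a device block number at the map_buffer_cached label. The stand-alone sketch below reproduces that shift/mask arithmetic in user space; the cluster size, block size, and runlist entry are made-up example values, and the program uses none of the kernel helpers from the listing.

/*
 * Illustrative user-space sketch (not part of file.c): the VCN/LCN map-cache
 * arithmetic used above, with invented sizes and an invented runlist entry.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned cluster_size_bits = 12;	/* 4096-byte clusters */
	const unsigned blocksize_bits = 9;	/* 512-byte device blocks */
	const int64_t cluster_size_mask = (1 << cluster_size_bits) - 1;

	/* Example runlist entry: file clusters [vcn, vcn + vcn_len) map to
	 * device clusters starting at lcn. */
	int64_t vcn = 16, vcn_len = 8, lcn = 1000;
	int64_t lcn_block = lcn << (cluster_size_bits - blocksize_bits);

	int64_t bh_pos = 70000;				/* byte position of the buffer */
	int64_t bh_cpos = bh_pos >> cluster_size_bits;	/* file (virtual) cluster number */
	int64_t bh_cofs = bh_pos & cluster_size_mask;	/* byte offset inside that cluster */
	int64_t cdelta = bh_cpos - vcn;			/* distance from the cached run start */

	if (cdelta >= 0 && cdelta < vcn_len) {
		/* Hit in the cached run: same formula as at map_buffer_cached. */
		int64_t b_blocknr = lcn_block +
				(cdelta << (cluster_size_bits - blocksize_bits)) +
				(bh_cofs >> blocksize_bits);
		printf("byte %lld -> vcn %lld, lcn %lld, device block %lld\n",
				(long long)bh_pos, (long long)bh_cpos,
				(long long)(lcn + cdelta), (long long)b_blocknr);
	} else {
		/* Miss: the driver would consult (and possibly map) the runlist. */
		printf("byte %lld is outside the cached run\n",
				(long long)bh_pos);
	}
	return 0;
}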
