
file.c

Linux kernel source code
C
Page 1 of 5
				if (PageUptodate(page)) {
					if (!buffer_uptodate(bh))
						set_buffer_uptodate(bh);
				} else if (!buffer_uptodate(bh)) {
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
				continue;
			}
		}
		/*
		 * Out of bounds buffer is invalid if it was not really out of
		 * bounds.
		 */
		BUG_ON(lcn != LCN_HOLE);
		/*
		 * We need the runlist locked for writing, so if it is locked
		 * for reading relock it now and retry in case it changed
		 * whilst we dropped the lock.
		 */
		BUG_ON(!rl);
		if (!rl_write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			rl_write_locked = true;
			goto retry_remap;
		}
		/* Find the previous last allocated cluster. */
		BUG_ON(rl->lcn != LCN_HOLE);
		lcn = -1;
		rl2 = rl;
		while (--rl2 >= ni->runlist.rl) {
			if (rl2->lcn >= 0) {
				lcn = rl2->lcn + rl2->length;
				break;
			}
		}
		rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
				false);
		if (IS_ERR(rl2)) {
			err = PTR_ERR(rl2);
			ntfs_debug("Failed to allocate cluster, error code %i.",
					err);
			break;
		}
		lcn = rl2->lcn;
		rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (err != -ENOMEM)
				err = -EIO;
			if (ntfs_cluster_free_from_rl(vol, rl2)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path.  Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			ntfs_free(rl2);
			break;
		}
		ni->runlist.rl = rl;
		status.runlist_merged = 1;
		ntfs_debug("Allocated cluster, lcn 0x%llx.",
				(unsigned long long)lcn);
		/* Map and lock the mft record and get the attribute record. */
		if (!NInoAttr(ni))
			base_ni = ni;
		else
			base_ni = ni->ext.base_ntfs_ino;
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			break;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			unmap_mft_record(base_ni);
			break;
		}
		status.mft_attr_mapped = 1;
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			break;
		}
		m = ctx->mrec;
		a = ctx->attr;
		/*
		 * Find the runlist element with which the attribute extent
		 * starts.  Note, we cannot use the _attr_ version because we
		 * have mapped the mft record.  That is ok because we know the
		 * runlist fragment must be mapped already to have ever gotten
		 * here, so we can just use the _rl_ version.
		 */
		vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
		rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
		BUG_ON(!rl2);
		BUG_ON(!rl2->length);
		BUG_ON(rl2->lcn < LCN_HOLE);
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		/*
		 * If @highest_vcn is zero, calculate the real highest_vcn
		 * (which can really be zero).
		 */
		if (!highest_vcn)
			highest_vcn = (sle64_to_cpu(
					a->data.non_resident.allocated_size) >>
					vol->cluster_size_bits) - 1;
		/*
		 * Determine the size of the mapping pairs array for the new
		 * extent, i.e. the old extent with the hole filled.
		 */
		mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
				highest_vcn);
		if (unlikely(mp_size <= 0)) {
			if (!(err = mp_size))
				err = -EIO;
			ntfs_debug("Failed to get size for mapping pairs "
					"array, error code %i.", err);
			break;
		}
		/*
		 * Resize the attribute record to fit the new mapping pairs
		 * array.
		 */
		attr_rec_len = le32_to_cpu(a->length);
		err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset));
		if (unlikely(err)) {
			BUG_ON(err != -ENOSPC);
			// TODO: Deal with this by using the current attribute
			// and fill it with as much of the mapping pairs
			// array as possible.  Then loop over each attribute
			// extent rewriting the mapping pairs arrays as we go
			// along and if when we reach the end we have not
			// enough space, try to resize the last attribute
			// extent and if even that fails, add a new attribute
			// extent.
			// We could also try to resize at each step in the hope
			// that we will not need to rewrite every single extent.
			// Note, we may need to decompress some extents to fill
			// the runlist as we are walking the extents...
			ntfs_error(vol->sb, "Not enough space in the mft "
					"record for the extended attribute "
					"record.  This case is not "
					"implemented yet.");
			err = -EOPNOTSUPP;
			break;
		}
		status.mp_rebuilt = 1;
		/*
		 * Generate the mapping pairs array directly into the attribute
		 * record.
		 */
		err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset),
				mp_size, rl2, vcn, highest_vcn, NULL);
		if (unlikely(err)) {
			ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
					"attribute type 0x%x, because building "
					"the mapping pairs failed with error "
					"code %i.", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			err = -EIO;
			break;
		}
		/* Update the highest_vcn but only if it was not set. */
		if (unlikely(!a->data.non_resident.highest_vcn))
			a->data.non_resident.highest_vcn =
					cpu_to_sle64(highest_vcn);
		/*
		 * If the attribute is sparse/compressed, update the compressed
		 * size in the ntfs_inode structure and the attribute record.
		 */
		if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
			/*
			 * If we are not in the first attribute extent, switch
			 * to it, but first ensure the changes will make it to
			 * disk later.
			 */
			if (a->data.non_resident.lowest_vcn) {
				flush_dcache_mft_record_page(ctx->ntfs_ino);
				mark_mft_record_dirty(ctx->ntfs_ino);
				ntfs_attr_reinit_search_ctx(ctx);
				err = ntfs_attr_lookup(ni->type, ni->name,
						ni->name_len, CASE_SENSITIVE,
						0, NULL, 0, ctx);
				if (unlikely(err)) {
					status.attr_switched = 1;
					break;
				}
				/* @m is not used any more so do not set it. */
				a = ctx->attr;
			}
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			a->data.non_resident.compressed_size =
					cpu_to_sle64(ni->itype.compressed.size);
			write_unlock_irqrestore(&ni->size_lock, flags);
		}
		/* Ensure the changes make it to disk. */
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
		/* Successfully filled the hole. */
		status.runlist_merged = 0;
		status.mft_attr_mapped = 0;
		status.mp_rebuilt = 0;
		/* Setup the map cache and use that to deal with the buffer. */
		was_hole = true;
		vcn = bh_cpos;
		vcn_len = 1;
		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
		cdelta = 0;
		/*
		 * If the number of remaining clusters in the @pages is smaller
		 * or equal to the number of cached clusters, unlock the
		 * runlist as the map cache will be used from now on.
		 */
		if (likely(vcn + vcn_len >= cend)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
			rl = NULL;
		}
		goto map_buffer_cached;
	} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
	/* If there are no errors, do the next page. */
	if (likely(!err && ++u < nr_pages))
		goto do_next_page;
	/* If there are no errors, release the runlist lock if we took it. */
	if (likely(!err)) {
		if (unlikely(rl_write_locked)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
		} else if (unlikely(rl))
			up_read(&ni->runlist.lock);
		rl = NULL;
	}
	/* If we issued read requests, let them complete. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	while (wait_bh > wait) {
		bh = *--wait_bh;
		wait_on_buffer(bh);
		if (likely(buffer_uptodate(bh))) {
			page = bh->b_page;
			bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh);
			/*
			 * If the buffer overflows the initialized size, need
			 * to zero the overflowing region.
			 */
			if (unlikely(bh_pos + blocksize > initialized_size)) {
				int ofs = 0;

				if (likely(bh_pos < initialized_size))
					ofs = initialized_size - bh_pos;
				zero_user_page(page, bh_offset(bh) + ofs,
						blocksize - ofs, KM_USER0);
			}
		} else /* if (unlikely(!buffer_uptodate(bh))) */
			err = -EIO;
	}
	if (likely(!err)) {
		/* Clear buffer_new on all buffers. */
		u = 0;
		do {
			bh = head = page_buffers(pages[u]);
			do {
				if (buffer_new(bh))
					clear_buffer_new(bh);
			} while ((bh = bh->b_this_page) != head);
		} while (++u < nr_pages);
		ntfs_debug("Done.");
		return err;
	}
	if (status.attr_switched) {
		/* Get back to the attribute extent we modified. */
		ntfs_attr_reinit_search_ctx(ctx);
		if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
			ntfs_error(vol->sb, "Failed to find required "
					"attribute extent of attribute in "
					"error code path.  Run chkdsk to "
					"recover.");
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			write_unlock_irqrestore(&ni->size_lock, flags);
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
			/*
			 * The only thing that is now wrong is the compressed
			 * size of the base attribute extent which chkdsk
			 * should be able to fix.
			 */
			NVolSetErrors(vol);
		} else {
			m = ctx->mrec;
			a = ctx->attr;
			status.attr_switched = 0;
		}
	}
	/*
	 * If the runlist has been modified, need to restore it by punching a
	 * hole into it and we then need to deallocate the on-disk cluster as
	 * well.  Note, we only modify the runlist if we are able to generate a
	 * new mapping pairs array, i.e. only when the mapped attribute extent
	 * is not switched.
	 */
	if (status.runlist_merged && !status.attr_switched) {
		BUG_ON(!rl_write_locked);
		/* Make the file cluster we allocated sparse in the runlist. */
		if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
			ntfs_error(vol->sb, "Failed to punch hole into "
					"attribute runlist in error code "
					"path.  Run chkdsk to recover the "
					"lost cluster.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			status.runlist_merged = 0;
			/*
			 * Deallocate the on-disk cluster we allocated but only
			 * if we succeeded in punching its vcn out of the
			 * runlist.
			 */
			down_write(&vol->lcnbmp_lock);
			if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path.  Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			up_write(&vol->lcnbmp_lock);
		}
	}
	/*
	 * Resize the attribute record to its old size and rebuild the mapping
	 * pairs array.  Note, we only can do this if the runlist has been
	 * restored to its old state which also implies that the mapped
	 * attribute extent is not switched.
	 */
	if (status.mp_rebuilt && !status.runlist_merged) {
		if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
			ntfs_error(vol->sb, "Failed to restore attribute "
					"record in error code path.  Run "
					"chkdsk to recover.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			if (ntfs_mapping_pairs_build(vol, (u8*)a +
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), attr_rec_len -
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), ni->runlist.rl,
					vcn, highest_vcn, NULL)) {
				ntfs_error(vol->sb, "Failed to restore "
						"mapping pairs array in error "
						"code path.  Run chkdsk to "
						"recover.");
				NVolSetErrors(vol);
			}
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
		}
	}
	/* Release the mft record and the attribute. */
	if (status.mft_attr_mapped) {
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	}
	/* Release the runlist lock. */
	if (rl_write_locked)
		up_write(&ni->runlist.lock);
	else if (rl)
		up_read(&ni->runlist.lock);
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale data.
	 * If BH_New is set, we know that the block was newly allocated above
	 * and that it has not been fully zeroed and marked dirty yet.
	 */
	nr_pages = u;
	u = 0;
	end = bh_cpos << vol->cluster_size_bits;
	do {
		page = pages[u];
		bh = head = page_buffers(page);
		do {
			if (u == nr_pages &&
					((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh) >= end)
				break;
			if (!buffer_new(bh))
				continue;
			clear_buffer_new(bh);
			if (!buffer_uptodate(bh)) {
				if (PageUptodate(page))
					set_buffer_uptodate(bh);
				else {
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
			}
			mark_buffer_dirty(bh);
		} while ((bh = bh->b_this_page) != head);
	} while (++u <= nr_pages);
	ntfs_error(vol->sb, "Failed.  Returning error code %i.", err);
	return err;
}

/*
 * Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied.  If a fault is encountered then clear the pages
 * out to (ofs + bytes) and return the number of bytes which were copied.
 */
static inline size_t ntfs_copy_from_user(struct page **pages,
		unsigned nr_pages, unsigned ofs, const char __user *buf,
		size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *addr;
	size_t total = 0;
	unsigned len;
	int left;

	do {
