
📄 attrib.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
	return 0;
}

/**
 * ntfs_attr_make_non_resident - convert a resident to a non-resident attribute
 * @ni:		ntfs inode describing the attribute to convert
 * @data_size:	size of the resident data to copy to the non-resident attribute
 *
 * Convert the resident ntfs attribute described by the ntfs inode @ni to a
 * non-resident one.
 *
 * @data_size must be equal to the attribute value size.  This is needed since
 * we need to know the size before we can map the mft record and our callers
 * always know it.  The reason we cannot simply read the size from the vfs
 * inode i_size is that this is not necessarily uptodate.  This happens when
 * ntfs_attr_make_non_resident() is called in the ->truncate call path(s).
 *
 * Return 0 on success and -errno on error.  The following error return codes
 * are defined:
 *	-EPERM	- The attribute is not allowed to be non-resident.
 *	-ENOMEM	- Not enough memory.
 *	-ENOSPC	- Not enough disk space.
 *	-EINVAL	- Attribute not defined on the volume.
 *	-EIO	- I/o error or other error.
 * Note that -ENOSPC is also returned in the case that there is not enough
 * space in the mft record to do the conversion.  This can happen when the mft
 * record is already very full.  The caller is responsible for trying to make
 * space in the mft record and trying again.  FIXME: Do we need a separate
 * error return code for this kind of -ENOSPC or is it always worth trying
 * again in case the attribute may then fit in a resident state so no need to
 * make it non-resident at all?  Ho-hum...  (AIA)
 *
 * NOTE to self: No changes in the attribute list are required to move from
 *		 a resident to a non-resident attribute.
 *
 * Locking: - The caller must hold i_mutex on the inode.
 */
int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
{
	s64 new_size;
	struct inode *vi = VFS_I(ni);
	ntfs_volume *vol = ni->vol;
	ntfs_inode *base_ni;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx;
	struct page *page;
	runlist_element *rl;
	u8 *kaddr;
	unsigned long flags;
	int mp_size, mp_ofs, name_ofs, arec_size, err, err2;
	u32 attr_size;
	u8 old_res_attr_flags;

	/* Check that the attribute is allowed to be non-resident. */
	err = ntfs_attr_can_be_non_resident(vol, ni->type);
	if (unlikely(err)) {
		if (err == -EPERM)
			ntfs_debug("Attribute is not allowed to be "
					"non-resident.");
		else
			ntfs_debug("Attribute not defined on the NTFS "
					"volume!");
		return err;
	}
	/*
	 * FIXME: Compressed and encrypted attributes are not supported when
	 * writing and we should never have gotten here for them.
	 */
	BUG_ON(NInoCompressed(ni));
	BUG_ON(NInoEncrypted(ni));
	/*
	 * The size needs to be aligned to a cluster boundary for allocation
	 * purposes.
	 */
	new_size = (data_size + vol->cluster_size - 1) &
			~(vol->cluster_size - 1);
	if (new_size > 0) {
		/*
		 * Will need the page later and since the page lock nests
		 * outside all ntfs locks, we need to get the page now.
		 */
		page = find_or_create_page(vi->i_mapping, 0,
				mapping_gfp_mask(vi->i_mapping));
		if (unlikely(!page))
			return -ENOMEM;
		/* Start by allocating clusters to hold the attribute value. */
		rl = ntfs_cluster_alloc(vol, 0, new_size >>
				vol->cluster_size_bits, -1, DATA_ZONE, true);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			ntfs_debug("Failed to allocate cluster%s, error code "
					"%i.", (new_size >>
					vol->cluster_size_bits) > 1 ? "s" : "",
					err);
			goto page_err_out;
		}
	} else {
		rl = NULL;
		page = NULL;
	}
	/* Determine the size of the mapping pairs array. */
	mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
	if (unlikely(mp_size < 0)) {
		err = mp_size;
		ntfs_debug("Failed to get size for mapping pairs array, error "
				"code %i.", err);
		goto rl_err_out;
	}
	down_write(&ni->runlist.lock);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(NInoNonResident(ni));
	BUG_ON(a->non_resident);
	/*
	 * Calculate new offsets for the name and the mapping pairs array.
	 */
	if (NInoSparse(ni) || NInoCompressed(ni))
		name_ofs = (offsetof(ATTR_REC,
				data.non_resident.compressed_size) +
				sizeof(a->data.non_resident.compressed_size) +
				7) & ~7;
	else
		name_ofs = (offsetof(ATTR_REC,
				data.non_resident.compressed_size) + 7) & ~7;
	mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
	/*
	 * Determine the size of the resident part of the now non-resident
	 * attribute record.
	 */
	arec_size = (mp_ofs + mp_size + 7) & ~7;
	/*
	 * If the page is not uptodate bring it uptodate by copying from the
	 * attribute value.
	 */
	attr_size = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(attr_size != data_size);
	if (page && !PageUptodate(page)) {
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, (u8*)a +
				le16_to_cpu(a->data.resident.value_offset),
				attr_size);
		memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	/* Backup the attribute flag. */
	old_res_attr_flags = a->data.resident.flags;
	/* Resize the resident part of the attribute record. */
	err = ntfs_attr_record_resize(m, a, arec_size);
	if (unlikely(err))
		goto err_out;
	/*
	 * Convert the resident part of the attribute record to describe a
	 * non-resident attribute.
	 */
	a->non_resident = 1;
	/* Move the attribute name if it exists and update the offset. */
	if (a->name_length)
		memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
				a->name_length * sizeof(ntfschar));
	a->name_offset = cpu_to_le16(name_ofs);
	/* Setup the fields specific to non-resident attributes. */
	a->data.non_resident.lowest_vcn = 0;
	a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >>
			vol->cluster_size_bits);
	a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs);
	memset(&a->data.non_resident.reserved, 0,
			sizeof(a->data.non_resident.reserved));
	a->data.non_resident.allocated_size = cpu_to_sle64(new_size);
	a->data.non_resident.data_size =
			a->data.non_resident.initialized_size =
			cpu_to_sle64(attr_size);
	if (NInoSparse(ni) || NInoCompressed(ni)) {
		a->data.non_resident.compression_unit = 0;
		if (NInoCompressed(ni) || vol->major_ver < 3)
			a->data.non_resident.compression_unit = 4;
		a->data.non_resident.compressed_size =
				a->data.non_resident.allocated_size;
	} else
		a->data.non_resident.compression_unit = 0;
	/* Generate the mapping pairs array into the attribute record. */
	err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs,
			arec_size - mp_ofs, rl, 0, -1, NULL);
	if (unlikely(err)) {
		ntfs_debug("Failed to build mapping pairs, error code %i.",
				err);
		goto undo_err_out;
	}
	/* Setup the in-memory attribute structure to be non-resident. */
	ni->runlist.rl = rl;
	write_lock_irqsave(&ni->size_lock, flags);
	ni->allocated_size = new_size;
	if (NInoSparse(ni) || NInoCompressed(ni)) {
		ni->itype.compressed.size = ni->allocated_size;
		if (a->data.non_resident.compression_unit) {
			ni->itype.compressed.block_size = 1U << (a->data.
					non_resident.compression_unit +
					vol->cluster_size_bits);
			ni->itype.compressed.block_size_bits =
					ffs(ni->itype.compressed.block_size) -
					1;
			ni->itype.compressed.block_clusters = 1U <<
					a->data.non_resident.compression_unit;
		} else {
			ni->itype.compressed.block_size = 0;
			ni->itype.compressed.block_size_bits = 0;
			ni->itype.compressed.block_clusters = 0;
		}
		vi->i_blocks = ni->itype.compressed.size >> 9;
	} else
		vi->i_blocks = ni->allocated_size >> 9;
	write_unlock_irqrestore(&ni->size_lock, flags);
	/*
	 * This needs to be last since the address space operations ->readpage
	 * and ->writepage can run concurrently with us as they are not
	 * serialized on i_mutex.  Note, we are not allowed to fail once we flip
	 * this switch, which is another reason to do this last.
	 */
	NInoSetNonResident(ni);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	up_write(&ni->runlist.lock);
	if (page) {
		set_page_dirty(page);
		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
	ntfs_debug("Done.");
	return 0;
undo_err_out:
	/* Convert the attribute back into a resident attribute. */
	a->non_resident = 0;
	/* Move the attribute name if it exists and update the offset. */
	name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) +
			sizeof(a->data.resident.reserved) + 7) & ~7;
	if (a->name_length)
		memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
				a->name_length * sizeof(ntfschar));
	mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
	a->name_offset = cpu_to_le16(name_ofs);
	arec_size = (mp_ofs + attr_size + 7) & ~7;
	/* Resize the resident part of the attribute record. */
	err2 = ntfs_attr_record_resize(m, a, arec_size);
	if (unlikely(err2)) {
		/*
		 * This cannot happen (well if memory corruption is at work it
		 * could happen in theory), but deal with it as well as we can.
		 * If the old size is too small, truncate the attribute,
		 * otherwise simply give it a larger allocated size.
		 * FIXME: Should check whether chkdsk complains when the
		 * allocated size is much bigger than the resident value size.
		 */
		arec_size = le32_to_cpu(a->length);
		if ((mp_ofs + attr_size) > arec_size) {
			err2 = attr_size;
			attr_size = arec_size - mp_ofs;
			ntfs_error(vol->sb, "Failed to undo partial resident "
					"to non-resident attribute "
					"conversion.  Truncating inode 0x%lx, "
					"attribute type 0x%x from %i bytes to "
					"%i bytes to maintain metadata "
					"consistency.  THIS MEANS YOU ARE "
					"LOSING %i BYTES DATA FROM THIS %s.",
					vi->i_ino,
					(unsigned)le32_to_cpu(ni->type),
					err2, attr_size, err2 - attr_size,
					((ni->type == AT_DATA) &&
					!ni->name_len) ? "FILE": "ATTRIBUTE");
			write_lock_irqsave(&ni->size_lock, flags);
			ni->initialized_size = attr_size;
			i_size_write(vi, attr_size);
			write_unlock_irqrestore(&ni->size_lock, flags);
		}
	}
	/* Setup the fields specific to resident attributes. */
	a->data.resident.value_length = cpu_to_le32(attr_size);
	a->data.resident.value_offset = cpu_to_le16(mp_ofs);
	a->data.resident.flags = old_res_attr_flags;
	memset(&a->data.resident.reserved, 0,
			sizeof(a->data.resident.reserved));
	/* Copy the data from the page back to the attribute value. */
	if (page) {
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy((u8*)a + mp_ofs, kaddr, attr_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	/* Setup the allocated size in the ntfs inode in case it changed. */
	write_lock_irqsave(&ni->size_lock, flags);
	ni->allocated_size = arec_size - mp_ofs;
	write_unlock_irqrestore(&ni->size_lock, flags);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ni->runlist.rl = NULL;
	up_write(&ni->runlist.lock);
rl_err_out:
	if (rl) {
		if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
			ntfs_error(vol->sb, "Failed to release allocated "
					"cluster(s) in error code path.  Run "
					"chkdsk to recover the lost "
					"cluster(s).");
			NVolSetErrors(vol);
		}
		ntfs_free(rl);
page_err_out:
		unlock_page(page);
		page_cache_release(page);
	}
	if (err == -EINVAL)
		err = -EIO;
	return err;
}

/**
 * ntfs_attr_extend_allocation - extend the allocated space of an attribute
 * @ni:			ntfs inode of the attribute whose allocation to extend
 * @new_alloc_size:	new size in bytes to which to extend the allocation to
 * @new_data_size:	new size in bytes to which to extend the data to
 * @data_start:		beginning of region which is required to be non-sparse
 *
 * Extend the allocated space of an attribute described by the ntfs inode @ni
 * to @new_alloc_size bytes.  If @data_start is -1, the whole extension may be
 * implemented as a hole in the file (as long as both the volume and the ntfs
 * inode @ni have sparse support enabled).  If @data_start is >= 0, then the
 * region between the old allocated size and @data_start - 1 may be made sparse
 * but the regions between @data_start and @new_alloc_size must be backed by
 * actual clusters.
 *
 * If @new_data_size is -1, it is ignored.  If it is >= 0, then the data size
 * of the attribute is extended to @new_data_size.  Note that the i_size of the
 * vfs inode is not updated.  Only the data size in the base attribute record
 * is updated.  The caller has to update i_size separately if this is required.
 * WARNING: It is a BUG() for @new_data_size to be smaller than the old data
 * size as well as for @new_data_size to be greater than @new_alloc_size.
 *
 * For resident attributes this involves resizing the attribute record
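
The conversion above repeatedly rounds values up to a power-of-two boundary: the attribute value size is rounded up to a whole cluster before cluster allocation, and the name and mapping-pairs offsets inside the attribute record are rounded up to 8 bytes with the (x + 7) & ~7 pattern. The following standalone sketch is not kernel code; the 4096-byte cluster and 700-byte value are example values chosen only to show the arithmetic.

/*
 * Standalone sketch (not kernel code) of the power-of-two rounding used in
 * ntfs_attr_make_non_resident(): sizes are rounded up to a whole cluster for
 * allocation, and record offsets are rounded up to 8 bytes.
 */
#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of align; align must be a power of two. */
static uint64_t round_up_pow2(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	uint64_t cluster_size = 4096;	/* example value, like vol->cluster_size */
	uint64_t data_size = 700;	/* example resident value size */

	/* Mirrors: new_size = (data_size + vol->cluster_size - 1) & ~(...). */
	printf("new_size     = %llu\n",
			(unsigned long long)round_up_pow2(data_size, cluster_size));
	/* Mirrors the "(... + 7) & ~7" 8-byte alignment of name_ofs/mp_ofs. */
	printf("aligned to 8 = %llu\n",
			(unsigned long long)round_up_pow2(70, 8));
	return 0;	/* prints 4096 and 72 */
}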
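For sparse or compressed attributes, the in-memory bookkeeping is derived from the compression unit written into the record: block_size = 1 << (compression_unit + cluster_size_bits), block_size_bits = ffs(block_size) - 1, and block_clusters = 1 << compression_unit. The standalone sketch below is not kernel code; compression unit 4 and 4096-byte clusters (cluster_size_bits = 12) are example values used to show the resulting numbers.

/*
 * Standalone sketch (not kernel code) of the compressed-block bookkeeping
 * computed in ntfs_attr_make_non_resident() for sparse/compressed attributes.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned compression_unit = 4;		/* example: default unit written to the record */
	unsigned cluster_size_bits = 12;	/* example: log2 of a 4096-byte cluster */

	/* Mirrors ni->itype.compressed.block_size = 1U << (cu + csb). */
	unsigned block_size = 1U << (compression_unit + cluster_size_bits);
	/* Mirrors block_size_bits = ffs(block_size) - 1. */
	int block_size_bits = ffs((int)block_size) - 1;
	/* Mirrors block_clusters = 1U << compression_unit. */
	unsigned block_clusters = 1U << compression_unit;

	printf("block_size=%u block_size_bits=%d block_clusters=%u\n",
			block_size, block_size_bits, block_clusters);
	return 0;	/* prints: block_size=65536 block_size_bits=16 block_clusters=16 */
}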
