⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 aops.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 4 页
字号:
	/*
	 * NOTE(review): this is the tail of ntfs_write_mst_block(); the
	 * function begins above the visible portion of this file.  Code is
	 * unchanged here — only comments have been added/repaired.
	 *
	 * If this is the $MFT and a synchronous write, propagate the just
	 * written records into the mft mirror ($MFTMirr) as well.
	 */
	if (is_mft && sync) {
do_mirror:
		for (i = 0; i < nr_bhs; i++) {
			unsigned long mft_no;
			unsigned int ofs;

			/*
			 * Skip buffers which are not at the beginning of
			 * records.
			 */
			if (i % bhs_per_rec)
				continue;
			tbh = bhs[i];
			/* Skip removed buffers (and hence records). */
			if (!tbh)
				continue;
			ofs = bh_offset(tbh);
			/* Get the mft record number. */
			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
					>> rec_size_bits;
			/* Only records below mftmirr_size are mirrored. */
			if (mft_no < vol->mftmirr_size)
				ntfs_sync_mft_mirror(vol, mft_no,
						(MFT_RECORD*)(kaddr + ofs),
						sync);
		}
		if (!sync)
			goto do_wait;	/* do_wait label is earlier in this
					   function (not shown here). */
	}
	/* Remove the mst protection fixups again. */
	for (i = 0; i < nr_bhs; i++) {
		if (!(i % bhs_per_rec)) {
			tbh = bhs[i];
			if (!tbh)
				continue;
			post_write_mst_fixup((NTFS_RECORD*)(kaddr +
					bh_offset(tbh)));
		}
	}
	flush_dcache_page(page);
unm_done:
	/*
	 * Unlock any locked inodes.  Each entry in locked_nis[] holds an
	 * mrec_lock and an elevated reference count taken earlier; the iput()
	 * must be on the BASE inode since that is what was pinned via VFS_I().
	 */
	while (nr_locked_nis-- > 0) {
		ntfs_inode *tni, *base_tni;

		tni = locked_nis[nr_locked_nis];
		/* Get the base inode. */
		mutex_lock(&tni->extent_lock);
		if (tni->nr_extents >= 0)
			base_tni = tni;
		else {
			/* Negative nr_extents marks an extent inode. */
			base_tni = tni->ext.base_ntfs_ino;
			BUG_ON(!base_tni);
		}
		mutex_unlock(&tni->extent_lock);
		ntfs_debug("Unlocking %s inode 0x%lx.",
				tni == base_tni ? "base" : "extent",
				tni->mft_no);
		mutex_unlock(&tni->mrec_lock);
		atomic_dec(&tni->count);
		iput(VFS_I(base_tni));
	}
	SetPageUptodate(page);
	kunmap(page);
done:
	if (unlikely(err && err != -ENOMEM)) {
		/*
		 * Set page error if there is only one ntfs record in the page.
		 * Otherwise we would lose per-record granularity.
		 */
		if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
			SetPageError(page);
		NVolSetErrors(vol);
	}
	if (page_is_dirty) {
		ntfs_debug("Page still contains one or more dirty ntfs "
				"records.  Redirtying the page starting at "
				"record 0x%lx.", page->index <<
				(PAGE_CACHE_SHIFT - rec_size_bits));
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
	} else {
		/*
		 * Keep the VM happy.  This must be done otherwise the
		 * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
		 * the page is clean.
		 */
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
	}
	if (likely(!err))
		ntfs_debug("Done.");
	return err;
}

/**
 * ntfs_writepage - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This is called from the VM when it wants to have a dirty ntfs page cache
 * page cleaned.  The VM has already locked the page and marked it clean.
 *
 * For non-resident attributes, ntfs_writepage() writes the @page by calling
 * the ntfs version of the generic block_write_full_page() function,
 * ntfs_write_block(), which in turn if necessary creates and writes the
 * buffers associated with the page asynchronously.
 *
 * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
 * the data to the mft record (which at this stage is most likely in memory).
 * The mft record is then marked dirty and written out asynchronously via the
 * vfs inode dirty code path for the inode the mft record belongs to or via the
 * vm page dirty code path for the page the mft record is in.
 *
 * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
 *
 * Return 0 on success and -errno on error.
 */
static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
{
	loff_t i_size;
	struct inode *vi = page->mapping->host;
	ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
	char *addr;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	u32 attr_len;
	int err;

retry_writepage:
	BUG_ON(!PageLocked(page));
	i_size = i_size_read(vi);
	/* Is the page fully outside i_size? (truncate in progress) */
	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
			PAGE_CACHE_SHIFT)) {
		/*
		 * The page may have dirty, unmapped buffers.  Make them
		 * freeable here, so the page does not leak.
		 */
		block_invalidatepage(page, 0);
		unlock_page(page);
		ntfs_debug("Write outside i_size - truncated?");
		return 0;
	}
	/*
	 * Only $DATA attributes can be encrypted and only unnamed $DATA
	 * attributes can be compressed.  Index root can have the flags set but
	 * this means to create compressed/encrypted files, not that the
	 * attribute is compressed/encrypted.  Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If file is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			unlock_page(page);
			BUG_ON(ni->type != AT_DATA);
			ntfs_debug("Denying write access to encrypted file.");
			return -EACCES;
		}
		/* Compressed data streams are handled in compress.c. */
		if (NInoNonResident(ni) && NInoCompressed(ni)) {
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			// TODO: Implement and replace this with
			// return ntfs_write_compressed_block(page);
			unlock_page(page);
			ntfs_error(vi->i_sb, "Writing to compressed files is "
					"not supported yet.  Sorry.");
			return -EOPNOTSUPP;
		}
		// TODO: Implement and remove this check.
		if (NInoNonResident(ni) && NInoSparse(ni)) {
			unlock_page(page);
			ntfs_error(vi->i_sb, "Writing to sparse files is not "
					"supported yet.  Sorry.");
			return -EOPNOTSUPP;
		}
	}
	/* NInoNonResident() == NInoIndexAllocPresent() */
	if (NInoNonResident(ni)) {
		/* We have to zero every time due to mmap-at-end-of-file. */
		if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
			/* The page straddles i_size. */
			unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
			zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs,
					KM_USER0);
		}
		/* Handle mst protected attributes. */
		if (NInoMstProtected(ni))
			return ntfs_write_mst_block(page, wbc);
		/* Normal, non-resident data stream. */
		return ntfs_write_block(page, wbc);
	}
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * mst protected.  This also means the attribute is smaller than an mft
	 * record and hence smaller than a page, so can simply return error on
	 * any pages with index above 0.  Note the attribute can actually be
	 * marked compressed but if it is resident the actual data is not
	 * compressed so we are ok to ignore the compressed flag here.
	 */
	BUG_ON(page_has_buffers(page));
	BUG_ON(!PageUptodate(page));
	if (unlikely(page->index > 0)) {
		ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0.  "
				"Aborting write.", page->index);
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		return -EIO;
	}
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	/*
	 * If a parallel write made the attribute non-resident, drop the mft
	 * record and retry the writepage.
	 */
	if (unlikely(NInoNonResident(ni))) {
		unmap_mft_record(base_ni);
		goto retry_writepage;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto err_out;
	/*
	 * Keep the VM happy.  This must be done otherwise the radix-tree tag
	 * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	i_size = i_size_read(vi);
	if (unlikely(attr_len > i_size)) {
		/* Race with shrinking truncate or a failed truncate. */
		attr_len = i_size;
		/*
		 * If the truncate failed, fix it up now.  If a concurrent
		 * truncate, we do its job, so it does not have to do anything.
		 */
		err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
				attr_len);
		/* Shrinking cannot fail. */
		BUG_ON(err);
	}
	addr = kmap_atomic(page, KM_USER0);
	/* Copy the data from the page to the mft record. */
	memcpy((u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset),
			addr, attr_len);
	/* Zero out of bounds area in the page cache page. */
	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
	kunmap_atomic(addr, KM_USER0);
	flush_dcache_page(page);
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	/* We are done with the page. */
	end_page_writeback(page);
	/* Finally, mark the mft record dirty, so it gets written back. */
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
				"page so we try again later.");
		/*
		 * Put the page back on mapping->dirty_pages, but leave its
		 * buffers' dirty state as-is.
		 */
		redirty_page_for_writepage(wbc, page);
		err = 0;
	} else {
		ntfs_error(vi->i_sb, "Resident attribute write failed with "
				"error %i.", err);
		SetPageError(page);
		NVolSetErrors(ni->vol);
	}
	unlock_page(page);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}

#endif	/* NTFS_RW */

/**
 * ntfs_aops - general address space operations for inodes and attributes
 */
const struct address_space_operations ntfs_aops = {
	.readpage	= ntfs_readpage,	/* Fill page with data. */
	.sync_page	= block_sync_page,	/* Currently, just unplugs the
						   disk request queue. */
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
#endif /* NTFS_RW */
	.migratepage	= buffer_migrate_page,	/* Move a page cache page from
						   one physical page to an
						   other. */
};

/**
 * ntfs_mst_aops - general address space operations for mst protected inodes
 *		   and attributes
 */
const struct address_space_operations ntfs_mst_aops = {
	.readpage	= ntfs_readpage,	/* Fill page with data. */
	.sync_page	= block_sync_page,	/* Currently, just unplugs the
						   disk request queue. */
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
	.set_page_dirty	= __set_page_dirty_nobuffers,	/* Set the page dirty
						   without touching the buffers
						   belonging to the page. */
#endif /* NTFS_RW */
	.migratepage	= buffer_migrate_page,	/* Move a page cache page from
						   one physical page to an
						   other. */
};

#ifdef NTFS_RW

/**
 * mark_ntfs_record_dirty - mark an ntfs record dirty
 * @page:	page containing the ntfs record to mark dirty
 * @ofs:	byte offset within @page at which the ntfs record begins
 *
 * Set the buffers and the page in which the ntfs record is located dirty.
 *
 * The latter also marks the vfs inode the ntfs record belongs to dirty
 * (I_DIRTY_PAGES only).
 *
 * If the page does not have buffers, we create them and set them uptodate.
 * The page may not be locked which is why we need to handle the buffers under
 * the mapping->private_lock.  Once the buffers are marked dirty we no longer
 * need the lock since try_to_free_buffers() does not free dirty buffers.
 */
void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	struct buffer_head *bh, *head, *buffers_to_free = NULL;
	unsigned int end, bh_size, bh_ofs;

	BUG_ON(!PageUptodate(page));
	end = ofs + ni->itype.index.block_size;
	bh_size = VFS_I(ni)->i_sb->s_blocksize;
	spin_lock(&mapping->private_lock);
	if (unlikely(!page_has_buffers(page))) {
		/*
		 * Allocation may sleep, so drop the lock; recheck afterwards
		 * in case someone attached buffers while we were unlocked.
		 */
		spin_unlock(&mapping->private_lock);
		bh = head = alloc_page_buffers(page, bh_size, 1);
		spin_lock(&mapping->private_lock);
		if (likely(!page_has_buffers(page))) {
			struct buffer_head *tail;

			/* Mark all uptodate and close the circular list. */
			do {
				set_buffer_uptodate(bh);
				tail = bh;
				bh = bh->b_this_page;
			} while (bh);
			tail->b_this_page = head;
			attach_page_buffers(page, head);
		} else
			/* Lost the race; free our buffers after unlocking. */
			buffers_to_free = bh;
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);
	/* Dirty only the buffers overlapping [ofs, end). */
	do {
		bh_ofs = bh_offset(bh);
		if (bh_ofs + bh_size <= ofs)
			continue;
		if (unlikely(bh_ofs >= end))
			break;
		set_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);
	spin_unlock(&mapping->private_lock);
	__set_page_dirty_nobuffers(page);
	if (unlikely(buffers_to_free)) {
		do {
			bh = buffers_to_free->b_this_page;
			free_buffer_head(buffers_to_free);
			buffers_to_free = bh;
		} while (buffers_to_free);
	}
}

#endif /* NTFS_RW */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -