⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 aops.c

📁 ocfs1.4.1 oracle分布式文件系统
💻 C
📖 第 1 页 / 共 4 页
字号:
			/*
			 * NOTE(review): this is the tail of
			 * ocfs2_populate_write_desc() — the function head is
			 * outside this chunk. It appears to walk the write
			 * range cluster by cluster, counting how many new
			 * clusters must be allocated and how many unwritten
			 * extents must be split.
			 */
			if (ext_flags & OCFS2_EXT_UNWRITTEN)
				*extents_to_split = *extents_to_split + 2;
		} else if (phys) {
			/*
			 * Only increment phys if it doesn't describe
			 * a hole.
			 */
			phys++;
		}

		desc->c_phys = phys;
		/* phys == 0 means a hole: this cluster needs allocation. */
		if (phys == 0) {
			desc->c_new = 1;
			*clusters_to_alloc = *clusters_to_alloc + 1;
		}
		if (ext_flags & OCFS2_EXT_UNWRITTEN)
			desc->c_unwritten = 1;

		num_clusters--;
	}

	ret = 0;
out:
	return ret;
}

/*
 * Prepare an inline-data write: pin page 0 of the mapping, start a
 * transaction, get journal write access on the inode block, and (if the
 * inode is not already inline) flip it to inline-data mode. On success
 * the open transaction is stored in wc->w_handle for ocfs2_write_end();
 * on failure the transaction is committed before returning.
 */
static int ocfs2_write_begin_inline(struct address_space *mapping,
				    struct inode *inode,
				    struct ocfs2_write_ctxt *wc)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct page *page;
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	/* Inline data always lives entirely within page index 0. */
	page = find_or_create_page(mapping, 0, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	/*
	 * If we don't set w_num_pages then this page won't get unlocked
	 * and freed on cleanup of the write context.
	 */
	wc->w_pages[0] = wc->w_target_page = page;
	wc->w_num_pages = 1;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access(handle, inode, wc->w_di_bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		/* Error paths below must commit the transaction we opened. */
		ocfs2_commit_trans(osb, handle);
		mlog_errno(ret);
		goto out;
	}

	/* First inline write to this inode: switch it to inline mode. */
	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		ocfs2_set_inode_data_inline(inode, di);

	/*
	 * Populate the page from the existing inline bytes so a partial
	 * overwrite doesn't lose the untouched portion.
	 */
	if (!PageUptodate(page)) {
		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
		if (ret) {
			ocfs2_commit_trans(osb, handle);
			goto out;
		}
	}

	wc->w_handle = handle;
out:
	return ret;
}

/*
 * Return 1 if a file of new_size bytes still fits in the dinode's
 * inline-data area, 0 otherwise.
 */
int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
		return 1;
	return 0;
}

/*
 * Decide whether this write can be serviced as inline data.
 *
 * Returns 1 when the write has been prepared inline (caller takes the
 * inline path), 0 when the regular extent-based path should be used,
 * or a negative errno. Writes through mmap (mmap_page != NULL) are
 * never done inline. An inode that is already inline but can no longer
 * hold the write is converted to an extent list here.
 */
static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
					  struct inode *inode, loff_t pos,
					  unsigned len, struct page *mmap_page,
					  struct ocfs2_write_ctxt *wc)
{
	int ret, written = 0;
	loff_t end = pos + len;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n",
	     (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos,
	     oi->ip_dyn_features);

	/*
	 * Handle inodes which already have inline data 1st.
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		if (mmap_page == NULL &&
		    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
			goto do_inline_write;

		/*
		 * The write won't fit - we have to give this inode an
		 * inline extent list now.
		 */
		ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
		if (ret)
			mlog_errno(ret);

		goto out;
	}

	/*
	 * Check whether the inode can accept inline data.
	 */
	if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
		return 0;

	/*
	 * Check whether the write can fit.
	 */
	if (mmap_page || end > ocfs2_max_inline_data(inode->i_sb))
		return 0;

do_inline_write:
	ret = ocfs2_write_begin_inline(mapping, inode, wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * This signals to the caller that the data can be written
	 * inline.
	 */
	written = 1;
out:
	return written ? written : ret;
}

/*
 * This function only does anything for file systems which can't
 * handle sparse files.
 *
 * What we want to do here is fill in any hole between the current end
 * of allocation and the end of our write. That way the rest of the
 * write path can treat it as an non-allocating write, which has no
 * special case code for sparse/nonsparse files.
 */
static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
					unsigned len,
					struct ocfs2_write_ctxt *wc)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	loff_t newsize = pos + len;

	/* Sparse-capable file systems need no hole filling. */
	if (ocfs2_sparse_alloc(osb))
		return 0;

	/* The write doesn't extend the file; nothing to fill. */
	if (newsize <= i_size_read(inode))
		return 0;

	ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
	if (ret)
		mlog_errno(ret);

	return ret;
}

/*
 * Core ->write_begin work, called with the inode cluster lock and
 * ip_alloc_sem already held (by ocfs2_write_begin() or the mmap path).
 *
 * Builds a write context describing the clusters covered by the write,
 * reserves data/metadata allocators if new clusters or extent splits
 * are needed, starts a transaction sized for the extension, grabs and
 * maps the target pages, and writes out/zeroes new cluster ranges.
 * On success returns 0 with the target page in *pagep and the write
 * context in *fsdata (consumed later by ocfs2_write_end_nolock(),
 * which also commits the transaction opened here).
 */
int ocfs2_write_begin_nolock(struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page)
{
	int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
	unsigned int clusters_to_alloc, extents_to_split;
	struct ocfs2_write_ctxt *wc;
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle;

	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	if (ocfs2_supports_inline_data(osb)) {
		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
						     mmap_page, wc);
		/* 1 means the write was fully prepared inline. */
		if (ret == 1) {
			ret = 0;
			goto success;
		}
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
					&extents_to_split);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	/*
	 * We set w_target_from, w_target_to here so that
	 * ocfs2_write_end() knows which range in the target page to
	 * write out. An allocation requires that we write the entire
	 * cluster range.
	 */
	if (clusters_to_alloc || extents_to_split) {
		/*
		 * XXX: We are stretching the limits of
		 * ocfs2_lock_allocators(). It greatly over-estimates
		 * the work to be done.
		 */
		ret = ocfs2_lock_allocators(inode, di, clusters_to_alloc,
					    extents_to_split, &data_ac, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		credits = ocfs2_calc_extend_credits(inode->i_sb, di,
						    clusters_to_alloc);
	}

	ocfs2_set_target_boundaries(osb, wc, pos, len,
				    clusters_to_alloc + extents_to_split);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	wc->w_handle = handle;

	/*
	 * We don't want this to fail in ocfs2_write_end(), so do it
	 * here.
	 */
	ret = ocfs2_journal_access(handle, inode, wc->w_di_bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Fill our page array first. That way we've grabbed enough so
	 * that we can zero and flush if we error after adding the
	 * extent.
	 */
	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
					 clusters_to_alloc + extents_to_split,
					 mmap_page);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
					  len);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* The allocators are no longer needed once the clusters exist. */
	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

success:
	*pagep = wc->w_target_page;
	*fsdata = wc;
	return 0;
out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	ocfs2_free_write_ctxt(wc);

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}

/*
 * ->write_begin for ocfs2: takes the inode cluster lock and
 * ip_alloc_sem, then defers to ocfs2_write_begin_nolock(). On success
 * both locks remain held and are dropped in ocfs2_write_end(); on
 * failure they are released here.
 */
int ocfs2_write_begin(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned len, unsigned flags,
		      struct page **pagep, void **fsdata)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = mapping->host;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * Take alloc sem here to prevent concurrent lookups. That way
	 * the mapping, zeroing and tree manipulation within
	 * ocfs2_write() will be safe against ->readpage(). This
	 * should also serve to lock out allocation from a shared
	 * writeable region.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = ocfs2_write_begin_nolock(mapping, pos, len, flags, pagep,
				       fsdata, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_fail;
	}

	/* wc holds its own reference to the dinode buffer; drop ours. */
	brelse(di_bh);

	return 0;

out_fail:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

/*
 * Copy the bytes the user wrote into the target page back into the
 * dinode's inline-data area. If the copy from userspace was short and
 * the page was never brought uptodate, *copied is forced to 0 so the
 * caller retries rather than committing garbage.
 */
static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
				   unsigned len, unsigned *copied,
				   struct ocfs2_dinode *di,
				   struct ocfs2_write_ctxt *wc)
{
	void *kaddr;

	if (unlikely(*copied < len)) {
		if (!PageUptodate(wc->w_target_page)) {
			*copied = 0;
			return;
		}
	}

	kaddr = kmap_atomic(wc->w_target_page, KM_USER0);
	memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
	kunmap_atomic(kaddr, KM_USER0);

	mlog(0, "Data written to inode at offset %llu. "
	     "id_count = %u, copied = %u, i_dyn_features = 0x%x\n",
	     (unsigned long long)pos, *copied,
	     le16_to_cpu(di->id2.i_data.id_count),
	     le16_to_cpu(di->i_dyn_features));
}

/*
 * Core ->write_end work: commit the dirty buffers on every page in the
 * write context, update i_size/i_blocks and the on-disk timestamps,
 * journal the dinode, and commit the transaction that
 * ocfs2_write_begin_nolock() opened. Consumes and frees the write
 * context passed via fsdata. Returns the number of bytes accepted.
 */
int ocfs2_write_end_nolock(struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	int i;
	unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_write_ctxt *wc = fsdata;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	handle_t *handle = wc->w_handle;
	struct page *tmppage;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
		goto out_write_size;
	}

	/*
	 * Short copy from userspace: zero the tail we never received so
	 * stale data can't leak into the file.
	 */
	if (unlikely(copied < len)) {
		if (!PageUptodate(wc->w_target_page))
			copied = 0;

		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
				       start+len);
	}
	flush_dcache_page(wc->w_target_page);

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (tmppage == wc->w_target_page) {
			/* Only the range actually written needs committing. */
			from = wc->w_target_from;
			to = wc->w_target_to;

			BUG_ON(from > PAGE_CACHE_SIZE ||
			       to > PAGE_CACHE_SIZE ||
			       to < from);
		} else {
			/*
			 * Pages adjacent to the target (if any) imply
			 * a hole-filling write in which case we want
			 * to flush their entire range.
			 */
			from = 0;
			to = PAGE_CACHE_SIZE;
		}

		if (page_has_buffers(tmppage)) {
			/* In ordered mode, data must hit the journal list
			 * before the metadata commit. */
			if (ocfs2_should_order_data(inode))
				walk_page_buffers(wc->w_handle,
						  page_buffers(tmppage),
						  from, to, NULL,
						  ocfs2_journal_dirty_data);
			block_commit_write(tmppage, from, to);
		}
	}

out_write_size:
	pos += copied;
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	di->i_size = cpu_to_le64((u64)i_size_read(inode));
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ocfs2_journal_dirty(handle, wc->w_di_bh);

	ocfs2_commit_trans(osb, handle);

	ocfs2_run_deallocs(osb, &wc->w_dealloc);

	ocfs2_free_write_ctxt(wc);

	return copied;
}

/*
 * ->write_end for ocfs2: finish the write, then drop the locks taken
 * in ocfs2_write_begin() (ip_alloc_sem and the inode cluster lock).
 */
int ocfs2_write_end(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned copied,
		    struct page *page, void *fsdata)
{
	int ret;
	struct inode *inode = mapping->host;

	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

/*
 * Address-space operations table. The #ifdef keeps the definition
 * buildable against kernels where address_space_operations fields were
 * not yet assignable from a const table.
 */
#ifdef AOPS_IS_NOT_CONST
struct address_space_operations ocfs2_aops = {
#else
const struct address_space_operations ocfs2_aops = {
#endif
	.readpage	= ocfs2_readpage,
	.readpages	= ocfs2_readpages,
	.writepage	= ocfs2_writepage,
	.bmap		= ocfs2_bmap,
	.sync_page	= block_sync_page,
	.direct_IO	= ocfs2_direct_IO,
	.invalidatepage	= ocfs2_invalidatepage,
	.releasepage	= ocfs2_releasepage,
	.migratepage	= buffer_migrate_page,
};

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -