⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 alloc.c

📁 ocfs1.2.7 源码
💻 C
📖 第 1 页 / 共 4 页
字号:
	mlog_exit(status);	return status;}/* * Expects the tree to already have room in the rightmost leaf for the * extent.  Updates all the extent blocks (and the dinode) on the way * down. */static int ocfs2_do_insert_extent(struct ocfs2_super *osb,				  struct ocfs2_journal_handle *handle,				  struct inode *inode,				  struct buffer_head *fe_bh,				  u64 start_blk,				  u32 new_clusters){	int status, i, num_bhs = 0;	u64 next_blkno;	u16 next_free;	struct buffer_head **eb_bhs = NULL;	struct ocfs2_dinode *fe;	struct ocfs2_extent_block *eb;	struct ocfs2_extent_list  *el;	mlog_entry_void();	status = ocfs2_journal_access(handle, inode, fe_bh,				      OCFS2_JOURNAL_ACCESS_WRITE);	if (status < 0) {		mlog_errno(status);		goto bail;	}	fe = (struct ocfs2_dinode *) fe_bh->b_data;	el = &fe->id2.i_list;	if (el->l_tree_depth) {		/* This is another operation where we want to be		 * careful about our tree updates. An error here means		 * none of the previous changes we made should roll		 * forward. As a result, we have to record the buffers		 * for this part of the tree in an array and reserve a		 * journal write to them before making any changes. 
*/		num_bhs = le16_to_cpu(fe->id2.i_list.l_tree_depth);		eb_bhs = kcalloc(num_bhs, sizeof(struct buffer_head *),				 GFP_KERNEL);		if (!eb_bhs) {			status = -ENOMEM;			mlog_errno(status);			goto bail;		}		i = 0;		while(el->l_tree_depth) {			next_free = le16_to_cpu(el->l_next_free_rec);			if (next_free == 0) {				ocfs2_error(inode->i_sb,					    "Dinode %"MLFu64" has a bad "					    "extent list",					    OCFS2_I(inode)->ip_blkno);				status = -EIO;				goto bail;			}			next_blkno = le64_to_cpu(el->l_recs[next_free - 1].e_blkno);			BUG_ON(i >= num_bhs);			status = ocfs2_read_block(osb, next_blkno, &eb_bhs[i],						  OCFS2_BH_CACHED, inode);			if (status < 0) {				mlog_errno(status);				goto bail;			}			eb = (struct ocfs2_extent_block *) eb_bhs[i]->b_data;			if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {				OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb,								 eb);				status = -EIO;				goto bail;			}			status = ocfs2_journal_access(handle, inode, eb_bhs[i],						      OCFS2_JOURNAL_ACCESS_WRITE);			if (status < 0) {				mlog_errno(status);				goto bail;			}			el = &eb->h_list;			i++;			/* When we leave this loop, eb_bhs[num_bhs - 1] will			 * hold the bottom-most leaf extent block. */		}		BUG_ON(el->l_tree_depth);		el = &fe->id2.i_list;		/* If we have tree depth, then the fe update is		 * trivial, and we want to switch el out for the		 * bottom-most leaf in order to update it with the		 * actual extent data below. */		next_free = le16_to_cpu(el->l_next_free_rec);		if (next_free == 0) {			ocfs2_error(inode->i_sb,				    "Dinode %"MLFu64" has a bad "				    "extent list",				    OCFS2_I(inode)->ip_blkno);			status = -EIO;			goto bail;		}		le32_add_cpu(&el->l_recs[next_free - 1].e_clusters,			     new_clusters);		/* (num_bhs - 1) to avoid the leaf */		for(i = 0; i < (num_bhs - 1); i++) {			eb = (struct ocfs2_extent_block *) eb_bhs[i]->b_data;			el = &eb->h_list;			/* finally, make our actual change to the			 * intermediate extent blocks. 
*/			next_free = le16_to_cpu(el->l_next_free_rec);			le32_add_cpu(&el->l_recs[next_free - 1].e_clusters,				     new_clusters);			status = ocfs2_journal_dirty(handle, eb_bhs[i]);			if (status < 0)				mlog_errno(status);		}		BUG_ON(i != (num_bhs - 1));		/* note that the leaf block wasn't touched in		 * the loop above */		eb = (struct ocfs2_extent_block *) eb_bhs[num_bhs - 1]->b_data;		el = &eb->h_list;		BUG_ON(el->l_tree_depth);	}	/* yay, we can finally add the actual extent now! */	i = le16_to_cpu(el->l_next_free_rec) - 1;	if (le16_to_cpu(el->l_next_free_rec) &&	    ocfs2_extent_contig(inode, &el->l_recs[i], start_blk)) {		le32_add_cpu(&el->l_recs[i].e_clusters, new_clusters);	} else if (le16_to_cpu(el->l_next_free_rec) &&		   (le32_to_cpu(el->l_recs[i].e_clusters) == 0)) {		/* having an empty extent at eof is legal. */		if (el->l_recs[i].e_cpos != fe->i_clusters) {			ocfs2_error(inode->i_sb,				    "Dinode %"MLFu64" trailing extent is bad: "				    "cpos (%u) != number of clusters (%u)",				    le32_to_cpu(el->l_recs[i].e_cpos),				    le32_to_cpu(fe->i_clusters));			status = -EIO;			goto bail;		}		el->l_recs[i].e_blkno = cpu_to_le64(start_blk);		el->l_recs[i].e_clusters = cpu_to_le32(new_clusters);	} else {		/* No contiguous record, or no empty record at eof, so		 * we add a new one. */		BUG_ON(le16_to_cpu(el->l_next_free_rec) >=		       le16_to_cpu(el->l_count));		i = le16_to_cpu(el->l_next_free_rec);		el->l_recs[i].e_blkno = cpu_to_le64(start_blk);		el->l_recs[i].e_clusters = cpu_to_le32(new_clusters);		el->l_recs[i].e_cpos = fe->i_clusters;		le16_add_cpu(&el->l_next_free_rec, 1);	}	/*	 * extent_map errors are not fatal, so they are ignored outside	 * of flushing the thing.	 
*/	status = ocfs2_extent_map_append(inode, &el->l_recs[i],					 new_clusters);	if (status) {		mlog_errno(status);		ocfs2_extent_map_drop(inode, le32_to_cpu(fe->i_clusters));	}	status = ocfs2_journal_dirty(handle, fe_bh);	if (status < 0)		mlog_errno(status);	if (fe->id2.i_list.l_tree_depth) {		status = ocfs2_journal_dirty(handle, eb_bhs[num_bhs - 1]);		if (status < 0)			mlog_errno(status);	}	status = 0;bail:	if (eb_bhs) {		for (i = 0; i < num_bhs; i++)			if (eb_bhs[i])				brelse(eb_bhs[i]);		kfree(eb_bhs);	}	mlog_exit(status);	return status;}/* * Should only be called when there is no space left in any of the * leaf nodes. What we want to do is find the lowest tree depth * non-leaf extent block with room for new records. There are three * valid results of this search: * * 1) a lowest extent block is found, then we pass it back in *    *lowest_eb_bh and return '0' * * 2) the search fails to find anything, but the dinode has room. We *    pass NULL back in *lowest_eb_bh, but still return '0' * * 3) the search fails to find anything AND the dinode is full, in *    which case we return > 0 * * return status < 0 indicates an error. 
*/
static int ocfs2_find_branch_target(struct ocfs2_super *osb,
				    struct inode *inode,
				    struct buffer_head *fe_bh,
				    struct buffer_head **target_bh)
{
	int status = 0, i;
	u64 blkno;
	struct ocfs2_dinode *fe;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *el;
	struct buffer_head *bh = NULL;
	struct buffer_head *lowest_bh = NULL;

	mlog_entry_void();

	*target_bh = NULL;

	fe = (struct ocfs2_dinode *) fe_bh->b_data;
	el = &fe->id2.i_list;

	/* Walk the rightmost path of the tree, stopping one level above
	 * the leaves.  Along the way remember (via lowest_bh) the
	 * deepest non-leaf extent block that still has free record
	 * slots. */
	while(le16_to_cpu(el->l_tree_depth) > 1) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(inode->i_sb, "Dinode %"MLFu64" has empty "
				    "extent list (next_free_rec == 0)",
				    OCFS2_I(inode)->ip_blkno);
			status = -EIO;
			goto bail;
		}
		/* rightmost record points at the next level down */
		i = le16_to_cpu(el->l_next_free_rec) - 1;
		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (!blkno) {
			ocfs2_error(inode->i_sb, "Dinode %"MLFu64" has extent "
				    "list where extent # %d has no physical "
				    "block start",
				    OCFS2_I(inode)->ip_blkno, i);
			status = -EIO;
			goto bail;
		}

		/* drop the reference from the previous iteration before
		 * reading the next block into bh */
		if (bh) {
			brelse(bh);
			bh = NULL;
		}

		status = ocfs2_read_block(osb, blkno, &bh, OCFS2_BH_CACHED,
					  inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
			status = -EIO;
			goto bail;
		}
		el = &eb->h_list;

		/* this block has room - it becomes the new (deeper)
		 * candidate; take an extra reference since bh will be
		 * released on the next pass */
		if (le16_to_cpu(el->l_next_free_rec) <
		    le16_to_cpu(el->l_count)) {
			if (lowest_bh)
				brelse(lowest_bh);
			lowest_bh = bh;
			get_bh(lowest_bh);
		}
	}

	/* If we didn't find one and the fe doesn't have any room,
	 * then return '1' */
	if (!lowest_bh
	    && (fe->id2.i_list.l_next_free_rec == fe->id2.i_list.l_count))
		status = 1;

	*target_bh = lowest_bh;
bail:
	if (bh)
		brelse(bh);

	mlog_exit(status);
	return status;
}

/* the caller needs to update fe->i_clusters */
int ocfs2_insert_extent(struct ocfs2_super *osb,
			struct ocfs2_journal_handle *handle,
			struct inode *inode,
			struct buffer_head *fe_bh,
			u64 start_blk,
		u32 new_clusters,
			struct ocfs2_alloc_context *meta_ac)
{
	int status, i, shift;
	struct buffer_head *last_eb_bh = NULL;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *el;

	mlog_entry_void();

	mlog(0, "add %u clusters starting at block %"MLFu64" to "
		"inode %"MLFu64"\n",
	     new_clusters, start_blk, OCFS2_I(inode)->ip_blkno);

	fe = (struct ocfs2_dinode *) fe_bh->b_data;
	el = &fe->id2.i_list;

	if (el->l_tree_depth) {
		/* jump to end of tree */
		status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
					  &last_eb_bh, OCFS2_BH_CACHED, inode);
		if (status < 0) {
			mlog_exit(status);
			goto bail;
		}
		eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
		el = &eb->h_list;
	}

	/* Can we allocate without adding/shifting tree bits?
	 * Note: when l_next_free_rec == 0, i is -1, but the first
	 * condition short-circuits before l_recs[i] is touched. */
	i = le16_to_cpu(el->l_next_free_rec) - 1;
	if (le16_to_cpu(el->l_next_free_rec) == 0
	    || (le16_to_cpu(el->l_next_free_rec) < le16_to_cpu(el->l_count))
	    || le32_to_cpu(el->l_recs[i].e_clusters) == 0
	    || ocfs2_extent_contig(inode, &el->l_recs[i], start_blk))
		goto out_add;

	mlog(0, "ocfs2_allocate_extent: couldn't do a simple add, traversing "
	     "tree now.\n");

	/* shift == 0: a non-full branch target was found (or the dinode
	 * itself has room); shift > 0: the whole tree is full and we
	 * must grow its depth. */
	shift = ocfs2_find_branch_target(osb, inode, fe_bh, &bh);
	if (shift < 0) {
		status = shift;
		mlog_errno(status);
		goto bail;
	}

	/* We traveled all the way to the bottom of the allocation tree
	 * and didn't find room for any more extents - we need to add
	 * another tree level */
	if (shift) {
		/* if we hit a leaf, we'd better be empty :) */
		BUG_ON(le16_to_cpu(el->l_next_free_rec) !=
		       le16_to_cpu(el->l_count));
		BUG_ON(bh);

		mlog(0, "ocfs2_allocate_extent: need to shift tree depth "
		     "(current = %u)\n",
		     le16_to_cpu(fe->id2.i_list.l_tree_depth));

		/* ocfs2_shift_tree_depth will return us a buffer with
		 * the new extent block (so we can pass that to
		 * ocfs2_add_branch). */
		status = ocfs2_shift_tree_depth(osb, handle, inode, fe_bh,
						meta_ac, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		/* Special case: we have room now if we shifted from
		 * tree_depth 0 */
		if (fe->id2.i_list.l_tree_depth == cpu_to_le16(1))
			goto out_add;
	}

	/* call ocfs2_add_branch to add the final part of the tree with
	 * the new data. */
	mlog(0, "ocfs2_allocate_extent: add branch. bh = %p\n", bh);
	status = ocfs2_add_branch(osb, handle, inode, fe_bh, bh, last_eb_bh,
				  meta_ac);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

out_add:
	/* Finally, we can add clusters. */
	status = ocfs2_do_insert_extent(osb, handle, inode, fe_bh,
					start_blk, new_clusters);
	if (status < 0)
		mlog_errno(status);

bail:
	if (bh)
		brelse(bh);
	if (last_eb_bh)
		brelse(last_eb_bh);

	mlog_exit(status);
	return status;
}

/*
 * Returns nonzero when the truncate log is full and must be flushed
 * before another record can be appended.
 */
static inline int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb)
{
	struct buffer_head *tl_bh = osb->osb_tl_bh;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;

	di = (struct ocfs2_dinode *) tl_bh->b_data;
	tl = &di->id2.i_dealloc;

	mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count),
			"slot %d, invalid truncate log parameters: used = "
			"%u, count = %u\n", osb->slot_num,
			le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count));
	return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count);
}

/*
 * Returns nonzero when new_start is the cluster immediately following
 * the last record in the log, i.e. the two ranges can be merged into
 * one record instead of consuming a new slot.
 */
static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl,
					   unsigned int new_start)
{
	unsigned int tail_index;
	unsigned int current_tail;

	/* No records, nothing to coalesce */
	if (!le16_to_cpu(tl->tl_used))
		return 0;

	tail_index = le16_to_cpu(tl->tl_used) - 1;
	current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start);
	current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters);

	return current_tail == new_start;
}

/*
 * Append a range of freed clusters to the per-slot truncate log,
 * coalescing with the tail record when possible.  Caller must hold
 * tl_inode->i_mutex and must have flushed the log if it was full.
 */
static int ocfs2_truncate_log_append(struct ocfs2_super *osb,
				     struct ocfs2_journal_handle *handle,
				     u64 start_blk,
				     unsigned int num_clusters)
{
	int status, index;
	unsigned int
start_cluster, tl_count;	struct inode *tl_inode = osb->osb_tl_inode;	struct buffer_head *tl_bh = osb->osb_tl_bh;	struct ocfs2_dinode *di;	struct ocfs2_truncate_log *tl;	mlog_entry("start_blk = %"MLFu64", num_clusters = %u\n", start_blk,		   num_clusters);	BUG_ON(mutex_trylock(&tl_inode->i_mutex));	start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);	di = (struct ocfs2_dinode *) tl_bh->b_data;	tl = &di->id2.i_dealloc;	if (!OCFS2_IS_VALID_DINODE(di)) {		OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);		status = -EIO;		goto bail;	}	tl_count = le16_to_cpu(tl->tl_count);	mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||			tl_count == 0,			"Truncate record count on #%"MLFu64" invalid ("			"wanted %u, actual %u\n", OCFS2_I(tl_inode)->ip_blkno,			ocfs2_truncate_recs_per_inode(osb->sb),			le16_to_cpu(tl->tl_count));	/* Caller should have known to flush before calling us. */	index = le16_to_cpu(tl->tl_used);	if (index >= tl_count) {		status = -ENOSPC;		mlog_errno(status);		goto bail;	}	status = ocfs2_journal_access(handle, tl_inode, tl_bh,				      OCFS2_JOURNAL_ACCESS_WRITE);	if (status < 0) {		mlog_errno(status);		goto bail;	}	mlog(0, "Log truncate of %u clusters starting at cluster %u to "	     "%"MLFu64" (index = %d)\n", num_clusters, start_cluster,	     OCFS2_I(tl_inode)->ip_blkno, index);	if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {		/*		 * Move index back to the record we are coalescing with.		 * ocfs2_truncate_log_can_coalesce() guarantees nonzero		 */		index--;		num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);		mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n",		     index, le32_to_cpu(tl->tl_recs[index].t_start),		     num_clusters);	} else {		tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);		tl->tl_used = cpu_to_le16(index + 1);	}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -