
ext3-extents-2.6.18-vanilla.patch

lustre 1.6.5 source code (PATCH, page 1 of 5)
+
+	/* probably first extent we're gonna free will be last in block */
+	handle = ext3_journal_start(inode, depth + 1);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	ext3_ext_invalidate_cache(inode);
+
+	/*
+	 * we start scanning from right side freeing all the blocks
+	 * after i_size and walking into the deep
+	 */
+	path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
+	if (path == NULL) {
+		ext3_journal_stop(handle);
+		return -ENOMEM;
+	}
+	memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
+	path[0].p_hdr = ext_inode_hdr(inode);
+	if (ext3_ext_check_header(inode, path[0].p_hdr, depth)) {
+		err = -EIO;
+		goto out;
+	}
+	path[0].p_depth = depth;
+
+	while (i >= 0 && err == 0) {
+		if (i == depth) {
+			/* this is leaf block */
+			err = ext3_ext_rm_leaf(handle, inode, path, start);
+			/* root level have p_bh == NULL, brelse() eats this */
+			brelse(path[i].p_bh);
+			path[i].p_bh = NULL;
+			i--;
+			continue;
+		}
+
+		/* this is index block */
+		if (!path[i].p_hdr) {
+			ext_debug(inode, "initialize header\n");
+			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
+		}
+
+		if (!path[i].p_idx) {
+			/* this level hasn't touched yet */
+			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
+			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
+			ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
+				  path[i].p_hdr,
+				  le16_to_cpu(path[i].p_hdr->eh_entries));
+		} else {
+			/* we've already was here, see at next index */
+			path[i].p_idx--;
+		}
+
+		ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
+				i, EXT_FIRST_INDEX(path[i].p_hdr),
+				path[i].p_idx);
+		if (ext3_ext_more_to_rm(path + i)) {
+			struct buffer_head *bh;
+			/* go to the next level */
+			ext_debug(inode, "move to level %d (block %d)\n",
+				  i + 1, le32_to_cpu(path[i].p_idx->ei_leaf));
+			memset(path + i + 1, 0, sizeof(*path));
+			bh = sb_bread(sb, le32_to_cpu(path[i].p_idx->ei_leaf));
+			if (!bh) {
+				/* should we reset i_size? */
+				err = -EIO;
+				break;
+			}
+			BUG_ON(i + 1 > depth);
+			if (ext3_ext_check_header(inode, ext_block_hdr(bh),
+							depth - i - 1)) {
+				err = -EIO;
+				break;
+			}
+			path[i+1].p_bh = bh;
+
+			/* put actual number of indexes to know is this
+			 * number got changed at the next iteration */
+			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
+			i++;
+		} else {
+			/* we finish processing this index, go up */
+			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
+				/* index is empty, remove it
+				 * handle must be already prepared by the
+				 * truncatei_leaf() */
+				err = ext3_ext_rm_idx(handle, inode, path + i);
+			}
+			/* root level have p_bh == NULL, brelse() eats this */
+			brelse(path[i].p_bh);
+			path[i].p_bh = NULL;
+			i--;
+			ext_debug(inode, "return to level %d\n", i);
+		}
+	}
+
+	/* TODO: flexible tree reduction should be here */
+	if (path->p_hdr->eh_entries == 0) {
+		/*
+		 * truncate to zero freed all the tree
+		 * so, we need to correct eh_depth
+		 */
+		err = ext3_ext_get_access(handle, inode, path);
+		if (err == 0) {
+			ext_inode_hdr(inode)->eh_depth = 0;
+			ext_inode_hdr(inode)->eh_max =
+				cpu_to_le16(ext3_ext_space_root(inode));
+			err = ext3_ext_dirty(handle, inode, path);
+		}
+	}
+out:
+	ext3_ext_tree_changed(inode);
+	ext3_ext_drop_refs(path);
+	kfree(path);
+	ext3_journal_stop(handle);
+
+	return err;
+}
+
+/*
+ * called at mount time
+ */
+void ext3_ext_init(struct super_block *sb)
+{
+	/*
+	 * possible initialization would be here
+	 */
+
+	if (test_opt(sb, EXTENTS)) {
+		printk("EXT3-fs: file extents enabled");
+#ifdef AGRESSIVE_TEST
+		printk(", agressive tests");
+#endif
+#ifdef CHECK_BINSEARCH
+		printk(", check binsearch");
+#endif
+#ifdef EXTENTS_STATS
+		printk(", stats");
+#endif
+		printk("\n");
+#ifdef EXTENTS_STATS
+		spin_lock_init(&EXT3_SB(sb)->s_ext_stats_lock);
+		EXT3_SB(sb)->s_ext_min = 1 << 30;
+		EXT3_SB(sb)->s_ext_max = 0;
+#endif
+	}
+}
+
+/*
+ * called at umount time
+ */
+void ext3_ext_release(struct super_block *sb)
+{
+	if (!test_opt(sb, EXTENTS))
+		return;
+
+#ifdef EXTENTS_STATS
+	if (EXT3_SB(sb)->s_ext_blocks && EXT3_SB(sb)->s_ext_extents) {
+		struct ext3_sb_info *sbi = EXT3_SB(sb);
+		printk(KERN_ERR "EXT3-fs: %lu blocks in %lu extents (%lu ave)\n",
+			sbi->s_ext_blocks, sbi->s_ext_extents,
+			sbi->s_ext_blocks / sbi->s_ext_extents);
+		printk(KERN_ERR "EXT3-fs: extents: %lu min, %lu max, max depth %lu\n",
+			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
+	}
+#endif
+}
+
+int ext3_ext_get_blocks(handle_t *handle, struct inode *inode, sector_t iblock,
+			unsigned long max_blocks, struct buffer_head *bh_result,
+			int create, int extend_disksize)
+{
+	struct ext3_ext_path *path = NULL;
+	struct ext3_extent newex, *ex;
+	int goal, newblock, err = 0, depth;
+	unsigned long allocated = 0;
+	unsigned long next;
+
+	__clear_bit(BH_New, &bh_result->b_state);
+	ext_debug(inode, "blocks %d/%lu requested for inode %u\n", (int) iblock,
+			max_blocks, (unsigned) inode->i_ino);
+	mutex_lock(&EXT3_I(inode)->truncate_mutex);
+
+	/* check in cache */
+	if ((goal = ext3_ext_in_cache(inode, iblock, &newex))) {
+		if (goal == EXT3_EXT_CACHE_GAP) {
+			if (!create) {
+				/* block isn't allocated yet and
+				 * user don't want to allocate it */
+				goto out2;
+			}
+			/* we should allocate requested block */
+		} else if (goal == EXT3_EXT_CACHE_EXTENT) {
+			/* block is already allocated */
+		        newblock = iblock
+		                   - le32_to_cpu(newex.ee_block)
+			           + le32_to_cpu(newex.ee_start);
+			/* number of remain blocks in the extent */
+			BUG_ON(iblock < le32_to_cpu(newex.ee_block));
+			allocated = le16_to_cpu(newex.ee_len) -
+					(iblock - le32_to_cpu(newex.ee_block));
+			goto out;
+		} else {
+			BUG();
+		}
+	}
+
+	/* find extent for this block */
+	path = ext3_ext_find_extent(inode, iblock, NULL);
+	if (IS_ERR(path)) {
+		err = PTR_ERR(path);
+		path = NULL;
+		goto out2;
+	}
+
+	depth = ext_depth(inode);
+
+	/*
+	 * consistent leaf must not be empty
+	 * this situations is possible, though, _during_ tree modification
+	 * this is why assert can't be put in ext3_ext_find_extent()
+	 */
+	BUG_ON(path[depth].p_ext == NULL && depth != 0);
+
+	if ((ex = path[depth].p_ext)) {
+	        unsigned long ee_block = le32_to_cpu(ex->ee_block);
+		unsigned long ee_start = le32_to_cpu(ex->ee_start);
+		unsigned short ee_len  = le16_to_cpu(ex->ee_len);
+		/* if found exent covers block, simple return it */
+	        if (iblock >= ee_block && iblock < ee_block + ee_len) {
+			newblock = iblock - ee_block + ee_start;
+			/* number of remain blocks in the extent */
+			allocated = ee_len - (iblock - ee_block);
+			ext_debug(inode, "%d fit into %lu:%d -> %d\n", (int) iblock,
+					ee_block, ee_len, newblock);
+			ext3_ext_put_in_cache(inode, ee_block, ee_len,
+						ee_start, EXT3_EXT_CACHE_EXTENT);
+			goto out;
+		}
+	}
+
+	/*
+	 * requested block isn't allocated yet
+	 * we couldn't try to create block if create flag is zero
+	 */
+	if (!create) {
+		/* put just found gap into cache to speedup subsequest reqs */
+		ext3_ext_put_gap_in_cache(inode, path, iblock);
+		goto out2;
+	}
+
+	/*
+	 * Okay, we need to do block allocation.  Lazily initialize the block
+	 * allocation info here if necessary
+	 */
+	if (S_ISREG(inode->i_mode) && (!EXT3_I(inode)->i_block_alloc_info))
+		ext3_init_block_alloc_info(inode);
+
+	/* find next allocated block so that we know how many
+	 * blocks we can allocate without ovelapping next extent */
+	BUG_ON(iblock < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
+	next = ext3_ext_next_allocated_block(path);
+	BUG_ON(next <= iblock);
+	allocated = next - iblock;
+	if (allocated > max_blocks)
+		allocated = max_blocks;
+
+	/* allocate new block */
+	goal = ext3_ext_find_goal(inode, path, iblock);
+	newblock = ext3_new_blocks(handle, inode, goal, &allocated, &err);
+	if (!newblock)
+		goto out2;
+	ext_debug(inode, "allocate new block: goal %d, found %d/%lu\n",
+			goal, newblock, allocated);
+
+	/* try to insert new extent into found leaf and return */
+	newex.ee_block = cpu_to_le32(iblock);
+	newex.ee_start = cpu_to_le32(newblock);
+	newex.ee_start_hi = 0;
+	newex.ee_len = cpu_to_le16(allocated);
+	err = ext3_ext_insert_extent(handle, inode, path, &newex);
+	if (err) {
+		/* free data blocks we just allocated */
+		ext3_free_blocks(handle, inode, le32_to_cpu(newex.ee_start),
+				le16_to_cpu(newex.ee_len));
+		goto out2;
+	}
+
+	if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
+		EXT3_I(inode)->i_disksize = inode->i_size;
+
+	/* previous routine could use block we allocated */
+	newblock = le32_to_cpu(newex.ee_start);
+	__set_bit(BH_New, &bh_result->b_state);
+
+	ext3_ext_put_in_cache(inode, iblock, allocated, newblock,
+				EXT3_EXT_CACHE_EXTENT);
+out:
+	if (allocated > max_blocks)
+		allocated = max_blocks;
+	ext3_ext_show_leaf(inode, path);
+	__set_bit(BH_Mapped, &bh_result->b_state);
+	bh_result->b_bdev = inode->i_sb->s_bdev;
+	bh_result->b_blocknr = newblock;
+	bh_result->b_size = (allocated << inode->i_blkbits);
+out2:
+	if (path) {
+		ext3_ext_drop_refs(path);
+		kfree(path);
+	}
+	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
+
+	return err ? err : allocated;
+}
+
+void ext3_ext_truncate(struct inode * inode, struct page *page)
+{
+	struct address_space *mapping = inode->i_mapping;
+	struct super_block *sb = inode->i_sb;
+	unsigned long last_block;
+	handle_t *handle;
+	int err = 0;
+
+	/*
+	 * probably first extent we're gonna free will be last in block
+	 */
+	err = ext3_writepage_trans_blocks(inode) + 3;
+	handle = ext3_journal_start(inode, err);
+	if (IS_ERR(handle)) {
+		if (page) {
+			clear_highpage(page);
+			flush_dcache_page(page);
+			unlock_page(page);
+			page_cache_release(page);
+		}
+		return;
+	}
+
+	if (page)
+		ext3_block_truncate_page(handle, page, mapping, inode->i_size);
+
+	mutex_lock(&EXT3_I(inode)->truncate_mutex);
+	ext3_ext_invalidate_cache(inode);
+
+	/*
+	 * TODO: optimization is possible here
+	 * probably we need not scaning at all,
+	 * because page truncation is enough
+	 */
+	if (ext3_orphan_add(handle, inode))
+		goto out_stop;
+
+	/* we have to know where to truncate from in crash case */
+	EXT3_I(inode)->i_disksize = inode->i_size;
+	ext3_mark_inode_dirty(handle, inode);
+
+	last_block = (inode->i_size + sb->s_blocksize - 1)
+			>> EXT3_BLOCK_SIZE_BITS(sb);
+	err = ext3_ext_remove_space(inode, last_block);
+
+	/* In a multi-transaction truncate, we only make the final
+	 * transaction synchronous */
+	if (IS_SYNC(inode))
+		handle->h_sync = 1;
+
+out_stop:
+	/*
+	 * If this was a simple ftruncate(), and the file will remain alive
+	 * then we need to clear up the orphan record which we created above.
+	 * However, if this was a real unlink then we were called by
+	 * ext3_delete_inode(), and we allow that function to clean up the
+	 * orphan info for us.
+	 */
+	if (inode->i_nlink)
+		ext3_orphan_del(handle, inode);
+
+	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
+	ext3_journal_stop(handle);
+}
+
+/*
+ * this routine calculate max number of blocks we could modify
+ * in order to allocate new block for an inode
+ */
+int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
+{
+	int needed;
+
+	needed = ext3_ext_calc_credits_for_insert(inode, NULL);
+
+	/* caller want to allocate num blocks, but note it includes sb */
+	needed = needed * num - (num - 1);
+
+#ifdef CONFIG_QUOTA
+	needed += 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
+#endif
+
+	return needed;
+}
+
+EXPORT_SYMBOL(ext3_mark_inode_dirty);
+EXPORT_SYMBOL(ext3_ext_invalidate_cache);
+EXPORT_SYMBOL(ext3_ext_insert_extent);
+EXPORT_SYMBOL(ext3_ext_walk_space);
+EXPORT_SYMBOL(ext3_ext_find_goal);
+EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
Index: linux-2.6.18.8/fs/ext3/ialloc.c
===================================================================
--- linux-2.6.18.8.orig/fs/ext3/ialloc.c	2007-07-17 09:18:09.000000000 +0200
+++ linux-2.6.18.8/fs/ext3/ialloc.c	2007-07-17 11:08:09.000000000 +0200
@@ -652,6 +652,17 @@ got:
 		ext3_std_error(sb, err);
 		goto fail_free_drop;
 	}
+	if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
+		EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
+		ext3_ext_tree_init(handle, inode);
+		if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
+			err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
+			if (err) goto fail;
+			EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
+			BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
+			err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+		}
+	}
 
 	ext3_debug("allocating inode %lu\n", inode->i_ino);
 	goto really_out;
Index: linux-2.6.18.8/fs/ext3/inode.c
===================================================================
--- linux-2.6.18.8.orig/fs/ext3/inode.c	2007-07-17 09:18:12.000000000 +0200
+++ linux-2.6.18.8/fs/ext3/inode.c	2007-07-17 11:08:11.000000000 +0200
@@ -40,8 +40,6 @@
 #include "iopen.h"
 #include "acl.h"
 
-static int ext3_writepage_trans_blocks(struct inode *inode);
-
 /*
  * Test whether an inode is a fast symlink.
  */
@@ -804,6 +802,7 @@ int ext3_get_blocks_handle(handle_t *han
 	ext3_fsblk_t first_block = 0;
 
+	J_ASSERT(!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL));
 	J_ASSERT(handle != NULL || create == 0);
 
 	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 
@@ -984,12 +983,10 @@ static int ext3_get_block(struct inode *
 
 get_block:
	if (ret == 0) {
-		ret = ext3_get_blocks_handle(handle, inode, iblock,
+		ret = ext3_get_blocks_wrap(handle, inode, iblock,
 					max_blocks, bh_result, create, 0);
-		if (ret > 0) {
-			bh_result->b_size = (ret << inode->i_blkbits);
+		if (ret > 0)
 			ret = 0;
-		}
 	}
 	return ret;
 }
@@ -1008,7 +1005,7 @@ struct buffer_head *ext3_getblk(handle_t
 	dummy.b_state = 0;
 	dummy.b_blocknr = -1000;
 	buffer
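Note on the mapping arithmetic in ext3_ext_get_blocks() above: when the requested logical block falls inside an extent described by (ee_block, ee_len, ee_start), the physical block is iblock - ee_block + ee_start and the number of blocks still available in that run is ee_len - (iblock - ee_block). The stand-alone sketch below replays only that arithmetic in user space; struct simple_extent and map_block() are hypothetical names for illustration, and the fields are kept in host byte order rather than the little-endian on-disk form the patch converts with le32_to_cpu()/le16_to_cpu().

/*
 * Minimal user-space sketch of the extent hit test used in
 * ext3_ext_get_blocks().  simple_extent and map_block are illustrative
 * stand-ins, not the on-disk struct ext3_extent.
 */
#include <stdio.h>

struct simple_extent {
	unsigned long ee_block;		/* first logical block covered */
	unsigned short ee_len;		/* number of blocks covered */
	unsigned long ee_start;		/* first physical block */
};

/*
 * Return the physical block for iblock if it lies inside the extent and
 * store how many blocks remain in the run (the "allocated" value the
 * patch computes); return 0 on a miss, meaning the caller must allocate.
 */
static unsigned long map_block(const struct simple_extent *ex,
			       unsigned long iblock, unsigned long *remaining)
{
	if (iblock < ex->ee_block || iblock >= ex->ee_block + ex->ee_len)
		return 0;

	*remaining = ex->ee_len - (iblock - ex->ee_block);
	return iblock - ex->ee_block + ex->ee_start;
}

int main(void)
{
	struct simple_extent ex = { .ee_block = 100, .ee_len = 8, .ee_start = 5000 };
	unsigned long remaining;
	unsigned long phys = map_block(&ex, 103, &remaining);

	/* logical 103 -> physical 5003, 5 blocks left in the extent */
	printf("phys=%lu remaining=%lu\n", phys, remaining);
	return 0;
}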
