⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ext3-extents-2.6.18-vanilla.patch

📁 非常经典的一个分布式系统
💻 PATCH
📖 第 1 页 / 共 5 页
字号:
+		path = ext3_ext_find_extent(inode,
+					    le32_to_cpu(newext->ee_block),
+					    path);
+		if (IS_ERR(path))
+			err = PTR_ERR(path);
+	} else {
+		/* tree is full, time to grow in depth */
+		err = ext3_ext_grow_indepth(handle, inode, path, newext);
+		if (err)
+			goto out;
+
+		/* refill path */
+		ext3_ext_drop_refs(path);
+		path = ext3_ext_find_extent(inode,
+					    le32_to_cpu(newext->ee_block),
+					    path);
+		if (IS_ERR(path)) {
+			err = PTR_ERR(path);
+			goto out;
+		}
+
+		/*
+		 * only first (depth 0 -> 1) produces free space
+		 * in all other cases we have to split growed tree
+		 */
+		depth = ext_depth(inode);
+		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
+			/* now we need split */
+			goto repeat;
+		}
+	}
+
+out:
+	return err;
+}
+
+/*
+ * search the closest allocated block to the left for *logical
+ * and returns it at @logical + it's physical address at @phys
+ * if *logical is the smallest allocated block, the function
+ * returns 0 at @phys
+ * return value contains 0 (success) or error code
+ */
+int
+ext3_ext_search_left(struct inode *inode, struct ext3_ext_path *path,
+			unsigned long *logical, unsigned long *phys)
+{
+	struct ext3_extent_idx *ix;
+	struct ext3_extent *ex;
+	int depth;
+
+	BUG_ON(path == NULL);
+	depth = path->p_depth;
+	*phys = 0;
+
+	if (depth == 0 && path->p_ext == NULL)
+		return 0;
+
+	/* usually extent in the path covers blocks smaller
+	 * then *logical, but it can be that extent is the
+	 * first one in the file */
+
+	ex = path[depth].p_ext;
+	if (*logical < le32_to_cpu(ex->ee_block)) {
+		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
+		while (--depth >= 0) {
+			ix = path[depth].p_idx;
+			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
+		}
+		/* NOTE(review): target lies before the first extent, so no
+		 * block is allocated to the left; *phys stays 0 (set above) */
+		return 0;
+	}
+
+	BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
+
+	/* NOTE(review): report the last block covered by the extent in the
+	 * path, both its logical number and its on-disk physical block */
+	*logical = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1;
+	*phys = le32_to_cpu(ex->ee_start) + le16_to_cpu(ex->ee_len) - 1;
+	return 0;
+}
+EXPORT_SYMBOL(ext3_ext_search_left);
+
+/*
+ * search the 
closest allocated block to the right for *logical
+ * and returns it at @logical + it's physical address at @phys
+ * if *logical is the smallest allocated block, the function
+ * returns 0 at @phys
+ * return value contains 0 (success) or error code
+ */
+int
+ext3_ext_search_right(struct inode *inode, struct ext3_ext_path *path,
+			unsigned long *logical, unsigned long *phys)
+{
+	struct buffer_head *bh = NULL;
+	struct ext3_extent_header *eh;
+	struct ext3_extent_idx *ix;
+	struct ext3_extent *ex;
+	unsigned long block;
+	int depth;
+
+	BUG_ON(path == NULL);
+	depth = path->p_depth;
+	*phys = 0;
+
+	if (depth == 0 && path->p_ext == NULL)
+		return 0;
+
+	/* usually extent in the path covers blocks smaller
+	 * then *logical, but it can be that extent is the
+	 * first one in the file */
+
+	ex = path[depth].p_ext;
+	if (*logical < le32_to_cpu(ex->ee_block)) {
+		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
+		while (--depth >= 0) {
+			ix = path[depth].p_idx;
+			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
+		}
+		*logical = le32_to_cpu(ex->ee_block);
+		*phys = le32_to_cpu(ex->ee_start);
+		return 0;
+	}
+
+	BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
+
+	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
+		/* next allocated block in this leaf */
+		ex++;
+		*logical = le32_to_cpu(ex->ee_block);
+		*phys = le32_to_cpu(ex->ee_start);
+		return 0;
+	}
+
+	/* go up and search for index to the right */
+	while (--depth >= 0) {
+		ix = path[depth].p_idx;
+		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
+			break;
+	}
+
+	if (depth < 0) {
+		/* we've gone up to the root and
+		 * found no index to the right */
+		return 0;
+	}
+
+	/* we've found index to the right, let's
+	 * follow it and find the closest allocated
+	 * block to the right */
+	ix++;
+	block = le32_to_cpu(ix->ei_leaf);
+	/* NOTE(review): walk down the right subtree, always taking the
+	 * first index at each interior level, to reach its leftmost leaf */
+	while (++depth < path->p_depth) {
+		bh = sb_bread(inode->i_sb, block);
+		if (bh == NULL)
+			return -EIO;
+		eh = ext_block_hdr(bh);
+		if (ext3_ext_check_header(inode, eh, path->p_depth - depth)) {
+			brelse(bh);
+		
	return -EIO;
+		}
+		ix = EXT_FIRST_INDEX(eh);
+		block = le32_to_cpu(ix->ei_leaf);
+		brelse(bh);
+	}
+
+	bh = sb_bread(inode->i_sb, block);
+	if (bh == NULL)
+		return -EIO;
+	eh = ext_block_hdr(bh);
+	if (ext3_ext_check_header(inode, eh, 0)) {
+		brelse(bh);
+		return -EIO;
+	}
+	/* NOTE(review): the leftmost leaf's first extent is the closest
+	 * allocated block to the right of *logical */
+	ex = EXT_FIRST_EXTENT(eh);
+	*logical = le32_to_cpu(ex->ee_block);
+	*phys = le32_to_cpu(ex->ee_start);
+	brelse(bh);
+	return 0;
+
+}
+EXPORT_SYMBOL(ext3_ext_search_right);
+
+
+
+/*
+ * returns allocated block in subsequent extent or EXT_MAX_BLOCK
+ * NOTE: it consider block number from index entry as
+ * allocated block. thus, index entries have to be consistent
+ * with leafs
+ */
+static unsigned long
+ext3_ext_next_allocated_block(struct ext3_ext_path *path)
+{
+	int depth;
+
+	BUG_ON(path == NULL);
+	depth = path->p_depth;
+
+	if (depth == 0 && path->p_ext == NULL)
+		return EXT_MAX_BLOCK;
+
+	/* NOTE(review): from the leaf upwards, return the starting block of
+	 * the neighbouring extent/index at the nearest level that has one */
+	while (depth >= 0) {
+		if (depth == path->p_depth) {
+			/* leaf */
+			if (path[depth].p_ext !=
+					EXT_LAST_EXTENT(path[depth].p_hdr))
+			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
+		} else {
+			/* index */
+			if (path[depth].p_idx !=
+					EXT_LAST_INDEX(path[depth].p_hdr))
+			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
+		}
+		depth--;
+	}
+
+	return EXT_MAX_BLOCK;
+}
+
+/*
+ * returns first allocated block from next leaf or EXT_MAX_BLOCK
+ */
+static unsigned ext3_ext_next_leaf_block(struct inode *inode,
+                                               struct ext3_ext_path *path)
+{
+	int depth;
+
+	BUG_ON(path == NULL);
+	depth = path->p_depth;
+
+	/* zero-tree has no leaf blocks at all */
+	if (depth == 0)
+		return EXT_MAX_BLOCK;
+
+	/* go to index block */
+	depth--;
+
+	while (depth >= 0) {
+		if (path[depth].p_idx !=
+				EXT_LAST_INDEX(path[depth].p_hdr))
+		  return le32_to_cpu(path[depth].p_idx[1].ei_block);
+		depth--;
+	}
+
+	return EXT_MAX_BLOCK;
+}
+
+/*
+ * if leaf gets modified and modified extent is first in the leaf
+ * then we have to correct all indexes above
+ * TODO: do we need to correct tree in all cases?
+ */
+int 
ext3_ext_correct_indexes(handle_t *handle, struct inode *inode,
+				struct ext3_ext_path *path)
+{
+	struct ext3_extent_header *eh;
+	int depth = ext_depth(inode);
+	struct ext3_extent *ex;
+	__le32 border;
+	int k, err = 0;
+
+	eh = path[depth].p_hdr;
+	ex = path[depth].p_ext;
+	BUG_ON(ex == NULL);
+	BUG_ON(eh == NULL);
+
+	if (depth == 0) {
+		/* there is no tree at all */
+		return 0;
+	}
+
+	if (ex != EXT_FIRST_EXTENT(eh)) {
+		/* we correct tree if first leaf got modified only */
+		return 0;
+	}
+
+	/*
+	 * TODO: we need correction if border is smaller then current one
+	 */
+	k = depth - 1;
+	border = path[depth].p_ext->ee_block;
+	if ((err = ext3_ext_get_access(handle, inode, path + k)))
+		return err;
+	path[k].p_idx->ei_block = border;
+	if ((err = ext3_ext_dirty(handle, inode, path + k)))
+		return err;
+
+	while (k--) {
+		/* change all left-side indexes */
+		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
+			break;
+		if ((err = ext3_ext_get_access(handle, inode, path + k)))
+			break;
+		path[k].p_idx->ei_block = border;
+		if ((err = ext3_ext_dirty(handle, inode, path + k)))
+			break;
+	}
+
+	return err;
+}
+
+/*
+ * NOTE(review): two extents are mergeable when ex2 follows ex1 both
+ * logically (ee_block) and physically (ee_start); under AGRESSIVE_TEST
+ * longer extents never merge — presumably to force splits in testing,
+ * confirm against the rest of the patch.
+ */
+static int inline
+ext3_can_extents_be_merged(struct inode *inode, struct ext3_extent *ex1,
+				struct ext3_extent *ex2)
+{
+	/* FIXME: 48bit support */
+        if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len)
+	    != le32_to_cpu(ex2->ee_block))
+		return 0;
+
+#ifdef AGRESSIVE_TEST
+	if (le16_to_cpu(ex1->ee_len) >= 4)
+		return 0;
+#endif
+
+        if (le32_to_cpu(ex1->ee_start) + le16_to_cpu(ex1->ee_len)
+			== le32_to_cpu(ex2->ee_start))
+		return 1;
+	return 0;
+}
+
+/*
+ * this routine tries to merge requsted extent into the existing
+ * extent or inserts requested extent as new one into the tree,
+ * creating new leaf in no-space case
+ */
+int ext3_ext_insert_extent(handle_t *handle, struct inode *inode,
+				struct ext3_ext_path *path,
+				struct ext3_extent *newext)
+{
+	struct ext3_extent_header * eh;
+	struct ext3_extent *ex, *fex;
+	struct ext3_extent *nearex; /* nearest extent */
+	
struct ext3_ext_path *npath = NULL;
+	int depth, len, err, next;
+
+	BUG_ON(newext->ee_len == 0);
+	depth = ext_depth(inode);
+	ex = path[depth].p_ext;
+	BUG_ON(path[depth].p_hdr == NULL);
+
+	/* try to insert block into found extent and return */
+	if (ex && ext3_can_extents_be_merged(inode, ex, newext)) {
+		ext_debug(inode, "append %d block to %d:%d (from %d)\n",
+				le16_to_cpu(newext->ee_len),
+				le32_to_cpu(ex->ee_block),
+				le16_to_cpu(ex->ee_len),
+				le32_to_cpu(ex->ee_start));
+		if ((err = ext3_ext_get_access(handle, inode, path + depth)))
+			return err;
+		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
+					 + le16_to_cpu(newext->ee_len));
+		eh = path[depth].p_hdr;
+		nearex = ex;
+		goto merge;
+	}
+
+/* NOTE(review): 'repeat' is re-entered after switching to the next leaf
+ * (or after a split grows the tree), so depth/eh are recomputed each pass */
+repeat:
+	depth = ext_depth(inode);
+	eh = path[depth].p_hdr;
+	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
+		goto has_space;
+
+	/* probably next leaf has space for us? */
+	fex = EXT_LAST_EXTENT(eh);
+	next = ext3_ext_next_leaf_block(inode, path);
+	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
+	    && next != EXT_MAX_BLOCK) {
+		ext_debug(inode, "next leaf block - %d\n", next);
+		BUG_ON(npath != NULL);
+		npath = ext3_ext_find_extent(inode, next, NULL);
+		if (IS_ERR(npath))
+			return PTR_ERR(npath);
+		BUG_ON(npath->p_depth != path->p_depth);
+		eh = npath[depth].p_hdr;
+		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
+			ext_debug(inode, "next leaf isnt full(%d)\n",
+				  le16_to_cpu(eh->eh_entries));
+			path = npath;
+			goto repeat;
+		}
+		ext_debug(inode, "next leaf has no free space(%d,%d)\n",
+			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+	}
+
+	/*
+	 * there is no free space in found leaf
+	 * we're gonna add new leaf in the tree
+	 */
+	err = ext3_ext_create_new_leaf(handle, inode, path, newext);
+	if (err)
+		goto cleanup;
+	depth = ext_depth(inode);
+	eh = path[depth].p_hdr;
+
+has_space:
+	nearex = path[depth].p_ext;
+
+	if ((err = ext3_ext_get_access(handle, inode, path + depth)))
+		goto cleanup;
+
+	if (!nearex) {
+		/* there is no extent in this leaf, create first one */
+		ext_debug(inode, "first extent in the leaf: %d:%d:%d\n",
+			        le32_to_cpu(newext->ee_block),
+			        le32_to_cpu(newext->ee_start),
+			        le16_to_cpu(newext->ee_len));
+		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
+	} else if (le32_to_cpu(newext->ee_block)
+		           > le32_to_cpu(nearex->ee_block)) {
+		/* BUG_ON(newext->ee_block == nearex->ee_block); */
+		if (nearex != EXT_LAST_EXTENT(eh)) {
+			/* NOTE(review): shift the extents after nearex one
+			 * slot right to open a gap for the new extent */
+			len = EXT_MAX_EXTENT(eh) - nearex;
+			len = (len - 1) * sizeof(struct ext3_extent);
+			len = len < 0 ? 0 : len;
+			ext_debug(inode, "insert %d:%d:%d after: nearest 0x%p, "
+					"move %d from 0x%p to 0x%p\n",
+				        le32_to_cpu(newext->ee_block),
+				        le32_to_cpu(newext->ee_start),
+				        le16_to_cpu(newext->ee_len),
+					nearex, len, nearex + 1, nearex + 2);
+			memmove(nearex + 2, nearex + 1, len);
+		}
+		path[depth].p_ext = nearex + 1;
+	} else {
+		BUG_ON(newext->ee_block == nearex->ee_block);
+		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
+		len = len < 0 ? 0 : len;
+		ext_debug(inode, "insert %d:%d:%d before: nearest 0x%p, "
+				"move %d from 0x%p to 0x%p\n",
+				le32_to_cpu(newext->ee_block),
+				le32_to_cpu(newext->ee_start),
+				le16_to_cpu(newext->ee_len),
+				nearex, len, nearex + 1, nearex + 2);
+		memmove(nearex + 1, nearex, len);
+		path[depth].p_ext = nearex;
+	}
+
+	eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
+	nearex = path[depth].p_ext;
+	nearex->ee_block = newext->ee_block;
+	nearex->ee_start = newext->ee_start;
+	nearex->ee_len = newext->ee_len;
+	/* FIXME: support for large fs */
+	nearex->ee_start_hi = 0;
+
+merge:
+	/* try to merge extents to the right */
+	while (nearex < EXT_LAST_EXTENT(eh)) {
+		if (!ext3_can_extents_be_merged(inode, nearex, nearex + 1))
+			break;
+		/* merge with next extent! */
+		nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
+					     + le16_to_cpu(nearex[1].ee_len));
+		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
+			len = (EXT_LAST_EXTENT(eh) - nearex - 1)
+					* sizeof(struct ext3_extent);
+			memmove(nearex + 1, nearex + 2, len);
+		}
+		/* NOTE(review): one entry disappears per merge; the count can
+		 * never reach zero since the merged extent itself remains */
+		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
+		BUG_ON(eh->eh_entries == 0);
+	}
+
+	/* try to merge extents to the left */
+
+	/* time to correct all indexes above */
+	err = ext3_ext_correct_indexes(handle, inode, path);
+	if (err)
+		goto cleanup;
+
+	err = ext3_ext_dirty(handle, inode, path + depth);
+
+cleanup:

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -