
📄 ext3-extents-2.6.18-vanilla.patch

📁 A very classic distributed system
💻 PATCH
📖 Page 1 of 5
+	if (npath) {
+		ext3_ext_drop_refs(npath);
+		kfree(npath);
+	}
+	ext3_ext_tree_changed(inode);
+	ext3_ext_invalidate_cache(inode);
+	return err;
+}
+
+int ext3_ext_walk_space(struct inode *inode, unsigned long block,
+			unsigned long num, ext_prepare_callback func,
+			void *cbdata)
+{
+	struct ext3_ext_path *path = NULL;
+	struct ext3_ext_cache cbex;
+	struct ext3_extent *ex;
+	unsigned long next, start = 0, end = 0;
+	unsigned long last = block + num;
+	int depth, exists, err = 0;
+
+	BUG_ON(func == NULL);
+	BUG_ON(inode == NULL);
+
+	while (block < last && block != EXT_MAX_BLOCK) {
+		num = last - block;
+		/* find extent for this block */
+		path = ext3_ext_find_extent(inode, block, path);
+		if (IS_ERR(path)) {
+			err = PTR_ERR(path);
+			path = NULL;
+			break;
+		}
+
+		depth = ext_depth(inode);
+		BUG_ON(path[depth].p_hdr == NULL);
+		ex = path[depth].p_ext;
+		next = ext3_ext_next_allocated_block(path);
+
+		exists = 0;
+		if (!ex) {
+			/* there is no extent yet, so try to allocate
+			 * all requested space */
+			start = block;
+			end = block + num;
+		} else if (le32_to_cpu(ex->ee_block) > block) {
+			/* need to allocate space before found extent */
+			start = block;
+			end = le32_to_cpu(ex->ee_block);
+			if (block + num < end)
+				end = block + num;
+		} else if (block >=
+			     le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
+			/* need to allocate space after found extent */
+			start = block;
+			end = block + num;
+			if (end >= next)
+				end = next;
+		} else if (block >= le32_to_cpu(ex->ee_block)) {
+			/*
+			 * some part of requested space is covered
+			 * by found extent
+			 */
+			start = block;
+			end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
+			if (block + num < end)
+				end = block + num;
+			exists = 1;
+		} else {
+			BUG();
+		}
+		BUG_ON(end <= start);
+
+		if (!exists) {
+			cbex.ec_block = start;
+			cbex.ec_len = end - start;
+			cbex.ec_start = 0;
+			cbex.ec_type = EXT3_EXT_CACHE_GAP;
+		} else {
+		        cbex.ec_block = le32_to_cpu(ex->ee_block);
+		        cbex.ec_len = le16_to_cpu(ex->ee_len);
+		        cbex.ec_start = le32_to_cpu(ex->ee_start);
+			cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
+		}
+
+		BUG_ON(cbex.ec_len == 0);
+		err = func(inode, path, &cbex, cbdata);
+		ext3_ext_drop_refs(path);
+
+		if (err < 0)
+			break;
+		if (err == EXT_REPEAT)
+			continue;
+		else if (err == EXT_BREAK) {
+			err = 0;
+			break;
+		}
+
+		if (ext_depth(inode) != depth) {
+			/* depth was changed. we have to realloc path */
+			kfree(path);
+			path = NULL;
+		}
+
+		block = cbex.ec_block + cbex.ec_len;
+	}
+
+	if (path) {
+		ext3_ext_drop_refs(path);
+		kfree(path);
+	}
+
+	return err;
+}
+
+static inline void
+ext3_ext_put_in_cache(struct inode *inode, __u32 block,
+			__u32 len, __u32 start, int type)
+{
+	struct ext3_ext_cache *cex;
+	BUG_ON(len == 0);
+	cex = &EXT3_I(inode)->i_cached_extent;
+	cex->ec_type = type;
+	cex->ec_block = block;
+	cex->ec_len = len;
+	cex->ec_start = start;
+}
+
+/*
+ * this routine calculate boundaries of the gap requested block fits into
+ * and cache this gap
+ */
+static inline void
+ext3_ext_put_gap_in_cache(struct inode *inode, struct ext3_ext_path *path,
+				unsigned long block)
+{
+	int depth = ext_depth(inode);
+	unsigned long lblock, len;
+	struct ext3_extent *ex;
+
+	ex = path[depth].p_ext;
+	if (ex == NULL) {
+		/* there is no extent yet, so gap is [0;-] */
+		lblock = 0;
+		len = EXT_MAX_BLOCK;
+		ext_debug(inode, "cache gap(whole file):");
+	} else if (block < le32_to_cpu(ex->ee_block)) {
+		lblock = block;
+		len = le32_to_cpu(ex->ee_block) - block;
+		ext_debug(inode, "cache gap(before): %lu [%lu:%lu]",
+				(unsigned long) block,
+			        (unsigned long) le32_to_cpu(ex->ee_block),
+			        (unsigned long) le16_to_cpu(ex->ee_len));
+	} else if (block >= le32_to_cpu(ex->ee_block)
+		            + le16_to_cpu(ex->ee_len)) {
+	        lblock = le32_to_cpu(ex->ee_block)
+		         + le16_to_cpu(ex->ee_len);
+		len = ext3_ext_next_allocated_block(path);
+		ext_debug(inode, "cache gap(after): [%lu:%lu] %lu",
+			        (unsigned long) le32_to_cpu(ex->ee_block),
+			        (unsigned long) le16_to_cpu(ex->ee_len),
+				(unsigned long) block);
+		BUG_ON(len == lblock);
+		len = len - lblock;
+	} else {
+		lblock = len = 0;
+		BUG();
+	}
+
+	ext_debug(inode, " -> %lu:%lu\n", (unsigned long) lblock, len);
+	ext3_ext_put_in_cache(inode, lblock, len, 0, EXT3_EXT_CACHE_GAP);
+}
+
+static inline int
+ext3_ext_in_cache(struct inode *inode, unsigned long block,
+			struct ext3_extent *ex)
+{
+	struct ext3_ext_cache *cex;
+
+	cex = &EXT3_I(inode)->i_cached_extent;
+
+	/* has cache valid data? */
+	if (cex->ec_type == EXT3_EXT_CACHE_NO)
+		return EXT3_EXT_CACHE_NO;
+
+	BUG_ON(cex->ec_type != EXT3_EXT_CACHE_GAP &&
+			cex->ec_type != EXT3_EXT_CACHE_EXTENT);
+	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
+	        ex->ee_block = cpu_to_le32(cex->ec_block);
+	        ex->ee_start = cpu_to_le32(cex->ec_start);
+		ex->ee_start_hi = 0;
+	        ex->ee_len = cpu_to_le16(cex->ec_len);
+		ext_debug(inode, "%lu cached by %lu:%lu:%lu\n",
+				(unsigned long) block,
+				(unsigned long) cex->ec_block,
+				(unsigned long) cex->ec_len,
+				(unsigned long) cex->ec_start);
+		return cex->ec_type;
+	}
+
+	/* not in cache */
+	return EXT3_EXT_CACHE_NO;
+}
+
+/*
+ * routine removes index from the index block
+ * it's used in truncate case only. thus all requests are for
+ * last index in the block only
+ */
+int ext3_ext_rm_idx(handle_t *handle, struct inode *inode,
+			struct ext3_ext_path *path)
+{
+	struct buffer_head *bh;
+	int err;
+	unsigned long leaf;
+
+	/* free index block */
+	path--;
+	leaf = le32_to_cpu(path->p_idx->ei_leaf);
+	BUG_ON(path->p_hdr->eh_entries == 0);
+	if ((err = ext3_ext_get_access(handle, inode, path)))
+		return err;
+	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
+	if ((err = ext3_ext_dirty(handle, inode, path)))
+		return err;
+	ext_debug(inode, "index is empty, remove it, free block %lu\n", leaf);
+	bh = sb_find_get_block(inode->i_sb, leaf);
+	ext3_forget(handle, 1, inode, bh, leaf);
+	ext3_free_blocks(handle, inode, leaf, 1);
+	return err;
+}
+
+/*
+ * This routine returns max. credits extent tree can consume.
+ * It should be OK for low-performance paths like ->writepage()
+ * To allow many writing process to fit a single transaction,
+ * caller should calculate credits under truncate_mutex and
+ * pass actual path.
+ */
+int inline ext3_ext_calc_credits_for_insert(struct inode *inode,
+						struct ext3_ext_path *path)
+{
+	int depth, needed;
+
+	if (path) {
+		/* probably there is space in leaf? */
+		depth = ext_depth(inode);
+		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
+				< le16_to_cpu(path[depth].p_hdr->eh_max))
+			return 1;
+	}
+
+	/*
+	 * given 32bit logical block (4294967296 blocks), max. tree
+	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
+	 * let's also add one more level for imbalance.
+	 */
+	depth = 5;
+
+	/* allocation of new data block(s) */
+	needed = 2;
+
+	/*
+	 * tree can be full, so it'd need to grow in depth:
+	 * we need one credit to modify old root, credits for
+	 * new root will be added in split accounting
+	 */
+	needed += 1;
+
+	/*
+	 * Index split can happen, we'd need:
+	 *    allocate intermediate indexes (bitmap + group)
+	 *  + change two blocks at each level, but root (already included)
+	 */
+	needed += (depth * 2) + (depth * 2);
+
+	/* any allocation modifies superblock */
+	needed += 1;
+
+	return needed;
+}
+
+static int ext3_remove_blocks(handle_t *handle, struct inode *inode,
+				struct ext3_extent *ex,
+				unsigned long from, unsigned long to)
+{
+	struct buffer_head *bh;
+	int i;
+
+#ifdef EXTENTS_STATS
+	{
+		struct ext3_sb_info *sbi = EXT3_SB(inode->i_sb);
+		unsigned short ee_len =  le16_to_cpu(ex->ee_len);
+		spin_lock(&sbi->s_ext_stats_lock);
+		sbi->s_ext_blocks += ee_len;
+		sbi->s_ext_extents++;
+		if (ee_len < sbi->s_ext_min)
+			sbi->s_ext_min = ee_len;
+		if (ee_len > sbi->s_ext_max)
+			sbi->s_ext_max = ee_len;
+		if (ext_depth(inode) > sbi->s_depth_max)
+			sbi->s_depth_max = ext_depth(inode);
+		spin_unlock(&sbi->s_ext_stats_lock);
+	}
+#endif
+	if (from >= le32_to_cpu(ex->ee_block)
+	    && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+		/* tail removal */
+		unsigned long num, start;
+		num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
+		start = le32_to_cpu(ex->ee_start) + le16_to_cpu(ex->ee_len) - num;
+		ext_debug(inode, "free last %lu blocks starting %lu\n", num, start);
+		for (i = 0; i < num; i++) {
+			bh = sb_find_get_block(inode->i_sb, start + i);
+			ext3_forget(handle, 0, inode, bh, start + i);
+		}
+		ext3_free_blocks(handle, inode, start, num);
+	} else if (from == le32_to_cpu(ex->ee_block)
+		   && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+		printk("strange request: removal %lu-%lu from %u:%u\n",
+		       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
+	} else {
+		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
+		       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
+	}
+	return 0;
+}
+
+static int
+ext3_ext_rm_leaf(handle_t *handle, struct inode *inode,
+		struct ext3_ext_path *path, unsigned long start)
+{
+	int err = 0, correct_index = 0;
+	int depth = ext_depth(inode), credits;
+	struct ext3_extent_header *eh;
+	unsigned a, b, block, num;
+	unsigned long ex_ee_block;
+	unsigned short ex_ee_len;
+	struct ext3_extent *ex;
+
+	/* the header must be checked already in ext3_ext_remove_space() */
+	ext_debug(inode, "truncate since %lu in leaf\n", start);
+	if (!path[depth].p_hdr)
+		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
+	eh = path[depth].p_hdr;
+	BUG_ON(eh == NULL);
+
+	/* find where to start removing */
+	ex = EXT_LAST_EXTENT(eh);
+
+	ex_ee_block = le32_to_cpu(ex->ee_block);
+	ex_ee_len = le16_to_cpu(ex->ee_len);
+
+	while (ex >= EXT_FIRST_EXTENT(eh) &&
+			ex_ee_block + ex_ee_len > start) {
+		ext_debug(inode, "remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
+		path[depth].p_ext = ex;
+
+		a = ex_ee_block > start ? ex_ee_block : start;
+		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
+			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
+
+		ext_debug(inode, "  border %u:%u\n", a, b);
+
+		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
+			block = 0;
+			num = 0;
+			BUG();
+		} else if (a != ex_ee_block) {
+			/* remove tail of the extent */
+			block = ex_ee_block;
+			num = a - block;
+		} else if (b != ex_ee_block + ex_ee_len - 1) {
+			/* remove head of the extent */
+			block = a;
+			num = b - a;
+			/* there is no "make a hole" API yet */
+			BUG();
+		} else {
+			/* remove whole extent: excellent! */
+			block = ex_ee_block;
+			num = 0;
+			BUG_ON(a != ex_ee_block);
+			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
+		}
+
+		/* at present, extent can't cross block group */
+		/* leaf + bitmap + group desc + sb + inode */
+		credits = 5;
+		if (ex == EXT_FIRST_EXTENT(eh)) {
+			correct_index = 1;
+			credits += (ext_depth(inode)) + 1;
+		}
+#ifdef CONFIG_QUOTA
+		credits += 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
+#endif
+
+		handle = ext3_ext_journal_restart(handle, credits);
+		if (IS_ERR(handle)) {
+			err = PTR_ERR(handle);
+			goto out;
+		}
+
+		err = ext3_ext_get_access(handle, inode, path + depth);
+		if (err)
+			goto out;
+
+		err = ext3_remove_blocks(handle, inode, ex, a, b);
+		if (err)
+			goto out;
+
+		if (num == 0) {
+			/* this extent is removed entirely mark slot unused */
+			ex->ee_start = ex->ee_start_hi = 0;
+			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
+		}
+
+		ex->ee_block = cpu_to_le32(block);
+		ex->ee_len = cpu_to_le16(num);
+
+		err = ext3_ext_dirty(handle, inode, path + depth);
+		if (err)
+			goto out;
+
+		ext_debug(inode, "new extent: %u:%u:%u\n", block, num,
+				le32_to_cpu(ex->ee_start));
+		ex--;
+		ex_ee_block = le32_to_cpu(ex->ee_block);
+		ex_ee_len = le16_to_cpu(ex->ee_len);
+	}
+
+	if (correct_index && eh->eh_entries)
+		err = ext3_ext_correct_indexes(handle, inode, path);
+
+	/* if this leaf is free, then we should
+	 * remove it from index block above */
+	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
+		err = ext3_ext_rm_idx(handle, inode, path + depth);
+
+out:
+	return err;
+}
+
+/*
+ * returns 1 if current index have to be freed (even partial)
+ */
+static int inline
+ext3_ext_more_to_rm(struct ext3_ext_path *path)
+{
+	BUG_ON(path->p_idx == NULL);
+
+	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
+		return 0;
+
+	/*
+	 * if truncate on deeper level happened it it wasn't partial
+	 * so we have to consider current index for truncation
+	 */
+	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
+		return 0;
+	return 1;
+}
+
+int ext3_ext_remove_space(struct inode *inode, unsigned long start)
+{
+	struct super_block *sb = inode->i_sb;
+	int depth = ext_depth(inode);
+	struct ext3_ext_path *path;
+	handle_t *handle;
+	int i = 0, err = 0;
+
+	ext_debug(inode, "truncate since %lu\n", start);
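
For readers following the code: ext3_ext_walk_space() drives a caller-supplied ext_prepare_callback once per extent or gap in the requested range, and the callback's return value steers the loop (a negative value aborts the walk, EXT_REPEAT re-runs the lookup for the same range, EXT_BREAK stops cleanly, any other non-negative value continues). A minimal, purely illustrative callback is sketched below; struct walk_stats and count_extents_cb are hypothetical names, and EXT_CONTINUE is assumed to be the "keep walking" value defined in the accompanying ext3_extents.h header, which is not part of this hunk.

/* Illustrative only -- not part of the patch.  Tallies how many blocks in
 * the walked range are backed by extents and how many fall into gaps. */
struct walk_stats {
	unsigned long mapped;	/* blocks covered by on-disk extents */
	unsigned long holes;	/* blocks that fall into gaps */
};

static int count_extents_cb(struct inode *inode, struct ext3_ext_path *path,
			    struct ext3_ext_cache *cbex, void *cbdata)
{
	struct walk_stats *ws = cbdata;

	if (cbex->ec_type == EXT3_EXT_CACHE_EXTENT)
		ws->mapped += cbex->ec_len;
	else	/* EXT3_EXT_CACHE_GAP */
		ws->holes += cbex->ec_len;

	return EXT_CONTINUE;	/* anything but <0, EXT_REPEAT or EXT_BREAK keeps walking */
}

A call such as ext3_ext_walk_space(inode, 0, EXT_MAX_BLOCK, count_extents_cb, &ws) would then visit every extent and every hole in the file.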
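The comment on ext3_ext_calc_credits_for_insert() hints at the intended usage: compute the credits (ideally under truncate_mutex, with the actual path) and hand them to the journal before inserting. With no path, the worst-case estimate works out to 2 + 1 + (5*2 + 5*2) + 1 = 24 credits. A hedged caller sketch, with error handling and the locking mentioned above elided:

/* Hypothetical caller -- not part of the patch.  Only shows the credit
 * estimate feeding ext3_journal_start(); locking and cleanup are elided. */
	handle_t *handle;
	int credits;

	credits = ext3_ext_calc_credits_for_insert(inode, path);
	handle = ext3_journal_start(inode, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... insert the extent under this handle, then ext3_journal_stop(handle); */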
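To make the [a,b] boundary logic of ext3_ext_rm_leaf() and the tail-removal branch of ext3_remove_blocks() concrete, here is a worked example with made-up numbers:

/* Worked example (made-up numbers).  Extent: ee_block = 100, ee_len = 50,
 * ee_start = 2000, i.e. logical blocks 100..149 mapped to physical 2000..2049.
 * Truncating since start = 120:
 *   a = max(100, 120) = 120, b = min(149, EXT_MAX_BLOCK) = 149
 *   -> "remove tail" case: block = 100, num = a - block = 20 blocks are kept
 * ext3_remove_blocks(handle, inode, ex, 120, 149) then frees
 *   num   = 100 + 50 - 120 = 30 blocks, starting at physical
 *   start = 2000 + 50 - 30 = 2020, i.e. blocks 2020..2049,
 * and the extent is rewritten in place as ee_block = 100, ee_len = 20. */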
