⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 xattr.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (!here->e_value_block && here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets: values stored below the
			   removed one shift up by the freed size. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. Values grow downward from the end
		   of the block; min_offs marks the lowest value offset. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	brelse(bh);
	/* Only free header if it was a private copy, not the in-place
	   buffer data. */
	if (!(bh && header == HDR(bh)))
		kfree(header);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Second half of ext2_xattr_set(): Update the file system.
 *
 * Takes the (possibly modified) xattr block in @header and attaches it
 * to @inode, sharing an identical cached block where possible, then
 * releases or un-references the previous block @old_bh.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;

	if (header) {
		/* Returned buffer, if any, comes back locked. */
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = -EDQUOT;
				if (DQUOT_ALLOC_BLOCK(inode, 1)) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				HDR(new_bh)->h_refcount = cpu_to_le32(1 +
					le32_to_cpu(HDR(new_bh)->h_refcount));
				ea_bdebug(new_bh, "refcount now=%d",
					le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(new_bh);
		} else {
			/* We need to allocate a new block.  Aim for the
			   inode's own block group. */
			int goal = le32_to_cpu(EXT2_SB(sb)->s_es->
						           s_first_data_block) +
				   EXT2_I(inode)->i_block_group *
				   EXT2_BLOCKS_PER_GROUP(sb);
			int block = ext2_new_block(inode, goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (!new_bh) {
				ext2_free_blocks(inode, block, 1);
				error = -EIO;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode)) {
		error = ext2_sync_inode (inode);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and cleanup the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh)
				DQUOT_FREE_BLOCK(inode, 1);
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		struct mb_cache_entry *ce;

		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		ce = mb_cache_entry_get(ext2_xattr_cache, old_bh->b_bdev,
					old_bh->b_blocknr);
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			/* Free the old block: we were its last user. */
			if (ce)
				mb_cache_entry_free(ce);
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			/* We let our caller release old_bh, so we
			 * need to duplicate the buffer before. */
			get_bh(old_bh);
			bforget(old_bh);
		} else {
			/* Decrement the refcount only. */
			HDR(old_bh)->h_refcount = cpu_to_le32(
				le32_to_cpu(HDR(old_bh)->h_refcount) - 1);
			if (ce)
				mb_cache_entry_release(ce);
			DQUOT_FREE_BLOCK(inode, 1);
			mark_buffer_dirty(old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct mb_cache_entry *ce;

	down_write(&EXT2_I(inode)->xattr_sem);
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	/* Sanity-check the on-disk header before touching refcounts. */
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, bh->b_blocknr);
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		/* Last user: drop the cache entry and free the block. */
		if (ce)
			mb_cache_entry_free(ce);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		/* Shared block: just drop our reference. */
		HDR(bh)->h_refcount = cpu_to_le32(
			le32_to_cpu(HDR(bh)->h_refcount) - 1);
		if (ce)
			mb_cache_entry_release(ce);
		ea_bdebug(bh, "refcount now=%d",
			le32_to_cpu(HDR(bh)->h_refcount));
		unlock_buffer(bh);
		mark_buffer_dirty(bh);
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
		DQUOT_FREE_BLOCK(inode, 1);
	}
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_put_super()
 *
 * This is called when a file system is unmounted.
 */
void
ext2_xattr_put_super(struct super_block *sb)
{
	mb_cache_shrink(sb->s_bdev);
}


/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	struct mb_cache_entry *ce;
	int error;

	ce = mb_cache_entry_alloc(ext2_xattr_cache);
	if (!ce)
		return -ENOMEM;
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
	if (error) {
		mb_cache_entry_free(ce);
		if (error == -EBUSY) {
			/* An entry for this block already exists; that is
			   not an error for our purposes. */
			ea_bdebug(bh, "already in cache (%d cache entries)",
				atomic_read(&ext2_xattr_cache->c_entry_count));
			error = 0;
		}
	} else {
		ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
			  atomic_read(&ext2_xattr_cache->c_entry_count));
		mb_cache_entry_release(ce);
	}
	return error;
}

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		/* Out-of-block values are not supported here. */
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext2_xattr_cache, 0,
				       inode->i_sb->s_bdev, hash);
	while (ce) {
		struct buffer_head *bh;

		if (IS_ERR(ce)) {
			/* -EAGAIN means the entry vanished; restart. */
			if (PTR_ERR(ce) == -EAGAIN)
				goto again;
			break;
		}

		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_block);
		} else {
			lock_buffer(bh);
			/* Refuse to share once the refcount limit is hit. */
			if (le32_to_cpu(HDR(bh)->h_refcount) >
				   EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_block,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_release(ce);
				/* Returned to caller still locked. */
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute: a rotate-xor over the
 * name bytes, then over the value words (for in-block values).
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n=0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 * The block hash is folded from all entry hashes; a zero entry hash
 * marks the block as unsharable.
 */
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

/* Set up the mbcache used to share identical xattr blocks. */
int __init
init_ext2_xattr(void)
{
	ext2_xattr_cache = mb_cache_create("ext2_xattr", NULL,
		sizeof(struct mb_cache_entry) +
		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
	if (!ext2_xattr_cache)
		return -ENOMEM;
	return 0;
}

void
exit_ext2_xattr(void)
{
	mb_cache_destroy(ext2_xattr_cache);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -