inode.c

Page 1 of 3
/*
 *  linux/fs/inode.c
 *
 *  (C) 1997 Linus Torvalds
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/prefetch.h>
#include <linux/locks.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

/* #define INODE_PARANOIA 1 */
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

static unsigned int i_hash_mask;
static unsigned int i_hash_shift;

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

static LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head *inode_hashtable;
static LIST_HEAD(anon_hash_chain); /* for inodes with NULL i_sb */

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
static spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static kmem_cache_t * inode_cachep;

#define alloc_inode() \
	 ((struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL))

static void destroy_inode(struct inode *inode)
{
	if (inode_has_buffers(inode))
		BUG();
	kmem_cache_free(inode_cachep, (inode));
}

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct inode * inode = (struct inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
	{
		memset(inode, 0, sizeof(*inode));
		init_waitqueue_head(&inode->i_wait);
		INIT_LIST_HEAD(&inode->i_hash);
		INIT_LIST_HEAD(&inode->i_data.clean_pages);
		INIT_LIST_HEAD(&inode->i_data.dirty_pages);
		INIT_LIST_HEAD(&inode->i_data.locked_pages);
		INIT_LIST_HEAD(&inode->i_dentry);
		INIT_LIST_HEAD(&inode->i_dirty_buffers);
		INIT_LIST_HEAD(&inode->i_dirty_data_buffers);
		INIT_LIST_HEAD(&inode->i_devices);
		sema_init(&inode->i_sem, 1);
		sema_init(&inode->i_zombie, 1);
		spin_lock_init(&inode->i_data.i_shared_lock);
	}
}

/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */

/**
 *	__mark_inode_dirty - internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block * sb = inode->i_sb;

	if (!sb)
		return;

	/* Don't do this for I_DIRTY_PAGES - that doesn't actually dirty the inode itself */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op && sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		inode->i_state |= flags;
		/* Only add valid (ie hashed) inodes to the dirty list */
		if (!(inode->i_state & I_LOCK) && !list_empty(&inode->i_hash)) {
			list_del(&inode->i_list);
			list_add(&inode->i_list, &sb->s_dirty);
		}
	}
	spin_unlock(&inode_lock);
}

static void __wait_on_inode(struct inode * inode)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&inode->i_wait, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (inode->i_state & I_LOCK) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&inode->i_wait, &wait);
	current->state = TASK_RUNNING;
}

static inline void wait_on_inode(struct inode *inode)
{
	if (inode->i_state & I_LOCK)
		__wait_on_inode(inode);
}

static inline void write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		inode->i_sb->s_op->write_inode(inode, sync);
}

static inline void __iget(struct inode * inode)
{
	if (atomic_read(&inode->i_count)) {
		atomic_inc(&inode->i_count);
		return;
	}
	atomic_inc(&inode->i_count);
	if (!(inode->i_state & (I_DIRTY|I_LOCK))) {
		list_del(&inode->i_list);
		list_add(&inode->i_list, &inode_in_use);
	}
	inodes_stat.nr_unused--;
}

static inline void __sync_one(struct inode *inode, int sync)
{
	unsigned dirty;

	list_del(&inode->i_list);
	list_add(&inode->i_list, &inode->i_sb->s_locked_inodes);

	if (inode->i_state & I_LOCK)
		BUG();

	/* Set I_LOCK, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_LOCK;
	inode->i_state &= ~I_DIRTY;
	spin_unlock(&inode_lock);

	filemap_fdatasync(inode->i_mapping);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC))
		write_inode(inode, sync);

	filemap_fdatawait(inode->i_mapping);

	spin_lock(&inode_lock);
	inode->i_state &= ~I_LOCK;
	if (!(inode->i_state & I_FREEING)) {
		struct list_head *to;
		if (inode->i_state & I_DIRTY)
			to = &inode->i_sb->s_dirty;
		else if (atomic_read(&inode->i_count))
			to = &inode_in_use;
		else
			to = &inode_unused;
		list_del(&inode->i_list);
		list_add(&inode->i_list, to);
	}
	wake_up(&inode->i_wait);
}

static inline void sync_one(struct inode *inode, int sync)
{
	if (inode->i_state & I_LOCK) {
		__iget(inode);
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		iput(inode);
		spin_lock(&inode_lock);
	} else {
		__sync_one(inode, sync);
	}
}

static inline void sync_list(struct list_head *head)
{
	struct list_head * tmp;

	while ((tmp = head->prev) != head)
		__sync_one(list_entry(tmp, struct inode, i_list), 0);
}

static inline void wait_on_locked(struct list_head *head)
{
	struct list_head * tmp;

	while ((tmp = head->prev) != head) {
		struct inode *inode = list_entry(tmp, struct inode, i_list);
		__iget(inode);
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		iput(inode);
		spin_lock(&inode_lock);
	}
}

static inline int try_to_sync_unused_list(struct list_head *head, int nr_inodes)
{
	struct list_head *tmp = head;
	struct inode *inode;

	while (nr_inodes && (tmp = tmp->prev) != head) {
		inode = list_entry(tmp, struct inode, i_list);

		if (!atomic_read(&inode->i_count)) {
			__sync_one(inode, 0);
			nr_inodes--;

			/*
			 * __sync_one moved the inode to another list,
			 * so we have to start looking from the list head.
			 */
			tmp = head;
		}
	}

	return nr_inodes;
}

void sync_inodes_sb(struct super_block *sb)
{
	spin_lock(&inode_lock);
	while (!list_empty(&sb->s_dirty) || !list_empty(&sb->s_locked_inodes)) {
		sync_list(&sb->s_dirty);
		wait_on_locked(&sb->s_locked_inodes);
	}
	spin_unlock(&inode_lock);
}

/*
 * Note:
 * We don't need to grab a reference to the superblock here. If it has a
 * non-empty ->s_dirty, it hadn't been killed yet, and kill_super() won't
 * proceed past sync_inodes_sb() until both ->s_dirty and ->s_locked_inodes
 * are empty. Since __sync_one() regains inode_lock before it finally moves
 * the inode from the superblock lists, we are OK.
 */
void sync_unlocked_inodes(void)
{
	struct super_block * sb;

	spin_lock(&inode_lock);
	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.next);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (!list_empty(&sb->s_dirty)) {
			spin_unlock(&sb_lock);
			sync_list(&sb->s_dirty);
			spin_lock(&sb_lock);
		}
	}
	spin_unlock(&sb_lock);
	spin_unlock(&inode_lock);
}

/*
 * Find a superblock with inodes that need to be synced
 */
static struct super_block *get_super_to_sync(void)
{
	struct list_head *p;
restart:
	spin_lock(&inode_lock);
	spin_lock(&sb_lock);
	list_for_each(p, &super_blocks) {
		struct super_block *s = list_entry(p, struct super_block, s_list);
		if (list_empty(&s->s_dirty) && list_empty(&s->s_locked_inodes))
			continue;
		s->s_count++;
		spin_unlock(&sb_lock);
		spin_unlock(&inode_lock);
		down_read(&s->s_umount);
		if (!s->s_root) {
			drop_super(s);
			goto restart;
		}
		return s;
	}
	spin_unlock(&sb_lock);
	spin_unlock(&inode_lock);
	return NULL;
}

/**
 *	sync_inodes
 *	@dev: device to sync the inodes from.
 *
 *	sync_inodes goes through the super block's dirty list,
 *	writes the inodes out, and puts them back on the normal list.
 */
void sync_inodes(kdev_t dev)
{
	struct super_block * s;

	/*
	 * Search the super_blocks list for the device(s) to sync.
	 */
	if (dev) {
		if ((s = get_super(dev)) != NULL) {
			sync_inodes_sb(s);
			drop_super(s);
		}
	} else {
		while ((s = get_super_to_sync()) != NULL) {
			sync_inodes_sb(s);
			drop_super(s);
		}
	}
}

static void try_to_sync_unused_inodes(void * arg)
{
	struct super_block * sb;
	int nr_inodes = inodes_stat.nr_unused;

	spin_lock(&inode_lock);
	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.next);
	for (; nr_inodes && sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (list_empty(&sb->s_dirty))
			continue;
		spin_unlock(&sb_lock);
		nr_inodes = try_to_sync_unused_list(&sb->s_dirty, nr_inodes);
		spin_lock(&sb_lock);
	}
	spin_unlock(&sb_lock);
	spin_unlock(&inode_lock);
}
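alloc_inode() and destroy_inode() sit on top of a slab cache, and init_once() is registered as that cache's constructor: the slab runs it when it allocates fresh objects for the cache, not on every alloc_inode() call, which is why only fields that stay valid across an inode's free-and-reuse cycle (list heads, semaphores, the wait queue) are set up there. The cache itself is created further down the file, past this page; the minimal sketch below shows the 2.4-era kmem_cache_create() call, with the wrapper function name and the exact flags as assumptions:

/* Sketch only: kmem_cache_create(name, size, offset, flags, ctor, dtor)
 * is the 2.4-era signature. Installing init_once() as the constructor
 * means every object handed out by alloc_inode() is pre-initialized. */
static void inode_cache_setup_sketch(void)
{
	inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
					 0, SLAB_HWCACHE_ALIGN,
					 init_once, NULL);
	if (!inode_cachep)
		panic("cannot create inode slab cache");
}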

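The kernel-doc above __mark_inode_dirty() tells callers to use mark_inode_dirty() or mark_inode_dirty_sync(). Those wrappers live outside this file, in include/linux/fs.h in 2.4-era trees; the sketch below shows how they map onto the flags argument and is illustrative rather than a verbatim copy of any particular release:

/* I_DIRTY is the union of all three dirty bits (I_DIRTY_SYNC |
 * I_DIRTY_DATASYNC | I_DIRTY_PAGES), so the plain wrapper dirties the
 * inode and its data, while the _sync variant sets only I_DIRTY_SYNC. */
static inline void mark_inode_dirty(struct inode *inode)
{
	__mark_inode_dirty(inode, I_DIRTY);
}

static inline void mark_inode_dirty_sync(struct inode *inode)
{
	__mark_inode_dirty(inode, I_DIRTY_SYNC);
}

Either wrapper is cheap to call repeatedly: __mark_inode_dirty() tests (inode->i_state & flags) == flags before touching inode_lock, so re-dirtying an already-dirty inode never takes the lock.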
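Every list manipulation above (list_del(), list_add(), list_entry()) operates on a struct list_head embedded in the inode itself. That is how a single inode sits on two independent lists at once: i_hash links it into a hash chain while i_list links it into exactly one of the in_use, unused, dirty, or locked lists. Here is a self-contained userspace sketch of the pattern, with a hypothetical struct item standing in for struct inode:

#include <stddef.h>
#include <stdio.h>

/* Link node embedded in the payload, like the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

/* Recover the enclosing struct from a pointer to the embedded member,
 * as the kernel's list_entry() macro does. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical payload carrying two link nodes, so one object can be on
 * a lookup list and a state list simultaneously. */
struct item {
	int id;
	struct list_head hash;	/* like inode->i_hash */
	struct list_head list;	/* like inode->i_list */
};

int main(void)
{
	struct item it = { .id = 42 };
	struct list_head *member = &it.list;

	/* Pointer arithmetic gets us from the member back to its owner. */
	struct item *owner = list_entry(member, struct item, list);
	printf("id = %d\n", owner->id);	/* prints "id = 42" */
	return 0;
}

Because the links live inside the object, moving an inode between type lists under inode_lock is a constant-time list_del() plus list_add() with no allocation or lookup, which is what keeps the re-queueing at the end of __sync_one() cheap.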