inode.c

OSKIT source code for component-based operating system development
C
Page 1 of 2
/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 *
 *  OSKit support added by the University of Utah, 1997
 */

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

#define INODE_PARANOIA 1
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define HASH_BITS	8
#define HASH_SIZE	(1UL << HASH_BITS)
#define HASH_MASK	(HASH_SIZE-1)

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, hashed if i_nlink > 0
 *  "dirty"  - valid inode, hashed if i_nlink > 0, dirty.
 *  "unused" - ready to be re-used. Not hashed.
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head inode_hashtable[HASH_SIZE];

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;

/*
 * Statistics gathering..
 */
struct {
	int nr_inodes;
	int nr_free_inodes;
	int dummy[5];
} inodes_stat = {0, 0,};

int max_inodes;

/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */
void __mark_inode_dirty(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		if (!(inode->i_state & I_DIRTY)) {
			inode->i_state |= I_DIRTY;
			/* Only add valid (ie hashed) inodes to the dirty list */
			if (!list_empty(&inode->i_hash)) {
				list_del(&inode->i_list);
				list_add(&inode->i_list, &sb->s_dirty);
			}
		}
		spin_unlock(&inode_lock);
	}
}

static void __wait_on_inode(struct inode * inode)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&inode->i_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (inode->i_state & I_LOCK) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&inode->i_wait, &wait);
	current->state = TASK_RUNNING;
}

static inline void wait_on_inode(struct inode *inode)
{
	if (inode->i_state & I_LOCK)
		__wait_on_inode(inode);
}
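/*
 * Illustrative sketch, not part of the original fs/inode.c: the comment
 * above __mark_inode_dirty() insists that an inode be hashed before it is
 * marked dirty, because an unhashed inode is never moved onto its super
 * block's s_dirty list.  The helper below (example_hash_then_dirty and its
 * "hash_bucket" parameter are hypothetical) only shows that ordering:
 * link into the hash table under inode_lock first, then mark dirty.
 */
static void example_hash_then_dirty(struct inode *inode,
				    struct list_head *hash_bucket)
{
	spin_lock(&inode_lock);
	list_add(&inode->i_hash, hash_bucket);	/* hash first... */
	spin_unlock(&inode_lock);

	__mark_inode_dirty(inode);		/* ...now it can reach sb->s_dirty */
}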
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode..
 */
static inline void init_once(struct inode * inode)
{
	memset(inode, 0, sizeof(*inode));
	init_waitqueue(&inode->i_wait);
	INIT_LIST_HEAD(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	sema_init(&inode->i_sem, 1);
	sema_init(&inode->i_atomic_write, 1);
}

static inline void write_inode(struct inode *inode)
{
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode)
		inode->i_sb->s_op->write_inode(inode);
}

static inline void sync_one(struct inode *inode)
{
	if (inode->i_state & I_LOCK) {
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		spin_lock(&inode_lock);
	} else {
		list_del(&inode->i_list);
		list_add(&inode->i_list, &inode_in_use);
		/* Set I_LOCK, reset I_DIRTY */
		inode->i_state ^= I_DIRTY | I_LOCK;
		spin_unlock(&inode_lock);

		write_inode(inode);

		spin_lock(&inode_lock);
		inode->i_state &= ~I_LOCK;
		wake_up(&inode->i_wait);
	}
}

static inline void sync_list(struct list_head *head)
{
	struct list_head * tmp;

	while ((tmp = head->prev) != head)
		sync_one(list_entry(tmp, struct inode, i_list));
}

/*
 * "sync_inodes()" goes through the super block's dirty list,
 * writes them out, and puts them back on the normal list.
 */
void sync_inodes(kdev_t dev)
{
	struct super_block * sb = sb_entry(super_blocks.next);

	/*
	 * Search the super_blocks array for the device(s) to sync.
	 */
	spin_lock(&inode_lock);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (!sb->s_dev)
			continue;
		if (dev && sb->s_dev != dev)
			continue;
		sync_list(&sb->s_dirty);
		if (dev)
			break;
	}
	spin_unlock(&inode_lock);
}

/*
 * Called with the spinlock already held..
 */
static void sync_all_inodes(void)
{
	struct super_block * sb = sb_entry(super_blocks.next);

	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (!sb->s_dev)
			continue;
		sync_list(&sb->s_dirty);
	}
}

/*
 * Needed by knfsd
 */
void write_inode_now(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		while (inode->i_state & I_DIRTY)
			sync_one(inode);
		spin_unlock(&inode_lock);
	}
	else
		printk("write_inode_now: no super block\n");
}

/*
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	if (inode->i_nrpages)
		truncate_inode_pages(inode, 0);
	wait_on_inode(inode);
	if (IS_QUOTAINIT(inode))
		DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);

	inode->i_state = 0;
}

/*
 * Dispose-list gets a local list, so it doesn't need to
 * worry about list corruption. It releases the inode lock
 * while clearing the inodes.
 */
static void dispose_list(struct list_head * head)
{
	struct list_head *next;
	int count = 0;

	spin_unlock(&inode_lock);
	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		clear_inode(inode);
		count++;
	}

	/* Add them all to the unused list in one fell swoop */
	spin_lock(&inode_lock);
	list_splice(head, &inode_unused);
	inodes_stat.nr_free_inodes += count;
}
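/*
 * Illustrative sketch, not part of the original fs/inode.c: sync_inodes()
 * above walks every super block's s_dirty list when given a zero device,
 * and stops after the matching super block when given a specific kdev_t.
 * The helper below (example_sync_everything is hypothetical) just shows
 * the two calling conventions.
 */
static void example_sync_everything(kdev_t dev)
{
	sync_inodes(0);		/* flush dirty inodes of every mounted filesystem */
	sync_inodes(dev);	/* flush only the dirty inodes of this device */
}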
/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
	struct list_head *next;
	int busy = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		if (inode->i_sb != sb)
			continue;
		if (!inode->i_count) {
			list_del(&inode->i_hash);
			INIT_LIST_HEAD(&inode->i_hash);
			list_del(&inode->i_list);
			list_add(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			continue;
		}
		busy = 1;
	}
	return busy;
}

/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */
int invalidate_inodes(struct super_block * sb)
{
	int busy;
	LIST_HEAD(throw_away);

	spin_lock(&inode_lock);
	busy = invalidate_list(&inode_in_use, sb, &throw_away);
	busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
	dispose_list(&throw_away);
	spin_unlock(&inode_lock);

	return busy;
}

/*
 * This is called with the inode lock held. It searches
 * the in-use for freeable inodes, which are moved to a
 * temporary list and then placed on the unused list by
 * dispose_list.
 *
 * We don't expect to have to call this very often.
 *
 * N.B. The spinlock is released during the call to
 *      dispose_list.
 */
#define CAN_UNUSE(inode) \
	(((inode)->i_count | (inode)->i_state) == 0)
#define INODE(entry)	(list_entry(entry, struct inode, i_list))

static int free_inodes(void)
{
	struct list_head list, *entry, *freeable = &list;
	int found = 0;

	INIT_LIST_HEAD(freeable);
	entry = inode_in_use.next;
	while (entry != &inode_in_use) {
		struct list_head *tmp = entry;

		entry = entry->next;
		if (!CAN_UNUSE(INODE(tmp)))
			continue;
		list_del(tmp);
		list_del(&INODE(tmp)->i_hash);
		INIT_LIST_HEAD(&INODE(tmp)->i_hash);
		list_add(tmp, freeable);
		list_entry(tmp, struct inode, i_list)->i_state = I_FREEING;
		found = 1;
	}

	if (found)
		dispose_list(freeable);

	return found;
}

/*
 * Searches the inodes list for freeable inodes,
 * shrinking the dcache before (and possible after,
 * if we're low)
 */
static void try_to_free_inodes(int goal)
{
	/*
	 * First stry to just get rid of unused inodes.
	 *
	 * If we can't reach our goal that way, we'll have
	 * to try to shrink the dcache and sync existing
	 * inodes..
	 */
	free_inodes();
	goal -= inodes_stat.nr_free_inodes;
	if (goal > 0) {
		spin_unlock(&inode_lock);
		select_dcache(goal, 0);
		prune_dcache(goal);
		spin_lock(&inode_lock);
		sync_all_inodes();
		free_inodes();
	}
}

/*
 * This is the externally visible routine for
 * inode memory management.
 */
void free_inode_memory(int goal)
{
	spin_lock(&inode_lock);
	free_inodes();
	spin_unlock(&inode_lock);
}

/*
 * This is called with the spinlock held, but releases
 * the lock when freeing or allocating inodes.
 * Look out! This returns with the inode lock held if
 * it got an inode..
 *
 * We do inode allocations two pages at a time to reduce
 * fragmentation.
 */
#define INODE_PAGE_ORDER	1
#define INODE_ALLOCATION_SIZE	(PAGE_SIZE << INODE_PAGE_ORDER)
#define INODES_PER_ALLOCATION	(INODE_ALLOCATION_SIZE/sizeof(struct inode))

static struct inode * grow_inodes(void)
{
	struct inode * inode;

	/*
	 * Check whether to restock the unused list.
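
/*
 * Illustrative sketch, not part of the original fs/inode.c: as the comment
 * above invalidate_inodes() explains, invalidation is a two-stage process --
 * unused inodes belonging to the super block are first gathered on a private
 * throw_away list under inode_lock, and only then torn down by dispose_list(),
 * which may sleep.  A caller (example_forget_sb is hypothetical) only needs
 * the return value to learn whether any of the super block's inodes were
 * still busy.
 */
static int example_forget_sb(struct super_block *sb)
{
	if (invalidate_inodes(sb)) {
		printk("example_forget_sb: inodes still in use\n");
		return -1;	/* hypothetical caller would refuse to proceed */
	}
	return 0;
}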
