⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 inode.c

📁 嵌入式系统设计与实例开发实验教材二源码 多线程应用程序设计 串行端口程序设计 AD接口实验 CAN总线通信实验 GPS通信实验 Linux内核移植与编译实验 IC卡读写实验 SD驱动使
💻 C
📖 第 1 页 / 共 3 页
字号:
/* Task queued by prune_icache() to flush dirty unused inodes from keventd
 * context (we must not sync synchronously from the pruning path itself). */
static struct tq_struct unused_inodes_flush_task;

/**
 *	write_inode_now	-	write an inode to disk
 *	@inode: inode to write to disk
 *	@sync: whether the write should be synchronous or not
 *
 *	This function commits an inode to disk immediately if it is
 *	dirty. This is primarily needed by knfsd.
 */
void write_inode_now(struct inode *inode, int sync)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		/* Loop: sync_one() may drop/retake inode_lock, and the inode
		 * can be redirtied meanwhile, so re-test I_DIRTY each pass. */
		while (inode->i_state & I_DIRTY)
			sync_one(inode, sync);
		spin_unlock(&inode_lock);
		if (sync)
			wait_on_inode(inode);
	}
	else
		printk(KERN_ERR "write_inode_now: no super block\n");
}

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @datasync: if set, don't bother flushing timestamps
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * Returns 0 on success or the first error encountered while flushing
 * buffers (metadata errors take precedence over data errors).
 */
int generic_osync_inode(struct inode *inode, int what)
{
	int err = 0, err2 = 0, need_write_inode_now = 0;

	/*
	 * WARNING
	 *
	 * Currently, the filesystem write path does not pass the
	 * filp down to the low-level write functions.  Therefore it
	 * is impossible for (say) __block_commit_write to know if
	 * the operation is O_SYNC or not.
	 *
	 * Ideally, O_SYNC writes would have the filesystem call
	 * ll_rw_block as it went to kick-start the writes, and we
	 * could call osync_inode_buffers() here to wait only for
	 * those IOs which have already been submitted to the device
	 * driver layer.  As it stands, if we did this we'd not write
	 * anything to disk since our writes have not been queued by
	 * this point: they are still on the dirty LRU.
	 *
	 * So, currently we will call fsync_inode_buffers() instead,
	 * to flush _all_ dirty buffers for this inode to disk on
	 * every O_SYNC write, not just the synchronous I/Os.  --sct
	 */
	if (what & OSYNC_METADATA)
		err = fsync_inode_buffers(inode);
	if (what & OSYNC_DATA)
		err2 = fsync_inode_data_buffers(inode);
	/* Report the metadata error if there was one, else the data error. */
	if (!err)
		err = err2;

	/* Peek at the dirty state under the lock to decide whether the
	 * inode itself (not just its buffers) still needs writing out. */
	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now)
		write_inode_now(inode, 1);
	else
		wait_on_inode(inode);

	return err;
}

/**
 * clear_inode - clear an inode
 * @inode: inode to clear
 *
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);

	/* Sanity checks: the inode must have no cached pages, must be
	 * marked I_FREEING by the caller, and must not be cleared twice. */
	if (inode->i_data.nrpages)
		BUG();
	if (!(inode->i_state & I_FREEING))
		BUG();
	if (inode->i_state & I_CLEAR)
		BUG();
	wait_on_inode(inode);
	DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);
	/* Detach from any block/char device aliasing this inode. */
	if (inode->i_bdev)
		bd_forget(inode);
	else if (inode->i_cdev) {
		cdput(inode->i_cdev);
		inode->i_cdev = NULL;
	}
	inode->i_state = I_CLEAR;
}

/*
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head * head)
{
	struct list_head * inode_entry;
	struct inode * inode;

	while ((inode_entry = head->next) != head)
	{
		list_del(inode_entry);

		inode = list_entry(inode_entry, struct inode, i_list);
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		destroy_inode(inode);
		inodes_stat.nr_inodes--;
	}
}

/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
	struct list_head *next;
	int busy = 0, count = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		/* Advance before inspecting tmp: tmp may be unlinked and
		 * moved onto the dispose list below. */
		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		if (inode->i_sb != sb)
			continue;
		invalidate_inode_buffers(inode);
		if (!atomic_read(&inode->i_count)) {
			/* Unreferenced: unhash and move to the caller's
			 * dispose list for teardown outside the lock. */
			list_del_init(&inode->i_hash);
			list_del(&inode->i_list);
			list_add(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			count++;
			continue;
		}
		/* Still referenced by someone - report the list as busy. */
		busy = 1;
	}
	/* only unused inodes may be cached with i_count zero */
	inodes_stat.nr_unused -= count;
	return busy;
}

/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */

/**
 *	invalidate_inodes	- discard the inodes on a device
 *	@sb: superblock
 *
 *	Discard all of the inodes for a given superblock. If the discard
 *	fails because there are busy inodes then a non zero value is returned.
 *	If the discard is successful all the inodes have been discarded.
 */
int invalidate_inodes(struct super_block * sb)
{
	int busy;
	LIST_HEAD(throw_away);

	/* Stage 1: collect freeable inodes under the lock ... */
	spin_lock(&inode_lock);
	busy = invalidate_list(&inode_in_use, sb, &throw_away);
	busy |= invalidate_list(&inode_unused, sb, &throw_away);
	busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
	busy |= invalidate_list(&sb->s_locked_inodes, sb, &throw_away);
	spin_unlock(&inode_lock);

	/* ... stage 2: tear them down outside the lock (may sleep). */
	dispose_list(&throw_away);

	return busy;
}

/*
 * Invalidate all inodes and buffers on a device, optionally syncing it
 * first.  Returns non-zero if busy inodes prevented a full invalidate.
 */
int invalidate_device(kdev_t dev, int do_sync)
{
	struct super_block *sb;
	int res;

	if (do_sync)
		fsync_dev(dev);

	res = 0;
	sb = get_super(dev);
	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read semaphore so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb);
		drop_super(sb);
	}
	invalidate_buffers(dev);
	return res;
}

/*
 * This is called with the inode lock held. It searches
 * the in-use for freeable inodes, which are moved to a
 * temporary list and then placed on the unused list by
 * dispose_list.
 *
 * NOTE(review): this header looks stale - prune_icache() below acquires
 * inode_lock itself rather than expecting the caller to hold it; confirm
 * against the callers before relying on the claim above.
 *
 * We don't expect to have to call this very often.
 *
 * N.B. The spinlock is released during the call to
 *      dispose_list.
 */
/* An inode can be freed iff it is clean (no i_state bits), has no cached
 * pages, and owns no buffers. */
#define CAN_UNUSE(inode) \
	((((inode)->i_state | (inode)->i_data.nrpages) == 0)  && \
	 !inode_has_buffers(inode))
#define INODE(entry)	(list_entry(entry, struct inode, i_list))

void prune_icache(int goal)
{
	LIST_HEAD(list);
	struct list_head *entry, *freeable = &list;
	int count;
	struct inode * inode;

	spin_lock(&inode_lock);

	count = 0;
	/* Walk the unused list backwards (oldest entries first). */
	entry = inode_unused.prev;
	while (entry != &inode_unused)
	{
		struct list_head *tmp = entry;

		/* Step before unlinking tmp from the list. */
		entry = entry->prev;
		inode = INODE(tmp);
		if (inode->i_state & (I_FREEING|I_CLEAR|I_LOCK))
			continue;
		if (!CAN_UNUSE(inode))
			continue;
		if (atomic_read(&inode->i_count))
			continue;
		list_del(tmp);
		list_del(&inode->i_hash);
		INIT_LIST_HEAD(&inode->i_hash);
		list_add(tmp, freeable);
		inode->i_state |= I_FREEING;
		count++;
		if (!--goal)
			break;
	}
	inodes_stat.nr_unused -= count;
	spin_unlock(&inode_lock);

	/* Actual teardown happens outside the lock; may sleep. */
	dispose_list(freeable);

	/*
	 * If we didn't free enough clean inodes schedule
	 * a sync of the dirty inodes, we cannot do it
	 * from here or we're either synchronously dogslow
	 * or we deadlock with oom.
	 */
	if (goal)
		schedule_task(&unused_inodes_flush_task);
}

/*
 * VM pressure entry point: free up to nr_unused/priority unused inodes.
 * Always returns 0.
 */
int shrink_icache_memory(int priority, int gfp_mask)
{
	int count = 0;

	/*
	 * Nasty deadlock avoidance..
	 *
	 * We may hold various FS locks, and we don't
	 * want to recurse into the FS that called us
	 * in clear_inode() and friends..
	 */
	if (!(gfp_mask & __GFP_FS))
		return 0;

	count = inodes_stat.nr_unused / priority;

	prune_icache(count);
	kmem_cache_shrink(inode_cachep);
	return 0;
}

/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 */
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
	struct list_head *tmp;
	struct inode * inode;

	/* Linear scan of one hash chain; returns NULL when the chain is
	 * exhausted without a match (inode is reset to NULL each pass). */
	tmp = head;
	for (;;) {
		tmp = tmp->next;
		inode = NULL;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_hash);
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		/* Optional filesystem-supplied match callback: skip entries
		 * the actor rejects. */
		if (find_actor && !find_actor(inode, ino, opaque))
			continue;
		break;
	}
	return inode;
}

/*
 * This just initializes the inode fields
 * to known values before returning the inode..
 *
 * i_sb, i_ino, i_count, i_state and the lists have
 * been initialized elsewhere..
 */
static void clean_inode(struct inode *inode)
{
	/* Zero-filled static ops tables serve as safe "no operations"
	 * defaults until the filesystem installs real ones. */
	static struct address_space_operations empty_aops;
	static struct inode_operations empty_iops;
	static struct file_operations empty_fops;
	memset(&inode->u, 0, sizeof(inode->u));
	inode->i_sock = 0;
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_generation = 0;
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_data.a_ops = &empty_aops;
	inode->i_data.host = inode;
	inode->i_data.gfp_mask = GFP_HIGHUSER;
	inode->i_mapping = &inode->i_data;
}

/**
 * get_empty_inode 	- obtain an inode
 *
 * This is called by things like the networking layer
 * etc that want to get an inode without any inode
 * number, or filesystems that allocate new inodes with
 * no pre-existing information.
 *
 * On a successful return the inode pointer is returned. On a failure
 * a %NULL pointer is returned. The returned inode is not on any superblock
 * lists.
 */
struct inode * get_empty_inode(void)
{
	/* Monotonic counter handing out pseudo inode numbers; protected by
	 * inode_lock (incremented only under the lock below). */
	static unsigned long last_ino;
	struct inode * inode;

	spin_lock_prefetch(&inode_lock);

	inode = alloc_inode();
	if (inode)
	{
		spin_lock(&inode_lock);
		inodes_stat.nr_inodes++;
		list_add(&inode->i_list, &inode_in_use);
		inode->i_sb = NULL;
		inode->i_dev = 0;
		inode->i_blkbits = 0;
		inode->i_ino = ++last_ino;
		inode->i_flags = 0;
		atomic_set(&inode->i_count, 1);
		inode->i_state = 0;
		spin_unlock(&inode_lock);
		/* Field initialization needs no lock: nobody else can see the
		 * inode yet (it is not hashed). */
		clean_inode(inode);
	}
	return inode;
}

/*
 * This is called without the inode lock held.. Be careful.

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -