📄 dcache.c
字号:
/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/config.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/cache.h>
#include <linux/module.h>

#include <asm/uaccess.h>

#define DCACHE_PARANOIA 1
/* #define DCACHE_DEBUG 1 */

/*
 * Single lock protecting the dcache structures touched below:
 * the hash chains, the d_lru unused list and the statistics.
 */
spinlock_t dcache_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

/* Right now the dcache depends on the kernel lock */
#define check_lock()	if (!kernel_locked()) BUG()

/* Slab cache from which all struct dentry objects are allocated. */
static kmem_cache_t *dentry_cache;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask;
static unsigned int d_hash_shift;
static struct list_head *dentry_hashtable;
/* LRU list of dentries with zero d_count, candidates for pruning. */
static LIST_HEAD(dentry_unused);

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {0, 0, 45, 0,};

/*
 * Final teardown of a dentry: give the filesystem its ->d_release()
 * callback, free a separately-allocated long name, and return the
 * object to the slab cache.  no dcache_lock, please
 */
static inline void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	/* Names too long for the inline array are kmalloc()ed separately. */
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
	dentry_stat.nr_dentry--;
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 * Called with dcache_lock held, drops it.
 */
static inline void dentry_iput(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		/* Must not hold dcache_lock across ->d_iput()/iput(). */
		spin_unlock(&dcache_lock);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else
		spin_unlock(&dcache_lock);
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/**
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	/* Drops the last reference and takes dcache_lock atomically. */
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	/* dput on a free dentry? */
	if (!list_empty(&dentry->d_lru))
		BUG();
	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (list_empty(&dentry->d_hash))
		goto kill_it;
	/* Still hashed: keep it around on the unused (LRU) list. */
	list_add(&dentry->d_lru, &dentry_unused);
	dentry_stat.nr_unused++;
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	list_del_init(&dentry->d_hash);

kill_it: {
		struct dentry *parent;
		list_del(&dentry->d_child);
		/* drops the lock, at that point nobody can reach this dentry */
		dentry_iput(dentry);
		parent = dentry->d_parent;
		d_free(dentry);
		if (dentry == parent)
			return;
		/* Hand-rolled tail recursion: now drop the parent's count. */
		dentry = parent;
		goto repeat;
	}
}

/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */
int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (list_empty(&dentry->d_hash)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	list_del_init(&dentry->d_hash);
	spin_unlock(&dcache_lock);
	return 0;
}

/* This should be called _only_ with dcache_lock held */
static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	/* 0 -> 1 transition: the dentry leaves the unused (LRU) list. */
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_stat.nr_unused--;
		list_del_init(&dentry->d_lru);
	}
	return dentry;
}

struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias - acquire the reference to alias and
 * return it. Otherwise return NULL. Notice that if inode is a directory
 * there can be only one alias and it can be unhashed only if it has
 * no children.
 */
struct dentry * d_find_alias(struct inode *inode)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias;

	spin_lock(&dcache_lock);
	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		alias = list_entry(tmp, struct dentry, d_alias);
		/* Only hashed aliases count; unhashed ones are on the way out. */
		if (!list_empty(&alias->d_hash)) {
			__dget_locked(alias);
			spin_unlock(&dcache_lock);
			return alias;
		}
	}
	spin_unlock(&dcache_lock);
	return NULL;
}

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct list_head *tmp, *head = &inode->i_dentry;
restart:
	spin_lock(&dcache_lock);
	tmp = head;
	while ((tmp = tmp->next) != head) {
		struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
		if (!atomic_read(&dentry->d_count)) {
			/*
			 * Pin it, drop the lock, unhash and release it.
			 * dput() may free the dentry, so the alias list
			 * must be rescanned from the start.
			 */
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			d_drop(dentry);
			dput(dentry);
			goto restart;
		}
	}
	spin_unlock(&dcache_lock);
}

/*
 * Throw away a dentry - free the inode, dput the parent.
 * This requires that the LRU list has already been
 * removed.
 * Called with dcache_lock, drops it and then regains.
 */
static inline void prune_one_dentry(struct dentry * dentry)
{
	struct dentry * parent;

	list_del_init(&dentry->d_hash);
	list_del(&dentry->d_child);
	/* dentry_iput() drops dcache_lock. */
	dentry_iput(dentry);
	parent = dentry->d_parent;
	d_free(dentry);
	if (parent != dentry)
		dput(parent);
	/* Caller expects to still hold dcache_lock on return. */
	spin_lock(&dcache_lock);
}

/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try and free
 *
 * Shrink the dcache. This is done when we need
 * more memory, or simply when we need to unmount
 * something (at which point we need to unuse
 * all dentries).
 *
 * This function may fail to free any resources if
 * all the dentries are in use.
 */
void prune_dcache(int count)
{
	spin_lock(&dcache_lock);
	for (;;) {
		struct dentry *dentry;
		struct list_head *tmp;

		/* Take victims from the cold (tail) end of the LRU. */
		tmp = dentry_unused.prev;

		if (tmp == &dentry_unused)
			break;
		list_del_init(tmp);
		dentry = list_entry(tmp, struct dentry, d_lru);

		/* If the dentry was recently referenced, don't free it. */
		if (dentry->d_vfs_flags & DCACHE_REFERENCED) {
			/* Clear the bit and give it a second trip round. */
			dentry->d_vfs_flags &= ~DCACHE_REFERENCED;
			list_add(&dentry->d_lru, &dentry_unused);
			continue;
		}
		dentry_stat.nr_unused--;

		/* Unused dentry with a count? */
		if (atomic_read(&dentry->d_count))
			BUG();
		prune_one_dentry(dentry);
		if (!--count)
			break;
	}
	spin_unlock(&dcache_lock);
}

/*
 * Shrink the dcache for the specified super block.
 * This allows us to unmount a device without disturbing
 * the dcache for the other devices.
 *
 * This implementation makes just two traversals of the
 * unused list. On the first pass we move the selected
 * dentries to the most recent end, and on the second
 * pass we free them. The second pass must restart after
 * each dput(), but since the target dentries are all at
 * the end, it's really just a single traversal.
 */

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block.
This
 * is used to free the dcache before unmounting a file
 * system
 */
void shrink_dcache_sb(struct super_block * sb)
{
	struct list_head *tmp, *next;
	struct dentry *dentry;

	/*
	 * Pass one ... move the dentries for the specified
	 * superblock to the most recent end of the unused list.
	 */
	spin_lock(&dcache_lock);
	next = dentry_unused.next;
	while (next != &dentry_unused) {
		tmp = next;
		next = tmp->next;
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		/* Re-queue at the head so pass two finds them first. */
		list_del(tmp);
		list_add(tmp, &dentry_unused);
	}

	/*
	 * Pass two ... free the dentries for this superblock.
	 */
repeat:
	next = dentry_unused.next;
	while (next != &dentry_unused) {
		tmp = next;
		next = tmp->next;
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		if (atomic_read(&dentry->d_count))
			continue;
		dentry_stat.nr_unused--;
		list_del_init(tmp);
		/*
		 * prune_one_dentry() drops and re-takes dcache_lock, so
		 * our list position may be stale - restart the scan.
		 */
		prune_one_dentry(dentry);
		goto repeat;
	}
	spin_unlock(&dcache_lock);
}

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -