📄 audit_tree.c
#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct inotify_watch watch;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch.
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */
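/*
 * Editor's sketch, not part of the original file: the low bits of
 * node.index record the slot's position within owners[], so a node
 * pointer alone is enough to recover its containing chunk.  This is
 * the arithmetic find_chunk() below relies on:
 *
 *	unsigned i = p->index & ~(1U << 31);	// strip 'will prune' bit
 *	struct audit_chunk *c =
 *		container_of(p - i, struct audit_chunk, owners[0]);
 *
 * p - i steps back to owners[0]; container_of() then subtracts the
 * offset of owners[] within struct audit_chunk.
 */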
static struct inotify_handle *rtree_ih;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}

static void __free_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

static inline void free_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __free_chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	put_inotify_watch(&chunk->watch);
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list = chunk_hash(chunk->watch.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->watch.inode == inode) {
			get_inotify_watch(&p->watch);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}
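/*
 * Editor's sketch, not part of the original file: a caller on the
 * audit syscall side is expected to pair the lookup with
 * rcu_read_lock() and to drop the watch reference taken by
 * audit_tree_lookup() through audit_put_chunk():
 *
 *	struct audit_chunk *chunk;
 *
 *	rcu_read_lock();
 *	chunk = audit_tree_lookup(inode);	// takes a watch reference
 *	rcu_read_unlock();
 *	if (chunk) {
 *		int hit = audit_tree_match(chunk, tree);
 *		// ... use 'hit': inode is tagged by this tree ...
 *		audit_put_chunk(chunk);		// balance the reference
 *	}
 */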
/* tagging and untagging inodes with trees */

static void untag_chunk(struct audit_chunk *chunk, struct node *p)
{
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	mutex_lock(&chunk->watch.inode->inotify_mutex);
	if (chunk->dead) {
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		return;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return;
	}

	new = alloc_chunk(size);
	if (!new)
		goto Fallback;
	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; i < size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	return;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
}

static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	if (inotify_add_watch(rtree_ih, &chunk->watch, inode,
			      IN_IGNORED | IN_DELETE_SELF) < 0) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	mutex_lock(&inode->inotify_mutex);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&inode->inotify_mutex);
	return 0;
}
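/*
 * Editor's note, not part of the original file: owners[] is a flexible
 * array sized at allocation time, so an owner cannot be added to or
 * removed from a live chunk in place.  tag_chunk() below (like
 * untag_chunk() above) therefore builds a replacement chunk and swaps
 * it in atomically with respect to RCU readers:
 *
 *	chunk = alloc_chunk(old->count + 1);
 *	inotify_clone_watch(&old->watch, &chunk->watch);
 *	// copy surviving owners, get_tree() each of them
 *	list_replace_rcu(&old->hash, &chunk->hash);
 *	old->dead = 1;
 *	inotify_evict_watch(&old->watch);
 *
 * Lookups in audit_tree_lookup() see either the old chunk or the
 * complete new one, never a half-built state.
 */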
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct inotify_watch *watch;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
		return create_chunk(inode, tree);

	old = container_of(watch, struct audit_chunk, watch);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			put_inotify_watch(watch);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk)
		return -ENOMEM;

	mutex_lock(&inode->inotify_mutex);
	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
		mutex_unlock(&inode->inotify_mutex);
		free_chunk(chunk);
		return -ENOSPC;
	}
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	inotify_evict_watch(&old->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch);
	return 0;
}

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL,
					     AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=remove rule dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			if (rule->filterkey) {
				audit_log_format(ab, " key=");
				audit_log_untrustedstring(ab, rule->filterkey);
			} else
				audit_log_format(ab, " key=(null)");
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;
		struct audit_chunk *chunk;

		p = list_entry(victim->chunks.next, struct node, list);
		chunk = find_chunk(p);
		get_inotify_watch(&chunk->watch);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, p);

		put_inotify_watch(&chunk->watch);
		spin_lock(&hash_lock);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)