📄 audit_tree.c
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;
		struct audit_chunk *chunk;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		chunk = find_chunk(node);
		get_inotify_watch(&chunk->watch);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, node);

		put_inotify_watch(&chunk->watch);
		spin_lock(&hash_lock);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;

	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct nameidata nd;
		struct vfsmount *root_mnt;
		struct node *node;
		struct list_head list;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = path_lookup(tree->pathname, 0, &nd);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(nd.path.mnt, nd.path.dentry);
		path_put(&nd.path);
		if (!root_mnt)
			goto skip_it;

		list_add_tail(&list, &root_mnt->mnt_list);
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			struct inode *inode = chunk->watch.inode;
			struct vfsmount *mnt;

			node->index |= 1U<<31;
			list_for_each_entry(mnt, &list, mnt_list) {
				if (mnt->mnt_root->d_inode == inode) {
					node->index &= ~(1U<<31);
					break;
				}
			}
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		put_tree(tree);
		list_del_init(&list);
		drop_collected_mounts(root_mnt);
skip_it:
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

static int is_under(struct vfsmount *mnt, struct dentry *dentry,
		    struct nameidata *nd)
{
	if (mnt != nd->path.mnt) {
		for (;;) {
			if (mnt->mnt_parent == mnt)
				return 0;
			if (mnt->mnt_parent == nd->path.mnt)
				break;
			mnt = mnt->mnt_parent;
		}
		dentry = mnt->mnt_mountpoint;
	}
	return is_subdir(dentry, nd->path.dentry);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op & ~AUDIT_EQUAL ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct nameidata nd;
	struct vfsmount *mnt, *p;
	struct list_head list;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = path_lookup(tree->pathname, 0, &nd);
	if (err)
		goto Err;
	mnt = collect_mounts(nd.path.mnt, nd.path.dentry);
	path_put(&nd.path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}
	list_add_tail(&list, &mnt->mnt_list);

	get_tree(tree);
	list_for_each_entry(p, &list, mnt_list) {
		err = tag_chunk(p->mnt_root->d_inode, tree);
		if (err)
			break;
	}

	list_del(&list);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct nameidata nd;
	struct vfsmount *tagged;
	struct list_head list;
	struct vfsmount *mnt;
	struct dentry *dentry;
	int err;

	err = path_lookup(new, 0, &nd);
	if (err)
		return err;
	tagged = collect_mounts(nd.path.mnt, nd.path.dentry);
	path_put(&nd.path);
	if (!tagged)
		return -ENOMEM;

	err = path_lookup(old, 0, &nd);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}
	mnt = mntget(nd.path.mnt);
	dentry = dget(nd.path.dentry);
	path_put(&nd.path);

	if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
		follow_up(&mnt, &dentry);

	list_add_tail(&list, &tagged->mnt_list);

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct vfsmount *p;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = path_lookup(tree->pathname, 0, &nd);
		if (err) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		spin_lock(&vfsmount_lock);
		if (!is_under(mnt, dentry, &nd)) {
			spin_unlock(&vfsmount_lock);
			path_put(&nd.path);
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}
		spin_unlock(&vfsmount_lock);
		path_put(&nd.path);

		list_for_each_entry(p, &list, mnt_list) {
			failed = tag_chunk(p->mnt_root->d_inode, tree);
			if (failed)
				break;
		}

		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}

	list_del(&barrier);
	list_del(&cursor);
	list_del(&list);
	mutex_unlock(&audit_filter_mutex);
	dput(dentry);
	mntput(mnt);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread, with audit_cmd_mutex held.
 */
void audit_prune_trees(void)
{
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		kill_rules(owner);
		list_move(&owner->list, &prune_list);
		audit_schedule_prune();
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
}

static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			 u32 cookie, const char *dname, struct inode *inode)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

	if (mask & IN_IGNORED) {
		evict_chunk(chunk);
		put_inotify_watch(watch);
	}
}

static void destroy_watch(struct inotify_watch *watch)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	free_chunk(chunk);
}

static const struct inotify_operations rtree_inotify_ops = {
	.handle_event	= handle_event,
	.destroy_watch	= destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	rtree_ih = inotify_init(&rtree_inotify_ops);
	if (IS_ERR(rtree_ih))
		audit_panic("cannot initialize inotify handle for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);
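Note on the 1U<<31 marking protocol used throughout the listing: audit_trim_trees(), audit_add_tree_rule() and audit_tag_tree() all set the top bit of node->index to flag every chunk as a removal candidate, clear it again for chunks that are still reachable through a collected mount (or after tagging succeeds), and then rely on trim_marked() to untag whatever kept the bit. The standalone sketch below mimics that mark / unmark / trim sequence on a plain array; it is illustrative only, not kernel code, and the names item, MARK_BIT and still_wanted() are hypothetical, not part of audit_tree.c.

/* Minimal userspace sketch of the mark-then-trim pattern (assumed names). */
#include <stdio.h>

#define MARK_BIT (1U << 31)	/* same top bit as node->index in audit_tree.c */

struct item {
	unsigned index;		/* low bits: payload; top bit: removal candidate */
	int alive;
};

/* hypothetical predicate standing in for "inode still covered by a mount" */
static int still_wanted(const struct item *it)
{
	return (it->index & ~MARK_BIT) % 2 == 0;	/* keep even payloads */
}

int main(void)
{
	struct item items[] = { {1, 1}, {2, 1}, {3, 1}, {4, 1} };
	int n = sizeof(items) / sizeof(items[0]);
	int i;

	/* 1. mark everything as a candidate for removal */
	for (i = 0; i < n; i++)
		items[i].index |= MARK_BIT;

	/* 2. unmark the entries we can still account for */
	for (i = 0; i < n; i++)
		if (still_wanted(&items[i]))
			items[i].index &= ~MARK_BIT;

	/* 3. "trim_marked": everything still carrying the mark goes away */
	for (i = 0; i < n; i++)
		if (items[i].index & MARK_BIT)
			items[i].alive = 0;

	for (i = 0; i < n; i++)
		printf("payload %u: %s\n", items[i].index & ~MARK_BIT,
		       items[i].alive ? "kept" : "trimmed");
	return 0;
}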