#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>
struct audit_tree {
        atomic_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        struct inotify_watch watch;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};
static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                  hash_lock
 * tree.rules anchors rule.rlist                            audit_filter_mutex
 * chunk.trees anchors tree.same_root                       hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                         RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index lets us get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */
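/* the single inotify handle that all chunk watches are registered against */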
static struct inotify_handle *rtree_ih;
static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                atomic_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}
static inline void get_tree(struct audit_tree *tree)
{
        atomic_inc(&tree->count);
}
static void __put_tree(struct rcu_head *rcu)
{
        struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
        kfree(tree);
}
static inline void put_tree(struct audit_tree *tree)
{
        if (atomic_dec_and_test(&tree->count))
                call_rcu(&tree->head, __put_tree);
}
/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        inotify_init_watch(&chunk->watch);
        return chunk;
}
static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}
void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}
static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
static inline struct list_head *chunk_hash(const struct inode *inode)
{
        unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}
/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct list_head *list = chunk_hash(chunk->watch.inode);
        list_add_rcu(&chunk->hash, list);
}
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        struct list_head *list = chunk_hash(inode);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                if (p->watch.inode == inode) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}
int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return 1;
        return 0;
}
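/*
 * Illustrative caller pattern (a sketch, not code from this file): a
 * syscall-exit matcher would typically grab the chunk for an inode under
 * rcu_read_lock() and later test it against a rule's tree.  The names
 * "inode" and "rule" below are assumed to come from the caller.
 *
 *      struct audit_chunk *chunk;
 *
 *      rcu_read_lock();
 *      chunk = audit_tree_lookup(inode);       // takes a reference on success
 *      rcu_read_unlock();
 *      if (chunk) {
 *              if (audit_tree_match(chunk, rule->tree))
 *                      ...the rule applies to this inode...
 *              audit_put_chunk(chunk);         // drop the reference
 *      }
 */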
/* tagging and untagging inodes with trees */
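/*
 * node.index (ignoring the MSB mark) is the node's slot in owners[], so
 * stepping the pointer back by that many entries yields owners[0], from
 * which container_of() gives the enclosing chunk.
 */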
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}
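/*
 * Drop one owner from the chunk it lives in: the chunk on the inode is
 * replaced with a copy one slot smaller (or torn down if this was the last
 * owner).  Called with hash_lock held; the lock is dropped and retaken
 * along the way, so list positions must not be relied upon across a call.
 */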
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct audit_chunk *new;
        struct audit_tree *owner;
        int size = chunk->count - 1;

        if (!pin_inotify_watch(&chunk->watch)) {
                /*
                 * Filesystem is shutting down; all watches are getting
                 * evicted, just take it off the node list for this
                 * tree and let the eviction logic take care of the
                 * rest.
                 */
                if (owner->root == chunk) {
                        list_del_init(&owner->same_root);
                list_del_init(&p->list);

        spin_unlock(&hash_lock);

        /*
         * pin_inotify_watch() succeeded, so the watch won't go away
         * from under us.
         */
        mutex_lock(&chunk->watch.inode->inotify_mutex);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);

        spin_lock(&hash_lock);
        list_del_init(&chunk->trees);
        if (owner->root == chunk)
        list_del_init(&p->list);
        list_del_rcu(&chunk->hash);
        spin_unlock(&hash_lock);
        inotify_evict_watch(&chunk->watch);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);
        put_inotify_watch(&chunk->watch);

        new = alloc_chunk(size);
        if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {

        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);

        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
        spin_unlock(&hash_lock);
        inotify_evict_watch(&chunk->watch);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);
        put_inotify_watch(&chunk->watch);

        // do the best we can
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
        list_del_init(&p->list);
        spin_unlock(&hash_lock);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);

        unpin_inotify_watch(&chunk->watch);
        spin_lock(&hash_lock);
}
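/*
 * Attach a fresh single-owner chunk to an inode that has no chunk watch
 * yet and hook it into the tree and the hash.
 */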
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct audit_chunk *chunk = alloc_chunk(1);

        if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {

        mutex_lock(&inode->inotify_mutex);
        spin_lock(&hash_lock);
                spin_unlock(&hash_lock);
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(&chunk->watch);

        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        list_add(&chunk->owners[0].list, &tree->chunks);
                list_add(&tree->same_root, &chunk->trees);
        spin_unlock(&hash_lock);
        mutex_unlock(&inode->inotify_mutex);
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct inotify_watch *watch;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;

        if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
                return create_chunk(inode, tree);

        old = container_of(watch, struct audit_chunk, watch);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        put_inotify_watch(&old->watch);
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
                put_inotify_watch(&old->watch);

        mutex_lock(&inode->inotify_mutex);
        if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(&old->watch);
        spin_lock(&hash_lock);
                spin_unlock(&hash_lock);
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(&old->watch);
                put_inotify_watch(&chunk->watch);
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                list_replace_init(&old->owners[n].list, &p->list);
        p->index = (chunk->count - 1) | (1U<<31);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                list_add(&tree->same_root, &chunk->trees);
        spin_unlock(&hash_lock);
        inotify_evict_watch(&old->watch);
        mutex_unlock(&inode->inotify_mutex);
        put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
        put_inotify_watch(&old->watch); /* and kill it */
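/*
 * Detach every rule that refers to @tree and emit an AUDIT_CONFIG_CHANGE
 * record for each removed rule.  Called with audit_filter_mutex held.
 */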
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
        struct audit_buffer *ab;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
                        audit_log_format(ab, "op=");
                        audit_log_string(ab, "remove rule");
                        audit_log_format(ab, " dir=");
                        audit_log_untrustedstring(ab, rule->tree->pathname);
                        audit_log_key(ab, rule->filterkey);
                        audit_log_format(ab, " list=%d res=1", rule->listnr);
                        audit_log_end(ab);
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}
/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;

                p = list_entry(victim->chunks.next, struct node, list);

                untag_chunk(p);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}
/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
                spin_unlock(&hash_lock);

        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                if (node->index & (1U<<31)) {
                        list_add(p, &tree->chunks);

        while (!list_empty(&tree->chunks)) {
                struct node *node;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))

        if (!tree->root && !tree->goner) {
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);

                spin_unlock(&hash_lock);
}
static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
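/*
 * A rule referring to the tree is going away: unhook it from the tree and,
 * if it was the last such rule, queue the tree itself on prune_list and
 * kick the prune thread.
 */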
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;

                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        list_del_init(&tree->same_root);
                        list_move(&tree->list, &prune_list);
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                }
                spin_unlock(&hash_lock);
static int compare_root(struct vfsmount *mnt, void *arg)
{
        return mnt->mnt_root->d_inode == arg;
}
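/*
 * Walk all trees and untag chunks whose inodes are no longer reachable
 * from any of the mounts collected under the tree's root path.
 */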
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct vfsmount *root_mnt;

                tree = container_of(cursor.next, struct audit_tree, list);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);

                root_mnt = collect_mounts(&path);

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct inode *inode = find_chunk(node)->watch.inode;
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root, inode, root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);

                drop_collected_mounts(root_mnt);

                mutex_lock(&audit_filter_mutex);
        }
        mutex_unlock(&audit_filter_mutex);
}
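/*
 * Called while a rule is being parsed: sanity-check the spec and allocate
 * the struct audit_tree the rule will refer to.
 */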
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}
void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}
static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(mnt->mnt_root->d_inode, arg);
}
/* called with audit_filter_mutex */
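/*
 * Attach a new tree rule: reuse an existing tree with the same path if
 * there is one, otherwise insert the freshly allocated one and tag every
 * mount collected under its path.
 */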
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct vfsmount *mnt;

        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        list_add(&rule->rlist, &tree->rules);

        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        err = kern_path(tree->pathname, 0, &path);
        mnt = collect_mounts(&path);

        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {

        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
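/*
 * A mount subtree at @new is being made equivalent to @old: for every tree
 * whose path covers @old, tag the mounts collected at @new as well.  Trees
 * already handled are parked around @barrier so the list walks terminate.
 */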
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        struct path path1, path2;
        struct vfsmount *tagged;

        err = kern_path(new, 0, &path2);
        tagged = collect_mounts(&path2);

        err = kern_path(old, 0, &path1);
                drop_collected_mounts(tagged);

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(cursor.next, struct audit_tree, list);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                        good_one = path_is_under(&path1, &path2);
                        mutex_lock(&audit_filter_mutex);

                failed = iterate_mounts(tag_mount, tree, tagged);
                        mutex_lock(&audit_filter_mutex);

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                spin_unlock(&hash_lock);

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);

                mutex_lock(&audit_filter_mutex);

        mutex_unlock(&audit_filter_mutex);
        drop_collected_mounts(tagged);
/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(&prune_list)) {
                struct audit_tree *victim;

                victim = list_entry(prune_list.next, struct audit_tree, list);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
        return 0;
}
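/*
 * Fire off a one-shot kernel thread to run prune_tree_thread() and empty
 * prune_list.
 */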
static void audit_schedule_prune(void)
{
        kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}
/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
}
/*
 * Here comes the stuff asynchronous to auditctl operations
 */
/* inode->inotify_mutex is locked */
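/*
 * The watch on this chunk's inode is going away: detach every owning tree,
 * kill their rules and hand the trees either to the current syscall's
 * postponed list or to the prune thread.
 */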
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();

        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                        list_move(&owner->list, &prune_list);
                        list_move(&owner->list, postponed);
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
                audit_schedule_prune();
        mutex_unlock(&audit_filter_mutex);
}
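/*
 * inotify callback; IN_IGNORED means the watch has been removed or the
 * inode is being evicted, so the chunk must be torn down and the reference
 * held by the watch dropped.
 */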
static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
                         u32 cookie, const char *dname, struct inode *inode)
{
        struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

        if (mask & IN_IGNORED) {
                evict_chunk(chunk);
                put_inotify_watch(watch);
        }
}
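/* last reference to the watch is gone - free the chunk once RCU readers are done */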
static void destroy_watch(struct inotify_watch *watch)
{
        struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
        call_rcu(&chunk->head, __put_chunk);
}
static const struct inotify_operations rtree_inotify_ops = {
        .handle_event = handle_event,
        .destroy_watch = destroy_watch,
};
static int __init audit_tree_init(void)
{
        int i;

        rtree_ih = inotify_init(&rtree_inotify_ops);
        if (IS_ERR(rtree_ih))
                audit_panic("cannot initialize inotify handle for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);