Diffstat (limited to 'kernel/audit_tree.c')
 -rw-r--r--  kernel/audit_tree.c  139
 1 file changed, 78 insertions(+), 61 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index f7921a2ecf16..8b509441f49a 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -24,6 +24,7 @@ struct audit_chunk {
 	struct list_head trees;	/* with root here */
 	int dead;
 	int count;
+	atomic_long_t refs;
 	struct rcu_head head;
 	struct node {
 		struct list_head list;
@@ -56,7 +57,8 @@ static LIST_HEAD(prune_list);
  * tree is refcounted; one reference for "some rules on rules_list refer to
  * it", one for each chunk with pointer to it.
  *
- * chunk is refcounted by embedded inotify_watch.
+ * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
+ * of watch contributes 1 to .refs).
  *
  * node.index allows to get from node.list to containing chunk.
  * MSB of that sucker is stolen to mark taggings that we might have to
@@ -121,6 +123,7 @@ static struct audit_chunk *alloc_chunk(int count)
 	INIT_LIST_HEAD(&chunk->hash);
 	INIT_LIST_HEAD(&chunk->trees);
 	chunk->count = count;
+	atomic_long_set(&chunk->refs, 1);
 	for (i = 0; i < count; i++) {
 		INIT_LIST_HEAD(&chunk->owners[i].list);
 		chunk->owners[i].index = i;
@@ -129,9 +132,8 @@ static struct audit_chunk *alloc_chunk(int count)
 	return chunk;
 }
 
-static void __free_chunk(struct rcu_head *rcu)
+static void free_chunk(struct audit_chunk *chunk)
 {
-	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
 	int i;
 
 	for (i = 0; i < chunk->count; i++) {
@@ -141,14 +143,16 @@ static void __free_chunk(struct rcu_head *rcu)
 	kfree(chunk);
 }
 
-static inline void free_chunk(struct audit_chunk *chunk)
+void audit_put_chunk(struct audit_chunk *chunk)
 {
-	call_rcu(&chunk->head, __free_chunk);
+	if (atomic_long_dec_and_test(&chunk->refs))
+		free_chunk(chunk);
 }
 
-void audit_put_chunk(struct audit_chunk *chunk)
+static void __put_chunk(struct rcu_head *rcu)
 {
-	put_inotify_watch(&chunk->watch);
+	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
+	audit_put_chunk(chunk);
 }
 
 enum {HASH_SIZE = 128};
@@ -176,7 +180,7 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 
 	list_for_each_entry_rcu(p, list, hash) {
 		if (p->watch.inode == inode) {
-			get_inotify_watch(&p->watch);
+			atomic_long_inc(&p->refs);
 			return p;
 		}
 	}
@@ -194,17 +198,49 @@ int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
 
 /* tagging and untagging inodes with trees */
 
-static void untag_chunk(struct audit_chunk *chunk, struct node *p)
+static struct audit_chunk *find_chunk(struct node *p)
+{
+	int index = p->index & ~(1U<<31);
+	p -= index;
+	return container_of(p, struct audit_chunk, owners[0]);
+}
+
+static void untag_chunk(struct node *p)
 {
+	struct audit_chunk *chunk = find_chunk(p);
 	struct audit_chunk *new;
 	struct audit_tree *owner;
 	int size = chunk->count - 1;
 	int i, j;
 
+	if (!pin_inotify_watch(&chunk->watch)) {
+		/*
+		 * Filesystem is shutting down; all watches are getting
+		 * evicted, just take it off the node list for this
+		 * tree and let the eviction logics take care of the
+		 * rest.
+		 */
+		owner = p->owner;
+		if (owner->root == chunk) {
+			list_del_init(&owner->same_root);
+			owner->root = NULL;
+		}
+		list_del_init(&p->list);
+		p->owner = NULL;
+		put_tree(owner);
+		return;
+	}
+
+	spin_unlock(&hash_lock);
+
+	/*
+	 * pin_inotify_watch() succeeded, so the watch won't go away
+	 * from under us.
+	 */
 	mutex_lock(&chunk->watch.inode->inotify_mutex);
 	if (chunk->dead) {
 		mutex_unlock(&chunk->watch.inode->inotify_mutex);
-		return;
+		goto out;
 	}
 
 	owner = p->owner;
@@ -221,7 +257,7 @@ static void untag_chunk(struct audit_chunk *chunk, struct node *p)
 		inotify_evict_watch(&chunk->watch);
 		mutex_unlock(&chunk->watch.inode->inotify_mutex);
 		put_inotify_watch(&chunk->watch);
-		return;
+		goto out;
 	}
 
 	new = alloc_chunk(size);
@@ -263,7 +299,7 @@ static void untag_chunk(struct audit_chunk *chunk, struct node *p)
 	inotify_evict_watch(&chunk->watch);
 	mutex_unlock(&chunk->watch.inode->inotify_mutex);
 	put_inotify_watch(&chunk->watch);
-	return;
+	goto out;
 
 Fallback:
 	// do the best we can
@@ -277,6 +313,9 @@ Fallback:
 	put_tree(owner);
 	spin_unlock(&hash_lock);
 	mutex_unlock(&chunk->watch.inode->inotify_mutex);
+out:
+	unpin_inotify_watch(&chunk->watch);
+	spin_lock(&hash_lock);
 }
 
 static int create_chunk(struct inode *inode, struct audit_tree *tree)
@@ -387,13 +426,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	return 0;
 }
 
-static struct audit_chunk *find_chunk(struct node *p)
-{
-	int index = p->index & ~(1U<<31);
-	p -= index;
-	return container_of(p, struct audit_chunk, owners[0]);
-}
-
 static void kill_rules(struct audit_tree *tree)
 {
 	struct audit_krule *rule, *next;
@@ -431,17 +463,10 @@ static void prune_one(struct audit_tree *victim)
 	spin_lock(&hash_lock);
 	while (!list_empty(&victim->chunks)) {
 		struct node *p;
-		struct audit_chunk *chunk;
 
 		p = list_entry(victim->chunks.next, struct node, list);
-		chunk = find_chunk(p);
-		get_inotify_watch(&chunk->watch);
-		spin_unlock(&hash_lock);
-
-		untag_chunk(chunk, p);
 
-		put_inotify_watch(&chunk->watch);
-		spin_lock(&hash_lock);
+		untag_chunk(p);
 	}
 	spin_unlock(&hash_lock);
 	put_tree(victim);
@@ -469,7 +494,6 @@ static void trim_marked(struct audit_tree *tree)
 
 	while (!list_empty(&tree->chunks)) {
 		struct node *node;
-		struct audit_chunk *chunk;
 
 		node = list_entry(tree->chunks.next, struct node, list);
 
@@ -477,14 +501,7 @@ static void trim_marked(struct audit_tree *tree)
 		if (!(node->index & (1U<<31)))
 			break;
 
-		chunk = find_chunk(node);
-		get_inotify_watch(&chunk->watch);
-		spin_unlock(&hash_lock);
-
-		untag_chunk(chunk, node);
-
-		put_inotify_watch(&chunk->watch);
-		spin_lock(&hash_lock);
+		untag_chunk(node);
 	}
 	if (!tree->root && !tree->goner) {
 		tree->goner = 1;
@@ -532,7 +549,7 @@ void audit_trim_trees(void)
 	list_add(&cursor, &tree_list);
 	while (cursor.next != &tree_list) {
 		struct audit_tree *tree;
-		struct nameidata nd;
+		struct path path;
 		struct vfsmount *root_mnt;
 		struct node *node;
 		struct list_head list;
@@ -544,12 +561,12 @@ void audit_trim_trees(void)
 		list_add(&cursor, &tree->list);
 		mutex_unlock(&audit_filter_mutex);
 
-		err = path_lookup(tree->pathname, 0, &nd);
+		err = kern_path(tree->pathname, 0, &path);
 		if (err)
 			goto skip_it;
 
-		root_mnt = collect_mounts(nd.path.mnt, nd.path.dentry);
-		path_put(&nd.path);
+		root_mnt = collect_mounts(path.mnt, path.dentry);
+		path_put(&path);
 		if (!root_mnt)
 			goto skip_it;
 
@@ -580,19 +597,19 @@ skip_it:
 }
 
 static int is_under(struct vfsmount *mnt, struct dentry *dentry,
-		    struct nameidata *nd)
+		    struct path *path)
 {
-	if (mnt != nd->path.mnt) {
+	if (mnt != path->mnt) {
 		for (;;) {
 			if (mnt->mnt_parent == mnt)
 				return 0;
-			if (mnt->mnt_parent == nd->path.mnt)
+			if (mnt->mnt_parent == path->mnt)
 				break;
 			mnt = mnt->mnt_parent;
 		}
 		dentry = mnt->mnt_mountpoint;
 	}
-	return is_subdir(dentry, nd->path.dentry);
+	return is_subdir(dentry, path->dentry);
 }
 
 int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
@@ -618,7 +635,7 @@ void audit_put_tree(struct audit_tree *tree)
 int audit_add_tree_rule(struct audit_krule *rule)
 {
 	struct audit_tree *seed = rule->tree, *tree;
-	struct nameidata nd;
+	struct path path;
 	struct vfsmount *mnt, *p;
 	struct list_head list;
 	int err;
@@ -637,11 +654,11 @@ int audit_add_tree_rule(struct audit_krule *rule)
 	/* do not set rule->tree yet */
 	mutex_unlock(&audit_filter_mutex);
 
-	err = path_lookup(tree->pathname, 0, &nd);
+	err = kern_path(tree->pathname, 0, &path);
 	if (err)
 		goto Err;
-	mnt = collect_mounts(nd.path.mnt, nd.path.dentry);
-	path_put(&nd.path);
+	mnt = collect_mounts(path.mnt, path.dentry);
+	path_put(&path);
 	if (!mnt) {
 		err = -ENOMEM;
 		goto Err;
@@ -690,29 +707,29 @@ int audit_tag_tree(char *old, char *new)
 {
 	struct list_head cursor, barrier;
 	int failed = 0;
-	struct nameidata nd;
+	struct path path;
 	struct vfsmount *tagged;
 	struct list_head list;
 	struct vfsmount *mnt;
 	struct dentry *dentry;
 	int err;
 
-	err = path_lookup(new, 0, &nd);
+	err = kern_path(new, 0, &path);
 	if (err)
 		return err;
-	tagged = collect_mounts(nd.path.mnt, nd.path.dentry);
-	path_put(&nd.path);
+	tagged = collect_mounts(path.mnt, path.dentry);
+	path_put(&path);
 	if (!tagged)
 		return -ENOMEM;
 
-	err = path_lookup(old, 0, &nd);
+	err = kern_path(old, 0, &path);
 	if (err) {
 		drop_collected_mounts(tagged);
 		return err;
 	}
-	mnt = mntget(nd.path.mnt);
-	dentry = dget(nd.path.dentry);
-	path_put(&nd.path);
+	mnt = mntget(path.mnt);
+	dentry = dget(path.dentry);
+	path_put(&path);
 
 	if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
 		follow_up(&mnt, &dentry);
@@ -733,7 +750,7 @@ int audit_tag_tree(char *old, char *new)
 		list_add(&cursor, &tree->list);
 		mutex_unlock(&audit_filter_mutex);
 
-		err = path_lookup(tree->pathname, 0, &nd);
+		err = kern_path(tree->pathname, 0, &path);
 		if (err) {
 			put_tree(tree);
 			mutex_lock(&audit_filter_mutex);
@@ -741,15 +758,15 @@ int audit_tag_tree(char *old, char *new)
 		}
 
 		spin_lock(&vfsmount_lock);
-		if (!is_under(mnt, dentry, &nd)) {
+		if (!is_under(mnt, dentry, &path)) {
 			spin_unlock(&vfsmount_lock);
-			path_put(&nd.path);
+			path_put(&path);
 			put_tree(tree);
 			mutex_lock(&audit_filter_mutex);
 			continue;
 		}
 		spin_unlock(&vfsmount_lock);
-		path_put(&nd.path);
+		path_put(&path);
 
 		list_for_each_entry(p, &list, mnt_list) {
 			failed = tag_chunk(p->mnt_root->d_inode, tree);
@@ -878,7 +895,7 @@ static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
 static void destroy_watch(struct inotify_watch *watch)
 {
 	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
-	free_chunk(chunk);
+	call_rcu(&chunk->head, __put_chunk);
 }
 
 static const struct inotify_operations rtree_inotify_ops = {
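
Note on the lifetime scheme in this patch: a chunk is now kept alive by its own counter (.refs) rather than by the embedded inotify watch alone. audit_tree_lookup() bumps .refs under RCU, audit_put_chunk() frees the chunk on the final drop, and destroy_watch() defers its drop through call_rcu(&chunk->head, __put_chunk). The sketch below is a minimal userspace analogue of two of those ideas, not the kernel code: the dec-and-test lifetime rule, and the find_chunk() trick of recovering the containing chunk from a node pointer by masking the tag bit off node->index and stepping back to owners[0]. All types and helpers are simplified stand-ins; RCU deferral and inotify watch pinning are deliberately omitted.

/* build: cc -std=c11 -Wall sketch.c */
#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node {
	unsigned int index;		/* MSB is a tag bit, as in audit_tree */
};

struct chunk {
	atomic_long refs;		/* stands in for audit_chunk.refs */
	int count;
	struct node owners[];		/* flexible array, like audit_chunk */
};

static struct chunk *alloc_chunk(int count)
{
	struct chunk *c = calloc(1, sizeof(*c) + count * sizeof(struct node));

	if (!c)
		return NULL;
	c->count = count;
	atomic_init(&c->refs, 1);	/* creator's reference */
	for (int i = 0; i < count; i++)
		c->owners[i].index = i;
	return c;
}

/* Mirrors find_chunk(): mask the tag bit, step back to owners[0]. */
static struct chunk *find_chunk(struct node *p)
{
	unsigned int index = p->index & ~(1U << 31);

	p -= index;
	return container_of(p, struct chunk, owners[0]);
}

static void get_chunk(struct chunk *c)
{
	atomic_fetch_add(&c->refs, 1);	/* what audit_tree_lookup() does */
}

static void put_chunk(struct chunk *c)
{
	/* audit_put_chunk(): free on the final reference drop */
	if (atomic_fetch_sub(&c->refs, 1) == 1)
		free(c);
}

int main(void)
{
	struct chunk *c = alloc_chunk(4);

	c->owners[2].index |= 1U << 31;		/* tag a node */
	assert(find_chunk(&c->owners[2]) == c);	/* tag bit is ignored */

	get_chunk(c);	/* a lookup takes a reference... */
	put_chunk(c);	/* ...and drops it when done */
	put_chunk(c);	/* final drop frees the chunk */
	printf("ok\n");
	return 0;
}

The atomic_fetch_sub(...) == 1 test plays the role of the kernel's atomic_long_dec_and_test(): whoever drops the last reference, and only that caller, frees the chunk.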