author     Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /kernel/audit_tree.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master  (archived-private-master)
Diffstat (limited to 'kernel/audit_tree.c')
-rw-r--r--  kernel/audit_tree.c  114
1 file changed, 37 insertions(+), 77 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 2451dc6f3282..46a57b57a335 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -3,6 +3,7 @@
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/kthread.h>
+#include <linux/slab.h>
 
 struct audit_tree;
 struct audit_chunk;
@@ -277,7 +278,7 @@ static void untag_chunk(struct node *p)
 		owner->root = NULL;
 	}
 
-	for (i = j = 0; i < size; i++, j++) {
+	for (i = j = 0; j <= size; i++, j++) {
 		struct audit_tree *s;
 		if (&chunk->owners[j] == p) {
 			list_del_init(&p->list);
@@ -290,7 +291,7 @@ static void untag_chunk(struct node *p)
 		if (!s) /* result of earlier fallback */
 			continue;
 		get_tree(s);
-		list_replace_init(&chunk->owners[i].list, &new->owners[j].list);
+		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
 	}
 
 	list_replace_rcu(&chunk->hash, &new->hash);
@@ -373,15 +374,17 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	for (n = 0; n < old->count; n++) {
 		if (old->owners[n].owner == tree) {
 			spin_unlock(&hash_lock);
-			put_inotify_watch(watch);
+			put_inotify_watch(&old->watch);
 			return 0;
 		}
 	}
 	spin_unlock(&hash_lock);
 
 	chunk = alloc_chunk(old->count + 1);
-	if (!chunk)
+	if (!chunk) {
+		put_inotify_watch(&old->watch);
 		return -ENOMEM;
+	}
 
 	mutex_lock(&inode->inotify_mutex);
 	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
@@ -425,7 +428,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	spin_unlock(&hash_lock);
 	inotify_evict_watch(&old->watch);
 	mutex_unlock(&inode->inotify_mutex);
-	put_inotify_watch(&old->watch);
+	put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
+	put_inotify_watch(&old->watch); /* and kill it */
 	return 0;
 }
 
@@ -545,6 +549,11 @@ int audit_remove_tree_rule(struct audit_krule *rule)
 	return 0;
 }
 
+static int compare_root(struct vfsmount *mnt, void *arg)
+{
+	return mnt->mnt_root->d_inode == arg;
+}
+
 void audit_trim_trees(void)
 {
 	struct list_head cursor;
@@ -556,7 +565,6 @@ void audit_trim_trees(void)
 		struct path path;
 		struct vfsmount *root_mnt;
 		struct node *node;
-		struct list_head list;
 		int err;
 
 		tree = container_of(cursor.next, struct audit_tree, list);
@@ -574,24 +582,16 @@ void audit_trim_trees(void)
 		if (!root_mnt)
 			goto skip_it;
 
-		list_add_tail(&list, &root_mnt->mnt_list);
 		spin_lock(&hash_lock);
 		list_for_each_entry(node, &tree->chunks, list) {
-			struct audit_chunk *chunk = find_chunk(node);
-			struct inode *inode = chunk->watch.inode;
-			struct vfsmount *mnt;
+			struct inode *inode = find_chunk(node)->watch.inode;
 			node->index |= 1U<<31;
-			list_for_each_entry(mnt, &list, mnt_list) {
-				if (mnt->mnt_root->d_inode == inode) {
-					node->index &= ~(1U<<31);
-					break;
-				}
-			}
+			if (iterate_mounts(compare_root, inode, root_mnt))
+				node->index &= ~(1U<<31);
 		}
 		spin_unlock(&hash_lock);
 		trim_marked(tree);
 		put_tree(tree);
-		list_del_init(&list);
 		drop_collected_mounts(root_mnt);
 skip_it:
 		mutex_lock(&audit_filter_mutex);
@@ -600,22 +600,6 @@ skip_it:
 	mutex_unlock(&audit_filter_mutex);
 }
 
-static int is_under(struct vfsmount *mnt, struct dentry *dentry,
-		    struct path *path)
-{
-	if (mnt != path->mnt) {
-		for (;;) {
-			if (mnt->mnt_parent == mnt)
-				return 0;
-			if (mnt->mnt_parent == path->mnt)
-				break;
-			mnt = mnt->mnt_parent;
-		}
-		dentry = mnt->mnt_mountpoint;
-	}
-	return is_subdir(dentry, path->dentry);
-}
-
 int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
 {
 
@@ -635,13 +619,17 @@ void audit_put_tree(struct audit_tree *tree)
 	put_tree(tree);
 }
 
+static int tag_mount(struct vfsmount *mnt, void *arg)
+{
+	return tag_chunk(mnt->mnt_root->d_inode, arg);
+}
+
 /* called with audit_filter_mutex */
 int audit_add_tree_rule(struct audit_krule *rule)
 {
 	struct audit_tree *seed = rule->tree, *tree;
 	struct path path;
-	struct vfsmount *mnt, *p;
-	struct list_head list;
+	struct vfsmount *mnt;
 	int err;
 
 	list_for_each_entry(tree, &tree_list, list) {
@@ -667,16 +655,9 @@ int audit_add_tree_rule(struct audit_krule *rule)
 		err = -ENOMEM;
 		goto Err;
 	}
-	list_add_tail(&list, &mnt->mnt_list);
 
 	get_tree(tree);
-	list_for_each_entry(p, &list, mnt_list) {
-		err = tag_chunk(p->mnt_root->d_inode, tree);
-		if (err)
-			break;
-	}
-
-	list_del(&list);
+	err = iterate_mounts(tag_mount, tree, mnt);
 	drop_collected_mounts(mnt);
 
 	if (!err) {
@@ -711,31 +692,23 @@ int audit_tag_tree(char *old, char *new)
 {
 	struct list_head cursor, barrier;
 	int failed = 0;
-	struct path path;
+	struct path path1, path2;
 	struct vfsmount *tagged;
-	struct list_head list;
-	struct vfsmount *mnt;
-	struct dentry *dentry;
 	int err;
 
-	err = kern_path(new, 0, &path);
+	err = kern_path(new, 0, &path2);
 	if (err)
 		return err;
-	tagged = collect_mounts(&path);
-	path_put(&path);
+	tagged = collect_mounts(&path2);
+	path_put(&path2);
 	if (!tagged)
 		return -ENOMEM;
 
-	err = kern_path(old, 0, &path);
+	err = kern_path(old, 0, &path1);
 	if (err) {
 		drop_collected_mounts(tagged);
 		return err;
 	}
-	mnt = mntget(path.mnt);
-	dentry = dget(path.dentry);
-	path_put(&path);
-
-	list_add_tail(&list, &tagged->mnt_list);
 
 	mutex_lock(&audit_filter_mutex);
 	list_add(&barrier, &tree_list);
@@ -743,7 +716,7 @@ int audit_tag_tree(char *old, char *new)
 
 	while (cursor.next != &tree_list) {
 		struct audit_tree *tree;
-		struct vfsmount *p;
+		int good_one = 0;
 
 		tree = container_of(cursor.next, struct audit_tree, list);
 		get_tree(tree);
@@ -751,30 +724,19 @@ int audit_tag_tree(char *old, char *new)
 		list_add(&cursor, &tree->list);
 		mutex_unlock(&audit_filter_mutex);
 
-		err = kern_path(tree->pathname, 0, &path);
-		if (err) {
-			put_tree(tree);
-			mutex_lock(&audit_filter_mutex);
-			continue;
+		err = kern_path(tree->pathname, 0, &path2);
+		if (!err) {
+			good_one = path_is_under(&path1, &path2);
+			path_put(&path2);
 		}
 
-		spin_lock(&vfsmount_lock);
-		if (!is_under(mnt, dentry, &path)) {
-			spin_unlock(&vfsmount_lock);
-			path_put(&path);
+		if (!good_one) {
 			put_tree(tree);
 			mutex_lock(&audit_filter_mutex);
 			continue;
 		}
-		spin_unlock(&vfsmount_lock);
-		path_put(&path);
-
-		list_for_each_entry(p, &list, mnt_list) {
-			failed = tag_chunk(p->mnt_root->d_inode, tree);
-			if (failed)
-				break;
-		}
 
+		failed = iterate_mounts(tag_mount, tree, tagged);
 		if (failed) {
 			put_tree(tree);
 			mutex_lock(&audit_filter_mutex);
@@ -815,10 +777,8 @@ int audit_tag_tree(char *old, char *new)
 	}
 	list_del(&barrier);
 	list_del(&cursor);
-	list_del(&list);
 	mutex_unlock(&audit_filter_mutex);
-	dput(dentry);
-	mntput(mnt);
+	path_put(&path1);
 	drop_collected_mounts(tagged);
 	return failed;
 }
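
Note on the kernel/audit_tree.c changes pulled in above: the recurring pattern is the replacement of open-coded walks over a collected mount set (a temporary list_head spliced onto mnt_list) with the iterate_mounts() helper introduced in this kernel series, plus path_is_under() in place of the removed local is_under(). The sketch below is illustrative only and not part of the commit: the prototype is the one declared in include/linux/mount.h, and the two callbacks are the ones added by the diff, annotated here with comments that do not appear in the source.

/*
 * Illustrative sketch (not from the commit).  iterate_mounts() calls
 * f(mnt, arg) for root and for every mount collected on its mnt_list,
 * stopping at and returning the first non-zero value f returns:
 *
 *     int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
 *                        struct vfsmount *root);
 */

/* audit_trim_trees(): non-zero means some collected mount is rooted at arg */
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}

/* audit_add_tree_rule()/audit_tag_tree(): tag each mount root; the first
 * tag_chunk() error stops the walk and is returned to the caller */
static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}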