author		Imre Palik <imrep@amazon.de>		2015-02-23 15:37:59 -0500
committer	Paul Moore <pmoore@redhat.com>		2015-02-23 15:37:59 -0500
commit		f1aaf26224bee779012aab136e5373ce3487982c (patch)
tree		d0c309cc10f29c2643e3214783fd65d835ba13bd /kernel
parent		2fded7f44b8fcf79e274c3f0cfbd0298f95308f3 (diff)
audit: move the tree pruning to a dedicated thread
When file auditing is enabled, a memory allocation with __GFP_FS during a low-memory situation can lead to pruning the inode cache, which can, in turn, lead to audit_tree_freeing_mark() being called. This can call audit_schedule_prune(), which tries to fork a pruning thread and waits until the thread is created.

But forking needs memory, and the memory allocations there are done with __GFP_FS.

So we are waiting merrily for some __GFP_FS memory allocations to complete, while holding some filesystem locks. This can take a while ...

This patch creates a single thread for pruning the tree from audit_add_tree_rule(), and thus avoids the deadlock that the on-demand thread creation can cause.

Reported-by: Matt Wilson <msw@amazon.com>
Cc: Matt Wilson <msw@amazon.com>
Signed-off-by: Imre Palik <imrep@amazon.de>
Reviewed-by: Richard Guy Briggs <rgb@redhat.com>
Signed-off-by: Paul Moore <pmoore@redhat.com>
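The pattern the fix relies on is "create the worker thread once, from a context that may safely allocate, and only wake it from reclaim context". Below is a minimal, self-contained sketch of that pattern, not taken from the patch itself; the module, the work_item type, and names such as worker_fn, launch_worker and kick_worker are purely illustrative.

/* Hypothetical sketch of a "create once, wake on demand" worker thread. */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct work_item {
	struct list_head list;
};

static LIST_HEAD(work_list);
static DEFINE_MUTEX(work_mutex);
static struct task_struct *worker;	/* created once, reused forever */

static int worker_fn(void *unused)
{
	for (;;) {
		/* Sleep until someone queues an item and wakes us up. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&work_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		mutex_lock(&work_mutex);
		while (!list_empty(&work_list)) {
			struct work_item *item;

			item = list_entry(work_list.next,
					  struct work_item, list);
			list_del_init(&item->list);
			mutex_unlock(&work_mutex);

			kfree(item);		/* stands in for prune_one() */

			mutex_lock(&work_mutex);
		}
		mutex_unlock(&work_mutex);
	}
	return 0;
}

/* Called from a context that may block and allocate (e.g. rule insertion). */
static int launch_worker(void)
{
	if (worker)
		return 0;
	worker = kthread_create(worker_fn, NULL, "example_worker");
	if (IS_ERR(worker)) {
		worker = NULL;
		return -ENOMEM;
	}
	wake_up_process(worker);
	return 0;
}

/* Called from reclaim context: no allocation, just a wakeup. */
static void kick_worker(void)
{
	wake_up_process(worker);
}

The point of the split is that wake_up_process() on an already-created thread does not allocate memory, so it is safe to call from the __GFP_FS reclaim path, whereas kthread_create()/fork does allocate and can therefore deadlock there, which is exactly the scenario described in the commit message.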
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/audit_tree.c	| 88
1 file changed, 60 insertions(+), 28 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 80f29e015570..415072c8e875 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -37,6 +37,7 @@ struct audit_chunk {
 
 static LIST_HEAD(tree_list);
 static LIST_HEAD(prune_list);
+static struct task_struct *prune_thread;
 
 /*
  * One struct chunk is attached to each inode of interest.
@@ -651,6 +652,57 @@ static int tag_mount(struct vfsmount *mnt, void *arg)
 	return tag_chunk(mnt->mnt_root->d_inode, arg);
 }
 
+/*
+ * That gets run when evict_chunk() ends up needing to kill audit_tree.
+ * Runs from a separate thread.
+ */
+static int prune_tree_thread(void *unused)
+{
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (list_empty(&prune_list))
+			schedule();
+		__set_current_state(TASK_RUNNING);
+
+		mutex_lock(&audit_cmd_mutex);
+		mutex_lock(&audit_filter_mutex);
+
+		while (!list_empty(&prune_list)) {
+			struct audit_tree *victim;
+
+			victim = list_entry(prune_list.next,
+					struct audit_tree, list);
+			list_del_init(&victim->list);
+
+			mutex_unlock(&audit_filter_mutex);
+
+			prune_one(victim);
+
+			mutex_lock(&audit_filter_mutex);
+		}
+
+		mutex_unlock(&audit_filter_mutex);
+		mutex_unlock(&audit_cmd_mutex);
+	}
+	return 0;
+}
+
+static int audit_launch_prune(void)
+{
+	if (prune_thread)
+		return 0;
+	prune_thread = kthread_create(prune_tree_thread, NULL,
+				"audit_prune_tree");
+	if (IS_ERR(prune_thread)) {
+		pr_err("cannot start thread audit_prune_tree");
+		prune_thread = NULL;
+		return -ENOMEM;
+	} else {
+		wake_up_process(prune_thread);
+		return 0;
+	}
+}
+
 /* called with audit_filter_mutex */
 int audit_add_tree_rule(struct audit_krule *rule)
 {
@@ -674,6 +726,12 @@ int audit_add_tree_rule(struct audit_krule *rule)
 	/* do not set rule->tree yet */
 	mutex_unlock(&audit_filter_mutex);
 
+	if (unlikely(!prune_thread)) {
+		err = audit_launch_prune();
+		if (err)
+			goto Err;
+	}
+
 	err = kern_path(tree->pathname, 0, &path);
 	if (err)
 		goto Err;
@@ -811,36 +869,10 @@ int audit_tag_tree(char *old, char *new)
 	return failed;
 }
 
-/*
- * That gets run when evict_chunk() ends up needing to kill audit_tree.
- * Runs from a separate thread.
- */
-static int prune_tree_thread(void *unused)
-{
-	mutex_lock(&audit_cmd_mutex);
-	mutex_lock(&audit_filter_mutex);
-
-	while (!list_empty(&prune_list)) {
-		struct audit_tree *victim;
-
-		victim = list_entry(prune_list.next, struct audit_tree, list);
-		list_del_init(&victim->list);
-
-		mutex_unlock(&audit_filter_mutex);
-
-		prune_one(victim);
-
-		mutex_lock(&audit_filter_mutex);
-	}
-
-	mutex_unlock(&audit_filter_mutex);
-	mutex_unlock(&audit_cmd_mutex);
-	return 0;
-}
 
 static void audit_schedule_prune(void)
 {
-	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
+	wake_up_process(prune_thread);
 }
 
 /*
@@ -907,9 +939,9 @@ static void evict_chunk(struct audit_chunk *chunk)
 	for (n = 0; n < chunk->count; n++)
 		list_del_init(&chunk->owners[n].list);
 	spin_unlock(&hash_lock);
+	mutex_unlock(&audit_filter_mutex);
 	if (need_prune)
 		audit_schedule_prune();
-	mutex_unlock(&audit_filter_mutex);
 }
 
 static int audit_tree_handle_event(struct fsnotify_group *group,