author     Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>    2013-04-18 05:33:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>        2013-04-18 11:58:38 -0400
commit     5c51543b0ae45967cfa99ca16748dc72888cc265
tree       3bcc3c5f26adf4c773e934f110b596914b4fb856 /kernel/kprobes.c
parent     d202f05158442396033f416df376f8ece1f563df
kprobes: Fix a double lock bug of kprobe_mutex
Fix a double locking bug triggered when debug.kprobe-optimization is set to 0: while proc_kprobes_optimization_handler() holds kprobe_mutex, wait_for_kprobe_optimizer() locks it again, and that causes a double lock. To fix the bug, introduce a separate mutex that protects the sysctl parameter and take that one in proc_kprobes_optimization_handler(). Of course, kprobe_mutex still has to be held while touching kprobes resources, so that is now done inside (un)optimize_all_kprobes().

This bug was introduced by commit ad72b3bea744 ("kprobes: fix wait_for_kprobe_optimizer()").

Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
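The deadlock is easiest to see in isolation: the pre-patch sysctl handler takes kprobe_mutex and then, via unoptimize_all_kprobes() -> wait_for_kprobe_optimizer(), tries to take the same non-recursive mutex again on the same thread. The following is a minimal userspace sketch of that pattern using a default pthread mutex in place of kprobe_mutex; the fake_* names are hypothetical and this is not kernel code.

/*
 * Userspace sketch of the pre-patch double lock.  A default
 * (non-recursive) pthread mutex stands in for kprobe_mutex.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fake_kprobe_mutex = PTHREAD_MUTEX_INITIALIZER;

static void fake_wait_for_kprobe_optimizer(void)
{
	/* Like wait_for_kprobe_optimizer(): takes the mutex itself. */
	pthread_mutex_lock(&fake_kprobe_mutex);		/* second lock on the same thread: deadlock */
	pthread_mutex_unlock(&fake_kprobe_mutex);
}

static void fake_unoptimize_all_kprobes(void)
{
	/* ... unoptimize each probe ... */
	fake_wait_for_kprobe_optimizer();
}

int main(void)
{
	/* Pre-patch proc handler: already holds the mutex when calling down. */
	pthread_mutex_lock(&fake_kprobe_mutex);
	fake_unoptimize_all_kprobes();			/* never returns */
	pthread_mutex_unlock(&fake_kprobe_mutex);
	puts("unreached");
	return 0;
}

With debug.kprobe-optimization written as 0, the real code followed exactly this path, so the sysctl write hung.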
Diffstat (limited to 'kernel/kprobes.c')
 kernel/kprobes.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e35be53f6613..3fed7f0cbcdf 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -794,16 +794,16 @@ out:
 }
 
 #ifdef CONFIG_SYSCTL
-/* This should be called with kprobe_mutex locked */
 static void __kprobes optimize_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
 	unsigned int i;
 
+	mutex_lock(&kprobe_mutex);
 	/* If optimization is already allowed, just return */
 	if (kprobes_allow_optimization)
-		return;
+		goto out;
 
 	kprobes_allow_optimization = true;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -813,18 +813,22 @@ static void __kprobes optimize_all_kprobes(void)
 				optimize_kprobe(p);
 	}
 	printk(KERN_INFO "Kprobes globally optimized\n");
+out:
+	mutex_unlock(&kprobe_mutex);
 }
 
-/* This should be called with kprobe_mutex locked */
 static void __kprobes unoptimize_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
 	unsigned int i;
 
+	mutex_lock(&kprobe_mutex);
 	/* If optimization is already prohibited, just return */
-	if (!kprobes_allow_optimization)
+	if (!kprobes_allow_optimization) {
+		mutex_unlock(&kprobe_mutex);
 		return;
+	}
 
 	kprobes_allow_optimization = false;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -834,11 +838,14 @@ static void __kprobes unoptimize_all_kprobes(void)
 				unoptimize_kprobe(p, false);
 		}
 	}
+	mutex_unlock(&kprobe_mutex);
+
 	/* Wait for unoptimizing completion */
 	wait_for_kprobe_optimizer();
 	printk(KERN_INFO "Kprobes globally unoptimized\n");
 }
 
+static DEFINE_MUTEX(kprobe_sysctl_mutex);
 int sysctl_kprobes_optimization;
 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 				      void __user *buffer, size_t *length,
@@ -846,7 +853,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 {
 	int ret;
 
-	mutex_lock(&kprobe_mutex);
+	mutex_lock(&kprobe_sysctl_mutex);
 	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
 
@@ -854,7 +861,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 		optimize_all_kprobes();
 	else
 		unoptimize_all_kprobes();
-	mutex_unlock(&kprobe_mutex);
+	mutex_unlock(&kprobe_sysctl_mutex);
 
 	return ret;
 }
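Putting the hunks together, the post-patch locking scheme can be summarized with the same kind of userspace analogue (again with hypothetical fake_* names, not the real kprobes code): the sysctl path serializes on its own mutex, while the (un)optimize helpers take and release kprobe_mutex themselves and only wait for the optimizer after dropping it.

/*
 * Userspace sketch of the post-patch scheme.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t fake_kprobe_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fake_sysctl_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool allow_optimization = true;

static void fake_wait_for_kprobe_optimizer(void)
{
	/* Takes fake_kprobe_mutex internally, as the real waiter does. */
	pthread_mutex_lock(&fake_kprobe_mutex);
	pthread_mutex_unlock(&fake_kprobe_mutex);
}

static void fake_unoptimize_all_kprobes(void)
{
	pthread_mutex_lock(&fake_kprobe_mutex);
	if (!allow_optimization) {
		pthread_mutex_unlock(&fake_kprobe_mutex);
		return;
	}
	allow_optimization = false;
	/* ... unoptimize each probe under the mutex ... */
	pthread_mutex_unlock(&fake_kprobe_mutex);

	/* Safe: the mutex is no longer held while waiting. */
	fake_wait_for_kprobe_optimizer();
}

static void fake_proc_handler(bool enable)
{
	/* The sysctl knob itself is protected by its own mutex only. */
	pthread_mutex_lock(&fake_sysctl_mutex);
	if (!enable)
		fake_unoptimize_all_kprobes();
	pthread_mutex_unlock(&fake_sysctl_mutex);
}

int main(void)
{
	fake_proc_handler(false);	/* completes instead of deadlocking */
	return 0;
}

Nesting fake_kprobe_mutex inside fake_sysctl_mutex is safe here because nothing in the sketch (or, per the patch, in the kernel paths it models) takes the two in the opposite order.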