author		Andrea Righi <andrea.righi@canonical.com>	2019-08-12 14:43:02 -0400
committer	Ingo Molnar <mingo@kernel.org>	2019-08-19 06:22:19 -0400
commit		f1c6ece23729257fb46562ff9224cf5f61b818da (patch)
tree		bc468dd8660cfa173579c9348213572cbb4e73e0
parent		77d760328ee015cf89460c52bfd5a6b0a09b7472 (diff)
kprobes: Fix potential deadlock in kprobe_optimizer()
lockdep reports the following deadlock scenario:

 WARNING: possible circular locking dependency detected

 kworker/1:1/48 is trying to acquire lock:
 000000008d7a62b2 (text_mutex){+.+.}, at: kprobe_optimizer+0x163/0x290

 but task is already holding lock:
 00000000850b5e2d (module_mutex){+.+.}, at: kprobe_optimizer+0x31/0x290

 which lock already depends on the new lock.

 the existing dependency chain (in reverse order) is:

 -> #1 (module_mutex){+.+.}:
        __mutex_lock+0xac/0x9f0
        mutex_lock_nested+0x1b/0x20
        set_all_modules_text_rw+0x22/0x90
        ftrace_arch_code_modify_prepare+0x1c/0x20
        ftrace_run_update_code+0xe/0x30
        ftrace_startup_enable+0x2e/0x50
        ftrace_startup+0xa7/0x100
        register_ftrace_function+0x27/0x70
        arm_kprobe+0xb3/0x130
        enable_kprobe+0x83/0xa0
        enable_trace_kprobe.part.0+0x2e/0x80
        kprobe_register+0x6f/0xc0
        perf_trace_event_init+0x16b/0x270
        perf_kprobe_init+0xa7/0xe0
        perf_kprobe_event_init+0x3e/0x70
        perf_try_init_event+0x4a/0x140
        perf_event_alloc+0x93a/0xde0
        __do_sys_perf_event_open+0x19f/0xf30
        __x64_sys_perf_event_open+0x20/0x30
        do_syscall_64+0x65/0x1d0
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

 -> #0 (text_mutex){+.+.}:
        __lock_acquire+0xfcb/0x1b60
        lock_acquire+0xca/0x1d0
        __mutex_lock+0xac/0x9f0
        mutex_lock_nested+0x1b/0x20
        kprobe_optimizer+0x163/0x290
        process_one_work+0x22b/0x560
        worker_thread+0x50/0x3c0
        kthread+0x112/0x150
        ret_from_fork+0x3a/0x50

 other info that might help us debug this:

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(module_mutex);
                               lock(text_mutex);
                               lock(module_mutex);
  lock(text_mutex);

 *** DEADLOCK ***

As a reproducer I've been using bcc's funccount.py
(https://github.com/iovisor/bcc/blob/master/tools/funccount.py),
for example:

 # ./funccount.py '*interrupt*'

That immediately triggers the lockdep splat.

Fix by acquiring text_mutex before module_mutex in kprobe_optimizer().

Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: d5b844a2cf50 ("ftrace/x86: Remove possible deadlock between register_kprobe() and ftrace_run_update_code()")
Link: http://lkml.kernel.org/r/20190812184302.GA7010@xps-13
Signed-off-by: Ingo Molnar <mingo@kernel.org>
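The "Possible unsafe locking scenario" above is the classic AB-BA inversion. As a rough userspace illustration only (plain POSIX threads, not kernel code; the mutex and function names merely mirror the locks and call paths in the report), two threads taking the same pair of locks in opposite order can block each other indefinitely:

/* Build with: gcc -pthread abba.c */
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t module_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t text_mutex   = PTHREAD_MUTEX_INITIALIZER;

/* "CPU0" in the report: kprobe_optimizer() before this patch */
static void *optimizer_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&module_mutex);
	sleep(1);				/* widen the race window */
	pthread_mutex_lock(&text_mutex);	/* blocks forever if the other thread holds text_mutex */
	pthread_mutex_unlock(&text_mutex);
	pthread_mutex_unlock(&module_mutex);
	return NULL;
}

/* "CPU1" in the report: the ftrace code-modification path */
static void *ftrace_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&text_mutex);
	sleep(1);
	pthread_mutex_lock(&module_mutex);	/* blocks forever as well */
	pthread_mutex_unlock(&module_mutex);
	pthread_mutex_unlock(&text_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, optimizer_path, NULL);
	pthread_create(&t1, NULL, ftrace_path, NULL);
	pthread_join(t0, NULL);			/* never returns: AB-BA deadlock */
	pthread_join(t1, NULL);
	return 0;
}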
-rw-r--r--	kernel/kprobes.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9873fc627d61..d9770a5393c8 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  */
 static void do_optimize_kprobes(void)
 {
+	lockdep_assert_held(&text_mutex);
 	/*
 	 * The optimization/unoptimization refers online_cpus via
 	 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -487,9 +488,7 @@ static void do_optimize_kprobes(void)
 	    list_empty(&optimizing_list))
 		return;
 
-	mutex_lock(&text_mutex);
 	arch_optimize_kprobes(&optimizing_list);
-	mutex_unlock(&text_mutex);
 }
 
 /*
@@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
+	lockdep_assert_held(&text_mutex);
 	/* See comment in do_optimize_kprobes() */
 	lockdep_assert_cpus_held();
 
@@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void)
 	if (list_empty(&unoptimizing_list))
 		return;
 
-	mutex_lock(&text_mutex);
 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void)
 		} else
 			list_del_init(&op->list);
 	}
-	mutex_unlock(&text_mutex);
 }
 
 /* Reclaim all kprobes on the free_list */
@@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work)
 {
 	mutex_lock(&kprobe_mutex);
 	cpus_read_lock();
+	mutex_lock(&text_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
 
@@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work)
 	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
+	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
 	mutex_unlock(&kprobe_mutex);
 
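For reference, a condensed sketch of the locking skeleton that kprobe_optimizer() ends up with after this patch (the unoptimize/optimize/free work between the lock and unlock calls is elided here; see the hunks above for the exact changes). text_mutex is now taken up front, before module_mutex, which matches the order used by the ftrace path in the lockdep report, and the locks are released in reverse order:

/* Condensed sketch only; the real function does more work between the locks. */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);	/* now acquired before module_mutex */
	mutex_lock(&module_mutex);	/* lock modules while optimizing kprobes */

	/* ... do_unoptimize_kprobes(), do_optimize_kprobes(), do_free_cleaned_kprobes() ... */

	mutex_unlock(&module_mutex);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);
}

With both the kprobe optimizer and the ftrace code-modification path agreeing on text_mutex -> module_mutex, the circular dependency reported by lockdep can no longer form.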