aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2018-01-09 07:10:30 -0500
committerIngo Molnar <mingo@kernel.org>2018-01-25 08:48:29 -0500
commit82d94856fa221b5173eefd56bcd1057c037e9b07 (patch)
tree1e798ac4fd09b470a3c45e393048afab7ddb21bf
parent1f07476ec143bbed7bf0b641749783b1094b4c4f (diff)
perf/core: Fix lock inversion between perf,trace,cpuhp
Lockdep gifted us with noticing the following 4-way lockup scenario:

        perf_trace_init()
 #0       mutex_lock(&event_mutex)
          perf_trace_event_init()
            perf_trace_event_reg()
              tp_event->class->reg() := tracepoint_probe_register
 #1             mutex_lock(&tracepoints_mutex)
                  trace_point_add_func()
 #2                 static_key_enable()

 #2     do_cpu_up()
          perf_event_init_cpu()
 #3         mutex_lock(&pmus_lock)
 #4         mutex_lock(&ctx->mutex)

        perf_event_task_disable()
          mutex_lock(&current->perf_event_mutex)
 #4       ctx = perf_event_ctx_lock()
 #5       perf_event_for_each_child()

        do_exit()
          task_work_run()
            __fput()
              perf_release()
                perf_event_release_kernel()
 #4               mutex_lock(&ctx->mutex)
 #5               mutex_lock(&event->child_mutex)
                  free_event()
                    _free_event()
                      event->destroy() := perf_trace_destroy
 #0                     mutex_lock(&event_mutex);

Fix that by moving the free_event() out from under the locks.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/events/core.c13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4df5b695bf0d..2d80824298a7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1231,6 +1231,10 @@ static void put_ctx(struct perf_event_context *ctx)
1231 * perf_event_context::lock 1231 * perf_event_context::lock
1232 * perf_event::mmap_mutex 1232 * perf_event::mmap_mutex
1233 * mmap_sem 1233 * mmap_sem
1234 *
1235 * cpu_hotplug_lock
1236 * pmus_lock
1237 * cpuctx->mutex / perf_event_context::mutex
1234 */ 1238 */
1235static struct perf_event_context * 1239static struct perf_event_context *
1236perf_event_ctx_lock_nested(struct perf_event *event, int nesting) 1240perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
@@ -4196,6 +4200,7 @@ int perf_event_release_kernel(struct perf_event *event)
4196{ 4200{
4197 struct perf_event_context *ctx = event->ctx; 4201 struct perf_event_context *ctx = event->ctx;
4198 struct perf_event *child, *tmp; 4202 struct perf_event *child, *tmp;
4203 LIST_HEAD(free_list);
4199 4204
4200 /* 4205 /*
4201 * If we got here through err_file: fput(event_file); we will not have 4206 * If we got here through err_file: fput(event_file); we will not have
@@ -4268,8 +4273,7 @@ again:
4268 struct perf_event, child_list); 4273 struct perf_event, child_list);
4269 if (tmp == child) { 4274 if (tmp == child) {
4270 perf_remove_from_context(child, DETACH_GROUP); 4275 perf_remove_from_context(child, DETACH_GROUP);
4271 list_del(&child->child_list); 4276 list_move(&child->child_list, &free_list);
4272 free_event(child);
4273 /* 4277 /*
4274 * This matches the refcount bump in inherit_event(); 4278 * This matches the refcount bump in inherit_event();
4275 * this can't be the last reference. 4279 * this can't be the last reference.
@@ -4284,6 +4288,11 @@ again:
4284 } 4288 }
4285 mutex_unlock(&event->child_mutex); 4289 mutex_unlock(&event->child_mutex);
4286 4290
4291 list_for_each_entry_safe(child, tmp, &free_list, child_list) {
4292 list_del(&child->child_list);
4293 free_event(child);
4294 }
4295
4287no_ctx: 4296no_ctx:
4288 put_event(event); /* Must be the 'last' reference */ 4297 put_event(event); /* Must be the 'last' reference */
4289 return 0; 4298 return 0;