about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2016-01-26 09:25:15 -0500
committerIngo Molnar <mingo@kernel.org>2016-01-29 02:35:36 -0500
commit5fa7c8ec57f70a7b5c6fe269fa9c51b9e465989c (patch)
treeb98a192b101a0813beb4f306925d887f30857842 /kernel
parentc6e5b73242d2d9172ea880483bc4ba7ffca0cfb2 (diff)
perf: Remove/simplify lockdep annotation
Now that the perf_event_ctx_lock_nested() call has moved from put_event() into perf_event_release_kernel() the first reason is no longer valid as that can no longer happen. The second reason seems to have been invalidated when Al Viro made fput() unconditionally async in the following commit: 4a9d4b024a31 ("switch fput to task_work_add") such that munmap()->fput()->release()->perf_release() would no longer happen. Therefore, remove the annotation. This should increase the efficiency of lockdep coverage of perf locking. Suggested-by: Alexander Shishkin <alexander.shishkin@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: David Ahern <dsahern@gmail.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vince Weaver <vincent.weaver@maine.edu> Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c  22
1 file changed, 1 insertion(+), 21 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 98c862aff8fa..f1e53e8d4ae2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3758,19 +3758,7 @@ int perf_event_release_kernel(struct perf_event *event)
 	if (!is_kernel_event(event))
 		perf_remove_from_owner(event);
 
-	/*
-	 * There are two ways this annotation is useful:
-	 *
-	 *  1) there is a lock recursion from perf_event_exit_task
-	 *     see the comment there.
-	 *
-	 *  2) there is a lock-inversion with mmap_sem through
-	 *     perf_read_group(), which takes faults while
-	 *     holding ctx->mutex, however this is called after
-	 *     the last filedesc died, so there is no possibility
-	 *     to trigger the AB-BA case.
-	 */
-	ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
+	ctx = perf_event_ctx_lock(event);
 	WARN_ON_ONCE(ctx->parent_ctx);
 	perf_remove_from_context(event, DETACH_GROUP | DETACH_STATE);
 	perf_event_ctx_unlock(event, ctx);
@@ -8759,14 +8747,6 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
8759 * perf_event_create_kernel_count() which does find_get_context() 8747 * perf_event_create_kernel_count() which does find_get_context()
8760 * without ctx::mutex (it cannot because of the move_group double mutex 8748 * without ctx::mutex (it cannot because of the move_group double mutex
8761 * lock thing). See the comments in perf_install_in_context(). 8749 * lock thing). See the comments in perf_install_in_context().
8762 *
8763 * We can recurse on the same lock type through:
8764 *
8765 * perf_event_exit_event()
8766 * put_event()
8767 * mutex_lock(&ctx->mutex)
8768 *
8769 * But since its the parent context it won't be the same instance.
8770 */ 8750 */
8771 mutex_lock(&child_ctx->mutex); 8751 mutex_lock(&child_ctx->mutex);
8772 8752