author      Peter Zijlstra <peterz@infradead.org>    2014-02-24 06:06:12 -0500
committer   Ingo Molnar <mingo@kernel.org>           2014-02-27 06:38:03 -0500
commit      e3703f8cdfcf39c25c4338c3ad8e68891cca3731
tree        b4fc6439f43eca37c954ce4d5d2848279b115b99 /kernel
parent      26e61e8939b1fe8729572dabe9a9e97d930dd4f6
perf: Fix hotplug splat
Drew Richardson reported that he could make the kernel go *boom* by
hotplugging a CPU while perf events were active.
It turned out that when you have a group event, the code in
__perf_event_exit_context() fails to remove the group siblings from
the context.
We then proceed with destroying and freeing the event, and when you
re-plug the CPU and try to add another event to that CPU, things go
*boom* because the dead entries are still there.
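
To make the failure mode concrete, here is a minimal userspace sketch
(hypothetical, simplified structures, not the kernel's real perf_event
lists): a context links only group leaders on its per-group lists, while
every event, siblings included, also sits on event_list, so a teardown
that walks only the group lists strands the siblings.

    /*
     * Toy model of the bug -- simplified stand-ins, not the kernel's
     * real structures. Removing only the group leaders leaves the
     * sibling events' entries behind on event_list.
     */
    #include <stdio.h>

    struct event {
        const char *name;
        int is_group_leader;   /* only leaders sit on the group lists */
        int on_event_list;     /* every event sits on ctx->event_list */
    };

    int main(void)
    {
        /* one event group: a leader plus two siblings */
        struct event ctx[] = {
            { "cycles (leader)", 1, 1 },
            { "instructions",    0, 1 },
            { "cache-misses",    0, 1 },
        };
        const int n = sizeof(ctx) / sizeof(ctx[0]);

        /* buggy teardown: walk the group lists => leaders only */
        for (int i = 0; i < n; i++)
            if (ctx[i].is_group_leader)
                ctx[i].on_event_list = 0;

        /* what a re-plugged CPU finds: stale sibling entries */
        for (int i = 0; i < n; i++)
            if (ctx[i].on_event_list)
                printf("dead entry left on event_list: %s\n", ctx[i].name);

        /* fixed teardown: walk event_list itself => every event */
        for (int i = 0; i < n; i++)
            ctx[i].on_event_list = 0;

        return 0;
    }

Built as C99, the sketch prints the two sibling entries the buggy path
leaves behind; walking event_list instead, as the patch below does,
reaches every event.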
Reported-by: Drew Richardson <drew.richardson@arm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: <stable@vger.kernel.org>
Link: http://lkml.kernel.org/n/tip-k6v5wundvusvcseqj1si0oz0@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c  12
1 file changed, 6 insertions, 6 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 56003c6edfd3..fa0b2d4ad83c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7856,14 +7856,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 static void __perf_event_exit_context(void *__info)
 {
 	struct perf_event_context *ctx = __info;
-	struct perf_event *event, *tmp;
+	struct perf_event *event;
 
 	perf_pmu_rotate_stop(ctx->pmu);
 
-	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-		__perf_remove_from_context(event);
-	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
+	rcu_read_lock();
+	list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
 		__perf_remove_from_context(event);
+	rcu_read_unlock();
 }
 
 static void perf_event_exit_cpu_context(int cpu)
@@ -7887,11 +7887,11 @@ static void perf_event_exit_cpu(int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
+	perf_event_exit_cpu_context(cpu);
+
 	mutex_lock(&swhash->hlist_mutex);
 	swevent_hlist_release(swhash);
 	mutex_unlock(&swhash->hlist_mutex);
-
-	perf_event_exit_cpu_context(cpu);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
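
A note on the shape of the fix: ctx->event_list links every event in the
context, siblings included, so a single walk of that list covers what the
two group-list walks missed. Because __perf_remove_from_context() unlinks
entries RCU-style, the rcu_read_lock()/list_for_each_entry_rcu() pair is
presumably what makes it safe to keep iterating while entries are removed
out from under the walk, and why the _safe iterator and its tmp cursor
could be dropped. The second hunk simply moves the
perf_event_exit_cpu_context() call ahead of the swevent hash teardown.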