author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-10-02 09:41:23 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-10-05 07:59:07 -0400
commit		95cf59ea72331d0093010543b8951bb43f262cac (patch)
tree		7d0bc786d071b50217040691c4e1ab5a6ff73296 /kernel/events
parent		3f1f33206c16c7b3839d71372bc2ac3f305aa802 (diff)
perf: Fix perf_cgroup_switch for sw-events
Jiri reported that he could trigger the WARN_ON_ONCE() in
perf_cgroup_switch() using sw-events. This is because sw-events share
a cpuctx with multiple PMUs.
Use the ->unique_pmu pointer to limit the pmu iteration to unique
cpuctx instances.
Reported-and-Tested-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-so7wi2zf3jjzrwcutm2mkz0j@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	9
1 files changed, 6 insertions, 3 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 81939e8999a5..fd15593c7f54 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -372,6 +372,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+		if (cpuctx->unique_pmu != pmu)
+			continue; /* ensure we process each cpuctx once */
 
 		/*
 		 * perf_cgroup_events says at least one
@@ -395,9 +397,10 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 
 		if (mode & PERF_CGROUP_SWIN) {
 			WARN_ON_ONCE(cpuctx->cgrp);
-			/* set cgrp before ctxsw in to
-			 * allow event_filter_match() to not
-			 * have to pass task around
+			/*
+			 * set cgrp before ctxsw in to allow
+			 * event_filter_match() to not have to pass
+			 * task around
 			 */
 			cpuctx->cgrp = perf_cgroup_from_task(task);
 			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);