author     Stephane Eranian <eranian@google.com>   2012-02-02 06:04:01 -0500
committer  Ingo Molnar <mingo@elte.hu>             2012-02-03 02:24:40 -0500
commit     84f2b9b2edc09595569c7397cc3c888764ffd78b (patch)
tree       f959f7f5746b3dc8409b2aae9584101c9b2f71a1 /arch
parent     6c073a7ee250118b8be3a2379c96fd7f78382b06 (diff)
perf: Remove deprecated WARN_ON_ONCE()
With the new throttling/unthrottling code introduced with
commit:
e050e3f0a71b ("perf: Fix broken interrupt rate throttling")
we occasionally hit three WARN_ON_ONCE() checks in:
- intel_pmu_pebs_enable()
- intel_pmu_lbr_enable()
- x86_pmu_start()
These assertions no longer hold: there is now a valid
path on which they can trigger, and it is harmless.
The assertions can be triggered with:
$ perf record -e instructions:pp ....
Leading to paths:
intel_pmu_pebs_enable
intel_pmu_enable_event
x86_perf_event_set_period
x86_pmu_start
perf_adjust_freq_unthr_context
perf_event_task_tick
scheduler_tick
And:
intel_pmu_lbr_enable
intel_pmu_enable_event
x86_perf_event_set_period
x86_pmu_start
perf_adjust_freq_unthr_context
perf_event_task_tick
scheduler_tick
cpuc->enabled is always set because, by the time we reach
perf_adjust_freq_unthr_context(), the PMU is not fully
disabled. Furthermore, when a period needs adjusting, only
the event being changed is stopped, not the entire PMU;
thus cpuc->enabled is already set when the event is
re-enabled (see the sketch below). Note that when the event
is stopped, both PEBS and LBR are stopped as well, if
necessary (and possible).
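For illustration, here is a minimal C sketch of that tick-time
path, assuming the standard pmu->stop()/pmu->start() callbacks;
the helper name adjust_event_period() and its reduced signature
are hypothetical simplifications of the
perf_adjust_freq_unthr_context() logic, not the kernel's exact
code:

	/*
	 * Simplified sketch: the frequency adjustment stops and
	 * restarts only the event whose period changes; the PMU as
	 * a whole stays enabled, so cpuc->enabled is still set when
	 * ->start() reaches intel_pmu_pebs_enable() or
	 * intel_pmu_lbr_enable().
	 */
	static void adjust_event_period(struct perf_event *event, u64 new_period)
	{
		struct pmu *pmu = event->pmu;

		pmu->stop(event, PERF_EF_UPDATE);	/* stop this event only */
		event->hw.sample_period = new_period;	/* install new period   */
		pmu->start(event, PERF_EF_RELOAD);	/* x86_pmu_start() ->
							 * intel_pmu_enable_event();
							 * cpuc->enabled == 1 here */
	}

Because only the per-event ->stop()/->start() pair runs on this
path, a WARN_ON_ONCE(cpuc->enabled) in the enable functions fires
on a perfectly legal sequence, which is why the checks are removed.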
Signed-off-by: Stephane Eranian <eranian@google.com>
Cc: peterz@infradead.org
Link: http://lkml.kernel.org/r/20120202110401.GA30911@quad
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
 arch/x86/kernel/cpu/perf_event.c           | 3 ---
 arch/x86/kernel/cpu/perf_event_intel_ds.c  | 1 -
 arch/x86/kernel/cpu/perf_event_intel_lbr.c | 2 --
 3 files changed, 0 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5adce1040b11..2a30e5ae6acf 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -986,9 +986,6 @@ static void x86_pmu_start(struct perf_event *event, int flags)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx = event->hw.idx;
 
-	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
-		return;
-
 	if (WARN_ON_ONCE(idx == -1))
 		return;
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 73da6b64f5b7..d6bd49faa40c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -439,7 +439,6 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
 
 	cpuc->pebs_enabled |= 1ULL << hwc->idx;
-	WARN_ON_ONCE(cpuc->enabled);
 
 	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
 		intel_pmu_lbr_enable(event);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 3fab3de3ce96..47a7e63bfe54 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -72,8 +72,6 @@ void intel_pmu_lbr_enable(struct perf_event *event)
 	if (!x86_pmu.lbr_nr)
 		return;
 
-	WARN_ON_ONCE(cpuc->enabled);
-
 	/*
 	 * Reset the LBR stack if we changed task context to
 	 * avoid data leaks.