diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2010-03-06 07:26:11 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-03-10 07:23:36 -0500 |
commit | 8f4aebd2be9892bf8fb79a2d8576d3f3ee7f00f6 (patch) | |
tree | d1a45b566e03fa722d873c96a4304abc7b560e3f /arch/x86 | |
parent | cc7f00820b2f3be656569c41158d9323e425bcfe (diff) |
perf, x86: Fix pebs drains
I overlooked the perf_disable()/perf_enable() calls in
intel_pmu_handle_irq() (pointed out by Markus), so we should not
explicitly disable_all/enable_all PEBS counters in the drain functions;
these are already disabled, and enabling them early is confusing.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_ds.c | 15 |
1 file changed, 3 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index a7401e4167df..66c6962f15f9 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -476,18 +476,16 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | |||
476 | if (!event || !ds || !x86_pmu.pebs) | 476 | if (!event || !ds || !x86_pmu.pebs) |
477 | return; | 477 | return; |
478 | 478 | ||
479 | intel_pmu_pebs_disable_all(); | ||
480 | |||
481 | at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; | 479 | at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; |
482 | top = (struct pebs_record_core *)(unsigned long)ds->pebs_index; | 480 | top = (struct pebs_record_core *)(unsigned long)ds->pebs_index; |
483 | 481 | ||
484 | if (top <= at) | 482 | if (top <= at) |
485 | goto out; | 483 | return; |
486 | 484 | ||
487 | ds->pebs_index = ds->pebs_buffer_base; | 485 | ds->pebs_index = ds->pebs_buffer_base; |
488 | 486 | ||
489 | if (!intel_pmu_save_and_restart(event)) | 487 | if (!intel_pmu_save_and_restart(event)) |
490 | goto out; | 488 | return; |
491 | 489 | ||
492 | perf_sample_data_init(&data, 0); | 490 | perf_sample_data_init(&data, 0); |
493 | data.period = event->hw.last_period; | 491 | data.period = event->hw.last_period; |
@@ -528,9 +526,6 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | |||
528 | 526 | ||
529 | if (perf_event_overflow(event, 1, &data, ®s)) | 527 | if (perf_event_overflow(event, 1, &data, ®s)) |
530 | x86_pmu_stop(event); | 528 | x86_pmu_stop(event); |
531 | |||
532 | out: | ||
533 | intel_pmu_pebs_enable_all(); | ||
534 | } | 529 | } |
535 | 530 | ||
536 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | 531 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) |
@@ -547,13 +542,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | |||
547 | if (!ds || !x86_pmu.pebs) | 542 | if (!ds || !x86_pmu.pebs) |
548 | return; | 543 | return; |
549 | 544 | ||
550 | intel_pmu_pebs_disable_all(); | ||
551 | |||
552 | at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; | 545 | at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; |
553 | top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; | 546 | top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; |
554 | 547 | ||
555 | if (top <= at) | 548 | if (top <= at) |
556 | goto out; | 549 | return; |
557 | 550 | ||
558 | ds->pebs_index = ds->pebs_buffer_base; | 551 | ds->pebs_index = ds->pebs_buffer_base; |
559 | 552 | ||
@@ -604,8 +597,6 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | |||
604 | if (perf_event_overflow(event, 1, &data, ®s)) | 597 | if (perf_event_overflow(event, 1, &data, ®s)) |
605 | x86_pmu_stop(event); | 598 | x86_pmu_stop(event); |
606 | } | 599 | } |
607 | out: | ||
608 | intel_pmu_pebs_enable_all(); | ||
609 | } | 600 | } |
610 | 601 | ||
611 | /* | 602 | /* |