author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-02 10:08:20 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-02 15:45:31 -0400
commit	e4abb5d4f7ddabc1fc7c392cf0a10d8e5868c9ca (patch)
tree	9d9c71a70afd922d4b371b0243ab149456d634ca
parent	8a016db386195b193e2a8aeddff9fe937dcb7a40 (diff)
perf_counter: x86: Emulate longer sample periods
Do as Power already does, emulate sample periods up to 2^63-1 by
composing them of smaller values limited by hardware capabilities.
Only once we wrap the software period do we generate an overflow
event.

Just 10 lines of new code.

Reported-by: Stephane Eranian <eranian@googlemail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
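For illustration, the idea behind the patch can be modelled outside the kernel: a large software sample_period is consumed in chunks no bigger than the hardware maximum, and an overflow event is reported only once the software period_left wraps. The sketch below is a minimal user-space C model under assumed names (HW_MAX_PERIOD, struct sw_counter, set_period); it is not the kernel implementation and omits the negative-left and "left < 2" quirks handled in the real code.

/*
 * Minimal user-space sketch of the software-period emulation idea
 * (illustrative only; HW_MAX_PERIOD, sw_counter and set_period are
 * assumed names, not kernel API).
 */
#include <stdio.h>
#include <stdint.h>

#define HW_MAX_PERIOD	((int64_t)1 << 31)	/* assumed hardware counter limit */

struct sw_counter {
	int64_t sample_period;	/* requested period, may be up to 2^63-1 */
	int64_t period_left;	/* events left before the software period wraps */
};

/* Pick the next hardware period; return 1 only when the software period wrapped. */
static int set_period(struct sw_counter *c, int64_t *hw_period)
{
	int64_t left = c->period_left;
	int ret = 0;

	if (left <= 0) {			/* software period exhausted */
		left += c->sample_period;
		c->period_left = left;
		ret = 1;			/* caller should emit an overflow event */
	}

	if (left > HW_MAX_PERIOD)		/* clamp to what the hardware can count */
		left = HW_MAX_PERIOD;

	*hw_period = left;			/* the kernel would program -left via wrmsrl() */
	return ret;
}

int main(void)
{
	struct sw_counter c = { .sample_period = 5000000000LL, .period_left = 5000000000LL };
	int64_t hw;

	/* Simulate successive hardware overflow interrupts. */
	while (!set_period(&c, &hw)) {
		printf("program hardware for %lld events\n", (long long)hw);
		c.period_left -= hw;		/* hardware counted hw events, then overflowed */
	}
	printf("software period wrapped -> generate overflow event\n");
	return 0;
}

With a 5e9 sample period and a 2^31 hardware limit this programs the counter three times before a single overflow event is generated, which mirrors what the new return value of x86_perf_counter_set_period() tells the IRQ handlers in the patch below.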
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	31
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 9e144fbebd20..904571bea710 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -287,8 +287,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	if (!hwc->sample_period)
 		hwc->sample_period = x86_pmu.max_period;
 
-	atomic64_set(&hwc->period_left,
-			min(x86_pmu.max_period, hwc->sample_period));
+	atomic64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
 	 * Raw event type provide the config in the event structure
@@ -451,13 +450,13 @@ static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
  * Set the next IRQ period, based on the hwc->period_left value.
  * To be called with the counter disabled in hw:
  */
-static void
+static int
 x86_perf_counter_set_period(struct perf_counter *counter,
 			     struct hw_perf_counter *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = min(x86_pmu.max_period, hwc->sample_period);
-	int err;
+	s64 period = hwc->sample_period;
+	int err, ret = 0;
 
 	/*
 	 * If we are way outside a reasoable range then just skip forward:
@@ -465,11 +464,13 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_set(&hwc->period_left, left);
+		ret = 1;
 	}
 	/*
 	 * Quirk: certain CPUs dont like it if just 1 event is left:
@@ -477,6 +478,9 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (unlikely(left < 2))
 		left = 2;
 
+	if (left > x86_pmu.max_period)
+		left = x86_pmu.max_period;
+
 	per_cpu(prev_left[idx], smp_processor_id()) = left;
 
 	/*
@@ -487,6 +491,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 
 	err = checking_wrmsrl(hwc->counter_base + idx,
 			     (u64)(-left) & x86_pmu.counter_mask);
+
+	return ret;
 }
 
 static inline void
@@ -706,16 +712,19 @@ static void x86_pmu_disable(struct perf_counter *counter)
  * Save and restart an expired counter. Called by NMI contexts,
  * so it has to be careful about preempting normal counter ops:
  */
-static void intel_pmu_save_and_restart(struct perf_counter *counter)
+static int intel_pmu_save_and_restart(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	int idx = hwc->idx;
+	int ret;
 
 	x86_perf_counter_update(counter, hwc, idx);
-	x86_perf_counter_set_period(counter, hwc, idx);
+	ret = x86_perf_counter_set_period(counter, hwc, idx);
 
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		intel_pmu_enable_counter(hwc, idx);
+
+	return ret;
 }
 
 static void intel_pmu_reset(void)
@@ -782,7 +791,9 @@ again:
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
-		intel_pmu_save_and_restart(counter);
+		if (!intel_pmu_save_and_restart(counter))
+			continue;
+
 		if (perf_counter_overflow(counter, nmi, regs, 0))
 			intel_pmu_disable_counter(&counter->hw, bit);
 	}
@@ -824,9 +835,11 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 			continue;
 
 		/* counter overflow */
-		x86_perf_counter_set_period(counter, hwc, idx);
 		handled = 1;
 		inc_irq_stat(apic_perf_irqs);
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
 		if (perf_counter_overflow(counter, nmi, regs, 0))
 			amd_pmu_disable_counter(hwc, idx);
 	}