author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-13 10:21:38 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-15 03:47:02 -0400
commit	9e35ad388bea89f7d6f375af4c0ae98803688666 (patch)
tree	9abbce9f6c9a914b1ea8d8dae82e159366030e4a /arch/x86/kernel/cpu/perf_counter.c
parent	962bf7a66edca4d36a730a38ff8410a67f560e40 (diff)
perf_counter: Rework the perf counter disable/enable
The current disable/enable mechanism is:

	token = hw_perf_save_disable();
	...
	/* do bits */
	...
	hw_perf_restore(token);

This works well, provided that the use nests properly. Except we don't.

x86 NMI/INT throttling has non-nested use of this, breaking things. Therefore
provide a reference counter disable/enable interface, where the first disable
disables the hardware, and the last enable enables the hardware again.

[ Impact: refactor, simplify the PMU disable/enable logic ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
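For illustration, below is a minimal, self-contained sketch of the reference-counted
scheme this patch introduces. It is not the kernel implementation: the real code keeps
per-CPU state and drives the hw_perf_disable()/hw_perf_enable() arch hooks seen in the
diff below, while the printf stubs and main() here are purely hypothetical stand-ins.

	#include <stdio.h>

	static int perf_disable_count;		/* per-CPU in the real kernel */

	static void hw_perf_disable(void)	/* stands in for x86_pmu.disable_all() */
	{
		puts("PMU disabled");
	}

	static void hw_perf_enable(void)	/* stands in for x86_pmu.enable_all() */
	{
		puts("PMU enabled");
	}

	static void perf_disable(void)
	{
		if (perf_disable_count++ == 0)	/* only the first disable touches hardware */
			hw_perf_disable();
	}

	static void perf_enable(void)
	{
		if (--perf_disable_count == 0)	/* only the last enable touches hardware */
			hw_perf_enable();
	}

	int main(void)
	{
		perf_disable();		/* outer user: PMU goes off */
		perf_disable();		/* nested user (e.g. interrupt path): no hardware access */
		perf_enable();		/* nested enable: PMU stays off */
		perf_enable();		/* outermost enable: PMU comes back on */
		return 0;
	}

This is why the interrupt handlers in the diff can call perf_disable()/perf_enable()
without saving a token, and why the throttling path can leave the PMU off simply by
skipping the final perf_enable() (or bumping the count via __perf_disable()).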
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	113
1 file changed, 42 insertions(+), 71 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 7601c014f8f6..313638cecbb5 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -31,7 +31,6 @@ struct cpu_hw_counters {
 	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	unsigned long		interrupts;
-	u64			throttle_ctrl;
 	int			enabled;
 };
 
@@ -42,8 +41,8 @@ struct x86_pmu {
 	const char	*name;
 	int		version;
 	int		(*handle_irq)(struct pt_regs *, int);
-	u64		(*save_disable_all)(void);
-	void		(*restore_all)(u64);
+	void		(*disable_all)(void);
+	void		(*enable_all)(void);
 	void		(*enable)(struct hw_perf_counter *, int);
 	void		(*disable)(struct hw_perf_counter *, int);
 	unsigned	eventsel;
@@ -56,6 +55,7 @@ struct x86_pmu {
 	int		counter_bits;
 	u64		counter_mask;
 	u64		max_period;
+	u64		intel_ctrl;
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -311,22 +311,19 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	return 0;
 }
 
-static u64 intel_pmu_save_disable_all(void)
+static void intel_pmu_disable_all(void)
 {
-	u64 ctrl;
-
-	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-
-	return ctrl;
 }
 
-static u64 amd_pmu_save_disable_all(void)
+static void amd_pmu_disable_all(void)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	int enabled, idx;
+	int idx;
+
+	if (!cpuc->enabled)
+		return;
 
-	enabled = cpuc->enabled;
 	cpuc->enabled = 0;
 	/*
 	 * ensure we write the disable before we start disabling the
@@ -334,8 +331,6 @@ static u64 amd_pmu_save_disable_all(void)
 	 * right thing.
 	 */
 	barrier();
-	if (!enabled)
-		goto out;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
@@ -348,37 +343,31 @@ static u64 amd_pmu_save_disable_all(void)
 		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
-
-out:
-	return enabled;
 }
 
-u64 hw_perf_save_disable(void)
+void hw_perf_disable(void)
 {
 	if (!x86_pmu_initialized())
-		return 0;
-	return x86_pmu.save_disable_all();
+		return;
+	return x86_pmu.disable_all();
 }
-/*
- * Exported because of ACPI idle
- */
-EXPORT_SYMBOL_GPL(hw_perf_save_disable);
 
-static void intel_pmu_restore_all(u64 ctrl)
+static void intel_pmu_enable_all(void)
 {
-	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
 }
 
-static void amd_pmu_restore_all(u64 ctrl)
+static void amd_pmu_enable_all(void)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	int idx;
 
-	cpuc->enabled = ctrl;
-	barrier();
-	if (!ctrl)
+	if (cpuc->enabled)
 		return;
 
+	cpuc->enabled = 1;
+	barrier();
+
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
@@ -392,16 +381,12 @@ static void amd_pmu_restore_all(u64 ctrl)
 	}
 }
 
-void hw_perf_restore(u64 ctrl)
+void hw_perf_enable(void)
 {
 	if (!x86_pmu_initialized())
 		return;
-	x86_pmu.restore_all(ctrl);
+	x86_pmu.enable_all();
 }
-/*
- * Exported because of ACPI idle
- */
-EXPORT_SYMBOL_GPL(hw_perf_restore);
 
 static inline u64 intel_pmu_get_status(void)
 {
@@ -735,15 +720,14 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	int bit, cpu = smp_processor_id();
 	u64 ack, status;
 	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
-	int ret = 0;
-
-	cpuc->throttle_ctrl = intel_pmu_save_disable_all();
 
+	perf_disable();
 	status = intel_pmu_get_status();
-	if (!status)
-		goto out;
+	if (!status) {
+		perf_enable();
+		return 0;
+	}
 
-	ret = 1;
 again:
 	inc_irq_stat(apic_perf_irqs);
 	ack = status;
@@ -767,19 +751,11 @@ again:
 	status = intel_pmu_get_status();
 	if (status)
 		goto again;
-out:
-	/*
-	 * Restore - do not reenable when global enable is off or throttled:
-	 */
-	if (cpuc->throttle_ctrl) {
-		if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS) {
-			intel_pmu_restore_all(cpuc->throttle_ctrl);
-		} else {
-			pr_info("CPU#%d: perfcounters: max interrupt rate exceeded! Throttle on.\n", smp_processor_id());
-		}
-	}
 
-	return ret;
+	if (++cpuc->interrupts != PERFMON_MAX_INTERRUPTS)
+		perf_enable();
+
+	return 1;
 }
 
 static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
@@ -792,13 +768,11 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	struct hw_perf_counter *hwc;
 	int idx, throttle = 0;
 
-	cpuc->throttle_ctrl = cpuc->enabled;
-	cpuc->enabled = 0;
-	barrier();
-
-	if (cpuc->throttle_ctrl) {
-		if (++cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
-			throttle = 1;
+	if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
+		throttle = 1;
+		__perf_disable();
+		cpuc->enabled = 0;
+		barrier();
 	}
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -824,9 +798,6 @@ next:
 			amd_pmu_disable_counter(hwc, idx);
 	}
 
-	if (cpuc->throttle_ctrl && !throttle)
-		cpuc->enabled = 1;
-
 	return handled;
 }
 
@@ -839,13 +810,11 @@ void perf_counter_unthrottle(void)
 
 	cpuc = &__get_cpu_var(cpu_hw_counters);
 	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
-		pr_info("CPU#%d: perfcounters: throttle off.\n", smp_processor_id());
-
 		/*
 		 * Clear them before re-enabling irqs/NMIs again:
 		 */
 		cpuc->interrupts = 0;
-		hw_perf_restore(cpuc->throttle_ctrl);
+		perf_enable();
 	} else {
 		cpuc->interrupts = 0;
 	}
@@ -931,8 +900,8 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 static struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
-	.save_disable_all	= intel_pmu_save_disable_all,
-	.restore_all		= intel_pmu_restore_all,
+	.disable_all		= intel_pmu_disable_all,
+	.enable_all		= intel_pmu_enable_all,
 	.enable			= intel_pmu_enable_counter,
 	.disable		= intel_pmu_disable_counter,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
@@ -951,8 +920,8 @@ static struct x86_pmu intel_pmu = {
 static struct x86_pmu amd_pmu = {
 	.name			= "AMD",
 	.handle_irq		= amd_pmu_handle_irq,
-	.save_disable_all	= amd_pmu_save_disable_all,
-	.restore_all		= amd_pmu_restore_all,
+	.disable_all		= amd_pmu_disable_all,
+	.enable_all		= amd_pmu_enable_all,
 	.enable			= amd_pmu_enable_counter,
 	.disable		= amd_pmu_disable_counter,
 	.eventsel		= MSR_K7_EVNTSEL0,
@@ -1003,6 +972,8 @@ static int intel_pmu_init(void)
 	x86_pmu.counter_bits = eax.split.bit_width;
 	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
 
+	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+
 	return 0;
 }
 