author		Peter Zijlstra <peterz@infradead.org>	2010-03-26 09:08:44 -0400
committer	Ingo Molnar <mingo@elte.hu>		2010-03-26 10:47:24 -0400
commit		11164cd4f6dab326a88bdf27f2f8f7c11977e91a (patch)
tree		260a9f48f66cce8c5f4e23111ba6be8af6cfa578 /arch
parent		ea8e61b7bbc4a2faef77db34eb2db2a2c2372ff6 (diff)
perf, x86: Add Nehalem PMU programming errata workaround
Implement the workaround for Intel Errata AAK100 and AAP53.
Also, remove the Core-i7 name for Nehalem events, since there are
also Westmere-based i7 chips.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <1269608924.12097.147.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
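
A note on the three magic values the workaround programs: they are plain architectural PERFEVTSEL encodings in which the upper byte 0x43 is just the ENABLE, OS and USR flag bits, while the low byte selects event codes 0xD2, 0xB1 and 0xB5 with a zero unit mask, matching the patch comment's description of them as non counting events. The standalone sketch below decodes them; the macro names are ours for illustration, not kernel API.

#include <stdio.h>
#include <stdint.h>

/* Architectural PERFEVTSEL bit layout (Intel SDM Vol. 3). */
#define EVTSEL_EVENT(v)	((v) & 0xffu)		/* bits 0-7:  event code   */
#define EVTSEL_UMASK(v)	(((v) >> 8) & 0xffu)	/* bits 8-15: unit mask    */
#define EVTSEL_USR(v)	(((v) >> 16) & 1u)	/* bit 16: count user mode */
#define EVTSEL_OS(v)	(((v) >> 17) & 1u)	/* bit 17: count kernel    */
#define EVTSEL_EN(v)	(((v) >> 22) & 1u)	/* bit 22: counter enable  */

int main(void)
{
	const uint32_t magic[] = { 0x4300D2, 0x4300B1, 0x4300B5 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("0x%06X -> event 0x%02X, umask 0x%02X, usr=%u, os=%u, en=%u\n",
		       magic[i], EVTSEL_EVENT(magic[i]), EVTSEL_UMASK(magic[i]),
		       EVTSEL_USR(magic[i]), EVTSEL_OS(magic[i]), EVTSEL_EN(magic[i]));
	return 0;
}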
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	 8
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	43
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p4.c	 2
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p6.c	 2
4 files changed, 45 insertions, 10 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f571f514de2a..6f66d4a845ff 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -184,7 +184,7 @@ struct x86_pmu {
 	int		version;
 	int		(*handle_irq)(struct pt_regs *);
 	void		(*disable_all)(void);
-	void		(*enable_all)(void);
+	void		(*enable_all)(int added);
 	void		(*enable)(struct perf_event *);
 	void		(*disable)(struct perf_event *);
 	int		(*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
@@ -576,7 +576,7 @@ void hw_perf_disable(void)
 	x86_pmu.disable_all();
 }
 
-static void x86_pmu_enable_all(void)
+static void x86_pmu_enable_all(int added)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
@@ -784,7 +784,7 @@ void hw_perf_enable(void)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
-	int i;
+	int i, added = cpuc->n_added;
 
 	if (!x86_pmu_initialized())
 		return;
@@ -836,7 +836,7 @@ void hw_perf_enable(void)
 	cpuc->enabled = 1;
 	barrier();
 
-	x86_pmu.enable_all();
+	x86_pmu.enable_all(added);
 }
 
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
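
The core change above is deliberately thin plumbing: hw_perf_enable() records cpuc->n_added on entry and hands it to the enable_all() callback, so a backend can tell an enable that follows fresh counter scheduling apart from the plain re-enables issued by the interrupt handler, which (as the Intel changes below show) pass 0. A hypothetical backend sketches the contract; every name here is invented for illustration and none of it is kernel API.

/* Hypothetical errata fixup, needed only when the counter mix changed. */
static void example_apply_errata(void)
{
}

/* Hypothetical global unmask, the common fast path. */
static void example_unmask_counters(void)
{
}

static void example_pmu_enable_all(int added)
{
	if (added)			/* counters were scheduled in */
		example_apply_errata();

	example_unmask_counters();
}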
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 044b8436b19d..676aac27aca4 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -483,7 +483,7 @@ static void intel_pmu_disable_all(void)
 		intel_pmu_lbr_disable_all();
 }
 
-static void intel_pmu_enable_all(void)
+static void intel_pmu_enable_all(int added)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -502,6 +502,40 @@ static void intel_pmu_enable_all(void)
 	}
 }
 
+/*
+ * Workaround for:
+ *   Intel Errata AAK100 (model 26)
+ *   Intel Errata AAP53  (model 30)
+ *
+ * These chips need to be 'reset' when adding counters by programming
+ * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
+ * either in sequence on the same PMC or on different PMCs.
+ */
+static void intel_pmu_nhm_enable_all(int added)
+{
+	if (added) {
+		struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+		int i;
+
+		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
+		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
+		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
+
+		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
+		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+
+		for (i = 0; i < 3; i++) {
+			struct perf_event *event = cpuc->events[i];
+
+			if (!event)
+				continue;
+
+			__x86_pmu_enable_event(&event->hw);
+		}
+	}
+	intel_pmu_enable_all(added);
+}
+
 static inline u64 intel_pmu_get_status(void)
 {
 	u64 status;
@@ -658,7 +692,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	intel_pmu_drain_bts_buffer();
 	status = intel_pmu_get_status();
 	if (!status) {
-		intel_pmu_enable_all();
+		intel_pmu_enable_all(0);
 		return 0;
 	}
 
@@ -707,7 +741,7 @@ again:
 		goto again;
 
 done:
-	intel_pmu_enable_all();
+	intel_pmu_enable_all(0);
 	return 1;
 }
 
@@ -920,7 +954,8 @@ static __init int intel_pmu_init(void)
 		intel_pmu_lbr_init_nhm();
 
 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
-		pr_cont("Nehalem/Corei7 events, ");
+		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
+		pr_cont("Nehalem events, ");
 		break;
 
 	case 28: /* Atom */
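
One detail of intel_pmu_nhm_enable_all() worth spelling out: IA32_PERF_GLOBAL_CTRL (MSR_CORE_PERF_GLOBAL_CTRL) keeps one enable bit per general-purpose counter in its low bits, so the 0x3 write briefly runs PMC0 and PMC1 with the magic events loaded, the following 0x0 write stops them again, and the loop then restores whatever real events own PMC0-2 via __x86_pmu_enable_event(). A one-line helper sketch of that bit layout; the name is ours, not kernel code.

#include <stdint.h>

/*
 * Sketch: enable mask for the first n general-purpose counters in
 * IA32_PERF_GLOBAL_CTRL (fixed-counter enables live at bit 32 and up).
 */
static inline uint64_t pmc_enable_mask(unsigned int n)
{
	return ((uint64_t)1 << n) - 1;	/* n == 2 gives the 0x3 used above */
}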
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index f8fe069f14e2..0d1be36cbe9e 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -535,7 +535,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
 			(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
 }
 
-static void p4_pmu_enable_all(void)
+static void p4_pmu_enable_all(int added)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 6ff4d01d880f..877182c850df 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -66,7 +66,7 @@ static void p6_pmu_disable_all(void)
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
-static void p6_pmu_enable_all(void)
+static void p6_pmu_enable_all(int added)
 {
 	unsigned long val;
 