Diffstat (limited to 'arch/x86')
 -rw-r--r--  arch/x86/include/asm/perf_event.h       |  2
 -rw-r--r--  arch/x86/kernel/cpu/perf_event.c        |  8
 -rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c     |  8
 -rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c  |  2
 -rw-r--r--  arch/x86/oprofile/op_model_amd.c        |  6
 -rw-r--r--  arch/x86/oprofile/op_model_ppro.c       |  6
 6 files changed, 16 insertions, 16 deletions
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index c7f60e1297ab..80e693684f18 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -18,7 +18,7 @@
 #define MSR_ARCH_PERFMON_EVENTSEL0	0x186
 #define MSR_ARCH_PERFMON_EVENTSEL1	0x187
 
-#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1 << 22)
+#define ARCH_PERFMON_EVENTSEL_ENABLE	(1 << 22)
 #define ARCH_PERFMON_EVENTSEL_ANY	(1 << 21)
 #define ARCH_PERFMON_EVENTSEL_INT	(1 << 20)
 #define ARCH_PERFMON_EVENTSEL_OS	(1 << 17)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 641ccb9dddbc..6531b4bdb22d 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -553,9 +553,9 @@ static void x86_pmu_disable_all(void)
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		rdmsrl(x86_pmu.eventsel + idx, val);
-		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
 			continue;
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -590,7 +590,7 @@ static void x86_pmu_enable_all(void)
 			continue;
 
 		val = event->hw.config;
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -853,7 +853,7 @@ void hw_perf_enable(void)
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	(void)checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
+			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 1ca5ba078afd..a4e67b99d91c 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -62,7 +62,7 @@ static void p6_pmu_disable_all(void)
 
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
-	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
@@ -72,7 +72,7 @@ static void p6_pmu_enable_all(void)
 
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
-	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
@@ -83,7 +83,7 @@ p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	u64 val = P6_NOP_EVENT;
 
 	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
@@ -95,7 +95,7 @@ static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 
 	val = hwc->config;
 	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 74f4e85a5727..fb329e9f8494 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -680,7 +680,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
 	cpu_nmi_set_wd_enabled();
 
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsr(evntsel_msr, evntsel, 0);
 	intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 8ddb9fa9c1b2..090cbbec7dbd 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -171,7 +171,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 			continue;
 		}
 		rdmsrl(msrs->controls[i].addr, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
 			op_x86_warn_in_use(i);
 		val &= model->reserved;
 		wrmsrl(msrs->controls[i].addr, val);
@@ -398,7 +398,7 @@ static void op_amd_start(struct op_msrs const * const msrs)
 		if (!reset_value[op_x86_phys_to_virt(i)])
 			continue;
 		rdmsrl(msrs->controls[i].addr, val);
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(msrs->controls[i].addr, val);
 	}
 
@@ -418,7 +418,7 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 		if (!reset_value[op_x86_phys_to_virt(i)])
 			continue;
 		rdmsrl(msrs->controls[i].addr, val);
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(msrs->controls[i].addr, val);
 	}
 
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 5d1727ba409e..2bf90fafa7b5 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -88,7 +88,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 			continue;
 		}
 		rdmsrl(msrs->controls[i].addr, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
 			op_x86_warn_in_use(i);
 		val &= model->reserved;
 		wrmsrl(msrs->controls[i].addr, val);
@@ -166,7 +166,7 @@ static void ppro_start(struct op_msrs const * const msrs)
 	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			rdmsrl(msrs->controls[i].addr, val);
-			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+			val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 			wrmsrl(msrs->controls[i].addr, val);
 		}
 	}
@@ -184,7 +184,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
 		if (!reset_value[i])
 			continue;
 		rdmsrl(msrs->controls[i].addr, val);
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(msrs->controls[i].addr, val);
 	}
 }
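
Note: every hunk above follows the same read-modify-write pattern: read the event-select MSR, set or clear the enable bit (bit 22), and write it back. The rename only drops the misleading "0" suffix, since the bit is present in every counter's event-select register, not just counter 0. The C sketch below is illustrative and not part of the commit: counter_enable(), counter_disable(), read_msr(), write_msr() and fake_msr are hypothetical user-space stand-ins for the kernel's rdmsrl()/wrmsrl(), used only to show the bit manipulation.

#include <stdint.h>
#include <stdio.h>

/* Same bit the renamed macro covers: bit 22 of an IA32_PERFEVTSELx MSR. */
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)

/* Stand-ins for rdmsrl()/wrmsrl(); real MSR access requires ring 0. */
static uint64_t fake_msr = 0x004300c0;	/* arbitrary event configuration */

static uint64_t read_msr(uint32_t addr)
{
	(void)addr;
	return fake_msr;
}

static void write_msr(uint32_t addr, uint64_t val)
{
	(void)addr;
	fake_msr = val;
}

/* Mirrors the enable path in the hunks above: val |= ENABLE. */
static void counter_enable(uint32_t evntsel_msr)
{
	uint64_t val = read_msr(evntsel_msr);

	val |= ARCH_PERFMON_EVENTSEL_ENABLE;	/* set bit 22 */
	write_msr(evntsel_msr, val);
}

/* Mirrors the disable path: val &= ~ENABLE. */
static void counter_disable(uint32_t evntsel_msr)
{
	uint64_t val = read_msr(evntsel_msr);

	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;	/* clear bit 22 */
	write_msr(evntsel_msr, val);
}

int main(void)
{
	counter_enable(0x186);	/* 0x186 == MSR_ARCH_PERFMON_EVENTSEL0 */
	printf("enabled:  %#llx\n", (unsigned long long)fake_msr);

	counter_disable(0x186);
	printf("disabled: %#llx\n", (unsigned long long)fake_msr);
	return 0;
}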