author		Thomas Gleixner <tglx@linutronix.de>	2008-12-09 15:43:39 -0500
committer	Ingo Molnar <mingo@elte.hu>		2008-12-11 09:45:45 -0500
commit		4ac13294e44664bb7edf4daf52edb71e7c6bbe84 (patch)
tree		accb533350a655e39a8ab846abc37018b2f87ccf
parent		43874d238d5f208854a73c3225ca2a22833eec8b (diff)
perf counters: protect them against CSTATE transitions
Impact: fix rare lost events problem
There are CPUs whose performance counters misbehave on CSTATE transitions,
so provide a way to just disable/enable them around deep idle methods.
(hw_perf_enable_all() is cheap on x86.)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	| 14 +++++++++++++-
-rw-r--r--	drivers/acpi/processor_idle.c		|  8 ++++++++
-rw-r--r--	include/linux/perf_counter.h		|  4 ++++
3 files changed, 25 insertions(+), 1 deletion(-)
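
The patch turns hw_perf_disable_all() into a save-and-disable primitive and adds hw_perf_restore_ctrl() as its counterpart. A minimal sketch of the intended call pattern around a deep idle method (enter_deep_idle() is a placeholder, not a function touched by this patch):

    u64 pctrl;

    pctrl = hw_perf_disable_all();	/* save MSR_CORE_PERF_GLOBAL_CTRL, then clear it */
    enter_deep_idle();			/* the C-state transition the counters must not see */
    hw_perf_restore_ctrl(pctrl);	/* write the saved enable mask back */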
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 6a93d1f04d97..0a7f3bea2dc6 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -12,6 +12,7 @@
 #include <linux/notifier.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <linux/module.h>
 #include <linux/kdebug.h>
 #include <linux/sched.h>
 
@@ -119,10 +120,21 @@ void hw_perf_enable_all(void)
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
 }
 
-void hw_perf_disable_all(void)
+void hw_perf_restore_ctrl(u64 ctrl)
 {
+	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
+}
+EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);
+
+u64 hw_perf_disable_all(void)
+{
+	u64 ctrl;
+
+	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
+	return ctrl;
 }
+EXPORT_SYMBOL_GPL(hw_perf_disable_all);
 
 static inline void
 __hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
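
Two details of the x86 side are easy to miss. The helpers are exported GPL-only (hence the new <linux/module.h> include) because the ACPI processor driver can be built as a module. And while rdmsrl() reads the full 64-bit MSR, wrmsr() takes the low and high 32-bit words separately, so the restore path writes only the low word of the saved value; that covers the generic-counter enable bits this early code manages. A sketch of the same restore via the 64-bit write helper (wrmsrl() is the standard kernel API, but using it here is a variation, not what the patch does):

    /* Variant restore using the 64-bit MSR write helper: */
    static void hw_perf_restore_ctrl_64(u64 ctrl)
    {
    	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);	/* writes all 64 bits */
    }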
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5f8d746a9b81..cca804e6f1dd 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -270,8 +270,11 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
+	u64 pctrl;
+
 	/* Don't trace irqs off for idle */
 	stop_critical_timings();
+	pctrl = hw_perf_disable_all();
 	if (cstate->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cstate);
@@ -284,6 +287,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
+	hw_perf_restore_ctrl(pctrl);
 	start_critical_timings();
 }
 #endif /* !CONFIG_CPU_IDLE */
@@ -1425,8 +1429,11 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
  */
 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
+	u64 pctrl;
+
 	/* Don't trace irqs off for idle */
 	stop_critical_timings();
+	pctrl = hw_perf_disable_all();
 	if (cx->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cx);
@@ -1441,6 +1448,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
+	hw_perf_restore_ctrl(pctrl);
 	start_critical_timings();
 }
 
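
Both ACPI idle entry paths, the legacy !CONFIG_CPU_IDLE one (acpi_cstate_enter) and the cpuidle-based one (acpi_idle_do_entry), get identical bracketing, so any future entry path has to repeat the save/restore pair by hand. A hypothetical helper (not part of this patch) that would keep the pairing in one place:

    /* Hypothetical: run one C-state entry callback with counters quiesced. */
    static void cstate_enter_counters_off(void (*enter)(struct acpi_processor_cx *),
    				      struct acpi_processor_cx *cx)
    {
    	u64 pctrl = hw_perf_disable_all();

    	enter(cx);			/* counters are off across the transition */
    	hw_perf_restore_ctrl(pctrl);	/* previous enable mask comes back */
    }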
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 22c4469abf44..5031b5614f25 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -156,6 +156,8 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern void perf_counter_init_task(struct task_struct *task);
 extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
+extern void hw_perf_restore_ctrl(u64 ctrl);
+extern u64 hw_perf_disable_all(void);
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
@@ -166,6 +168,8 @@ perf_counter_task_tick(struct task_struct *task, int cpu) { }
 static inline void perf_counter_init_task(struct task_struct *task) { }
 static inline void perf_counter_notify(struct pt_regs *regs) { }
 static inline void perf_counter_print_debug(void) { }
+static inline void hw_perf_restore_ctrl(u64 ctrl) { }
+static inline u64 hw_perf_disable_all(void) { return 0; }
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */
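
The #else stubs keep the ACPI code building when CONFIG_PERF_COUNTERS is disabled: both helpers become static inlines, so the save/restore pair compiles away to nothing. A caller-side illustration (the function name is made up for the example):

    /* Same source builds with or without CONFIG_PERF_COUNTERS. */
    static void some_idle_path(void)
    {
    	u64 pctrl = hw_perf_disable_all();	/* stub returns 0 when disabled */

    	/* ... enter the C-state ... */
    	hw_perf_restore_ctrl(pctrl);		/* stub is a no-op when disabled */
    }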