aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
author Thomas Gleixner <tglx@linutronix.de> 2008-12-09 15:43:39 -0500
committer Ingo Molnar <mingo@elte.hu> 2008-12-11 09:45:45 -0500
commit 4ac13294e44664bb7edf4daf52edb71e7c6bbe84 (patch)
tree accb533350a655e39a8ab846abc37018b2f87ccf /drivers
parent 43874d238d5f208854a73c3225ca2a22833eec8b (diff)
perf counters: protect them against CSTATE transitions
Impact: fix rare lost events problem.

There are CPUs whose performance counters misbehave on CSTATE transitions, so provide a way to just disable/enable them around deep idle methods. (hw_perf_enable_all() is cheap on x86.)

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/processor_idle.c | 8
1 file changed, 8 insertions, 0 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5f8d746a9b81..cca804e6f1dd 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -270,8 +270,11 @@ static atomic_t c3_cpu_count;
270/* Common C-state entry for C2, C3, .. */ 270/* Common C-state entry for C2, C3, .. */
271static void acpi_cstate_enter(struct acpi_processor_cx *cstate) 271static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
272{ 272{
273 u64 pctrl;
274
273 /* Don't trace irqs off for idle */ 275 /* Don't trace irqs off for idle */
274 stop_critical_timings(); 276 stop_critical_timings();
277 pctrl = hw_perf_disable_all();
275 if (cstate->entry_method == ACPI_CSTATE_FFH) { 278 if (cstate->entry_method == ACPI_CSTATE_FFH) {
276 /* Call into architectural FFH based C-state */ 279 /* Call into architectural FFH based C-state */
277 acpi_processor_ffh_cstate_enter(cstate); 280 acpi_processor_ffh_cstate_enter(cstate);
@@ -284,6 +287,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
284 gets asserted in time to freeze execution properly. */ 287 gets asserted in time to freeze execution properly. */
285 unused = inl(acpi_gbl_FADT.xpm_timer_block.address); 288 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
286 } 289 }
290 hw_perf_restore_ctrl(pctrl);
287 start_critical_timings(); 291 start_critical_timings();
288} 292}
289#endif /* !CONFIG_CPU_IDLE */ 293#endif /* !CONFIG_CPU_IDLE */
@@ -1425,8 +1429,11 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
1425 */ 1429 */
1426static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) 1430static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
1427{ 1431{
1432 u64 pctrl;
1433
1428 /* Don't trace irqs off for idle */ 1434 /* Don't trace irqs off for idle */
1429 stop_critical_timings(); 1435 stop_critical_timings();
1436 pctrl = hw_perf_disable_all();
1430 if (cx->entry_method == ACPI_CSTATE_FFH) { 1437 if (cx->entry_method == ACPI_CSTATE_FFH) {
1431 /* Call into architectural FFH based C-state */ 1438 /* Call into architectural FFH based C-state */
1432 acpi_processor_ffh_cstate_enter(cx); 1439 acpi_processor_ffh_cstate_enter(cx);
@@ -1441,6 +1448,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
1441 gets asserted in time to freeze execution properly. */ 1448 gets asserted in time to freeze execution properly. */
1442 unused = inl(acpi_gbl_FADT.xpm_timer_block.address); 1449 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
1443 } 1450 }
1451 hw_perf_restore_ctrl(pctrl);
1444 start_critical_timings(); 1452 start_critical_timings();
1445} 1453}
1446 1454