author      Thomas Renninger <trenn@suse.de>    2007-02-27 12:13:00 -0500
committer   Len Brown <len.brown@intel.com>     2007-03-17 00:50:46 -0400
commit      25496caec111481161e7f06bbfa12a533c43cc6f (patch)
tree        8e7d2876d60406b5e932c1405b5580903d2a9043
parent      db98e0b434a6265c451ffe94ec0a29b8d0aaf587 (diff)
ACPI: Only use IPI on known broken machines (AMD, Dothan/Banias Pentium M)
Use IPI only for blacklisted CPUs; add a parameter to choose IPI vs. LAPIC.

Currently, Linux disables the lapic timer on all machines with C2 and higher
C-state support.
According to Intel, only specific Intel models (Banias/Dothan) are broken
in the sense that the lapic does not wake them up from C2.

However, I am not sure about the naming of the parameter and how it
could/should get integrated into the dyntick part
(CONFIG_GENERIC_CLOCKEVENTS); a more fine-grained check (is the TSC
still running?, ...) may be needed there. Does it make sense to always use
CLOCK_EVT_NOTIFY_BROADCAST_ON, and only use OFF when forced by use_ipi=0?

clockevents_notify(use_ipi ? CLOCK_EVT_NOTIFY_BROADCAST_ON :
                             CLOCK_EVT_NOTIFY_BROADCAST_OFF, &pr->id);
Signed-off-by: Thomas Renninger <trenn@suse.de>
Signed-off-by: Len Brown <len.brown@intel.com>
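
As a rough illustration of the CLOCK_EVT_NOTIFY_BROADCAST question raised in the
message, the broadcast decision could be routed through the clockevents layer
roughly as follows. This is only a sketch of the idea from the message, not code
from this patch, and the helper name is made up:

/*
 * Sketch only (not part of this patch): route the decision through the
 * clockevents broadcast notifier instead of switching the APIC timer to
 * IPIs directly. The function name is hypothetical.
 */
static void acpi_timer_broadcast_sketch(struct acpi_processor *pr)
{
        if (use_ipi)    /* 1 (ipi) or 2 (auto): let broadcast cover deep C-states */
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &pr->id);
        else            /* use_ipi == 0: LAPIC forced, no broadcast */
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_OFF, &pr->id);
}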
-rw-r--r--   drivers/acpi/processor_idle.c   38
1 files changed, 29 insertions, 9 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 60773005b8af..562124ed785e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -89,6 +89,12 @@ module_param(nocst, uint, 0000);
 static unsigned int bm_history __read_mostly =
         (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
 module_param(bm_history, uint, 0644);
+
+static unsigned use_ipi = 2;
+module_param(use_ipi, uint, 0644);
+MODULE_PARM_DESC(use_ipi, "IPI (vs. LAPIC) irqs for not waking up from C2/C3"
+                 " machines. 0=apic, 1=ipi, 2=auto\n");
+
 /* --------------------------------------------------------------------------
                               Power Management
    -------------------------------------------------------------------------- */
@@ -260,9 +266,8 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 
 /*
  * Some BIOS implementations switch to C3 in the published C2 state.
- * This seems to be a common problem on AMD boxen, but other vendors
- * are affected too. We pick the most conservative approach: we assume
- * that the local APIC stops in both C2 and C3.
+ * This seems to be a common problem on AMD boxen and Intel Dothan/Banias
+ * Pentium M machines.
  */
 static void acpi_timer_check_state(int state, struct acpi_processor *pr,
                                    struct acpi_processor_cx *cx)
@@ -276,8 +281,17 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
         if (pwr->timer_broadcast_on_state < state)
                 return;
 
-        if (cx->type >= ACPI_STATE_C2)
-                pr->power.timer_broadcast_on_state = state;
+        if (cx->type >= ACPI_STATE_C2) {
+                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+                        pr->power.timer_broadcast_on_state = state;
+                else if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+                          boot_cpu_data.x86 == 6) &&
+                         (boot_cpu_data.x86_model == 13 ||
+                          boot_cpu_data.x86_model == 9))
+                {
+                        pr->power.timer_broadcast_on_state = state;
+                }
+        }
 }
 
 static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
@@ -292,10 +306,16 @@ static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
 #else
         cpumask_t mask = cpumask_of_cpu(pr->id);
 
-        if (pr->power.timer_broadcast_on_state < INT_MAX)
+        if (use_ipi == 0)
                 on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
-        else
+        else if (use_ipi == 1)
                 on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+        else {
+                if (pr->power.timer_broadcast_on_state < INT_MAX)
+                        on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+                else
+                        on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+        }
 #endif
 }
 
@@ -1013,13 +1033,13 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
                 case ACPI_STATE_C2:
                         acpi_processor_power_verify_c2(cx);
-                        if (cx->valid)
+                        if (cx->valid && use_ipi != 0 && use_ipi != 1)
                                 acpi_timer_check_state(i, pr, cx);
                         break;
 
                 case ACPI_STATE_C3:
                         acpi_processor_power_verify_c3(pr, cx);
-                        if (cx->valid)
+                        if (cx->valid && use_ipi != 0 && use_ipi != 1)
                                 acpi_timer_check_state(i, pr, cx);
                         break;
                 }
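
For reference, the hard-coded model numbers in acpi_timer_check_state() above match
the CPUs named in the subject: family 6, model 9 is Banias and model 13 is Dothan.
Purely as an illustration, the same test could be factored into a helper along
these lines (hypothetical name, not part of the patch):

/* Hypothetical helper, not in the patch: true if the lapic is known to stop in C2/C3. */
static int lapic_stops_in_deep_cstate(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                return 1;                               /* common on AMD boxen */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            (boot_cpu_data.x86_model == 9 ||            /* Banias Pentium M */
             boot_cpu_data.x86_model == 13))            /* Dothan Pentium M */
                return 1;
        return 0;
}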