Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--	drivers/acpi/processor_idle.c	125
1 file changed, 104 insertions, 21 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 6c6751b1405b..60773005b8af 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -39,6 +39,25 @@
 #include <linux/moduleparam.h>
 #include <linux/sched.h>	/* need_resched() */
 #include <linux/latency.h>
+#include <linux/clockchips.h>
+
+/*
+ * Include the apic definitions for x86 to have the APIC timer related defines
+ * available also for UP (on SMP it gets magically included via linux/smp.h).
+ * asm/acpi.h is not an option, as it would require more include magic. Also
+ * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
+ */
+#ifdef CONFIG_X86
+#include <asm/apic.h>
+#endif
+
+/*
+ * Include the apic definitions for x86 to have the APIC timer related defines
+ * available also for UP (on SMP it gets magically included via linux/smp.h).
+ */
+#ifdef CONFIG_X86
+#include <asm/apic.h>
+#endif
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -48,9 +67,8 @@
 
 #define ACPI_PROCESSOR_COMPONENT 0x01000000
 #define ACPI_PROCESSOR_CLASS "processor"
-#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver"
 #define _COMPONENT ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("acpi_processor")
+ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER "power"
 #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define C2_OVERHEAD 4	/* 1us (3.579 ticks per us) */
@@ -238,6 +256,81 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 	}
 }
 
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+
+/*
+ * Some BIOS implementations switch to C3 in the published C2 state.
+ * This seems to be a common problem on AMD boxen, but other vendors
+ * are affected too. We pick the most conservative approach: we assume
+ * that the local APIC stops in both C2 and C3.
+ */
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+				   struct acpi_processor_cx *cx)
+{
+	struct acpi_processor_power *pwr = &pr->power;
+
+	/*
+	 * Check, if one of the previous states already marked the lapic
+	 * unstable
+	 */
+	if (pwr->timer_broadcast_on_state < state)
+		return;
+
+	if (cx->type >= ACPI_STATE_C2)
+		pr->power.timer_broadcast_on_state = state;
+}
+
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
+{
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+	unsigned long reason;
+
+	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
+		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+	clockevents_notify(reason, &pr->id);
+#else
+	cpumask_t mask = cpumask_of_cpu(pr->id);
+
+	if (pr->power.timer_broadcast_on_state < INT_MAX)
+		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+	else
+		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+#endif
+}
+
+/* Power(C) State timer broadcast control */
+static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+				       struct acpi_processor_cx *cx,
+				       int broadcast)
+{
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+
+	int state = cx - pr->power.states;
+
+	if (state >= pr->power.timer_broadcast_on_state) {
+		unsigned long reason;
+
+		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
+			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
+		clockevents_notify(reason, &pr->id);
+	}
+#endif
+}
+
+#else
+
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+				   struct acpi_processor_cx *cstate) { }
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
+static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+				       struct acpi_processor_cx *cx,
+				       int broadcast)
+{
+}
+
+#endif
+
 static void acpi_processor_idle(void)
 {
 	struct acpi_processor *pr = NULL;
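The three helpers added above share one piece of state, pr->power.timer_broadcast_on_state, used as a threshold: the index of the shallowest C-state whose entry is assumed to stop the local APIC timer. As a reading aid only (not part of the patch; the function name and the fixed three-state layout are assumptions), their interplay boils down to:

static void example_broadcast_setup(struct acpi_processor *pr)
{
	int i;

	/* Start from "no C-state needs the broadcast timer". */
	pr->power.timer_broadcast_on_state = INT_MAX;

	/* Assume states[1..3] are C1..C3 and all are valid. */
	for (i = 1; i <= 3; i++)
		acpi_timer_check_state(i, pr, &pr->power.states[i]);
	/*
	 * C1 is shallower than C2 and leaves the threshold untouched;
	 * C2 lowers it to 2; C3 returns early because 2 < 3.
	 */

	/* Threshold is now below INT_MAX, so the CPU goes to broadcast mode. */
	acpi_propagate_timer_broadcast(pr);
}

acpi_state_timer_broadcast() then compares the index of the state actually being entered against the same threshold, so only C2/C3 entries pay for the clockevents notification.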
@@ -382,6 +475,7 @@ static void acpi_processor_idle(void)
 		/* Get start time (ticks) */
 		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 		/* Invoke C2 */
+		acpi_state_timer_broadcast(pr, cx, 1);
 		acpi_cstate_enter(cx);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -396,6 +490,7 @@ static void acpi_processor_idle(void)
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
+		acpi_state_timer_broadcast(pr, cx, 0);
 		break;
 
 	case ACPI_STATE_C3:
@@ -417,6 +512,7 @@ static void acpi_processor_idle(void)
 		/* Get start time (ticks) */
 		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 		/* Invoke C3 */
+		acpi_state_timer_broadcast(pr, cx, 1);
 		acpi_cstate_enter(cx);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -436,6 +532,7 @@ static void acpi_processor_idle(void)
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
+		acpi_state_timer_broadcast(pr, cx, 0);
 		break;
 
 	default:
@@ -904,11 +1001,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 	unsigned int i;
 	unsigned int working = 0;
 
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-	int timer_broadcast = 0;
-	cpumask_t mask = cpumask_of_cpu(pr->id);
-	on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
-#endif
+	pr->power.timer_broadcast_on_state = INT_MAX;
 
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
 		struct acpi_processor_cx *cx = &pr->power.states[i];
@@ -920,21 +1013,14 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
 		case ACPI_STATE_C2:
 			acpi_processor_power_verify_c2(cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-			/* Some AMD systems fake C3 as C2, but still
-			   have timer troubles */
-			if (cx->valid &&
-			    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-				timer_broadcast++;
-#endif
+			if (cx->valid)
+				acpi_timer_check_state(i, pr, cx);
 			break;
 
 		case ACPI_STATE_C3:
 			acpi_processor_power_verify_c3(pr, cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
 			if (cx->valid)
-				timer_broadcast++;
-#endif
+				acpi_timer_check_state(i, pr, cx);
 			break;
 		}
 
@@ -942,10 +1028,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 			working++;
 	}
 
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-	if (timer_broadcast)
-		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
-#endif
+	acpi_propagate_timer_broadcast(pr);
 
 	return (working);
 }
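Viewed end to end, on a kernel with CONFIG_GENERIC_CLOCKEVENTS and ARCH_APICTIMER_STOPS_ON_C3 the per-entry calls added in acpi_processor_idle() reduce to a clockevents notify pair bracketing the actual C-state entry whenever the state index is at or beyond the broadcast threshold. A condensed sketch of that pattern (the wrapper name is illustrative; the patch inlines this directly in the C2 and C3 cases):

static void example_enter_broadcast_idle(struct acpi_processor *pr,
					 struct acpi_processor_cx *cx)
{
	/* Local APIC timer may stop: hand the wakeup over to the broadcast device. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &pr->id);

	acpi_cstate_enter(cx);		/* the actual C2/C3 entry */

	/* Back out of the C-state: the local APIC timer is usable again. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &pr->id);
}

On kernels without the generic clockevents framework, the #else branch of acpi_propagate_timer_broadcast() keeps the previous behaviour of switching the affected CPU between the APIC timer and broadcast IPIs once, at verify time.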