Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--      drivers/acpi/processor_idle.c   675
1 file changed, 14 insertions(+), 661 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7acb23f830ce..259f6e806314 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -66,43 +66,17 @@ ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER       "power"
 #define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define PM_TIMER_TICK_NS                (1000000000ULL/PM_TIMER_FREQUENCY)
-#ifndef CONFIG_CPU_IDLE
-#define C2_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
-#define C3_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
-static void (*pm_idle_save) (void) __read_mostly;
-#else
 #define C2_OVERHEAD                     1       /* 1us */
 #define C3_OVERHEAD                     1       /* 1us */
-#endif
 #define PM_TIMER_TICKS_TO_US(p)         (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
 
 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
-#ifdef CONFIG_CPU_IDLE
 module_param(max_cstate, uint, 0000);
-#else
-module_param(max_cstate, uint, 0644);
-#endif
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
 
-#ifndef CONFIG_CPU_IDLE
-/*
- * bm_history -- bit-mask with a bit per jiffy of bus-master activity
- * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
- *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
- *  100 HZ: 0x0000000F:  4 jiffies = 40ms
- * reduce history for more aggressive entry into C3
- */
-static unsigned int bm_history __read_mostly =
-    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
-module_param(bm_history, uint, 0644);
-
-static int acpi_processor_set_power_policy(struct acpi_processor *pr);
-
-#else   /* CONFIG_CPU_IDLE */
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
-#endif
 
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
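
A note on the constants this hunk touches: the tick/microsecond conversion macros above assume the ACPI PM timer's fixed 3.579545 MHz rate. A standalone sketch of the arithmetic (illustrative only; PM_TIMER_FREQUENCY's value is the spec-mandated rate in Hz, and main() here is just a demo harness):

#include <stdio.h>

/* Mirrors the kernel macros in the hunk above. */
#define PM_TIMER_FREQUENCY      3579545
#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

int main(void)
{
        /* ~3.58 ticks per microsecond: this is why the removed
         * !CONFIG_CPU_IDLE build expressed its ~1us entry/exit cost as
         * a 4-tick overhead, while the cpuidle build keeps latencies in
         * plain microseconds and uses 1. */
        printf("100us = %d ticks\n", US_TO_PM_TIMER_TICKS(100));  /* 357 */
        printf("357 ticks = %d us\n", PM_TIMER_TICKS_TO_US(357)); /* 99  */
        return 0;
}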
@@ -224,75 +198,6 @@ static void acpi_safe_halt(void)
         current_thread_info()->status |= TS_POLLING;
 }
 
-#ifndef CONFIG_CPU_IDLE
-
-static void
-acpi_processor_power_activate(struct acpi_processor *pr,
-                              struct acpi_processor_cx *new)
-{
-        struct acpi_processor_cx *old;
-
-        if (!pr || !new)
-                return;
-
-        old = pr->power.state;
-
-        if (old)
-                old->promotion.count = 0;
-        new->demotion.count = 0;
-
-        /* Cleanup from old state. */
-        if (old) {
-                switch (old->type) {
-                case ACPI_STATE_C3:
-                        /* Disable bus master reload */
-                        if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
-                                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
-                        break;
-                }
-        }
-
-        /* Prepare to use new state. */
-        switch (new->type) {
-        case ACPI_STATE_C3:
-                /* Enable bus master reload */
-                if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
-                        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
-                break;
-        }
-
-        pr->power.state = new;
-
-        return;
-}
-
-static atomic_t c3_cpu_count;
-
-/* Common C-state entry for C2, C3, .. */
-static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
-{
-        u64 perf_flags;
-
-        /* Don't trace irqs off for idle */
-        stop_critical_timings();
-        perf_flags = hw_perf_save_disable();
-        if (cstate->entry_method == ACPI_CSTATE_FFH) {
-                /* Call into architectural FFH based C-state */
-                acpi_processor_ffh_cstate_enter(cstate);
-        } else {
-                int unused;
-                /* IO port based C-state */
-                inb(cstate->address);
-                /* Dummy wait op - must do something useless after P_LVL2 read
-                   because chipsets cannot guarantee that STPCLK# signal
-                   gets asserted in time to freeze execution properly. */
-                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
-        }
-        hw_perf_restore(perf_flags);
-        start_critical_timings();
-}
-#endif /* !CONFIG_CPU_IDLE */
-
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
 
 /*
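
acpi_cstate_enter() disappears here, but its I/O-port entry sequence survives in acpi_idle_do_entry(), patched further below. The dummy read is the non-obvious part; a minimal sketch of the pattern, reusing the names from the hunk above (the function itself is illustrative, not the kernel's):

/* Sketch of the I/O-port C-state entry pattern removed above. */
static void io_cstate_enter_sketch(struct acpi_processor_cx *cstate)
{
        /* Reading the P_LVL2/P_LVL3 port asks the chipset to stop the
         * CPU clock; the value read is meaningless. */
        inb(cstate->address);
        /* STPCLK# may not assert before the next instruction, so issue
         * a second, useless I/O read (the PM timer) that cannot retire
         * until the chipset has frozen execution. */
        (void)inl(acpi_gbl_FADT.xpm_timer_block.address);
}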
@@ -394,421 +299,6 @@ static int tsc_halts_in_c(int state)
 }
 #endif
 
-#ifndef CONFIG_CPU_IDLE
-static void acpi_processor_idle(void)
-{
-        struct acpi_processor *pr = NULL;
-        struct acpi_processor_cx *cx = NULL;
-        struct acpi_processor_cx *next_state = NULL;
-        int sleep_ticks = 0;
-        u32 t1, t2 = 0;
-
-        /*
-         * Interrupts must be disabled during bus mastering calculations and
-         * for C2/C3 transitions.
-         */
-        local_irq_disable();
-
-        pr = __get_cpu_var(processors);
-        if (!pr) {
-                local_irq_enable();
-                return;
-        }
-
-        /*
-         * Check whether we truly need to go idle, or should
-         * reschedule:
-         */
-        if (unlikely(need_resched())) {
-                local_irq_enable();
-                return;
-        }
-
-        cx = pr->power.state;
-        if (!cx || acpi_idle_suspend) {
-                if (pm_idle_save) {
-                        pm_idle_save(); /* enables IRQs */
-                } else {
-                        acpi_safe_halt();
-                        local_irq_enable();
-                }
-
-                return;
-        }
-
-        /*
-         * Check BM Activity
-         * -----------------
-         * Check for bus mastering activity (if required), record, and check
-         * for demotion.
-         */
-        if (pr->flags.bm_check) {
-                u32 bm_status = 0;
-                unsigned long diff = jiffies - pr->power.bm_check_timestamp;
-
-                if (diff > 31)
-                        diff = 31;
-
-                pr->power.bm_activity <<= diff;
-
-                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
-                if (bm_status) {
-                        pr->power.bm_activity |= 0x1;
-                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
-                }
-                /*
-                 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
-                 * the true state of bus mastering activity; forcing us to
-                 * manually check the BMIDEA bit of each IDE channel.
-                 */
-                else if (errata.piix4.bmisx) {
-                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
-                            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
-                                pr->power.bm_activity |= 0x1;
-                }
-
-                pr->power.bm_check_timestamp = jiffies;
-
-                /*
-                 * If bus mastering is or was active this jiffy, demote
-                 * to avoid a faulty transition.  Note that the processor
-                 * won't enter a low-power state during this call (to this
-                 * function) but should upon the next.
-                 *
-                 * TBD: A better policy might be to fallback to the demotion
-                 *      state (use it for this quantum only) instead of
-                 *      demoting -- and rely on duration as our sole demotion
-                 *      qualification.  This may, however, introduce DMA
-                 *      issues (e.g. floppy DMA transfer overrun/underrun).
-                 */
-                if ((pr->power.bm_activity & 0x1) &&
-                    cx->demotion.threshold.bm) {
-                        local_irq_enable();
-                        next_state = cx->demotion.state;
-                        goto end;
-                }
-        }
-
-#ifdef CONFIG_HOTPLUG_CPU
-        /*
-         * Check for P_LVL2_UP flag before entering C2 and above on
-         * an SMP system.  We do it here instead of doing it at _CST/P_LVL
-         * detection phase, to work cleanly with logical CPU hotplug.
-         */
-        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
-                cx = &pr->power.states[ACPI_STATE_C1];
-#endif
-
-        /*
-         * Sleep:
-         * ------
-         * Invoke the current Cx state to put the processor to sleep.
-         */
-        if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
-                current_thread_info()->status &= ~TS_POLLING;
-                /*
-                 * TS_POLLING-cleared state must be visible before we
-                 * test NEED_RESCHED:
-                 */
-                smp_mb();
-                if (need_resched()) {
-                        current_thread_info()->status |= TS_POLLING;
-                        local_irq_enable();
-                        return;
-                }
-        }
-
-        switch (cx->type) {
-
-        case ACPI_STATE_C1:
-                /*
-                 * Invoke C1.
-                 * Use the appropriate idle routine, the one that would
-                 * be used without acpi C-states.
-                 */
-                if (pm_idle_save) {
-                        pm_idle_save(); /* enables IRQs */
-                } else {
-                        acpi_safe_halt();
-                        local_irq_enable();
-                }
-
-                /*
-                 * TBD: Can't get time duration while in C1, as resumes
-                 *      go to an ISR rather than here.  Need to instrument
-                 *      base interrupt handler.
-                 *
-                 * Note: the TSC better not stop in C1, sched_clock() will
-                 *       skew otherwise.
-                 */
-                sleep_ticks = 0xFFFFFFFF;
-
-                break;
-
-        case ACPI_STATE_C2:
-                /* Get start time (ticks) */
-                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-                /* Tell the scheduler that we are going deep-idle: */
-                sched_clock_idle_sleep_event();
-                /* Invoke C2 */
-                acpi_state_timer_broadcast(pr, cx, 1);
-                acpi_cstate_enter(cx);
-                /* Get end time (ticks) */
-                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
-                /* TSC halts in C2, so notify users */
-                if (tsc_halts_in_c(ACPI_STATE_C2))
-                        mark_tsc_unstable("possible TSC halt in C2");
-#endif
-                /* Compute time (ticks) that we were actually asleep */
-                sleep_ticks = ticks_elapsed(t1, t2);
-
-                /* Tell the scheduler how much we idled: */
-                sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
-
-                /* Re-enable interrupts */
-                local_irq_enable();
-                /* Do not account our idle-switching overhead: */
-                sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
-
-                current_thread_info()->status |= TS_POLLING;
-                acpi_state_timer_broadcast(pr, cx, 0);
-                break;
-
-        case ACPI_STATE_C3:
-                acpi_unlazy_tlb(smp_processor_id());
-                /*
-                 * Must be done before busmaster disable as we might
-                 * need to access HPET !
-                 */
-                acpi_state_timer_broadcast(pr, cx, 1);
-                /*
-                 * disable bus master
-                 * bm_check implies we need ARB_DIS
-                 * !bm_check implies we need cache flush
-                 * bm_control implies whether we can do ARB_DIS
-                 *
-                 * That leaves a case where bm_check is set and bm_control is
-                 * not set. In that case we cannot do much, we enter C3
-                 * without doing anything.
-                 */
-                if (pr->flags.bm_check && pr->flags.bm_control) {
-                        if (atomic_inc_return(&c3_cpu_count) ==
-                            num_online_cpus()) {
-                                /*
-                                 * All CPUs are trying to go to C3
-                                 * Disable bus master arbitration
-                                 */
-                                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
-                        }
-                } else if (!pr->flags.bm_check) {
-                        /* SMP with no shared cache... Invalidate cache */
-                        ACPI_FLUSH_CPU_CACHE();
-                }
-
-                /* Get start time (ticks) */
-                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-                /* Invoke C3 */
-                /* Tell the scheduler that we are going deep-idle: */
-                sched_clock_idle_sleep_event();
-                acpi_cstate_enter(cx);
-                /* Get end time (ticks) */
-                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-                if (pr->flags.bm_check && pr->flags.bm_control) {
-                        /* Enable bus master arbitration */
-                        atomic_dec(&c3_cpu_count);
-                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
-                }
-
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
-                /* TSC halts in C3, so notify users */
-                if (tsc_halts_in_c(ACPI_STATE_C3))
-                        mark_tsc_unstable("TSC halts in C3");
-#endif
-                /* Compute time (ticks) that we were actually asleep */
-                sleep_ticks = ticks_elapsed(t1, t2);
-                /* Tell the scheduler how much we idled: */
-                sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
-
-                /* Re-enable interrupts */
-                local_irq_enable();
-                /* Do not account our idle-switching overhead: */
-                sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
-
-                current_thread_info()->status |= TS_POLLING;
-                acpi_state_timer_broadcast(pr, cx, 0);
-                break;
-
-        default:
-                local_irq_enable();
-                return;
-        }
-        cx->usage++;
-        if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
-                cx->time += sleep_ticks;
-
-        next_state = pr->power.state;
-
-#ifdef CONFIG_HOTPLUG_CPU
-        /* Don't do promotion/demotion */
-        if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
-                next_state = cx;
-                goto end;
-        }
-#endif
-
-        /*
-         * Promotion?
-         * ----------
-         * Track the number of longs (time asleep is greater than threshold)
-         * and promote when the count threshold is reached.  Note that bus
-         * mastering activity may prevent promotions.
-         * Do not promote above max_cstate.
-         */
-        if (cx->promotion.state &&
-            ((cx->promotion.state - pr->power.states) <= max_cstate)) {
-                if (sleep_ticks > cx->promotion.threshold.ticks &&
-                    cx->promotion.state->latency <=
-                                pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
-                        cx->promotion.count++;
-                        cx->demotion.count = 0;
-                        if (cx->promotion.count >=
-                            cx->promotion.threshold.count) {
-                                if (pr->flags.bm_check) {
-                                        if (!(pr->power.bm_activity &
-                                              cx->promotion.threshold.bm)) {
-                                                next_state = cx->promotion.state;
-                                                goto end;
-                                        }
-                                } else {
-                                        next_state = cx->promotion.state;
-                                        goto end;
-                                }
-                        }
-                }
-        }
-
-        /*
-         * Demotion?
-         * ---------
-         * Track the number of shorts (time asleep is less than time threshold)
-         * and demote when the usage threshold is reached.
-         */
-        if (cx->demotion.state) {
-                if (sleep_ticks < cx->demotion.threshold.ticks) {
-                        cx->demotion.count++;
-                        cx->promotion.count = 0;
-                        if (cx->demotion.count >= cx->demotion.threshold.count) {
-                                next_state = cx->demotion.state;
-                                goto end;
-                        }
-                }
-        }
-
-end:
-        /*
-         * Demote if current state exceeds max_cstate
-         * or if the latency of the current state is unacceptable
-         */
-        if ((pr->power.state - pr->power.states) > max_cstate ||
-            pr->power.state->latency >
-                                pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
-                if (cx->demotion.state)
-                        next_state = cx->demotion.state;
-        }
-
-        /*
-         * New Cx State?
-         * -------------
-         * If we're going to start using a new Cx state we must clean up
-         * from the previous and prepare to use the new.
-         */
-        if (next_state != pr->power.state)
-                acpi_processor_power_activate(pr, next_state);
-}
-
-static int acpi_processor_set_power_policy(struct acpi_processor *pr)
-{
-        unsigned int i;
-        unsigned int state_is_set = 0;
-        struct acpi_processor_cx *lower = NULL;
-        struct acpi_processor_cx *higher = NULL;
-        struct acpi_processor_cx *cx;
-
-
-        if (!pr)
-                return -EINVAL;
-
-        /*
-         * This function sets the default Cx state policy (OS idle handler).
-         * Our scheme is to promote quickly to C2 but more conservatively
-         * to C3.  We're favoring C2 for its characteristics of low latency
-         * (quick response), good power savings, and ability to allow bus
-         * mastering activity.  Note that the Cx state policy is completely
-         * customizable and can be altered dynamically.
-         */
-
-        /* startup state */
-        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
-                cx = &pr->power.states[i];
-                if (!cx->valid)
-                        continue;
-
-                if (!state_is_set)
-                        pr->power.state = cx;
-                state_is_set++;
-                break;
-        }
-
-        if (!state_is_set)
-                return -ENODEV;
-
-        /* demotion */
-        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
-                cx = &pr->power.states[i];
-                if (!cx->valid)
-                        continue;
-
-                if (lower) {
-                        cx->demotion.state = lower;
-                        cx->demotion.threshold.ticks = cx->latency_ticks;
-                        cx->demotion.threshold.count = 1;
-                        if (cx->type == ACPI_STATE_C3)
-                                cx->demotion.threshold.bm = bm_history;
-                }
-
-                lower = cx;
-        }
-
-        /* promotion */
-        for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
-                cx = &pr->power.states[i];
-                if (!cx->valid)
-                        continue;
-
-                if (higher) {
-                        cx->promotion.state = higher;
-                        cx->promotion.threshold.ticks = cx->latency_ticks;
-                        if (cx->type >= ACPI_STATE_C2)
-                                cx->promotion.threshold.count = 4;
-                        else
-                                cx->promotion.threshold.count = 10;
-                        if (higher->type == ACPI_STATE_C3)
-                                cx->promotion.threshold.bm = bm_history;
-                }
-
-                higher = cx;
-        }
-
-        return 0;
-}
-#endif /* !CONFIG_CPU_IDLE */
-
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
 
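
The bulk of this hunk is the old in-kernel promotion/demotion governor. Its subtlest piece is the bus-master bookkeeping: bm_activity is a 32-bit sliding history with one bit per jiffy, and promotion to C3 is vetoed while any bit under the bm_history-derived mask is set. A condensed sketch of that bookkeeping (illustrative; the field names follow the removed code, the function is not from the kernel):

/* Condensed sketch of the removed bus-master history tracking. */
static void bm_history_update_sketch(struct acpi_processor *pr, u32 bm_status)
{
        unsigned long diff = jiffies - pr->power.bm_check_timestamp;

        if (diff > 31)
                diff = 31;                      /* history is 32 bits wide */
        pr->power.bm_activity <<= diff;         /* one bit per elapsed jiffy */
        if (bm_status)
                pr->power.bm_activity |= 0x1;   /* DMA seen this jiffy */
        pr->power.bm_check_timestamp = jiffies;

        /* Promotion to C3 was then suppressed while
         * (pr->power.bm_activity & promotion.threshold.bm) != 0,
         * with threshold.bm initialized from the bm_history mask. */
}

Under CONFIG_CPU_IDLE none of this survives: the cpuidle governor re-picks a state on every idle entry, and acpi_idle_bm_check() below samples BM_STS directly.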
@@ -1051,11 +541,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
          */
         cx->valid = 1;
 
-#ifndef CONFIG_CPU_IDLE
-        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
-#else
         cx->latency_ticks = cx->latency;
-#endif
 
         return;
 }
@@ -1125,7 +611,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                           " for C3 to be enabled on SMP systems\n"));
                 return;
         }
-        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
 }
 
 /*
@@ -1136,11 +621,16 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
          */
         cx->valid = 1;
 
-#ifndef CONFIG_CPU_IDLE
-        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
-#else
         cx->latency_ticks = cx->latency;
-#endif
+        /*
+         * On older chipsets, BM_RLD needs to be set
+         * in order for Bus Master activity to wake the
+         * system from C3.  Newer chipsets handle DMA
+         * during C3 automatically and BM_RLD is a NOP.
+         * In either case, the proper way to
+         * handle BM_RLD is to set it and leave it set.
+         */
+        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 
         return;
 }
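
Read together with the deletion of acpi_idle_update_bm_rld() and its call sites further below, this hunk changes the BM_RLD policy from toggling around every C3 transition to setting the bit once at C3 validation time. The before/after, quoted from this patch:

/* Before (removed below): two register writes per C3 entry/exit.
 *
 *      if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3)
 *              acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
 *      if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3)
 *              acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 *
 * After (this hunk): one write when a C3 state is validated, never cleared.
 *
 *      acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 *
 * Safe because BM_RLD is either required for DMA to wake the system
 * from C3 (older chipsets) or a NOP (newer ones), per the new comment.
 */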
@@ -1205,20 +695,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 
         pr->power.count = acpi_processor_power_verify(pr);
 
-#ifndef CONFIG_CPU_IDLE
-        /*
-         * Set Default Policy
-         * ------------------
-         * Now that we know which states are supported, set the default
-         * policy.  Note that this policy can be changed dynamically
-         * (e.g. encourage deeper sleeps to conserve battery life when
-         * not on AC).
-         */
-        result = acpi_processor_set_power_policy(pr);
-        if (result)
-                return result;
-#endif
-
         /*
          * if one state of type C2 or C3 is available, mark this
          * CPU as being "idle manageable"
@@ -1316,69 +792,6 @@ static const struct file_operations acpi_processor_power_fops = {
         .release = single_release,
 };
 
-#ifndef CONFIG_CPU_IDLE
-
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
-{
-        int result = 0;
-
-        if (boot_option_idle_override)
-                return 0;
-
-        if (!pr)
-                return -EINVAL;
-
-        if (nocst) {
-                return -ENODEV;
-        }
-
-        if (!pr->flags.power_setup_done)
-                return -ENODEV;
-
-        /*
-         * Fall back to the default idle loop, when pm_idle_save had
-         * been initialized.
-         */
-        if (pm_idle_save) {
-                pm_idle = pm_idle_save;
-                /* Relies on interrupts forcing exit from idle. */
-                synchronize_sched();
-        }
-
-        pr->flags.power = 0;
-        result = acpi_processor_get_power_info(pr);
-        if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
-                pm_idle = acpi_processor_idle;
-
-        return result;
-}
-
-#ifdef CONFIG_SMP
-static void smp_callback(void *v)
-{
-        /* we already woke the CPU up, nothing more to do */
-}
-
-/*
- * This function gets called when a part of the kernel has a new latency
- * requirement.  This means we need to get all processors out of their C-state,
- * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
- * wakes them all right up.
- */
-static int acpi_processor_latency_notify(struct notifier_block *b,
-                                         unsigned long l, void *v)
-{
-        smp_call_function(smp_callback, NULL, 1);
-        return NOTIFY_OK;
-}
-
-static struct notifier_block acpi_processor_latency_notifier = {
-        .notifier_call = acpi_processor_latency_notify,
-};
-
-#endif
-
-#else /* CONFIG_CPU_IDLE */
 
 /**
  * acpi_idle_bm_check - checks if bus master activity was detected
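
Dropping the notifier means nothing actively kicks CPUs out of a deep C-state when a driver tightens the DMA-latency bound. That looks safe here because the cpuidle governors consult PM_QOS_CPU_DMA_LATENCY on each state selection, so a new bound takes hold at every CPU's next idle entry; the IPI would only have sped that up by one idle period. For reference, the registration pairing being removed (the matching call-site deletions appear in the init/exit hunks below):

/* Sketch of the removed wiring, quoted from the hunks below:
 *
 *      pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
 *                          &acpi_processor_latency_notifier);
 *      ...
 *      pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
 *                             &acpi_processor_latency_notifier);
 */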
@@ -1387,7 +800,7 @@ static int acpi_idle_bm_check(void)
 {
         u32 bm_status = 0;
 
-        acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+        acpi_get_register_unlocked(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
         if (bm_status)
                 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
         /*
@@ -1404,25 +817,6 @@ static int acpi_idle_bm_check(void)
 }
 
 /**
- * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
- * @pr: the processor
- * @target: the new target state
- */
-static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
-                                           struct acpi_processor_cx *target)
-{
-        if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
-                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
-                pr->flags.bm_rld_set = 0;
-        }
-
-        if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
-                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
-                pr->flags.bm_rld_set = 1;
-        }
-}
-
-/**
  * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
  * @cx: cstate data
 
@@ -1430,10 +824,13 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
  */
 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
+        u64 perf_flags;
+
         u64 pctrl;
 
         /* Don't trace irqs off for idle */
         stop_critical_timings();
+        perf_flags = hw_perf_save_disable();
         pctrl = hw_perf_save_disable();
         if (cx->entry_method == ACPI_CSTATE_FFH) {
                 /* Call into architectural FFH based C-state */
@@ -1449,6 +846,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
                    gets asserted in time to freeze execution properly. */
                 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
         }
+        hw_perf_restore(perf_flags);
         hw_perf_restore(pctrl);
         start_critical_timings();
 }
@@ -1481,9 +879,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
                 return 0;
         }
 
-        if (pr->flags.bm_check)
-                acpi_idle_update_bm_rld(pr, cx);
-
         t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
         acpi_idle_do_entry(cx);
         t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
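
The t1/t2 reads bracketing acpi_idle_do_entry() measure residency with the PM timer, which wraps at 24 bits on most platforms (32 when ACPI_FADT_32BIT_TIMER is set). The differencing helper is outside this diff; a sketch of its usual shape, offered as a reconstruction rather than the file's actual ticks_elapsed():

/* Sketch of 24/32-bit PM-timer wraparound handling (reconstruction). */
static u32 ticks_elapsed_sketch(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return t2 - t1;                         /* no wrap */
        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
                return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF; /* 24-bit timer */
        else
                return (0xFFFFFFFF - t1) + t2;          /* 32-bit timer */
}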
@@ -1535,9 +930,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
          */
         acpi_state_timer_broadcast(pr, cx, 1);
 
-        if (pr->flags.bm_check)
-                acpi_idle_update_bm_rld(pr, cx);
-
         if (cx->type == ACPI_STATE_C3)
                 ACPI_FLUSH_CPU_CACHE();
 
@@ -1629,8 +1021,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
          */
         acpi_state_timer_broadcast(pr, cx, 1);
 
-        acpi_idle_update_bm_rld(pr, cx);
-
         /*
          * disable bus master
          * bm_check implies we need ARB_DIS
@@ -1803,8 +1193,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
         return ret;
 }
 
-#endif /* CONFIG_CPU_IDLE */
-
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                         struct acpi_device *device)
 {
@@ -1833,10 +1221,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                        "ACPI: processor limited to max C-state %d\n",
                        max_cstate);
                 first_run++;
-#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
-                pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
-                                    &acpi_processor_latency_notifier);
-#endif
         }
 
         if (!pr)
@@ -1860,11 +1244,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
          * platforms that only support C1.
          */
         if (pr->flags.power) {
-#ifdef CONFIG_CPU_IDLE
                 acpi_processor_setup_cpuidle(pr);
                 if (cpuidle_register_device(&pr->power.dev))
                         return -EIO;
-#endif
 
                 printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
                 for (i = 1; i <= pr->power.count; i++)
@@ -1872,13 +1254,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                         printk(" C%d[C%d]", i,
                                pr->power.states[i].type);
                 printk(")\n");
-
-#ifndef CONFIG_CPU_IDLE
-                if (pr->id == 0) {
-                        pm_idle_save = pm_idle;
-                        pm_idle = acpi_processor_idle;
-                }
-#endif
         }
 
         /* 'power' [R] */
@@ -1897,34 +1272,12 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
         if (boot_option_idle_override)
                 return 0;
 
-#ifdef CONFIG_CPU_IDLE
         cpuidle_unregister_device(&pr->power.dev);
-#endif
         pr->flags.power_setup_done = 0;
 
         if (acpi_device_dir(device))
                 remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                   acpi_device_dir(device));
 
-#ifndef CONFIG_CPU_IDLE
-
-        /* Unregister the idle handler when processor #0 is removed. */
-        if (pr->id == 0) {
-                if (pm_idle_save)
-                        pm_idle = pm_idle_save;
-
-                /*
-                 * We are about to unload the current idle thread pm callback
-                 * (pm_idle).  Wait for all processors to update cached/local
-                 * copies of pm_idle before proceeding.
-                 */
-                cpu_idle_wait();
-#ifdef CONFIG_SMP
-                pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
-                                       &acpi_processor_latency_notifier);
-#endif
-        }
-#endif
-
         return 0;
 }