path: root/drivers/acpi/processor_idle.c
Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--  drivers/acpi/processor_idle.c | 667
1 file changed, 10 insertions(+), 657 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 66a9d8145562..7bc22a471fe3 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -66,43 +66,17 @@ ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER "power"
 #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
-#ifndef CONFIG_CPU_IDLE
-#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
-#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */
-static void (*pm_idle_save) (void) __read_mostly;
-#else
 #define C2_OVERHEAD 1 /* 1us */
 #define C3_OVERHEAD 1 /* 1us */
-#endif
 #define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
 
 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
-#ifdef CONFIG_CPU_IDLE
 module_param(max_cstate, uint, 0000);
-#else
-module_param(max_cstate, uint, 0644);
-#endif
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
 
-#ifndef CONFIG_CPU_IDLE
-/*
- * bm_history -- bit-mask with a bit per jiffy of bus-master activity
- * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
- * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
- * 100 HZ: 0x0000000F: 4 jiffies = 40ms
- * reduce history for more aggressive entry into C3
- */
-static unsigned int bm_history __read_mostly =
- (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
-module_param(bm_history, uint, 0644);
-
-static int acpi_processor_set_power_policy(struct acpi_processor *pr);
-
-#else /* CONFIG_CPU_IDLE */
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
-#endif
 
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
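
The overhead constants above change meaning with this patch: the old (!CONFIG_CPU_IDLE) path accounted idle residency in PM timer ticks, and the ACPI PM timer runs at 3.579545 MHz, so roughly 3.58 ticks correspond to one microsecond (hence the old C2_OVERHEAD of 4 ticks); the surviving cpuidle path accounts directly in microseconds (C2_OVERHEAD of 1). A standalone userspace sketch of the conversion macros, kept here only to make the arithmetic concrete (the sample values of 100 us and 357 ticks are arbitrary):

#include <stdio.h>

/* Documented ACPI PM timer rate; the same value the kernel uses. */
#define PM_TIMER_FREQUENCY 3579545 /* Hz */
#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000) / (PM_TIMER_FREQUENCY/1000))

int main(void)
{
    /* 100 us of latency is about 357 PM timer ticks ... */
    printf("100 us -> %ld ticks\n", (long)US_TO_PM_TIMER_TICKS(100L));
    /* ... and converting back only loses integer rounding (99 us). */
    printf("357 ticks -> %ld us\n", (long)PM_TIMER_TICKS_TO_US(357L));
    return 0;
}
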
@@ -224,71 +198,6 @@ static void acpi_safe_halt(void)
 current_thread_info()->status |= TS_POLLING;
 }
 
-#ifndef CONFIG_CPU_IDLE
-
-static void
-acpi_processor_power_activate(struct acpi_processor *pr,
- struct acpi_processor_cx *new)
-{
- struct acpi_processor_cx *old;
-
- if (!pr || !new)
- return;
-
- old = pr->power.state;
-
- if (old)
- old->promotion.count = 0;
- new->demotion.count = 0;
-
- /* Cleanup from old state. */
- if (old) {
- switch (old->type) {
- case ACPI_STATE_C3:
- /* Disable bus master reload */
- if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
- acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
- break;
- }
- }
-
- /* Prepare to use new state. */
- switch (new->type) {
- case ACPI_STATE_C3:
- /* Enable bus master reload */
- if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
- acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
- break;
- }
-
- pr->power.state = new;
-
- return;
-}
-
-static atomic_t c3_cpu_count;
-
-/* Common C-state entry for C2, C3, .. */
-static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
-{
- /* Don't trace irqs off for idle */
- stop_critical_timings();
- if (cstate->entry_method == ACPI_CSTATE_FFH) {
- /* Call into architectural FFH based C-state */
- acpi_processor_ffh_cstate_enter(cstate);
- } else {
- int unused;
- /* IO port based C-state */
- inb(cstate->address);
- /* Dummy wait op - must do something useless after P_LVL2 read
- because chipsets cannot guarantee that STPCLK# signal
- gets asserted in time to freeze execution properly. */
- unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
- }
- start_critical_timings();
-}
-#endif /* !CONFIG_CPU_IDLE */
-
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
 
 /*
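
Deleting acpi_cstate_enter() loses no behaviour: the cpuidle path keeps the same entry sequence in acpi_idle_do_entry(), whose kerneldoc shows up in a later hunk. A sketch of that shared pattern is below; it mirrors the deleted helper line for line and is believed to match the surviving one in this kernel generation, but treat it as an illustration of the sequence (FFH entry where available, otherwise an I/O-port read followed by a dummy PM-timer read so STPCLK# has time to assert), not as the exact remaining code:

static void cstate_io_entry(struct acpi_processor_cx *cx)
{
    /* Don't trace irqs off for idle */
    stop_critical_timings();
    if (cx->entry_method == ACPI_CSTATE_FFH) {
        /* Call into architectural FFH based C-state (e.g. MWAIT) */
        acpi_processor_ffh_cstate_enter(cx);
    } else {
        int unused;
        /* I/O port based C-state: reading P_LVL2/P_LVL3 requests the state */
        inb(cx->address);
        /*
         * Dummy wait op - chipsets cannot guarantee that STPCLK# is
         * asserted in time after the P_LVL read, so issue a harmless
         * PM timer read before letting execution continue.
         */
        unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
    }
    start_critical_timings();
}
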
@@ -390,421 +299,6 @@ static int tsc_halts_in_c(int state)
 }
 #endif
 
-#ifndef CONFIG_CPU_IDLE
-static void acpi_processor_idle(void)
-{
- struct acpi_processor *pr = NULL;
- struct acpi_processor_cx *cx = NULL;
- struct acpi_processor_cx *next_state = NULL;
- int sleep_ticks = 0;
- u32 t1, t2 = 0;
-
- /*
- * Interrupts must be disabled during bus mastering calculations and
- * for C2/C3 transitions.
- */
- local_irq_disable();
-
- pr = __get_cpu_var(processors);
- if (!pr) {
- local_irq_enable();
- return;
- }
-
- /*
- * Check whether we truly need to go idle, or should
- * reschedule:
- */
- if (unlikely(need_resched())) {
- local_irq_enable();
- return;
- }
-
- cx = pr->power.state;
- if (!cx || acpi_idle_suspend) {
- if (pm_idle_save) {
- pm_idle_save(); /* enables IRQs */
- } else {
- acpi_safe_halt();
- local_irq_enable();
- }
-
- return;
- }
-
- /*
- * Check BM Activity
- * -----------------
- * Check for bus mastering activity (if required), record, and check
- * for demotion.
- */
- if (pr->flags.bm_check) {
- u32 bm_status = 0;
- unsigned long diff = jiffies - pr->power.bm_check_timestamp;
-
- if (diff > 31)
- diff = 31;
-
- pr->power.bm_activity <<= diff;
-
- acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
- if (bm_status) {
- pr->power.bm_activity |= 0x1;
- acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
- }
- /*
- * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
- * the true state of bus mastering activity; forcing us to
- * manually check the BMIDEA bit of each IDE channel.
- */
- else if (errata.piix4.bmisx) {
- if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
- || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
- pr->power.bm_activity |= 0x1;
- }
-
- pr->power.bm_check_timestamp = jiffies;
-
- /*
- * If bus mastering is or was active this jiffy, demote
- * to avoid a faulty transition. Note that the processor
- * won't enter a low-power state during this call (to this
- * function) but should upon the next.
- *
- * TBD: A better policy might be to fallback to the demotion
- * state (use it for this quantum only) istead of
- * demoting -- and rely on duration as our sole demotion
- * qualification. This may, however, introduce DMA
- * issues (e.g. floppy DMA transfer overrun/underrun).
- */
- if ((pr->power.bm_activity & 0x1) &&
- cx->demotion.threshold.bm) {
- local_irq_enable();
- next_state = cx->demotion.state;
- goto end;
- }
- }
-
-#ifdef CONFIG_HOTPLUG_CPU
- /*
- * Check for P_LVL2_UP flag before entering C2 and above on
- * an SMP system. We do it here instead of doing it at _CST/P_LVL
- * detection phase, to work cleanly with logical CPU hotplug.
- */
- if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
- cx = &pr->power.states[ACPI_STATE_C1];
-#endif
-
- /*
- * Sleep:
- * ------
- * Invoke the current Cx state to put the processor to sleep.
- */
- if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
- if (need_resched()) {
- current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
- return;
- }
- }
-
- switch (cx->type) {
-
- case ACPI_STATE_C1:
- /*
- * Invoke C1.
- * Use the appropriate idle routine, the one that would
- * be used without acpi C-states.
- */
- if (pm_idle_save) {
- pm_idle_save(); /* enables IRQs */
- } else {
- acpi_safe_halt();
- local_irq_enable();
- }
-
- /*
- * TBD: Can't get time duration while in C1, as resumes
- * go to an ISR rather than here. Need to instrument
- * base interrupt handler.
- *
- * Note: the TSC better not stop in C1, sched_clock() will
- * skew otherwise.
- */
- sleep_ticks = 0xFFFFFFFF;
-
- break;
-
- case ACPI_STATE_C2:
- /* Get start time (ticks) */
- t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- /* Tell the scheduler that we are going deep-idle: */
- sched_clock_idle_sleep_event();
- /* Invoke C2 */
- acpi_state_timer_broadcast(pr, cx, 1);
- acpi_cstate_enter(cx);
- /* Get end time (ticks) */
- t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
- /* TSC halts in C2, so notify users */
- if (tsc_halts_in_c(ACPI_STATE_C2))
- mark_tsc_unstable("possible TSC halt in C2");
-#endif
- /* Compute time (ticks) that we were actually asleep */
- sleep_ticks = ticks_elapsed(t1, t2);
-
- /* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
-
- /* Re-enable interrupts */
- local_irq_enable();
- /* Do not account our idle-switching overhead: */
- sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
-
- current_thread_info()->status |= TS_POLLING;
- acpi_state_timer_broadcast(pr, cx, 0);
- break;
-
- case ACPI_STATE_C3:
- acpi_unlazy_tlb(smp_processor_id());
- /*
- * Must be done before busmaster disable as we might
- * need to access HPET !
- */
- acpi_state_timer_broadcast(pr, cx, 1);
- /*
- * disable bus master
- * bm_check implies we need ARB_DIS
- * !bm_check implies we need cache flush
- * bm_control implies whether we can do ARB_DIS
- *
- * That leaves a case where bm_check is set and bm_control is
- * not set. In that case we cannot do much, we enter C3
- * without doing anything.
- */
- if (pr->flags.bm_check && pr->flags.bm_control) {
- if (atomic_inc_return(&c3_cpu_count) ==
- num_online_cpus()) {
- /*
- * All CPUs are trying to go to C3
- * Disable bus master arbitration
- */
- acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
- }
- } else if (!pr->flags.bm_check) {
- /* SMP with no shared cache... Invalidate cache */
- ACPI_FLUSH_CPU_CACHE();
- }
-
- /* Get start time (ticks) */
- t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- /* Invoke C3 */
- /* Tell the scheduler that we are going deep-idle: */
- sched_clock_idle_sleep_event();
- acpi_cstate_enter(cx);
- /* Get end time (ticks) */
- t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- if (pr->flags.bm_check && pr->flags.bm_control) {
- /* Enable bus master arbitration */
- atomic_dec(&c3_cpu_count);
- acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
- }
-
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
- /* TSC halts in C3, so notify users */
- if (tsc_halts_in_c(ACPI_STATE_C3))
- mark_tsc_unstable("TSC halts in C3");
-#endif
- /* Compute time (ticks) that we were actually asleep */
- sleep_ticks = ticks_elapsed(t1, t2);
- /* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
-
- /* Re-enable interrupts */
- local_irq_enable();
- /* Do not account our idle-switching overhead: */
- sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
-
- current_thread_info()->status |= TS_POLLING;
- acpi_state_timer_broadcast(pr, cx, 0);
- break;
-
- default:
- local_irq_enable();
- return;
- }
- cx->usage++;
- if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
- cx->time += sleep_ticks;
-
- next_state = pr->power.state;
-
-#ifdef CONFIG_HOTPLUG_CPU
- /* Don't do promotion/demotion */
- if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
- next_state = cx;
- goto end;
- }
-#endif
-
- /*
- * Promotion?
- * ----------
- * Track the number of longs (time asleep is greater than threshold)
- * and promote when the count threshold is reached. Note that bus
- * mastering activity may prevent promotions.
- * Do not promote above max_cstate.
- */
- if (cx->promotion.state &&
- ((cx->promotion.state - pr->power.states) <= max_cstate)) {
- if (sleep_ticks > cx->promotion.threshold.ticks &&
- cx->promotion.state->latency <=
- pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
- cx->promotion.count++;
- cx->demotion.count = 0;
- if (cx->promotion.count >=
- cx->promotion.threshold.count) {
- if (pr->flags.bm_check) {
- if (!
- (pr->power.bm_activity & cx->
- promotion.threshold.bm)) {
- next_state =
- cx->promotion.state;
- goto end;
- }
- } else {
- next_state = cx->promotion.state;
- goto end;
- }
- }
- }
- }
-
- /*
- * Demotion?
- * ---------
- * Track the number of shorts (time asleep is less than time threshold)
- * and demote when the usage threshold is reached.
- */
- if (cx->demotion.state) {
- if (sleep_ticks < cx->demotion.threshold.ticks) {
- cx->demotion.count++;
- cx->promotion.count = 0;
- if (cx->demotion.count >= cx->demotion.threshold.count) {
- next_state = cx->demotion.state;
- goto end;
- }
- }
- }
-
- end:
- /*
- * Demote if current state exceeds max_cstate
- * or if the latency of the current state is unacceptable
- */
- if ((pr->power.state - pr->power.states) > max_cstate ||
- pr->power.state->latency >
- pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
- if (cx->demotion.state)
- next_state = cx->demotion.state;
- }
-
- /*
- * New Cx State?
- * -------------
- * If we're going to start using a new Cx state we must clean up
- * from the previous and prepare to use the new.
- */
- if (next_state != pr->power.state)
- acpi_processor_power_activate(pr, next_state);
-}
-
-static int acpi_processor_set_power_policy(struct acpi_processor *pr)
-{
- unsigned int i;
- unsigned int state_is_set = 0;
- struct acpi_processor_cx *lower = NULL;
- struct acpi_processor_cx *higher = NULL;
- struct acpi_processor_cx *cx;
-
-
- if (!pr)
- return -EINVAL;
-
- /*
- * This function sets the default Cx state policy (OS idle handler).
- * Our scheme is to promote quickly to C2 but more conservatively
- * to C3. We're favoring C2 for its characteristics of low latency
- * (quick response), good power savings, and ability to allow bus
- * mastering activity. Note that the Cx state policy is completely
- * customizable and can be altered dynamically.
- */
-
- /* startup state */
- for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
- cx = &pr->power.states[i];
- if (!cx->valid)
- continue;
-
- if (!state_is_set)
- pr->power.state = cx;
- state_is_set++;
- break;
- }
-
- if (!state_is_set)
- return -ENODEV;
-
- /* demotion */
- for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
- cx = &pr->power.states[i];
- if (!cx->valid)
- continue;
-
- if (lower) {
- cx->demotion.state = lower;
- cx->demotion.threshold.ticks = cx->latency_ticks;
- cx->demotion.threshold.count = 1;
- if (cx->type == ACPI_STATE_C3)
- cx->demotion.threshold.bm = bm_history;
- }
-
- lower = cx;
- }
-
- /* promotion */
- for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
- cx = &pr->power.states[i];
- if (!cx->valid)
- continue;
-
- if (higher) {
- cx->promotion.state = higher;
- cx->promotion.threshold.ticks = cx->latency_ticks;
- if (cx->type >= ACPI_STATE_C2)
- cx->promotion.threshold.count = 4;
- else
- cx->promotion.threshold.count = 10;
- if (higher->type == ACPI_STATE_C3)
- cx->promotion.threshold.bm = bm_history;
- }
-
- higher = cx;
- }
-
- return 0;
-}
-#endif /* !CONFIG_CPU_IDLE */
-
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
 
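
What disappears in this hunk is, in effect, a small governor: acpi_processor_idle() promoted to a deeper C-state after enough consecutive long sleeps and demoted after short sleeps or bus-master activity, with acpi_processor_set_power_policy() wiring up the thresholds; the cpuidle governors now make that decision. A self-contained toy model of the removed promote/demote rule follows. It is deliberately simplified (the real code also gated promotion on PM QoS latency and bus-master history), the struct and names are invented for the model, and the 100-tick threshold is arbitrary; only the "4 long sleeps to promote, 1 short sleep to demote" defaults are taken from the deleted acpi_processor_set_power_policy():

#include <stdio.h>

struct cstate_policy {
    int promote_threshold_ticks; /* sleep longer than this ... */
    int promote_count_needed;    /* ... this many times in a row -> go deeper */
    int demote_threshold_ticks;  /* sleep shorter than this ... */
    int demote_count_needed;     /* ... this many times in a row -> back off */
};

/* Returns +1 to promote, -1 to demote, 0 to stay, mirroring the removed logic. */
static int pick_next_state(const struct cstate_policy *p, int sleep_ticks,
                           int *promote_count, int *demote_count)
{
    if (sleep_ticks > p->promote_threshold_ticks) {
        (*promote_count)++;
        *demote_count = 0;
        if (*promote_count >= p->promote_count_needed)
            return +1;
    } else if (sleep_ticks < p->demote_threshold_ticks) {
        (*demote_count)++;
        *promote_count = 0;
        if (*demote_count >= p->demote_count_needed)
            return -1;
    }
    return 0;
}

int main(void)
{
    /* Old defaults for C2 and deeper: promote after 4 long sleeps, demote after 1 short one. */
    struct cstate_policy c2 = { 100, 4, 100, 1 };
    int promote = 0, demote = 0, i;
    const int samples[] = { 150, 150, 150, 150, 20 };

    for (i = 0; i < 5; i++)
        printf("sleep=%d ticks -> decision %+d\n", samples[i],
               pick_next_state(&c2, samples[i], &promote, &demote));
    return 0;
}
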
@@ -1047,11 +541,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 */
 cx->valid = 1;
 
-#ifndef CONFIG_CPU_IDLE
- cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
-#else
 cx->latency_ticks = cx->latency;
-#endif
 
 return;
 }
@@ -1121,7 +611,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
1121 " for C3 to be enabled on SMP systems\n")); 611 " for C3 to be enabled on SMP systems\n"));
1122 return; 612 return;
1123 } 613 }
1124 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
1125 } 614 }
1126 615
1127 /* 616 /*
@@ -1132,11 +621,16 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 */
 cx->valid = 1;
 
-#ifndef CONFIG_CPU_IDLE
- cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
-#else
 cx->latency_ticks = cx->latency;
-#endif
+ /*
+ * On older chipsets, BM_RLD needs to be set
+ * in order for Bus Master activity to wake the
+ * system from C3. Newer chipsets handle DMA
+ * during C3 automatically and BM_RLD is a NOP.
+ * In either case, the proper way to
+ * handle BM_RLD is to set it and leave it set.
+ */
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 
 return;
 }
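
The block added above replaces a per-entry dance: a later hunk deletes acpi_idle_update_bm_rld(), which toggled BM_RLD on every transition into and out of C3, in favour of a single write at C-state validation time. The saving is easy to see with a toy count of register writes; the snippet below is only a model of the two policies (the helper, counter, and the alternating C3/non-C3 pattern are invented for illustration), not a measurement of real PM1 control traffic:

#include <stdio.h>

static int register_writes;

/* acpi_idle_update_bm_rld() style: write only when the cached value changes. */
static void set_bm_rld(int *cached, int value)
{
    if (*cached != value) {
        *cached = value;
        register_writes++;
    }
}

int main(void)
{
    int bm_rld = 0, i;

    /* Old policy: C3 entries interleaved with C1/C2 entries keep toggling the bit. */
    for (i = 0; i < 1000; i++)
        set_bm_rld(&bm_rld, i & 1); /* alternate C3 / non-C3 */
    printf("toggle per entry: %d register writes\n", register_writes);

    /* New policy: set BM_RLD once while validating C3, then leave it alone. */
    register_writes = 0;
    bm_rld = 0;
    set_bm_rld(&bm_rld, 1);
    printf("set and forget:   %d register write(s)\n", register_writes);
    return 0;
}
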
@@ -1201,20 +695,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 
 pr->power.count = acpi_processor_power_verify(pr);
 
-#ifndef CONFIG_CPU_IDLE
- /*
- * Set Default Policy
- * ------------------
- * Now that we know which states are supported, set the default
- * policy. Note that this policy can be changed dynamically
- * (e.g. encourage deeper sleeps to conserve battery life when
- * not on AC).
- */
- result = acpi_processor_set_power_policy(pr);
- if (result)
- return result;
-#endif
-
 /*
 * if one state of type C2 or C3 is available, mark this
 * CPU as being "idle manageable"
@@ -1312,69 +792,6 @@ static const struct file_operations acpi_processor_power_fops = {
 .release = single_release,
 };
 
-#ifndef CONFIG_CPU_IDLE
-
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
-{
- int result = 0;
-
- if (boot_option_idle_override)
- return 0;
-
- if (!pr)
- return -EINVAL;
-
- if (nocst) {
- return -ENODEV;
- }
-
- if (!pr->flags.power_setup_done)
- return -ENODEV;
-
- /*
- * Fall back to the default idle loop, when pm_idle_save had
- * been initialized.
- */
- if (pm_idle_save) {
- pm_idle = pm_idle_save;
- /* Relies on interrupts forcing exit from idle. */
- synchronize_sched();
- }
-
- pr->flags.power = 0;
- result = acpi_processor_get_power_info(pr);
- if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
- pm_idle = acpi_processor_idle;
-
- return result;
-}
-
-#ifdef CONFIG_SMP
-static void smp_callback(void *v)
-{
- /* we already woke the CPU up, nothing more to do */
-}
-
-/*
- * This function gets called when a part of the kernel has a new latency
- * requirement. This means we need to get all processors out of their C-state,
- * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
- * wakes them all right up.
- */
-static int acpi_processor_latency_notify(struct notifier_block *b,
- unsigned long l, void *v)
-{
- smp_call_function(smp_callback, NULL, 1);
- return NOTIFY_OK;
-}
-
-static struct notifier_block acpi_processor_latency_notifier = {
- .notifier_call = acpi_processor_latency_notify,
-};
-
-#endif
-
-#else /* CONFIG_CPU_IDLE */
 
 /**
 * acpi_idle_bm_check - checks if bus master activity was detected
@@ -1383,7 +800,7 @@ static int acpi_idle_bm_check(void)
 {
 u32 bm_status = 0;
 
- acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+ acpi_get_register_unlocked(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
 if (bm_status)
 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
 /*
@@ -1400,25 +817,6 @@ static int acpi_idle_bm_check(void)
 }
 
 /**
- * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
- * @pr: the processor
- * @target: the new target state
- */
-static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
- struct acpi_processor_cx *target)
-{
- if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
- acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
- pr->flags.bm_rld_set = 0;
- }
-
- if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
- acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
- pr->flags.bm_rld_set = 1;
- }
-}
-
-/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
@@ -1473,9 +871,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 return 0;
 }
 
- if (pr->flags.bm_check)
- acpi_idle_update_bm_rld(pr, cx);
-
 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 acpi_idle_do_entry(cx);
 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -1527,9 +922,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 */
 acpi_state_timer_broadcast(pr, cx, 1);
 
- if (pr->flags.bm_check)
- acpi_idle_update_bm_rld(pr, cx);
-
 if (cx->type == ACPI_STATE_C3)
 ACPI_FLUSH_CPU_CACHE();
 
@@ -1621,8 +1013,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 */
 acpi_state_timer_broadcast(pr, cx, 1);
 
- acpi_idle_update_bm_rld(pr, cx);
-
 /*
 * disable bus master
 * bm_check implies we need ARB_DIS
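
The comment above (it continues in the unchanged code after this hunk) describes the same ARB_DIS protocol the deleted acpi_processor_idle() used: the last CPU to arrive in C3 disables bus-master arbitration, and each CPU re-enables it on the way out, tracked with an atomic counter (c3_cpu_count in the removed code). A self-contained model of that handshake, using C11 atomics in place of the kernel's atomic_t and a made-up arbitration flag purely to illustrate the counting:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_CPUS 4

static atomic_int c3_cpu_count;
static atomic_int arbitration_disabled;

static void enter_c3(void)
{
    /* Last CPU in: everyone is idle, so disabling arbitration is safe. */
    if (atomic_fetch_add(&c3_cpu_count, 1) + 1 == NUM_CPUS)
        atomic_store(&arbitration_disabled, 1);

    /* ... the processor would sleep here ... */
}

static void exit_c3(void)
{
    /* Any CPU leaving C3 re-enables arbitration before DMA resumes. */
    atomic_fetch_sub(&c3_cpu_count, 1);
    atomic_store(&arbitration_disabled, 0);
}

int main(void)
{
    int cpu;

    for (cpu = 0; cpu < NUM_CPUS; cpu++)
        enter_c3();
    printf("arbitration disabled: %d\n", atomic_load(&arbitration_disabled));

    exit_c3();
    printf("arbitration disabled: %d\n", atomic_load(&arbitration_disabled));
    return 0;
}
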
@@ -1795,8 +1185,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 return ret;
 }
 
-#endif /* CONFIG_CPU_IDLE */
-
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 struct acpi_device *device)
 {
@@ -1825,10 +1213,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1825 "ACPI: processor limited to max C-state %d\n", 1213 "ACPI: processor limited to max C-state %d\n",
1826 max_cstate); 1214 max_cstate);
1827 first_run++; 1215 first_run++;
1828#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
1829 pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
1830 &acpi_processor_latency_notifier);
1831#endif
1832 } 1216 }
1833 1217
1834 if (!pr) 1218 if (!pr)
@@ -1852,11 +1236,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 * platforms that only support C1.
 */
 if (pr->flags.power) {
-#ifdef CONFIG_CPU_IDLE
 acpi_processor_setup_cpuidle(pr);
 if (cpuidle_register_device(&pr->power.dev))
 return -EIO;
-#endif
 
 printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
 for (i = 1; i <= pr->power.count; i++)
@@ -1864,13 +1246,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 printk(" C%d[C%d]", i,
 pr->power.states[i].type);
 printk(")\n");
-
-#ifndef CONFIG_CPU_IDLE
- if (pr->id == 0) {
- pm_idle_save = pm_idle;
- pm_idle = acpi_processor_idle;
- }
-#endif
 }
 
 /* 'power' [R] */
@@ -1889,34 +1264,12 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 if (boot_option_idle_override)
 return 0;
 
-#ifdef CONFIG_CPU_IDLE
 cpuidle_unregister_device(&pr->power.dev);
-#endif
 pr->flags.power_setup_done = 0;
 
 if (acpi_device_dir(device))
 remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
 acpi_device_dir(device));
 
-#ifndef CONFIG_CPU_IDLE
-
- /* Unregister the idle handler when processor #0 is removed. */
- if (pr->id == 0) {
- if (pm_idle_save)
- pm_idle = pm_idle_save;
-
- /*
- * We are about to unload the current idle thread pm callback
- * (pm_idle), Wait for all processors to update cached/local
- * copies of pm_idle before proceeding.
- */
- cpu_idle_wait();
-#ifdef CONFIG_SMP
- pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
- &acpi_processor_latency_notifier);
-#endif
- }
-#endif
-
 return 0;
 }