| author | Len Brown <len.brown@intel.com> | 2009-02-06 12:24:17 -0500 |
|---|---|---|
| committer | Len Brown <len.brown@intel.com> | 2009-02-06 12:34:39 -0500 |
| commit | 9fdd54f206722ecee7fd7ba9dba26140450e7c32 | |
| tree | 83f6b6106d5ade8301327488fe6f45a6eaaa3476 | |
| parent | 31878dd86b7df9a147f5e6cc6e07092b4308782b | |
ACPI: delete CPU_IDLE=n code
CPU_IDLE=y has been default for ACPI=y since Nov-2007,
and has shipped in many distributions since then.
Here we delete the CPU_IDLE=n ACPI idle code, since
nobody should be using it, and we don't want to
maintain two versions.
Signed-off-by: Len Brown <len.brown@intel.com>
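
For context, the path that survives this cleanup is the CPU_IDLE=y one: instead of installing a private pm_idle handler with its own promotion/demotion policy, the driver hands each processor's C-state table to the generic cpuidle framework and lets its governor pick states. Below is a minimal sketch of that registration step, condensed from the retained lines of this diff; the wrapper function name is illustrative only, and in the real file this logic sits inline in acpi_processor_power_init() alongside error handling and /proc setup that are omitted here.

```c
/*
 * Sketch only: condensed from the CPU_IDLE=y path kept by this patch.
 * The wrapper name is illustrative, not part of the kernel source.
 */
static int acpi_cpuidle_register_sketch(struct acpi_processor *pr)
{
	/* pr->flags.power is only set when usable C-states were found */
	if (!pr->flags.power)
		return 0;

	/* Translate the ACPI C-state table into the per-CPU cpuidle device */
	acpi_processor_setup_cpuidle(pr);

	/*
	 * Hand the device to the generic cpuidle framework; its governor,
	 * not this driver, now decides which C-state to enter.
	 */
	if (cpuidle_register_device(&pr->power.dev))
		return -EIO;

	return 0;
}
```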
```
-rw-r--r--  drivers/acpi/Kconfig          |   1
-rw-r--r--  drivers/acpi/processor_idle.c | 608
2 files changed, 1 insertion(+), 608 deletions(-)
```
```diff
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d7f9839ba264..c5fc6efdc853 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -9,6 +9,7 @@ menuconfig ACPI
 	depends on PCI
 	depends on PM
 	select PNP
+	select CPU_IDLE
 	default y
 	---help---
 	  Advanced Configuration and Power Interface (ACPI) support for
```
```diff
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7eab733ae96e..7bc22a471fe3 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -66,43 +66,17 @@ ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER	"power"
 #define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
-#ifndef CONFIG_CPU_IDLE
-#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
-#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
-static void (*pm_idle_save) (void) __read_mostly;
-#else
 #define C2_OVERHEAD			1	/* 1us */
 #define C3_OVERHEAD			1	/* 1us */
-#endif
 #define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
 
 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
-#ifdef CONFIG_CPU_IDLE
 module_param(max_cstate, uint, 0000);
-#else
-module_param(max_cstate, uint, 0644);
-#endif
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
 
-#ifndef CONFIG_CPU_IDLE
-/*
- * bm_history -- bit-mask with a bit per jiffy of bus-master activity
- * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
- * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
- * 100 HZ: 0x0000000F: 4 jiffies = 40ms
- * reduce history for more aggressive entry into C3
- */
-static unsigned int bm_history __read_mostly =
-    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
-module_param(bm_history, uint, 0644);
-
-static int acpi_processor_set_power_policy(struct acpi_processor *pr);
-
-#else	/* CONFIG_CPU_IDLE */
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
-#endif
 
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
@@ -224,51 +198,6 @@ static void acpi_safe_halt(void)
 	current_thread_info()->status |= TS_POLLING;
 }
 
-#ifndef CONFIG_CPU_IDLE
-
-static void
-acpi_processor_power_activate(struct acpi_processor *pr,
-			      struct acpi_processor_cx *new)
-{
-	struct acpi_processor_cx *old;
-
-	if (!pr || !new)
-		return;
-
-	old = pr->power.state;
-
-	if (old)
-		old->promotion.count = 0;
-	new->demotion.count = 0;
-
-	pr->power.state = new;
-
-	return;
-}
-
-static atomic_t c3_cpu_count;
-
-/* Common C-state entry for C2, C3, .. */
-static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
-{
-	/* Don't trace irqs off for idle */
-	stop_critical_timings();
-	if (cstate->entry_method == ACPI_CSTATE_FFH) {
-		/* Call into architectural FFH based C-state */
-		acpi_processor_ffh_cstate_enter(cstate);
-	} else {
-		int unused;
-		/* IO port based C-state */
-		inb(cstate->address);
-		/* Dummy wait op - must do something useless after P_LVL2 read
-		   because chipsets cannot guarantee that STPCLK# signal
-		   gets asserted in time to freeze execution properly. */
-		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
-	}
-	start_critical_timings();
-}
-#endif /* !CONFIG_CPU_IDLE */
-
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
 
 /*
@@ -370,421 +299,6 @@ static int tsc_halts_in_c(int state)
 }
 #endif
 
-#ifndef CONFIG_CPU_IDLE
-static void acpi_processor_idle(void)
-{
-	struct acpi_processor *pr = NULL;
-	struct acpi_processor_cx *cx = NULL;
-	struct acpi_processor_cx *next_state = NULL;
-	int sleep_ticks = 0;
-	u32 t1, t2 = 0;
-
-	/*
-	 * Interrupts must be disabled during bus mastering calculations and
-	 * for C2/C3 transitions.
-	 */
-	local_irq_disable();
-
-	pr = __get_cpu_var(processors);
-	if (!pr) {
-		local_irq_enable();
-		return;
-	}
-
-	/*
-	 * Check whether we truly need to go idle, or should
-	 * reschedule:
-	 */
-	if (unlikely(need_resched())) {
-		local_irq_enable();
-		return;
-	}
-
-	cx = pr->power.state;
-	if (!cx || acpi_idle_suspend) {
-		if (pm_idle_save) {
-			pm_idle_save(); /* enables IRQs */
-		} else {
-			acpi_safe_halt();
-			local_irq_enable();
-		}
-
-		return;
-	}
-
-	/*
-	 * Check BM Activity
-	 * -----------------
-	 * Check for bus mastering activity (if required), record, and check
-	 * for demotion.
-	 */
-	if (pr->flags.bm_check) {
-		u32 bm_status = 0;
-		unsigned long diff = jiffies - pr->power.bm_check_timestamp;
-
-		if (diff > 31)
-			diff = 31;
-
-		pr->power.bm_activity <<= diff;
-
-		acpi_get_register_unlocked(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
-		if (bm_status) {
-			pr->power.bm_activity |= 0x1;
-			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
-		}
-		/*
-		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
-		 * the true state of bus mastering activity; forcing us to
-		 * manually check the BMIDEA bit of each IDE channel.
-		 */
-		else if (errata.piix4.bmisx) {
-			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
-			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
-				pr->power.bm_activity |= 0x1;
-		}
-
-		pr->power.bm_check_timestamp = jiffies;
-
-		/*
-		 * If bus mastering is or was active this jiffy, demote
-		 * to avoid a faulty transition.  Note that the processor
-		 * won't enter a low-power state during this call (to this
-		 * function) but should upon the next.
-		 *
-		 * TBD: A better policy might be to fallback to the demotion
-		 *      state (use it for this quantum only) istead of
-		 *      demoting -- and rely on duration as our sole demotion
-		 *      qualification.  This may, however, introduce DMA
-		 *      issues (e.g. floppy DMA transfer overrun/underrun).
-		 */
-		if ((pr->power.bm_activity & 0x1) &&
-		    cx->demotion.threshold.bm) {
-			local_irq_enable();
-			next_state = cx->demotion.state;
-			goto end;
-		}
-	}
-
-#ifdef CONFIG_HOTPLUG_CPU
-	/*
-	 * Check for P_LVL2_UP flag before entering C2 and above on
-	 * an SMP system.  We do it here instead of doing it at _CST/P_LVL
-	 * detection phase, to work cleanly with logical CPU hotplug.
-	 */
-	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
-		cx = &pr->power.states[ACPI_STATE_C1];
-#endif
-
-	/*
-	 * Sleep:
-	 * ------
-	 * Invoke the current Cx state to put the processor to sleep.
-	 */
-	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
-		current_thread_info()->status &= ~TS_POLLING;
-		/*
-		 * TS_POLLING-cleared state must be visible before we
-		 * test NEED_RESCHED:
-		 */
-		smp_mb();
-		if (need_resched()) {
-			current_thread_info()->status |= TS_POLLING;
-			local_irq_enable();
-			return;
-		}
-	}
-
-	switch (cx->type) {
-
-	case ACPI_STATE_C1:
-		/*
-		 * Invoke C1.
-		 * Use the appropriate idle routine, the one that would
-		 * be used without acpi C-states.
-		 */
-		if (pm_idle_save) {
-			pm_idle_save(); /* enables IRQs */
-		} else {
-			acpi_safe_halt();
-			local_irq_enable();
-		}
-
-		/*
-		 * TBD: Can't get time duration while in C1, as resumes
-		 *      go to an ISR rather than here.  Need to instrument
-		 *      base interrupt handler.
-		 *
-		 * Note: the TSC better not stop in C1, sched_clock() will
-		 *       skew otherwise.
-		 */
-		sleep_ticks = 0xFFFFFFFF;
-
-		break;
-
-	case ACPI_STATE_C2:
-		/* Get start time (ticks) */
-		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-		/* Tell the scheduler that we are going deep-idle: */
-		sched_clock_idle_sleep_event();
-		/* Invoke C2 */
-		acpi_state_timer_broadcast(pr, cx, 1);
-		acpi_cstate_enter(cx);
-		/* Get end time (ticks) */
-		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
-		/* TSC halts in C2, so notify users */
-		if (tsc_halts_in_c(ACPI_STATE_C2))
-			mark_tsc_unstable("possible TSC halt in C2");
-#endif
-		/* Compute time (ticks) that we were actually asleep */
-		sleep_ticks = ticks_elapsed(t1, t2);
-
-		/* Tell the scheduler how much we idled: */
-		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
-
-		/* Re-enable interrupts */
-		local_irq_enable();
-		/* Do not account our idle-switching overhead: */
-		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
-
-		current_thread_info()->status |= TS_POLLING;
-		acpi_state_timer_broadcast(pr, cx, 0);
-		break;
-
-	case ACPI_STATE_C3:
-		acpi_unlazy_tlb(smp_processor_id());
-		/*
-		 * Must be done before busmaster disable as we might
-		 * need to access HPET !
-		 */
-		acpi_state_timer_broadcast(pr, cx, 1);
-		/*
-		 * disable bus master
-		 * bm_check implies we need ARB_DIS
-		 * !bm_check implies we need cache flush
-		 * bm_control implies whether we can do ARB_DIS
-		 *
-		 * That leaves a case where bm_check is set and bm_control is
-		 * not set. In that case we cannot do much, we enter C3
-		 * without doing anything.
-		 */
-		if (pr->flags.bm_check && pr->flags.bm_control) {
-			if (atomic_inc_return(&c3_cpu_count) ==
-			    num_online_cpus()) {
-				/*
-				 * All CPUs are trying to go to C3
-				 * Disable bus master arbitration
-				 */
-				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
-			}
-		} else if (!pr->flags.bm_check) {
-			/* SMP with no shared cache... Invalidate cache */
-			ACPI_FLUSH_CPU_CACHE();
-		}
-
-		/* Get start time (ticks) */
-		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-		/* Invoke C3 */
-		/* Tell the scheduler that we are going deep-idle: */
-		sched_clock_idle_sleep_event();
-		acpi_cstate_enter(cx);
-		/* Get end time (ticks) */
-		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-		if (pr->flags.bm_check && pr->flags.bm_control) {
-			/* Enable bus master arbitration */
-			atomic_dec(&c3_cpu_count);
-			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
-		}
-
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
-		/* TSC halts in C3, so notify users */
-		if (tsc_halts_in_c(ACPI_STATE_C3))
-			mark_tsc_unstable("TSC halts in C3");
-#endif
-		/* Compute time (ticks) that we were actually asleep */
-		sleep_ticks = ticks_elapsed(t1, t2);
-		/* Tell the scheduler how much we idled: */
-		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
-
-		/* Re-enable interrupts */
-		local_irq_enable();
-		/* Do not account our idle-switching overhead: */
-		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
-
-		current_thread_info()->status |= TS_POLLING;
-		acpi_state_timer_broadcast(pr, cx, 0);
-		break;
-
-	default:
-		local_irq_enable();
-		return;
-	}
-	cx->usage++;
-	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
-		cx->time += sleep_ticks;
-
-	next_state = pr->power.state;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	/* Don't do promotion/demotion */
-	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
-		next_state = cx;
-		goto end;
-	}
-#endif
-
-	/*
-	 * Promotion?
-	 * ----------
-	 * Track the number of longs (time asleep is greater than threshold)
-	 * and promote when the count threshold is reached.  Note that bus
-	 * mastering activity may prevent promotions.
-	 * Do not promote above max_cstate.
-	 */
-	if (cx->promotion.state &&
-	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
-		if (sleep_ticks > cx->promotion.threshold.ticks &&
-		    cx->promotion.state->latency <=
-				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
-			cx->promotion.count++;
-			cx->demotion.count = 0;
-			if (cx->promotion.count >=
-			    cx->promotion.threshold.count) {
-				if (pr->flags.bm_check) {
-					if (!
-					    (pr->power.bm_activity & cx->
-					     promotion.threshold.bm)) {
-						next_state =
-						    cx->promotion.state;
-						goto end;
-					}
-				} else {
-					next_state = cx->promotion.state;
-					goto end;
-				}
-			}
-		}
-	}
-
-	/*
-	 * Demotion?
-	 * ---------
-	 * Track the number of shorts (time asleep is less than time threshold)
-	 * and demote when the usage threshold is reached.
-	 */
-	if (cx->demotion.state) {
-		if (sleep_ticks < cx->demotion.threshold.ticks) {
-			cx->demotion.count++;
-			cx->promotion.count = 0;
-			if (cx->demotion.count >= cx->demotion.threshold.count) {
-				next_state = cx->demotion.state;
-				goto end;
-			}
-		}
-	}
-
-      end:
-	/*
-	 * Demote if current state exceeds max_cstate
-	 * or if the latency of the current state is unacceptable
-	 */
-	if ((pr->power.state - pr->power.states) > max_cstate ||
-	    pr->power.state->latency >
-				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
-		if (cx->demotion.state)
-			next_state = cx->demotion.state;
-	}
-
-	/*
-	 * New Cx State?
-	 * -------------
-	 * If we're going to start using a new Cx state we must clean up
-	 * from the previous and prepare to use the new.
-	 */
-	if (next_state != pr->power.state)
-		acpi_processor_power_activate(pr, next_state);
-}
-
-static int acpi_processor_set_power_policy(struct acpi_processor *pr)
-{
-	unsigned int i;
-	unsigned int state_is_set = 0;
-	struct acpi_processor_cx *lower = NULL;
-	struct acpi_processor_cx *higher = NULL;
-	struct acpi_processor_cx *cx;
-
-
-	if (!pr)
-		return -EINVAL;
-
-	/*
-	 * This function sets the default Cx state policy (OS idle handler).
-	 * Our scheme is to promote quickly to C2 but more conservatively
-	 * to C3.  We're favoring C2 for its characteristics of low latency
-	 * (quick response), good power savings, and ability to allow bus
-	 * mastering activity.  Note that the Cx state policy is completely
-	 * customizable and can be altered dynamically.
-	 */
-
-	/* startup state */
-	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
-		cx = &pr->power.states[i];
-		if (!cx->valid)
-			continue;
-
-		if (!state_is_set)
-			pr->power.state = cx;
-		state_is_set++;
-		break;
-	}
-
-	if (!state_is_set)
-		return -ENODEV;
-
-	/* demotion */
-	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
-		cx = &pr->power.states[i];
-		if (!cx->valid)
-			continue;
-
-		if (lower) {
-			cx->demotion.state = lower;
-			cx->demotion.threshold.ticks = cx->latency_ticks;
-			cx->demotion.threshold.count = 1;
-			if (cx->type == ACPI_STATE_C3)
-				cx->demotion.threshold.bm = bm_history;
-		}
-
-		lower = cx;
-	}
-
-	/* promotion */
-	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
-		cx = &pr->power.states[i];
-		if (!cx->valid)
-			continue;
-
-		if (higher) {
-			cx->promotion.state = higher;
-			cx->promotion.threshold.ticks = cx->latency_ticks;
-			if (cx->type >= ACPI_STATE_C2)
-				cx->promotion.threshold.count = 4;
-			else
-				cx->promotion.threshold.count = 10;
-			if (higher->type == ACPI_STATE_C3)
-				cx->promotion.threshold.bm = bm_history;
-		}
-
-		higher = cx;
-	}
-
-	return 0;
-}
-#endif /* !CONFIG_CPU_IDLE */
-
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
 
@@ -1027,11 +541,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 	 */
 	cx->valid = 1;
 
-#ifndef CONFIG_CPU_IDLE
-	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
-#else
 	cx->latency_ticks = cx->latency;
-#endif
 
 	return;
 }
@@ -1111,11 +621,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	 */
 	cx->valid = 1;
 
-#ifndef CONFIG_CPU_IDLE
-	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
-#else
 	cx->latency_ticks = cx->latency;
-#endif
 	/*
 	 * On older chipsets, BM_RLD needs to be set
 	 * in order for Bus Master activity to wake the
@@ -1189,20 +695,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 
 	pr->power.count = acpi_processor_power_verify(pr);
 
-#ifndef CONFIG_CPU_IDLE
-	/*
-	 * Set Default Policy
-	 * ------------------
-	 * Now that we know which states are supported, set the default
-	 * policy.  Note that this policy can be changed dynamically
-	 * (e.g. encourage deeper sleeps to conserve battery life when
-	 * not on AC).
-	 */
-	result = acpi_processor_set_power_policy(pr);
-	if (result)
-		return result;
-#endif
-
 	/*
 	 * if one state of type C2 or C3 is available, mark this
 	 * CPU as being "idle manageable"
@@ -1300,69 +792,6 @@ static const struct file_operations acpi_processor_power_fops = {
 	.release = single_release,
 };
 
-#ifndef CONFIG_CPU_IDLE
-
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
-{
-	int result = 0;
-
-	if (boot_option_idle_override)
-		return 0;
-
-	if (!pr)
-		return -EINVAL;
-
-	if (nocst) {
-		return -ENODEV;
-	}
-
-	if (!pr->flags.power_setup_done)
-		return -ENODEV;
-
-	/*
-	 * Fall back to the default idle loop, when pm_idle_save had
-	 * been initialized.
-	 */
-	if (pm_idle_save) {
-		pm_idle = pm_idle_save;
-		/* Relies on interrupts forcing exit from idle. */
-		synchronize_sched();
-	}
-
-	pr->flags.power = 0;
-	result = acpi_processor_get_power_info(pr);
-	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
-		pm_idle = acpi_processor_idle;
-
-	return result;
-}
-
-#ifdef CONFIG_SMP
-static void smp_callback(void *v)
-{
-	/* we already woke the CPU up, nothing more to do */
-}
-
-/*
- * This function gets called when a part of the kernel has a new latency
- * requirement.  This means we need to get all processors out of their C-state,
- * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
- * wakes them all right up.
- */
-static int acpi_processor_latency_notify(struct notifier_block *b,
-		unsigned long l, void *v)
-{
-	smp_call_function(smp_callback, NULL, 1);
-	return NOTIFY_OK;
-}
-
-static struct notifier_block acpi_processor_latency_notifier = {
-	.notifier_call = acpi_processor_latency_notify,
-};
-
-#endif
-
-#else /* CONFIG_CPU_IDLE */
 
 /**
  * acpi_idle_bm_check - checks if bus master activity was detected
@@ -1756,8 +1185,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 	return ret;
 }
 
-#endif /* CONFIG_CPU_IDLE */
-
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	struct acpi_device *device)
 {
@@ -1786,10 +1213,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 		       "ACPI: processor limited to max C-state %d\n",
 		       max_cstate);
 		first_run++;
-#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
-		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
-				&acpi_processor_latency_notifier);
-#endif
 	}
 
 	if (!pr)
@@ -1813,11 +1236,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	 * platforms that only support C1.
 	 */
 	if (pr->flags.power) {
-#ifdef CONFIG_CPU_IDLE
 		acpi_processor_setup_cpuidle(pr);
 		if (cpuidle_register_device(&pr->power.dev))
 			return -EIO;
-#endif
 
 		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
 		for (i = 1; i <= pr->power.count; i++)
@@ -1825,13 +1246,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 			printk(" C%d[C%d]", i,
 			       pr->power.states[i].type);
 		printk(")\n");
-
-#ifndef CONFIG_CPU_IDLE
-		if (pr->id == 0) {
-			pm_idle_save = pm_idle;
-			pm_idle = acpi_processor_idle;
-		}
-#endif
 	}
 
 	/* 'power' [R] */
@@ -1850,34 +1264,12 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 	if (boot_option_idle_override)
 		return 0;
 
-#ifdef CONFIG_CPU_IDLE
 	cpuidle_unregister_device(&pr->power.dev);
-#endif
 	pr->flags.power_setup_done = 0;
 
 	if (acpi_device_dir(device))
 		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
 				  acpi_device_dir(device));
 
-#ifndef CONFIG_CPU_IDLE
-
-	/* Unregister the idle handler when processor #0 is removed. */
-	if (pr->id == 0) {
-		if (pm_idle_save)
-			pm_idle = pm_idle_save;
-
-		/*
-		 * We are about to unload the current idle thread pm callback
-		 * (pm_idle), Wait for all processors to update cached/local
-		 * copies of pm_idle before proceeding.
-		 */
-		cpu_idle_wait();
-#ifdef CONFIG_SMP
-		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
-				&acpi_processor_latency_notifier);
-#endif
-	}
-#endif
-
 	return 0;
 }
```
