diff options
| author | Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | 2007-11-19 21:43:22 -0500 |
|---|---|---|
| committer | Len Brown <len.brown@intel.com> | 2007-11-19 21:43:22 -0500 |
| commit | ddc081a19585c8ba5aad437779950c2ef215360a (patch) | |
| tree | bea2be9bb259a336cb3be7f0babfb5bd571517da | |
| parent | 5062911830a66df0c0ad28c387a8c0623cb0d28c (diff) | |
cpuidle: fix HP nx6125 regression
Fix for http://bugzilla.kernel.org/show_bug.cgi?id=9355
cpuidle always used to fall back to C2 if there is some bm activity while
entering C3. But, presence of C2 is not always guaranteed. Change cpuidle
algorithm to detect a safe_state to fall back to in case of bm_activity and
use that state instead of C2.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
| -rw-r--r-- | drivers/acpi/processor_idle.c | 125 | ||||
| -rw-r--r-- | include/acpi/processor.h | 1 | ||||
| -rw-r--r-- | include/linux/cpuidle.h | 1 |
3 files changed, 56 insertions, 71 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 1af0694e8520..8904f5c82a1c 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
| @@ -197,6 +197,19 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2) | |||
| 197 | return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); | 197 | return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | static void acpi_safe_halt(void) | ||
| 201 | { | ||
| 202 | current_thread_info()->status &= ~TS_POLLING; | ||
| 203 | /* | ||
| 204 | * TS_POLLING-cleared state must be visible before we | ||
| 205 | * test NEED_RESCHED: | ||
| 206 | */ | ||
| 207 | smp_mb(); | ||
| 208 | if (!need_resched()) | ||
| 209 | safe_halt(); | ||
| 210 | current_thread_info()->status |= TS_POLLING; | ||
| 211 | } | ||
| 212 | |||
| 200 | #ifndef CONFIG_CPU_IDLE | 213 | #ifndef CONFIG_CPU_IDLE |
| 201 | 214 | ||
| 202 | static void | 215 | static void |
| @@ -239,19 +252,6 @@ acpi_processor_power_activate(struct acpi_processor *pr, | |||
| 239 | return; | 252 | return; |
| 240 | } | 253 | } |
| 241 | 254 | ||
| 242 | static void acpi_safe_halt(void) | ||
| 243 | { | ||
| 244 | current_thread_info()->status &= ~TS_POLLING; | ||
| 245 | /* | ||
| 246 | * TS_POLLING-cleared state must be visible before we | ||
| 247 | * test NEED_RESCHED: | ||
| 248 | */ | ||
| 249 | smp_mb(); | ||
| 250 | if (!need_resched()) | ||
| 251 | safe_halt(); | ||
| 252 | current_thread_info()->status |= TS_POLLING; | ||
| 253 | } | ||
| 254 | |||
| 255 | static atomic_t c3_cpu_count; | 255 | static atomic_t c3_cpu_count; |
| 256 | 256 | ||
| 257 | /* Common C-state entry for C2, C3, .. */ | 257 | /* Common C-state entry for C2, C3, .. */ |
| @@ -1385,15 +1385,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
| 1385 | if (pr->flags.bm_check) | 1385 | if (pr->flags.bm_check) |
| 1386 | acpi_idle_update_bm_rld(pr, cx); | 1386 | acpi_idle_update_bm_rld(pr, cx); |
| 1387 | 1387 | ||
| 1388 | current_thread_info()->status &= ~TS_POLLING; | 1388 | acpi_safe_halt(); |
| 1389 | /* | ||
| 1390 | * TS_POLLING-cleared state must be visible before we test | ||
| 1391 | * NEED_RESCHED: | ||
| 1392 | */ | ||
| 1393 | smp_mb(); | ||
| 1394 | if (!need_resched()) | ||
| 1395 | safe_halt(); | ||
| 1396 | current_thread_info()->status |= TS_POLLING; | ||
| 1397 | 1389 | ||
| 1398 | cx->usage++; | 1390 | cx->usage++; |
| 1399 | 1391 | ||
| @@ -1493,6 +1485,15 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 1493 | if (acpi_idle_suspend) | 1485 | if (acpi_idle_suspend) |
| 1494 | return(acpi_idle_enter_c1(dev, state)); | 1486 | return(acpi_idle_enter_c1(dev, state)); |
| 1495 | 1487 | ||
| 1488 | if (acpi_idle_bm_check()) { | ||
| 1489 | if (dev->safe_state) { | ||
| 1490 | return dev->safe_state->enter(dev, dev->safe_state); | ||
| 1491 | } else { | ||
| 1492 | acpi_safe_halt(); | ||
| 1493 | return 0; | ||
| 1494 | } | ||
| 1495 | } | ||
| 1496 | |||
| 1496 | local_irq_disable(); | 1497 | local_irq_disable(); |
| 1497 | current_thread_info()->status &= ~TS_POLLING; | 1498 | current_thread_info()->status &= ~TS_POLLING; |
| 1498 | /* | 1499 | /* |
| @@ -1515,49 +1516,39 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 1515 | */ | 1516 | */ |
| 1516 | acpi_state_timer_broadcast(pr, cx, 1); | 1517 | acpi_state_timer_broadcast(pr, cx, 1); |
| 1517 | 1518 | ||
| 1518 | if (acpi_idle_bm_check()) { | 1519 | acpi_idle_update_bm_rld(pr, cx); |
| 1519 | cx = pr->power.bm_state; | ||
| 1520 | |||
| 1521 | acpi_idle_update_bm_rld(pr, cx); | ||
| 1522 | |||
| 1523 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); | ||
| 1524 | acpi_idle_do_entry(cx); | ||
| 1525 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); | ||
| 1526 | } else { | ||
| 1527 | acpi_idle_update_bm_rld(pr, cx); | ||
| 1528 | 1520 | ||
| 1529 | /* | 1521 | /* |
| 1530 | * disable bus master | 1522 | * disable bus master |
| 1531 | * bm_check implies we need ARB_DIS | 1523 | * bm_check implies we need ARB_DIS |
| 1532 | * !bm_check implies we need cache flush | 1524 | * !bm_check implies we need cache flush |
| 1533 | * bm_control implies whether we can do ARB_DIS | 1525 | * bm_control implies whether we can do ARB_DIS |
| 1534 | * | 1526 | * |
| 1535 | * That leaves a case where bm_check is set and bm_control is | 1527 | * That leaves a case where bm_check is set and bm_control is |
| 1536 | * not set. In that case we cannot do much, we enter C3 | 1528 | * not set. In that case we cannot do much, we enter C3 |
| 1537 | * without doing anything. | 1529 | * without doing anything. |
| 1538 | */ | 1530 | */ |
| 1539 | if (pr->flags.bm_check && pr->flags.bm_control) { | 1531 | if (pr->flags.bm_check && pr->flags.bm_control) { |
| 1540 | spin_lock(&c3_lock); | 1532 | spin_lock(&c3_lock); |
| 1541 | c3_cpu_count++; | 1533 | c3_cpu_count++; |
| 1542 | /* Disable bus master arbitration when all CPUs are in C3 */ | 1534 | /* Disable bus master arbitration when all CPUs are in C3 */ |
| 1543 | if (c3_cpu_count == num_online_cpus()) | 1535 | if (c3_cpu_count == num_online_cpus()) |
| 1544 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); | 1536 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); |
| 1545 | spin_unlock(&c3_lock); | 1537 | spin_unlock(&c3_lock); |
| 1546 | } else if (!pr->flags.bm_check) { | 1538 | } else if (!pr->flags.bm_check) { |
| 1547 | ACPI_FLUSH_CPU_CACHE(); | 1539 | ACPI_FLUSH_CPU_CACHE(); |
| 1548 | } | 1540 | } |
| 1549 | 1541 | ||
| 1550 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 1542 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
| 1551 | acpi_idle_do_entry(cx); | 1543 | acpi_idle_do_entry(cx); |
| 1552 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 1544 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
| 1553 | 1545 | ||
| 1554 | /* Re-enable bus master arbitration */ | 1546 | /* Re-enable bus master arbitration */ |
| 1555 | if (pr->flags.bm_check && pr->flags.bm_control) { | 1547 | if (pr->flags.bm_check && pr->flags.bm_control) { |
| 1556 | spin_lock(&c3_lock); | 1548 | spin_lock(&c3_lock); |
| 1557 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); | 1549 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); |
| 1558 | c3_cpu_count--; | 1550 | c3_cpu_count--; |
| 1559 | spin_unlock(&c3_lock); | 1551 | spin_unlock(&c3_lock); |
| 1560 | } | ||
| 1561 | } | 1552 | } |
| 1562 | 1553 | ||
| 1563 | #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) | 1554 | #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) |
| @@ -1626,12 +1617,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
| 1626 | case ACPI_STATE_C1: | 1617 | case ACPI_STATE_C1: |
| 1627 | state->flags |= CPUIDLE_FLAG_SHALLOW; | 1618 | state->flags |= CPUIDLE_FLAG_SHALLOW; |
| 1628 | state->enter = acpi_idle_enter_c1; | 1619 | state->enter = acpi_idle_enter_c1; |
| 1620 | dev->safe_state = state; | ||
| 1629 | break; | 1621 | break; |
| 1630 | 1622 | ||
| 1631 | case ACPI_STATE_C2: | 1623 | case ACPI_STATE_C2: |
| 1632 | state->flags |= CPUIDLE_FLAG_BALANCED; | 1624 | state->flags |= CPUIDLE_FLAG_BALANCED; |
| 1633 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 1625 | state->flags |= CPUIDLE_FLAG_TIME_VALID; |
| 1634 | state->enter = acpi_idle_enter_simple; | 1626 | state->enter = acpi_idle_enter_simple; |
| 1627 | dev->safe_state = state; | ||
| 1635 | break; | 1628 | break; |
| 1636 | 1629 | ||
| 1637 | case ACPI_STATE_C3: | 1630 | case ACPI_STATE_C3: |
| @@ -1652,14 +1645,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
| 1652 | if (!count) | 1645 | if (!count) |
| 1653 | return -EINVAL; | 1646 | return -EINVAL; |
| 1654 | 1647 | ||
| 1655 | /* find the deepest state that can handle active BM */ | ||
| 1656 | if (pr->flags.bm_check) { | ||
| 1657 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) | ||
| 1658 | if (pr->power.states[i].type == ACPI_STATE_C3) | ||
| 1659 | break; | ||
| 1660 | pr->power.bm_state = &pr->power.states[i-1]; | ||
| 1661 | } | ||
| 1662 | |||
| 1663 | return 0; | 1648 | return 0; |
| 1664 | } | 1649 | } |
| 1665 | 1650 | ||
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 26d79f6db8a0..76411b1fc4fd 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
| @@ -78,7 +78,6 @@ struct acpi_processor_cx { | |||
| 78 | struct acpi_processor_power { | 78 | struct acpi_processor_power { |
| 79 | struct cpuidle_device dev; | 79 | struct cpuidle_device dev; |
| 80 | struct acpi_processor_cx *state; | 80 | struct acpi_processor_cx *state; |
| 81 | struct acpi_processor_cx *bm_state; | ||
| 82 | unsigned long bm_check_timestamp; | 81 | unsigned long bm_check_timestamp; |
| 83 | u32 default_state; | 82 | u32 default_state; |
| 84 | u32 bm_activity; | 83 | u32 bm_activity; |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 16a51546db44..c4e00161a247 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -92,6 +92,7 @@ struct cpuidle_device { | |||
| 92 | struct kobject kobj; | 92 | struct kobject kobj; |
| 93 | struct completion kobj_unregister; | 93 | struct completion kobj_unregister; |
| 94 | void *governor_data; | 94 | void *governor_data; |
| 95 | struct cpuidle_state *safe_state; | ||
| 95 | }; | 96 | }; |
| 96 | 97 | ||
| 97 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); | 98 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); |
