Diffstat (limited to 'drivers')
-rw-r--r--	drivers/acpi/processor_idle.c	47
-rw-r--r--	drivers/cpuidle/Kconfig	4
-rw-r--r--	drivers/cpuidle/cpuidle.c	43
3 files changed, 80 insertions, 14 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 199ea2146153..32003fdc91e8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -98,6 +98,9 @@ module_param(bm_history, uint, 0644);
 
 static int acpi_processor_set_power_policy(struct acpi_processor *pr);
 
+#else /* CONFIG_CPU_IDLE */
+static unsigned int latency_factor __read_mostly = 2;
+module_param(latency_factor, uint, 0644);
 #endif
 
 /*
@@ -201,6 +204,10 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
 		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
 }
 
+/*
+ * Callers should disable interrupts before the call and enable
+ * interrupts after return.
+ */
 static void acpi_safe_halt(void)
 {
 	current_thread_info()->status &= ~TS_POLLING;
@@ -261,7 +268,7 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
-	if (cstate->space_id == ACPI_CSTATE_FFH) {
+	if (cstate->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cstate);
 	} else {
@@ -413,6 +420,8 @@ static void acpi_processor_idle(void)
 			pm_idle_save();
 		else
 			acpi_safe_halt();
+
+		local_irq_enable();
 		return;
 	}
 
@@ -521,6 +530,7 @@ static void acpi_processor_idle(void)
 		 * skew otherwise.
 		 */
 		sleep_ticks = 0xFFFFFFFF;
+		local_irq_enable();
 		break;
 
 	case ACPI_STATE_C2:
@@ -922,20 +932,20 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 		cx.address = reg->address;
 		cx.index = current_count + 1;
 
-		cx.space_id = ACPI_CSTATE_SYSTEMIO;
+		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
 		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
 			if (acpi_processor_ffh_cstate_probe
 					(pr->id, &cx, reg) == 0) {
-				cx.space_id = ACPI_CSTATE_FFH;
-			} else if (cx.type != ACPI_STATE_C1) {
+				cx.entry_method = ACPI_CSTATE_FFH;
+			} else if (cx.type == ACPI_STATE_C1) {
 				/*
 				 * C1 is a special case where FIXED_HARDWARE
 				 * can be handled in non-MWAIT way as well.
 				 * In that case, save this _CST entry info.
-				 * That is, we retain space_id of SYSTEM_IO for
-				 * halt based C1.
 				 * Otherwise, ignore this info and continue.
 				 */
+				cx.entry_method = ACPI_CSTATE_HALT;
+			} else {
 				continue;
 			}
 		}
@@ -1369,12 +1379,16 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
 /**
  * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
  * @cx: cstate data
+ *
+ * Caller disables interrupt before call and enables interrupt after return.
  */
 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
-	if (cx->space_id == ACPI_CSTATE_FFH) {
+	if (cx->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cx);
+	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
+		acpi_safe_halt();
 	} else {
 		int unused;
 		/* IO port based C-state */
@@ -1396,21 +1410,27 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 			      struct cpuidle_state *state)
 {
+	u32 t1, t2;
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+
 	pr = processors[smp_processor_id()];
 
 	if (unlikely(!pr))
 		return 0;
 
+	local_irq_disable();
 	if (pr->flags.bm_check)
 		acpi_idle_update_bm_rld(pr, cx);
 
-	acpi_safe_halt();
+	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	acpi_idle_do_entry(cx);
+	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
+	local_irq_enable();
 	cx->usage++;
 
-	return 0;
+	return ticks_elapsed_in_us(t1, t2);
 }
 
 /**
@@ -1517,7 +1537,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		if (dev->safe_state) {
 			return dev->safe_state->enter(dev, dev->safe_state);
 		} else {
+			local_irq_disable();
 			acpi_safe_halt();
+			local_irq_enable();
 			return 0;
 		}
 	}
@@ -1609,7 +1631,7 @@ struct cpuidle_driver acpi_idle_driver = {
  */
 static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 {
-	int i, count = 0;
+	int i, count = CPUIDLE_DRIVER_STATE_START;
 	struct acpi_processor_cx *cx;
 	struct cpuidle_state *state;
 	struct cpuidle_device *dev = &pr->power.dev;
@@ -1638,13 +1660,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 
 		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
 		state->exit_latency = cx->latency;
-		state->target_residency = cx->latency * 6;
+		state->target_residency = cx->latency * latency_factor;
 		state->power_usage = cx->power;
 
 		state->flags = 0;
 		switch (cx->type) {
 		case ACPI_STATE_C1:
 			state->flags |= CPUIDLE_FLAG_SHALLOW;
+			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_c1;
 			dev->safe_state = state;
 			break;
@@ -1667,6 +1690,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		}
 
 		count++;
+		if (count == CPUIDLE_STATE_MAX)
+			break;
 	}
 
 	dev->state_count = count;
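
Note: acpi_idle_enter_c1() now samples the ACPI PM timer around the C-state
entry and reports the elapsed time in microseconds via ticks_elapsed_in_us(),
instead of always returning 0. Only the helper's 32-bit wraparound branch is
visible in the hunk context above; a minimal sketch of the whole helper,
assuming the usual 24-bit vs. 32-bit PM timer distinction signalled by the
FADT's ACPI_FADT_32BIT_TIMER flag, would be:

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		/* 24-bit PM timer wrapped: mask to 24 bits (assumed branch) */
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		/* 32-bit PM timer wrapped: this branch appears in the hunk above */
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}

With CPUIDLE_FLAG_TIME_VALID now set on C1, the governor can trust that value.
The latency_factor knob also replaces the hard-coded multiplier of 6: with the
default of 2, a C-state with a 100us exit latency becomes eligible once the
predicted idle interval reaches 200us rather than 600us.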
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 3bed4127d4ad..7dbc4a83c45c 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -1,13 +1,13 @@
 
 config CPU_IDLE
 	bool "CPU idle PM support"
+	default ACPI
 	help
 	  CPU idle is a generic framework for supporting software-controlled
 	  idle processor power management. It includes modular cross-platform
 	  governors that can be swapped during runtime.
 
-	  If you're using a mobile platform that supports CPU idle PM (e.g.
-	  an ACPI-capable notebook), you should say Y here.
+	  If you're using an ACPI-enabled platform, you should say Y here.
 
 config CPU_IDLE_GOV_LADDER
 	bool
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2a98d99cbd46..d868d737742f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -15,6 +15,7 @@
 #include <linux/pm_qos_params.h>
 #include <linux/cpu.h>
 #include <linux/cpuidle.h>
+#include <linux/ktime.h>
 
 #include "cpuidle.h"
 
@@ -82,7 +83,7 @@ void cpuidle_uninstall_idle_handler(void)
 {
 	if (enabled_devices && (pm_idle != pm_idle_old)) {
 		pm_idle = pm_idle_old;
-		cpu_idle_wait();
+		cpuidle_kick_cpus();
 	}
 }
 
@@ -180,6 +181,44 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
 
 EXPORT_SYMBOL_GPL(cpuidle_disable_device);
 
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+{
+	ktime_t t1, t2;
+	s64 diff;
+	int ret;
+
+	t1 = ktime_get();
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+
+	t2 = ktime_get();
+	diff = ktime_to_us(ktime_sub(t2, t1));
+	if (diff > INT_MAX)
+		diff = INT_MAX;
+
+	ret = (int) diff;
+	return ret;
+}
+
+static void poll_idle_init(struct cpuidle_device *dev)
+{
+	struct cpuidle_state *state = &dev->states[0];
+
+	cpuidle_set_statedata(state, NULL);
+
+	snprintf(state->name, CPUIDLE_NAME_LEN, "C0 (poll idle)");
+	state->exit_latency = 0;
+	state->target_residency = 0;
+	state->power_usage = -1;
+	state->flags = CPUIDLE_FLAG_POLL | CPUIDLE_FLAG_TIME_VALID;
+	state->enter = poll_idle;
+}
+#else
+static void poll_idle_init(struct cpuidle_device *dev) {}
+#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
+
 /**
  * cpuidle_register_device - registers a CPU's idle PM feature
  * @dev: the cpu
@@ -198,6 +237,8 @@ int cpuidle_register_device(struct cpuidle_device *dev)
 
 	mutex_lock(&cpuidle_lock);
 
+	poll_idle_init(dev);
+
 	per_cpu(cpuidle_devices, dev->cpu) = dev;
 	list_add(&dev->device_list, &cpuidle_detected_devices);
 	if ((ret = cpuidle_add_sysfs(sys_dev))) {
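
Note: the ACPI and cpuidle halves of this patch meet at dev->states[0].
poll_idle_init() claims slot 0 of the state table, and
acpi_processor_setup_cpuidle() now starts counting at
CPUIDLE_DRIVER_STATE_START instead of 0 so the real C-states land after the
polling state. The constant itself is defined outside this view (the diffstat
is limited to 'drivers'); presumably it keys off the same config symbol,
along the lines of:

/* Assumed definition in include/linux/cpuidle.h, not shown in this diff:
 * slot 0 is reserved for poll_idle only on architectures that provide
 * cpu_relax(). */
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
#define CPUIDLE_DRIVER_STATE_START	1
#else
#define CPUIDLE_DRIVER_STATE_START	0
#endif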