about | summary | refs | log | tree | commit | diff | stats
path: root/arch
diff options
context:
space:
mode:
authorKevin Hilman <khilman@deeprootsystems.com>2010-09-08 19:37:42 -0400
committerKevin Hilman <khilman@deeprootsystems.com>2010-09-23 20:13:50 -0400
commite7410cf7831c2e5106a90dac6179df5d2c9bd60e (patch)
treed53065cc2c53d2cc512b5bbb0a8f4ba23c652eac /arch
parent6cdee91257bee23a46dc869ca62469b67cba2c7e (diff)
OMAP3: PM: move device-specific special cases from PM core into CPUidle
In an effort to simplify the core idle path, move any device-specific special case handling from the core PM idle path into the CPUidle pre-idle checking path. This keeps the core, interrupts-disabled idle path streamlined and independent of any device-specific handling, and also allows CPUidle to do the checking only for certain C-states as needed.

This patch has the device checks in place for all states with the CHECK_BM flag, namely all states >= C2.

This patch was inspired by a similar patch written by Tero Kristo as part of a larger series to add INACTIVE state support.

NOTE: This is a baby-step towards decoupling device idle (or system idle) from CPU idle. Eventually, CPUidle should only manage the CPU, and device/system idle should be managed elsewhere.

Cc: Tero Kristo <tero.kristo@nokia.com>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c58
-rw-r--r--arch/arm/mach-omap2/pm34xx.c14
2 files changed, 56 insertions(+), 16 deletions(-)
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 3d3d035db9a..8ea012ef0b5 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -60,7 +60,8 @@ struct omap3_processor_cx {
60 60
61struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES]; 61struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
62struct omap3_processor_cx current_cx_state; 62struct omap3_processor_cx current_cx_state;
63struct powerdomain *mpu_pd, *core_pd; 63struct powerdomain *mpu_pd, *core_pd, *per_pd;
64struct powerdomain *cam_pd;
64 65
65/* 66/*
66 * The latencies/thresholds for various C states have 67 * The latencies/thresholds for various C states have
@@ -233,14 +234,62 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
233 struct cpuidle_state *state) 234 struct cpuidle_state *state)
234{ 235{
235 struct cpuidle_state *new_state = next_valid_state(dev, state); 236 struct cpuidle_state *new_state = next_valid_state(dev, state);
237 u32 core_next_state, per_next_state = 0, per_saved_state = 0;
238 u32 cam_state;
239 struct omap3_processor_cx *cx;
240 int ret;
236 241
237 if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) { 242 if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
238 BUG_ON(!dev->safe_state); 243 BUG_ON(!dev->safe_state);
239 new_state = dev->safe_state; 244 new_state = dev->safe_state;
245 goto select_state;
246 }
247
248 cx = cpuidle_get_statedata(state);
249 core_next_state = cx->core_state;
250
251 /*
252 * FIXME: we currently manage device-specific idle states
253 * for PER and CORE in combination with CPU-specific
254 * idle states. This is wrong, and device-specific
255 * idle managment needs to be separated out into
256 * its own code.
257 */
258
259 /*
260 * Prevent idle completely if CAM is active.
261 * CAM does not have wakeup capability in OMAP3.
262 */
263 cam_state = pwrdm_read_pwrst(cam_pd);
264 if (cam_state == PWRDM_POWER_ON) {
265 new_state = dev->safe_state;
266 goto select_state;
267 }
268
269 /*
270 * Prevent PER off if CORE is not in retention or off as this
271 * would disable PER wakeups completely.
272 */
273 per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
274 if ((per_next_state == PWRDM_POWER_OFF) &&
275 (core_next_state > PWRDM_POWER_RET)) {
276 per_next_state = PWRDM_POWER_RET;
277 pwrdm_set_next_pwrst(per_pd, per_next_state);
240 } 278 }
241 279
280 /* Are we changing PER target state? */
281 if (per_next_state != per_saved_state)
282 pwrdm_set_next_pwrst(per_pd, per_next_state);
283
284select_state:
242 dev->last_state = new_state; 285 dev->last_state = new_state;
243 return omap3_enter_idle(dev, new_state); 286 ret = omap3_enter_idle(dev, new_state);
287
288 /* Restore original PER state if it was modified */
289 if (per_next_state != per_saved_state)
290 pwrdm_set_next_pwrst(per_pd, per_saved_state);
291
292 return ret;
244} 293}
245 294
246DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev); 295DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
@@ -328,7 +377,8 @@ void omap_init_power_states(void)
328 cpuidle_params_table[OMAP3_STATE_C2].threshold; 377 cpuidle_params_table[OMAP3_STATE_C2].threshold;
329 omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON; 378 omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
330 omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON; 379 omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
331 omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID; 380 omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
381 CPUIDLE_FLAG_CHECK_BM;
332 382
333 /* C3 . MPU CSWR + Core inactive */ 383 /* C3 . MPU CSWR + Core inactive */
334 omap3_power_states[OMAP3_STATE_C3].valid = 384 omap3_power_states[OMAP3_STATE_C3].valid =
@@ -426,6 +476,8 @@ int __init omap3_idle_init(void)
426 476
427 mpu_pd = pwrdm_lookup("mpu_pwrdm"); 477 mpu_pd = pwrdm_lookup("mpu_pwrdm");
428 core_pd = pwrdm_lookup("core_pwrdm"); 478 core_pd = pwrdm_lookup("core_pwrdm");
479 per_pd = pwrdm_lookup("per_pwrdm");
480 cam_pd = pwrdm_lookup("cam_pwrdm");
429 481
430 omap_init_power_states(); 482 omap_init_power_states();
431 cpuidle_register_driver(&omap3_idle_driver); 483 cpuidle_register_driver(&omap3_idle_driver);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 429268eaf8b..bb2ba1e03d0 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -346,7 +346,6 @@ void omap_sram_idle(void)
346 int core_next_state = PWRDM_POWER_ON; 346 int core_next_state = PWRDM_POWER_ON;
347 int core_prev_state, per_prev_state; 347 int core_prev_state, per_prev_state;
348 u32 sdrc_pwr = 0; 348 u32 sdrc_pwr = 0;
349 int per_state_modified = 0;
350 349
351 if (!_omap_sram_idle) 350 if (!_omap_sram_idle)
352 return; 351 return;
@@ -391,19 +390,10 @@ void omap_sram_idle(void)
391 if (per_next_state < PWRDM_POWER_ON) { 390 if (per_next_state < PWRDM_POWER_ON) {
392 omap_uart_prepare_idle(2); 391 omap_uart_prepare_idle(2);
393 omap2_gpio_prepare_for_idle(per_next_state); 392 omap2_gpio_prepare_for_idle(per_next_state);
394 if (per_next_state == PWRDM_POWER_OFF) { 393 if (per_next_state == PWRDM_POWER_OFF)
395 if (core_next_state == PWRDM_POWER_ON) {
396 per_next_state = PWRDM_POWER_RET;
397 pwrdm_set_next_pwrst(per_pwrdm, per_next_state);
398 per_state_modified = 1;
399 } else
400 omap3_per_save_context(); 394 omap3_per_save_context();
401 }
402 } 395 }
403 396
404 if (pwrdm_read_pwrst(cam_pwrdm) == PWRDM_POWER_ON)
405 omap2_clkdm_deny_idle(mpu_pwrdm->pwrdm_clkdms[0]);
406
407 /* CORE */ 397 /* CORE */
408 if (core_next_state < PWRDM_POWER_ON) { 398 if (core_next_state < PWRDM_POWER_ON) {
409 omap_uart_prepare_idle(0); 399 omap_uart_prepare_idle(0);
@@ -470,8 +460,6 @@ void omap_sram_idle(void)
470 if (per_prev_state == PWRDM_POWER_OFF) 460 if (per_prev_state == PWRDM_POWER_OFF)
471 omap3_per_restore_context(); 461 omap3_per_restore_context();
472 omap_uart_resume_idle(2); 462 omap_uart_resume_idle(2);
473 if (per_state_modified)
474 pwrdm_set_next_pwrst(per_pwrdm, PWRDM_POWER_OFF);
475 } 463 }
476 464
477 /* Disable IO-PAD and IO-CHAIN wakeup */ 465 /* Disable IO-PAD and IO-CHAIN wakeup */