Diffstat (limited to 'arch/arm/mach-omap2/cpuidle34xx.c')
-rw-r--r--  arch/arm/mach-omap2/cpuidle34xx.c  305
1 files changed, 101 insertions, 204 deletions
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index d7bc31a2b3af..f9c8676b1f4c 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -36,35 +36,6 @@
 
 #ifdef CONFIG_CPU_IDLE
 
-#define OMAP3_MAX_STATES 7
-#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */
-#define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */
-#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */
-#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core iactive */
-#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */
-#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */
-#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */
-
-#define OMAP3_STATE_MAX OMAP3_STATE_C7
-
-#define CPUIDLE_FLAG_CHECK_BM	0x10000	/* use omap3_enter_idle_bm() */
-
-struct omap3_processor_cx {
-	u8 valid;
-	u8 type;
-	u32 exit_latency;
-	u32 mpu_state;
-	u32 core_state;
-	u32 target_residency;
-	u32 flags;
-	const char *desc;
-};
-
-struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
-struct omap3_processor_cx current_cx_state;
-struct powerdomain *mpu_pd, *core_pd, *per_pd;
-struct powerdomain *cam_pd;
-
 /*
  * The latencies/thresholds for various C states have
  * to be configured from the respective board files.
@@ -88,6 +59,17 @@ static struct cpuidle_params cpuidle_params_table[] = {
 	/* C7 */
 	{10000 + 30000, 300000, 1},
 };
+#define OMAP3_NUM_STATES ARRAY_SIZE(cpuidle_params_table)
+
+/* Mach specific information to be recorded in the C-state driver_data */
+struct omap3_idle_statedata {
+	u32 mpu_state;
+	u32 core_state;
+	u8 valid;
+};
+struct omap3_idle_statedata omap3_idle_data[OMAP3_NUM_STATES];
+
+struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
 
 static int omap3_idle_bm_check(void)
 {
@@ -121,12 +103,10 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
 static int omap3_enter_idle(struct cpuidle_device *dev,
 			struct cpuidle_state *state)
 {
-	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
+	struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
 	struct timespec ts_preidle, ts_postidle, ts_idle;
 	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
 
-	current_cx_state = *cx;
-
 	/* Used to keep track of the total time in idle */
 	getnstimeofday(&ts_preidle);
 
@@ -139,7 +119,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
 	if (omap_irq_pending() || need_resched())
 		goto return_sleep_time;
 
-	if (cx->type == OMAP3_STATE_C1) {
+	/* Deny idle for C1 */
+	if (state == &dev->states[0]) {
 		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
 		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
 	}
@@ -147,7 +128,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
 	/* Execute ARM wfi */
 	omap_sram_idle();
 
-	if (cx->type == OMAP3_STATE_C1) {
+	/* Re-allow idle for C1 */
+	if (state == &dev->states[0]) {
 		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
 		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
 	}
@@ -169,26 +151,26 @@ return_sleep_time:
  *
  * If the current state is valid, it is returned back to the caller.
  * Else, this function searches for a lower c-state which is still
- * valid (as defined in omap3_power_states[]).
+ * valid.
  */
 static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
 						struct cpuidle_state *curr)
 {
 	struct cpuidle_state *next = NULL;
-	struct omap3_processor_cx *cx;
+	struct omap3_idle_statedata *cx;
 
-	cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr);
+	cx = cpuidle_get_statedata(curr);
 
 	/* Check if current state is valid */
 	if (cx->valid) {
 		return curr;
 	} else {
-		u8 idx = OMAP3_STATE_MAX;
+		int idx = OMAP3_NUM_STATES - 1;
 
 		/*
 		 * Reach the current state starting at highest C-state
 		 */
-		for (; idx >= OMAP3_STATE_C1; idx--) {
+		for (; idx >= 0; idx--) {
 			if (&dev->states[idx] == curr) {
 				next = &dev->states[idx];
 				break;
@@ -205,9 +187,7 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
 		 * Start search from the next (lower) state.
 		 */
 		idx--;
-		for (; idx >= OMAP3_STATE_C1; idx--) {
-			struct omap3_processor_cx *cx;
-
+		for (; idx >= 0; idx--) {
 			cx = cpuidle_get_statedata(&dev->states[idx]);
 			if (cx->valid) {
 				next = &dev->states[idx];
@@ -215,7 +195,7 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
 			}
 		}
 		/*
-		 * C1 and C2 are always valid.
+		 * C1 is always valid.
 		 * So, no need to check for 'next==NULL' outside this loop.
 		 */
 	}
@@ -228,9 +208,8 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
  * @dev: cpuidle device
  * @state: The target state to be programmed
  *
- * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This
- * function checks for any pending activity and then programs the
- * device to the specified or a safer state.
+ * This function checks for any pending activity and then programs
+ * the device to the specified or a safer state.
  */
 static int omap3_enter_idle_bm(struct cpuidle_device *dev,
 			       struct cpuidle_state *state)
@@ -238,10 +217,10 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
 	struct cpuidle_state *new_state = next_valid_state(dev, state);
 	u32 core_next_state, per_next_state = 0, per_saved_state = 0;
 	u32 cam_state;
-	struct omap3_processor_cx *cx;
+	struct omap3_idle_statedata *cx;
 	int ret;
 
-	if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
+	if (omap3_idle_bm_check()) {
 		BUG_ON(!dev->safe_state);
 		new_state = dev->safe_state;
 		goto select_state;
@@ -307,8 +286,8 @@ void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
 {
 	int i;
 
-	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
-		struct omap3_processor_cx *cx = &omap3_power_states[i];
+	for (i = 0; i < OMAP3_NUM_STATES; i++) {
+		struct omap3_idle_statedata *cx = &omap3_idle_data[i];
 
 		if ((cx->mpu_state >= mpu_deepest_state) &&
 		    (cx->core_state >= core_deepest_state)) {
@@ -326,9 +305,8 @@ void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
 	if (!cpuidle_board_params)
 		return;
 
-	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
-		cpuidle_params_table[i].valid =
-			cpuidle_board_params[i].valid;
+	for (i = 0; i < OMAP3_NUM_STATES; i++) {
+		cpuidle_params_table[i].valid = cpuidle_board_params[i].valid;
 		cpuidle_params_table[i].exit_latency =
 			cpuidle_board_params[i].exit_latency;
 		cpuidle_params_table[i].target_residency =
@@ -337,185 +315,104 @@ void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
 	return;
 }
 
-/* omap3_init_power_states - Initialises the OMAP3 specific C states.
- *
- * Below is the desciption of each C state.
- *	C1 . MPU WFI + Core active
- *	C2 . MPU WFI + Core inactive
- *	C3 . MPU CSWR + Core inactive
- *	C4 . MPU OFF + Core inactive
- *	C5 . MPU CSWR + Core CSWR
- *	C6 . MPU OFF + Core CSWR
- *	C7 . MPU OFF + Core OFF
- */
-void omap_init_power_states(void)
-{
-	/* C1 . MPU WFI + Core active */
-	omap3_power_states[OMAP3_STATE_C1].valid =
-		cpuidle_params_table[OMAP3_STATE_C1].valid;
-	omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1;
-	omap3_power_states[OMAP3_STATE_C1].exit_latency =
-		cpuidle_params_table[OMAP3_STATE_C1].exit_latency;
-	omap3_power_states[OMAP3_STATE_C1].target_residency =
-		cpuidle_params_table[OMAP3_STATE_C1].target_residency;
-	omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
-	omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
-	omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;
-	omap3_power_states[OMAP3_STATE_C1].desc = "MPU ON + CORE ON";
-
-	/* C2 . MPU WFI + Core inactive */
-	omap3_power_states[OMAP3_STATE_C2].valid =
-		cpuidle_params_table[OMAP3_STATE_C2].valid;
-	omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2;
-	omap3_power_states[OMAP3_STATE_C2].exit_latency =
-		cpuidle_params_table[OMAP3_STATE_C2].exit_latency;
-	omap3_power_states[OMAP3_STATE_C2].target_residency =
-		cpuidle_params_table[OMAP3_STATE_C2].target_residency;
-	omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
-	omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
-	omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
-				CPUIDLE_FLAG_CHECK_BM;
-	omap3_power_states[OMAP3_STATE_C2].desc = "MPU ON + CORE ON";
-
-	/* C3 . MPU CSWR + Core inactive */
-	omap3_power_states[OMAP3_STATE_C3].valid =
-		cpuidle_params_table[OMAP3_STATE_C3].valid;
-	omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3;
-	omap3_power_states[OMAP3_STATE_C3].exit_latency =
-		cpuidle_params_table[OMAP3_STATE_C3].exit_latency;
-	omap3_power_states[OMAP3_STATE_C3].target_residency =
-		cpuidle_params_table[OMAP3_STATE_C3].target_residency;
-	omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET;
-	omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
-	omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
-				CPUIDLE_FLAG_CHECK_BM;
-	omap3_power_states[OMAP3_STATE_C3].desc = "MPU RET + CORE ON";
-
-	/* C4 . MPU OFF + Core inactive */
-	omap3_power_states[OMAP3_STATE_C4].valid =
-		cpuidle_params_table[OMAP3_STATE_C4].valid;
-	omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4;
-	omap3_power_states[OMAP3_STATE_C4].exit_latency =
-		cpuidle_params_table[OMAP3_STATE_C4].exit_latency;
-	omap3_power_states[OMAP3_STATE_C4].target_residency =
-		cpuidle_params_table[OMAP3_STATE_C4].target_residency;
-	omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF;
-	omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
-	omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
-				CPUIDLE_FLAG_CHECK_BM;
-	omap3_power_states[OMAP3_STATE_C4].desc = "MPU OFF + CORE ON";
-
-	/* C5 . MPU CSWR + Core CSWR*/
-	omap3_power_states[OMAP3_STATE_C5].valid =
-		cpuidle_params_table[OMAP3_STATE_C5].valid;
-	omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5;
-	omap3_power_states[OMAP3_STATE_C5].exit_latency =
-		cpuidle_params_table[OMAP3_STATE_C5].exit_latency;
-	omap3_power_states[OMAP3_STATE_C5].target_residency =
-		cpuidle_params_table[OMAP3_STATE_C5].target_residency;
-	omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET;
-	omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
-	omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
-				CPUIDLE_FLAG_CHECK_BM;
-	omap3_power_states[OMAP3_STATE_C5].desc = "MPU RET + CORE RET";
-
-	/* C6 . MPU OFF + Core CSWR */
-	omap3_power_states[OMAP3_STATE_C6].valid =
-		cpuidle_params_table[OMAP3_STATE_C6].valid;
-	omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6;
-	omap3_power_states[OMAP3_STATE_C6].exit_latency =
-		cpuidle_params_table[OMAP3_STATE_C6].exit_latency;
-	omap3_power_states[OMAP3_STATE_C6].target_residency =
-		cpuidle_params_table[OMAP3_STATE_C6].target_residency;
-	omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF;
-	omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
-	omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
-				CPUIDLE_FLAG_CHECK_BM;
-	omap3_power_states[OMAP3_STATE_C6].desc = "MPU OFF + CORE RET";
-
-	/* C7 . MPU OFF + Core OFF */
-	omap3_power_states[OMAP3_STATE_C7].valid =
-		cpuidle_params_table[OMAP3_STATE_C7].valid;
-	omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7;
-	omap3_power_states[OMAP3_STATE_C7].exit_latency =
-		cpuidle_params_table[OMAP3_STATE_C7].exit_latency;
-	omap3_power_states[OMAP3_STATE_C7].target_residency =
-		cpuidle_params_table[OMAP3_STATE_C7].target_residency;
-	omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF;
-	omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
-	omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
-				CPUIDLE_FLAG_CHECK_BM;
-	omap3_power_states[OMAP3_STATE_C7].desc = "MPU OFF + CORE OFF";
-
-	/*
-	 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
-	 * enable OFF mode in a stable form for previous revisions.
-	 * we disable C7 state as a result.
-	 */
-	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
-		omap3_power_states[OMAP3_STATE_C7].valid = 0;
-		cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
-		pr_warn("%s: core off state C7 disabled due to i583\n",
-			__func__);
-	}
-}
-
 struct cpuidle_driver omap3_idle_driver = {
 	.name = "omap3_idle",
 	.owner = THIS_MODULE,
 };
 
+/* Fill in the state data from the mach tables and register the driver_data */
+static inline struct omap3_idle_statedata *_fill_cstate(
+					struct cpuidle_device *dev,
+					int idx, const char *descr)
+{
+	struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
+	struct cpuidle_state *state = &dev->states[idx];
+
+	state->exit_latency = cpuidle_params_table[idx].exit_latency;
+	state->target_residency = cpuidle_params_table[idx].target_residency;
+	state->flags = CPUIDLE_FLAG_TIME_VALID;
+	state->enter = omap3_enter_idle_bm;
+	cx->valid = cpuidle_params_table[idx].valid;
+	sprintf(state->name, "C%d", idx + 1);
+	strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
+	cpuidle_set_statedata(state, cx);
+
+	return cx;
+}
+
 /**
  * omap3_idle_init - Init routine for OMAP3 idle
  *
- * Registers the OMAP3 specific cpuidle driver with the cpuidle
+ * Registers the OMAP3 specific cpuidle driver to the cpuidle
  * framework with the valid set of states.
  */
 int __init omap3_idle_init(void)
 {
-	int i, count = 0;
-	struct omap3_processor_cx *cx;
-	struct cpuidle_state *state;
 	struct cpuidle_device *dev;
+	struct omap3_idle_statedata *cx;
 
 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
 	core_pd = pwrdm_lookup("core_pwrdm");
 	per_pd = pwrdm_lookup("per_pwrdm");
 	cam_pd = pwrdm_lookup("cam_pwrdm");
 
-	omap_init_power_states();
 	cpuidle_register_driver(&omap3_idle_driver);
-
 	dev = &per_cpu(omap3_idle_dev, smp_processor_id());
 
-	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
-		cx = &omap3_power_states[i];
-		state = &dev->states[count];
-
-		if (!cx->valid)
-			continue;
-		cpuidle_set_statedata(state, cx);
-		state->exit_latency = cx->exit_latency;
-		state->target_residency = cx->target_residency;
-		state->flags = cx->flags;
-		state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
-			omap3_enter_idle_bm : omap3_enter_idle;
-		if (cx->type == OMAP3_STATE_C1)
-			dev->safe_state = state;
-		sprintf(state->name, "C%d", count+1);
-		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
-		count++;
-	}
+	/* C1 . MPU WFI + Core active */
+	cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
+	(&dev->states[0])->enter = omap3_enter_idle;
+	dev->safe_state = &dev->states[0];
+	cx->valid = 1;	/* C1 is always valid */
+	cx->mpu_state = PWRDM_POWER_ON;
+	cx->core_state = PWRDM_POWER_ON;
+
+	/* C2 . MPU WFI + Core inactive */
+	cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
+	cx->mpu_state = PWRDM_POWER_ON;
+	cx->core_state = PWRDM_POWER_ON;
+
+	/* C3 . MPU CSWR + Core inactive */
+	cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
+	cx->mpu_state = PWRDM_POWER_RET;
+	cx->core_state = PWRDM_POWER_ON;
+
+	/* C4 . MPU OFF + Core inactive */
+	cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
+	cx->mpu_state = PWRDM_POWER_OFF;
+	cx->core_state = PWRDM_POWER_ON;
+
+	/* C5 . MPU RET + Core RET */
+	cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
+	cx->mpu_state = PWRDM_POWER_RET;
+	cx->core_state = PWRDM_POWER_RET;
 
-	if (!count)
-		return -EINVAL;
-	dev->state_count = count;
+	/* C6 . MPU OFF + Core RET */
+	cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
+	cx->mpu_state = PWRDM_POWER_OFF;
+	cx->core_state = PWRDM_POWER_RET;
+
+	/* C7 . MPU OFF + Core OFF */
+	cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
+	/*
+	 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
+	 * enable OFF mode in a stable form for previous revisions.
+	 * We disable C7 state as a result.
+	 */
+	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
+		cx->valid = 0;
+		pr_warn("%s: core off state C7 disabled due to i583\n",
+			__func__);
+	}
+	cx->mpu_state = PWRDM_POWER_OFF;
+	cx->core_state = PWRDM_POWER_OFF;
 
 	if (enable_off_mode)
 		omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
 	else
 		omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);
 
+	dev->state_count = OMAP3_NUM_STATES;
 	if (cpuidle_register_device(dev)) {
 		printk(KERN_ERR "%s: CPUidle register device failed\n",
 		       __func__);