Diffstat (limited to 'arch/arm/mach-omap2/cpuidle34xx.c')
-rw-r--r--  arch/arm/mach-omap2/cpuidle34xx.c  436
 1 file changed, 146 insertions(+), 290 deletions(-)
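
For orientation before the diff itself: the patch drops the old omap3_processor_cx array and its hand-written per-state initialisation, and instead hangs a small mach-specific structure off each cpuidle state's driver_data, with the board-tunable latencies reduced to an exit_latency/target_residency/valid triple. The sketch below condenses the two data layouts as they appear in this diff; the u32/u8 typedefs are stand-ins for the kernel's fixed-width types, and the exact home of struct cpuidle_params (a shared OMAP PM header) is assumed rather than shown here.

#include <stdint.h>

typedef uint32_t u32;   /* stand-ins for the kernel's fixed-width types */
typedef uint8_t  u8;

/*
 * Board-tunable parameters, one entry per C-state, matching the new
 * cpuidle_params_table[] initialisers in this patch, e.g. {2 + 2, 5, 1}.
 */
struct cpuidle_params {
	u32 exit_latency;       /* sleep + wakeup latency, in us */
	u32 target_residency;   /* minimum worthwhile residency, in us */
	u8  valid;              /* state enabled by the board file */
};

/*
 * Mach-specific information recorded in each cpuidle_state's
 * driver_data (set via cpuidle_set_statedata() in _fill_cstate()).
 */
struct omap3_idle_statedata {
	u32 mpu_state;          /* PWRDM_POWER_* target for the MPU domain */
	u32 core_state;         /* PWRDM_POWER_* target for the CORE domain */
	u8  valid;
};

Keeping only mpu_state/core_state/valid per state lets next_valid_state() and omap3_enter_idle_bm() derive everything else from the generic cpuidle_state fields, which is what allows the seven near-identical init blocks below to collapse into _fill_cstate().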
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 1c240eff3918..4bf6e6e8b100 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -36,36 +36,6 @@
36 36
37#ifdef CONFIG_CPU_IDLE 37#ifdef CONFIG_CPU_IDLE
38 38
39#define OMAP3_MAX_STATES 7
40#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */
41#define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */
42#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */
 43#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core inactive */
44#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */
45#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */
46#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */
47
48#define OMAP3_STATE_MAX OMAP3_STATE_C7
49
50#define CPUIDLE_FLAG_CHECK_BM 0x10000 /* use omap3_enter_idle_bm() */
51
52struct omap3_processor_cx {
53 u8 valid;
54 u8 type;
55 u32 sleep_latency;
56 u32 wakeup_latency;
57 u32 mpu_state;
58 u32 core_state;
59 u32 threshold;
60 u32 flags;
61 const char *desc;
62};
63
64struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
65struct omap3_processor_cx current_cx_state;
66struct powerdomain *mpu_pd, *core_pd, *per_pd;
67struct powerdomain *cam_pd;
68
69/* 39/*
70 * The latencies/thresholds for various C states have 40 * The latencies/thresholds for various C states have
71 * to be configured from the respective board files. 41 * to be configured from the respective board files.
@@ -75,27 +45,31 @@ struct powerdomain *cam_pd;
75 */ 45 */
76static struct cpuidle_params cpuidle_params_table[] = { 46static struct cpuidle_params cpuidle_params_table[] = {
77 /* C1 */ 47 /* C1 */
78 {1, 2, 2, 5}, 48 {2 + 2, 5, 1},
79 /* C2 */ 49 /* C2 */
80 {1, 10, 10, 30}, 50 {10 + 10, 30, 1},
81 /* C3 */ 51 /* C3 */
82 {1, 50, 50, 300}, 52 {50 + 50, 300, 1},
83 /* C4 */ 53 /* C4 */
84 {1, 1500, 1800, 4000}, 54 {1500 + 1800, 4000, 1},
85 /* C5 */ 55 /* C5 */
86 {1, 2500, 7500, 12000}, 56 {2500 + 7500, 12000, 1},
87 /* C6 */ 57 /* C6 */
88 {1, 3000, 8500, 15000}, 58 {3000 + 8500, 15000, 1},
89 /* C7 */ 59 /* C7 */
90 {1, 10000, 30000, 300000}, 60 {10000 + 30000, 300000, 1},
91}; 61};
62#define OMAP3_NUM_STATES ARRAY_SIZE(cpuidle_params_table)
92 63
93static int omap3_idle_bm_check(void) 64/* Mach specific information to be recorded in the C-state driver_data */
94{ 65struct omap3_idle_statedata {
95 if (!omap3_can_sleep()) 66 u32 mpu_state;
96 return 1; 67 u32 core_state;
97 return 0; 68 u8 valid;
98} 69};
70struct omap3_idle_statedata omap3_idle_data[OMAP3_NUM_STATES];
71
72struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
99 73
100static int _cpuidle_allow_idle(struct powerdomain *pwrdm, 74static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
101 struct clockdomain *clkdm) 75 struct clockdomain *clkdm)
@@ -122,12 +96,10 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
122static int omap3_enter_idle(struct cpuidle_device *dev, 96static int omap3_enter_idle(struct cpuidle_device *dev,
123 struct cpuidle_state *state) 97 struct cpuidle_state *state)
124{ 98{
125 struct omap3_processor_cx *cx = cpuidle_get_statedata(state); 99 struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
126 struct timespec ts_preidle, ts_postidle, ts_idle; 100 struct timespec ts_preidle, ts_postidle, ts_idle;
127 u32 mpu_state = cx->mpu_state, core_state = cx->core_state; 101 u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
128 102
129 current_cx_state = *cx;
130
131 /* Used to keep track of the total time in idle */ 103 /* Used to keep track of the total time in idle */
132 getnstimeofday(&ts_preidle); 104 getnstimeofday(&ts_preidle);
133 105
@@ -140,7 +112,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
140 if (omap_irq_pending() || need_resched()) 112 if (omap_irq_pending() || need_resched())
141 goto return_sleep_time; 113 goto return_sleep_time;
142 114
143 if (cx->type == OMAP3_STATE_C1) { 115 /* Deny idle for C1 */
116 if (state == &dev->states[0]) {
144 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); 117 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
145 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); 118 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
146 } 119 }
@@ -148,7 +121,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
148 /* Execute ARM wfi */ 121 /* Execute ARM wfi */
149 omap_sram_idle(); 122 omap_sram_idle();
150 123
151 if (cx->type == OMAP3_STATE_C1) { 124 /* Re-allow idle for C1 */
125 if (state == &dev->states[0]) {
152 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); 126 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
153 pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); 127 pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
154 } 128 }
@@ -164,41 +138,53 @@ return_sleep_time:
164} 138}
165 139
166/** 140/**
167 * next_valid_state - Find next valid c-state 141 * next_valid_state - Find next valid C-state
168 * @dev: cpuidle device 142 * @dev: cpuidle device
169 * @state: Currently selected c-state 143 * @state: Currently selected C-state
170 * 144 *
171 * If the current state is valid, it is returned back to the caller. 145 * If the current state is valid, it is returned back to the caller.
172 * Else, this function searches for a lower c-state which is still 146 * Else, this function searches for a lower c-state which is still
173 * valid (as defined in omap3_power_states[]). 147 * valid.
148 *
149 * A state is valid if the 'valid' field is enabled and
150 * if it satisfies the enable_off_mode condition.
174 */ 151 */
175static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, 152static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
176 struct cpuidle_state *curr) 153 struct cpuidle_state *curr)
177{ 154{
178 struct cpuidle_state *next = NULL; 155 struct cpuidle_state *next = NULL;
179 struct omap3_processor_cx *cx; 156 struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr);
157 u32 mpu_deepest_state = PWRDM_POWER_RET;
158 u32 core_deepest_state = PWRDM_POWER_RET;
180 159
181 cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr); 160 if (enable_off_mode) {
161 mpu_deepest_state = PWRDM_POWER_OFF;
162 /*
 163 * Erratum i583: valid for ES rev < ES1.2 on 3630.
164 * CORE OFF mode is not supported in a stable form, restrict
165 * instead the CORE state to RET.
166 */
167 if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
168 core_deepest_state = PWRDM_POWER_OFF;
169 }
182 170
183 /* Check if current state is valid */ 171 /* Check if current state is valid */
184 if (cx->valid) { 172 if ((cx->valid) &&
173 (cx->mpu_state >= mpu_deepest_state) &&
174 (cx->core_state >= core_deepest_state)) {
185 return curr; 175 return curr;
186 } else { 176 } else {
187 u8 idx = OMAP3_STATE_MAX; 177 int idx = OMAP3_NUM_STATES - 1;
188 178
189 /* 179 /* Reach the current state starting at highest C-state */
190 * Reach the current state starting at highest C-state 180 for (; idx >= 0; idx--) {
191 */
192 for (; idx >= OMAP3_STATE_C1; idx--) {
193 if (&dev->states[idx] == curr) { 181 if (&dev->states[idx] == curr) {
194 next = &dev->states[idx]; 182 next = &dev->states[idx];
195 break; 183 break;
196 } 184 }
197 } 185 }
198 186
199 /* 187 /* Should never hit this condition */
200 * Should never hit this condition.
201 */
202 WARN_ON(next == NULL); 188 WARN_ON(next == NULL);
203 189
204 /* 190 /*
@@ -206,17 +192,17 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
206 * Start search from the next (lower) state. 192 * Start search from the next (lower) state.
207 */ 193 */
208 idx--; 194 idx--;
209 for (; idx >= OMAP3_STATE_C1; idx--) { 195 for (; idx >= 0; idx--) {
210 struct omap3_processor_cx *cx;
211
212 cx = cpuidle_get_statedata(&dev->states[idx]); 196 cx = cpuidle_get_statedata(&dev->states[idx]);
213 if (cx->valid) { 197 if ((cx->valid) &&
198 (cx->mpu_state >= mpu_deepest_state) &&
199 (cx->core_state >= core_deepest_state)) {
214 next = &dev->states[idx]; 200 next = &dev->states[idx];
215 break; 201 break;
216 } 202 }
217 } 203 }
218 /* 204 /*
219 * C1 and C2 are always valid. 205 * C1 is always valid.
220 * So, no need to check for 'next==NULL' outside this loop. 206 * So, no need to check for 'next==NULL' outside this loop.
221 */ 207 */
222 } 208 }
@@ -229,36 +215,22 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
229 * @dev: cpuidle device 215 * @dev: cpuidle device
230 * @state: The target state to be programmed 216 * @state: The target state to be programmed
231 * 217 *
232 * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This 218 * This function checks for any pending activity and then programs
233 * function checks for any pending activity and then programs the 219 * the device to the specified or a safer state.
234 * device to the specified or a safer state.
235 */ 220 */
236static int omap3_enter_idle_bm(struct cpuidle_device *dev, 221static int omap3_enter_idle_bm(struct cpuidle_device *dev,
237 struct cpuidle_state *state) 222 struct cpuidle_state *state)
238{ 223{
239 struct cpuidle_state *new_state = next_valid_state(dev, state); 224 struct cpuidle_state *new_state;
240 u32 core_next_state, per_next_state = 0, per_saved_state = 0; 225 u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
241 u32 cam_state; 226 struct omap3_idle_statedata *cx;
242 struct omap3_processor_cx *cx;
243 int ret; 227 int ret;
244 228
245 if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) { 229 if (!omap3_can_sleep()) {
246 BUG_ON(!dev->safe_state);
247 new_state = dev->safe_state; 230 new_state = dev->safe_state;
248 goto select_state; 231 goto select_state;
249 } 232 }
250 233
251 cx = cpuidle_get_statedata(state);
252 core_next_state = cx->core_state;
253
254 /*
255 * FIXME: we currently manage device-specific idle states
256 * for PER and CORE in combination with CPU-specific
257 * idle states. This is wrong, and device-specific
258 * idle management needs to be separated out into
259 * its own code.
260 */
261
262 /* 234 /*
263 * Prevent idle completely if CAM is active. 235 * Prevent idle completely if CAM is active.
264 * CAM does not have wakeup capability in OMAP3. 236 * CAM does not have wakeup capability in OMAP3.
@@ -270,9 +242,19 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
270 } 242 }
271 243
272 /* 244 /*
245 * FIXME: we currently manage device-specific idle states
246 * for PER and CORE in combination with CPU-specific
247 * idle states. This is wrong, and device-specific
248 * idle management needs to be separated out into
249 * its own code.
250 */
251
252 /*
273 * Prevent PER off if CORE is not in retention or off as this 253 * Prevent PER off if CORE is not in retention or off as this
274 * would disable PER wakeups completely. 254 * would disable PER wakeups completely.
275 */ 255 */
256 cx = cpuidle_get_statedata(state);
257 core_next_state = cx->core_state;
276 per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); 258 per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
277 if ((per_next_state == PWRDM_POWER_OFF) && 259 if ((per_next_state == PWRDM_POWER_OFF) &&
278 (core_next_state > PWRDM_POWER_RET)) 260 (core_next_state > PWRDM_POWER_RET))
@@ -282,6 +264,8 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
282 if (per_next_state != per_saved_state) 264 if (per_next_state != per_saved_state)
283 pwrdm_set_next_pwrst(per_pd, per_next_state); 265 pwrdm_set_next_pwrst(per_pd, per_next_state);
284 266
267 new_state = next_valid_state(dev, state);
268
285select_state: 269select_state:
286 dev->last_state = new_state; 270 dev->last_state = new_state;
287 ret = omap3_enter_idle(dev, new_state); 271 ret = omap3_enter_idle(dev, new_state);
@@ -295,31 +279,6 @@ select_state:
295 279
296DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev); 280DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
297 281
298/**
299 * omap3_cpuidle_update_states() - Update the cpuidle states
300 * @mpu_deepest_state: Enable states up to and including this for mpu domain
301 * @core_deepest_state: Enable states up to and including this for core domain
302 *
303 * This goes through the list of states available and enables and disables the
304 * validity of C states based on deepest state that can be achieved for the
305 * variable domain
306 */
307void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
308{
309 int i;
310
311 for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
312 struct omap3_processor_cx *cx = &omap3_power_states[i];
313
314 if ((cx->mpu_state >= mpu_deepest_state) &&
315 (cx->core_state >= core_deepest_state)) {
316 cx->valid = 1;
317 } else {
318 cx->valid = 0;
319 }
320 }
321}
322
323void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params) 282void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
324{ 283{
325 int i; 284 int i;
@@ -327,212 +286,109 @@ void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
327 if (!cpuidle_board_params) 286 if (!cpuidle_board_params)
328 return; 287 return;
329 288
330 for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) { 289 for (i = 0; i < OMAP3_NUM_STATES; i++) {
331 cpuidle_params_table[i].valid = 290 cpuidle_params_table[i].valid = cpuidle_board_params[i].valid;
332 cpuidle_board_params[i].valid; 291 cpuidle_params_table[i].exit_latency =
333 cpuidle_params_table[i].sleep_latency = 292 cpuidle_board_params[i].exit_latency;
334 cpuidle_board_params[i].sleep_latency; 293 cpuidle_params_table[i].target_residency =
335 cpuidle_params_table[i].wake_latency = 294 cpuidle_board_params[i].target_residency;
336 cpuidle_board_params[i].wake_latency;
337 cpuidle_params_table[i].threshold =
338 cpuidle_board_params[i].threshold;
339 } 295 }
340 return; 296 return;
341} 297}
342 298
343/* omap3_init_power_states - Initialises the OMAP3 specific C states.
344 *
 345 * Below is the description of each C state.
346 * C1 . MPU WFI + Core active
347 * C2 . MPU WFI + Core inactive
348 * C3 . MPU CSWR + Core inactive
349 * C4 . MPU OFF + Core inactive
350 * C5 . MPU CSWR + Core CSWR
351 * C6 . MPU OFF + Core CSWR
352 * C7 . MPU OFF + Core OFF
353 */
354void omap_init_power_states(void)
355{
356 /* C1 . MPU WFI + Core active */
357 omap3_power_states[OMAP3_STATE_C1].valid =
358 cpuidle_params_table[OMAP3_STATE_C1].valid;
359 omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1;
360 omap3_power_states[OMAP3_STATE_C1].sleep_latency =
361 cpuidle_params_table[OMAP3_STATE_C1].sleep_latency;
362 omap3_power_states[OMAP3_STATE_C1].wakeup_latency =
363 cpuidle_params_table[OMAP3_STATE_C1].wake_latency;
364 omap3_power_states[OMAP3_STATE_C1].threshold =
365 cpuidle_params_table[OMAP3_STATE_C1].threshold;
366 omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
367 omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
368 omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;
369 omap3_power_states[OMAP3_STATE_C1].desc = "MPU ON + CORE ON";
370
371 /* C2 . MPU WFI + Core inactive */
372 omap3_power_states[OMAP3_STATE_C2].valid =
373 cpuidle_params_table[OMAP3_STATE_C2].valid;
374 omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2;
375 omap3_power_states[OMAP3_STATE_C2].sleep_latency =
376 cpuidle_params_table[OMAP3_STATE_C2].sleep_latency;
377 omap3_power_states[OMAP3_STATE_C2].wakeup_latency =
378 cpuidle_params_table[OMAP3_STATE_C2].wake_latency;
379 omap3_power_states[OMAP3_STATE_C2].threshold =
380 cpuidle_params_table[OMAP3_STATE_C2].threshold;
381 omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
382 omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
383 omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
384 CPUIDLE_FLAG_CHECK_BM;
385 omap3_power_states[OMAP3_STATE_C2].desc = "MPU ON + CORE ON";
386
387 /* C3 . MPU CSWR + Core inactive */
388 omap3_power_states[OMAP3_STATE_C3].valid =
389 cpuidle_params_table[OMAP3_STATE_C3].valid;
390 omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3;
391 omap3_power_states[OMAP3_STATE_C3].sleep_latency =
392 cpuidle_params_table[OMAP3_STATE_C3].sleep_latency;
393 omap3_power_states[OMAP3_STATE_C3].wakeup_latency =
394 cpuidle_params_table[OMAP3_STATE_C3].wake_latency;
395 omap3_power_states[OMAP3_STATE_C3].threshold =
396 cpuidle_params_table[OMAP3_STATE_C3].threshold;
397 omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET;
398 omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
399 omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
400 CPUIDLE_FLAG_CHECK_BM;
401 omap3_power_states[OMAP3_STATE_C3].desc = "MPU RET + CORE ON";
402
403 /* C4 . MPU OFF + Core inactive */
404 omap3_power_states[OMAP3_STATE_C4].valid =
405 cpuidle_params_table[OMAP3_STATE_C4].valid;
406 omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4;
407 omap3_power_states[OMAP3_STATE_C4].sleep_latency =
408 cpuidle_params_table[OMAP3_STATE_C4].sleep_latency;
409 omap3_power_states[OMAP3_STATE_C4].wakeup_latency =
410 cpuidle_params_table[OMAP3_STATE_C4].wake_latency;
411 omap3_power_states[OMAP3_STATE_C4].threshold =
412 cpuidle_params_table[OMAP3_STATE_C4].threshold;
413 omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF;
414 omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
415 omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
416 CPUIDLE_FLAG_CHECK_BM;
417 omap3_power_states[OMAP3_STATE_C4].desc = "MPU OFF + CORE ON";
418
419 /* C5 . MPU CSWR + Core CSWR*/
420 omap3_power_states[OMAP3_STATE_C5].valid =
421 cpuidle_params_table[OMAP3_STATE_C5].valid;
422 omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5;
423 omap3_power_states[OMAP3_STATE_C5].sleep_latency =
424 cpuidle_params_table[OMAP3_STATE_C5].sleep_latency;
425 omap3_power_states[OMAP3_STATE_C5].wakeup_latency =
426 cpuidle_params_table[OMAP3_STATE_C5].wake_latency;
427 omap3_power_states[OMAP3_STATE_C5].threshold =
428 cpuidle_params_table[OMAP3_STATE_C5].threshold;
429 omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET;
430 omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
431 omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
432 CPUIDLE_FLAG_CHECK_BM;
433 omap3_power_states[OMAP3_STATE_C5].desc = "MPU RET + CORE RET";
434
435 /* C6 . MPU OFF + Core CSWR */
436 omap3_power_states[OMAP3_STATE_C6].valid =
437 cpuidle_params_table[OMAP3_STATE_C6].valid;
438 omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6;
439 omap3_power_states[OMAP3_STATE_C6].sleep_latency =
440 cpuidle_params_table[OMAP3_STATE_C6].sleep_latency;
441 omap3_power_states[OMAP3_STATE_C6].wakeup_latency =
442 cpuidle_params_table[OMAP3_STATE_C6].wake_latency;
443 omap3_power_states[OMAP3_STATE_C6].threshold =
444 cpuidle_params_table[OMAP3_STATE_C6].threshold;
445 omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF;
446 omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
447 omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
448 CPUIDLE_FLAG_CHECK_BM;
449 omap3_power_states[OMAP3_STATE_C6].desc = "MPU OFF + CORE RET";
450
451 /* C7 . MPU OFF + Core OFF */
452 omap3_power_states[OMAP3_STATE_C7].valid =
453 cpuidle_params_table[OMAP3_STATE_C7].valid;
454 omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7;
455 omap3_power_states[OMAP3_STATE_C7].sleep_latency =
456 cpuidle_params_table[OMAP3_STATE_C7].sleep_latency;
457 omap3_power_states[OMAP3_STATE_C7].wakeup_latency =
458 cpuidle_params_table[OMAP3_STATE_C7].wake_latency;
459 omap3_power_states[OMAP3_STATE_C7].threshold =
460 cpuidle_params_table[OMAP3_STATE_C7].threshold;
461 omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF;
462 omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
463 omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
464 CPUIDLE_FLAG_CHECK_BM;
465 omap3_power_states[OMAP3_STATE_C7].desc = "MPU OFF + CORE OFF";
466
467 /*
468 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
469 * enable OFF mode in a stable form for previous revisions.
470 * we disable C7 state as a result.
471 */
472 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
473 omap3_power_states[OMAP3_STATE_C7].valid = 0;
474 cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
475 pr_warn("%s: core off state C7 disabled due to i583\n",
476 __func__);
477 }
478}
479
480struct cpuidle_driver omap3_idle_driver = { 299struct cpuidle_driver omap3_idle_driver = {
481 .name = "omap3_idle", 300 .name = "omap3_idle",
482 .owner = THIS_MODULE, 301 .owner = THIS_MODULE,
483}; 302};
484 303
304/* Helper to fill the C-state common data and register the driver_data */
305static inline struct omap3_idle_statedata *_fill_cstate(
306 struct cpuidle_device *dev,
307 int idx, const char *descr)
308{
309 struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
310 struct cpuidle_state *state = &dev->states[idx];
311
312 state->exit_latency = cpuidle_params_table[idx].exit_latency;
313 state->target_residency = cpuidle_params_table[idx].target_residency;
314 state->flags = CPUIDLE_FLAG_TIME_VALID;
315 state->enter = omap3_enter_idle_bm;
316 cx->valid = cpuidle_params_table[idx].valid;
317 sprintf(state->name, "C%d", idx + 1);
318 strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
319 cpuidle_set_statedata(state, cx);
320
321 return cx;
322}
323
485/** 324/**
486 * omap3_idle_init - Init routine for OMAP3 idle 325 * omap3_idle_init - Init routine for OMAP3 idle
487 * 326 *
488 * Registers the OMAP3 specific cpuidle driver with the cpuidle 327 * Registers the OMAP3 specific cpuidle driver to the cpuidle
489 * framework with the valid set of states. 328 * framework with the valid set of states.
490 */ 329 */
491int __init omap3_idle_init(void) 330int __init omap3_idle_init(void)
492{ 331{
493 int i, count = 0;
494 struct omap3_processor_cx *cx;
495 struct cpuidle_state *state;
496 struct cpuidle_device *dev; 332 struct cpuidle_device *dev;
333 struct omap3_idle_statedata *cx;
497 334
498 mpu_pd = pwrdm_lookup("mpu_pwrdm"); 335 mpu_pd = pwrdm_lookup("mpu_pwrdm");
499 core_pd = pwrdm_lookup("core_pwrdm"); 336 core_pd = pwrdm_lookup("core_pwrdm");
500 per_pd = pwrdm_lookup("per_pwrdm"); 337 per_pd = pwrdm_lookup("per_pwrdm");
501 cam_pd = pwrdm_lookup("cam_pwrdm"); 338 cam_pd = pwrdm_lookup("cam_pwrdm");
502 339
503 omap_init_power_states();
504 cpuidle_register_driver(&omap3_idle_driver); 340 cpuidle_register_driver(&omap3_idle_driver);
505
506 dev = &per_cpu(omap3_idle_dev, smp_processor_id()); 341 dev = &per_cpu(omap3_idle_dev, smp_processor_id());
507 342
508 for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) { 343 /* C1 . MPU WFI + Core active */
509 cx = &omap3_power_states[i]; 344 cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
510 state = &dev->states[count]; 345 (&dev->states[0])->enter = omap3_enter_idle;
511 346 dev->safe_state = &dev->states[0];
512 if (!cx->valid) 347 cx->valid = 1; /* C1 is always valid */
513 continue; 348 cx->mpu_state = PWRDM_POWER_ON;
514 cpuidle_set_statedata(state, cx); 349 cx->core_state = PWRDM_POWER_ON;
515 state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
516 state->target_residency = cx->threshold;
517 state->flags = cx->flags;
518 state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
519 omap3_enter_idle_bm : omap3_enter_idle;
520 if (cx->type == OMAP3_STATE_C1)
521 dev->safe_state = state;
522 sprintf(state->name, "C%d", count+1);
523 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
524 count++;
525 }
526 350
527 if (!count) 351 /* C2 . MPU WFI + Core inactive */
528 return -EINVAL; 352 cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
529 dev->state_count = count; 353 cx->mpu_state = PWRDM_POWER_ON;
354 cx->core_state = PWRDM_POWER_ON;
355
356 /* C3 . MPU CSWR + Core inactive */
357 cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
358 cx->mpu_state = PWRDM_POWER_RET;
359 cx->core_state = PWRDM_POWER_ON;
530 360
531 if (enable_off_mode) 361 /* C4 . MPU OFF + Core inactive */
532 omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF); 362 cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
533 else 363 cx->mpu_state = PWRDM_POWER_OFF;
534 omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET); 364 cx->core_state = PWRDM_POWER_ON;
365
366 /* C5 . MPU RET + Core RET */
367 cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
368 cx->mpu_state = PWRDM_POWER_RET;
369 cx->core_state = PWRDM_POWER_RET;
370
371 /* C6 . MPU OFF + Core RET */
372 cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
373 cx->mpu_state = PWRDM_POWER_OFF;
374 cx->core_state = PWRDM_POWER_RET;
375
376 /* C7 . MPU OFF + Core OFF */
377 cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
378 /*
 379 * Erratum i583: implementation for ES rev < ES1.2 on 3630. We cannot
380 * enable OFF mode in a stable form for previous revisions.
381 * We disable C7 state as a result.
382 */
383 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
384 cx->valid = 0;
385 pr_warn("%s: core off state C7 disabled due to i583\n",
386 __func__);
387 }
388 cx->mpu_state = PWRDM_POWER_OFF;
389 cx->core_state = PWRDM_POWER_OFF;
535 390
391 dev->state_count = OMAP3_NUM_STATES;
536 if (cpuidle_register_device(dev)) { 392 if (cpuidle_register_device(dev)) {
537 printk(KERN_ERR "%s: CPUidle register device failed\n", 393 printk(KERN_ERR "%s: CPUidle register device failed\n",
538 __func__); 394 __func__);