author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/acpi/processor_idle.c
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--	drivers/acpi/processor_idle.c	1017
1 files changed, 1017 insertions, 0 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
new file mode 100644
index 00000000000..05a17812d52
--- /dev/null
+++ b/drivers/acpi/processor_idle.c
@@ -0,0 +1,1017 @@
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME ("acpi_processor")

#define ACPI_PROCESSOR_FILE_POWER	"power"

#define US_TO_PM_TIMER_TICKS(t)	((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD		4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD		4	/* 1us (3.579 ticks per us) */
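
/*
 * Worked example: the ACPI PM timer runs at PM_TIMER_FREQUENCY (3579545 Hz),
 * i.e. ~3.579 ticks per microsecond, so
 *   US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 = 357 ticks
 * for a 100us latency.  The 4-tick C2/C3 overhead above is therefore
 * just over 1us of PM timer time.
 */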

static void (*pm_idle_save)(void);
module_param(max_cstate, uint, 0644);

static unsigned int nocst = 0;
module_param(nocst, uint, 0000);

/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history = (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
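
/*
 * For example, at HZ == 250 the default mask is (1U << (250/25)) - 1
 * = 0x3FF, i.e. 10 jiffies = 40ms of history, consistent with the
 * table above.
 */
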
/* --------------------------------------------------------------------------
                              Power Management
   -------------------------------------------------------------------------- */

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int no_c2c3(struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - C2,C3 disabled."
		" Override with \"processor.max_cstate=%d\"\n", id->ident,
		ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = 1;

	return 0;
}

static struct dmi_system_id __initdata processor_power_dmi_table[] = {
	{ no_c2c3, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW") }},
	{ no_c2c3, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "R01-A1J") }},
	{},
};

static inline u32
ticks_elapsed (
	u32 t1,
	u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!acpi_fadt.tmr_val_ext)
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
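
/*
 * Example: on a 24-bit PM timer (acpi_fadt.tmr_val_ext == 0), with
 * t1 = 0x00FFFFF0 and t2 = 0x00000010 the timer has wrapped, and
 * ticks_elapsed() returns ((0x00FFFFFF - 0x00FFFFF0) + 0x10) & 0x00FFFFFF
 * = 0x1F = 31 ticks.
 */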


static void
acpi_processor_power_activate (
	struct acpi_processor *pr,
	struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload (guard against a NULL old state) */
		if (!old || old->type != ACPI_STATE_C3)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);
		break;
	}

	pr->power.state = new;

	return;
}


static void acpi_processor_idle (void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	pr = processors[_smp_processor_id()];
	if (!pr)
		return;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx)
		goto easy_out;

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 32)
			diff = 32;

		while (diff) {
			/* if we didn't get called, assume there was busmaster activity */
			diff--;
			if (diff)
				pr->power.bm_activity |= 0x1;
			pr->power.bm_activity <<= 1;
		}
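		/*
		 * Each iteration above shifts one jiffy into the history:
		 * jiffies for which we were not called are conservatively
		 * marked as active, while the bit for the current jiffy is
		 * left clear and filled in from BM_STS (or the PIIX4
		 * workaround) below.
		 */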

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
			&bm_status, ACPI_MTX_DO_NOT_LOCK);
		if (bm_status) {
			pr->power.bm_activity++;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
				1, ACPI_MTX_DO_NOT_LOCK);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
				|| (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity++;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * Apply bus mastering demotion policy.  Automatically demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if (pr->power.bm_activity & cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}

	cx->usage++;

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			safe_halt();
		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Invoke C2 */
		inb(cx->address);
		/* Dummy op - must do something useless after P_LVL2 read */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Get end time (ticks) */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Re-enable interrupts */
		local_irq_enable();
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
		break;

	case ACPI_STATE_C3:
		/* Disable bus master arbitration */
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
		/* Get start time (ticks) */
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Invoke C3 */
		inb(cx->address);
		/* Dummy op - must do something useless after P_LVL3 read */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Get end time (ticks) */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Enable bus master arbitration */
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
		/* Re-enable interrupts */
		local_irq_enable();
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
		break;

	default:
		local_irq_enable();
		return;
	}

	next_state = pr->power.state;

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >= cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity & cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				}
				else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

end:
	/*
	 * Demote if current state exceeds max_cstate
	 */
	if ((pr->power.state - pr->power.states) > max_cstate) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);

	return;

easy_out:
	/* do C1 instead of busy loop */
	if (pm_idle_save)
		pm_idle_save();
	else
		safe_halt();
	return;
}


static int
acpi_processor_set_power_policy (
	struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");

	if (!pr)
		return_VALUE(-EINVAL);

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */
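	/*
	 * With the thresholds assigned below, a processor with valid C1-C3
	 * promotes from C1 to C2 after 10 consecutive "long" sleeps, from
	 * C2 to C3 after 4, and demotes to the next shallower state after
	 * a single "short" sleep.
	 */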

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return_VALUE(-ENODEV);

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return_VALUE(0);
}


static int acpi_processor_get_power_info_fadt (struct acpi_processor *pr)
{
	int i;

	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");

	if (!pr)
		return_VALUE(-EINVAL);

	if (!pr->pblk)
		return_VALUE(-ENODEV);

	for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
		memset(&(pr->power.states[i]), 0, sizeof(struct acpi_processor_cx));

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

	/* the C0 state only exists as a filler in our array,
	 * and all processors need to support C1 */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	pr->power.states[ACPI_STATE_C1].valid = 1;

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
	pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return_VALUE(0);
}


static int acpi_processor_get_power_info_cst (struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int i;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *cst;

	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");

	if (errata.smp)
		return_VALUE(-ENODEV);

	if (nocst)
		return_VALUE(-ENODEV);

	pr->power.count = 0;
	for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
		memset(&(pr->power.states[i]), 0, sizeof(struct acpi_processor_cx));

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return_VALUE(-ENODEV);
	}

	cst = (union acpi_object *) buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "not enough elements in _CST\n"));
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "count given by _CST is not valid\n"));
		status = -EFAULT;
		goto end;
	}

	/* We support up to ACPI_PROCESSOR_MAX_POWER. */
	if (count > ACPI_PROCESSOR_MAX_POWER) {
		printk(KERN_WARNING "Limiting number of power states to max (%d)\n", ACPI_PROCESSOR_MAX_POWER);
		printk(KERN_WARNING "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
		count = ACPI_PROCESSOR_MAX_POWER;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

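	/*
	 * Per the ACPI specification, each _CST entry is a 4-element
	 * package:
	 *
	 *   Package {
	 *       Register,   // Buffer: address of the C-state register
	 *       Type,       // Integer: C1, C2, or C3
	 *       Latency,    // Integer: worst-case latency, in microseconds
	 *       Power       // Integer: average power consumption, in mW
	 *   }
	 *
	 * which is what the loop below unpacks and validates.
	 */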
	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = (union acpi_object *) &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = (union acpi_object *) &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *) obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
			0 : reg->address;

		/* There should be an easy way to extract an integer... */
		obj = (union acpi_object *) &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;

		if ((cx.type != ACPI_STATE_C1) &&
		    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
			continue;

		if ((cx.type < ACPI_STATE_C1) ||
		    (cx.type > ACPI_STATE_C3))
			continue;

		obj = (union acpi_object *) &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = (union acpi_object *) &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		(pr->power.count)++;
		memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx));
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", pr->power.count));

	/* Validate number of power states discovered */
	if (pr->power.count < 2)
		status = -ENODEV;

end:
	acpi_os_free(buffer.pointer);

	return_VALUE(status);
}


static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	ACPI_FUNCTION_TRACE("acpi_processor_power_verify_c2");

	if (!cx->address)
		return_VOID;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n",
				  cx->latency));
		return_VOID;
	}

	/* We're (currently) only supporting C2 on UP */
	else if (errata.smp) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C2 not supported in SMP mode\n"));
		return_VOID;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

	return_VOID;
}


static void acpi_processor_power_verify_c3(
	struct acpi_processor *pr,
	struct acpi_processor_cx *cx)
{
	ACPI_FUNCTION_TRACE("acpi_processor_power_verify_c3");

	if (!cx->address)
		return_VOID;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n",
				  cx->latency));
		return_VOID;
	}

	/* bus mastering control is necessary */
	else if (!pr->flags.bm_control) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 support requires bus mastering control\n"));
		return_VOID;
	}

	/* We're (currently) only supporting C3 on UP */
	else if (errata.smp) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported in SMP mode\n"));
		return_VOID;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return_VOID;
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
	pr->flags.bm_check = 1;

	return_VOID;
}


static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	return (working);
}

static int acpi_processor_get_power_info (
	struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	result = acpi_processor_get_power_info_cst(pr);
	if ((result) || (acpi_processor_power_verify(pr) < 2)) {
		result = acpi_processor_get_power_info_fadt(pr);
		if (result)
			return_VALUE(result);

		if (acpi_processor_power_verify(pr) < 2)
			return_VALUE(-ENODEV);
	}

	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return_VALUE(result);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid)
			pr->power.count = i;
		if ((pr->power.states[i].valid) &&
		    (pr->power.states[i].type >= ACPI_STATE_C2))
			pr->flags.power = 1;
	}

	return_VALUE(0);
}

int acpi_processor_cst_has_changed (struct acpi_processor *pr)
{
	int result = 0;

	ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");

	if (!pr)
		return_VALUE(-EINVAL);

	if (errata.smp || nocst) {
		return_VALUE(-ENODEV);
	}

	if (!pr->flags.power_setup_done)
		return_VALUE(-ENODEV);

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_kernel();

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return_VALUE(result);
}

/* proc interface */

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
	unsigned int i;

	ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");

	if (!pr)
		goto end;

	seq_printf(seq, "active state: C%zd\n"
			"max_cstate: C%d\n"
			"bus master activity: %08x\n",
			pr->power.state ? pr->power.state - pr->power.states : 0,
			max_cstate,
			(unsigned)pr->power.bm_activity);

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, " %cC%d: ",
			(&pr->power.states[i] == pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				(pr->power.states[i].promotion.state -
				 pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				(pr->power.states[i].demotion.state -
				 pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d]\n",
			pr->power.states[i].latency,
			pr->power.states[i].usage);
	}

end:
	return_VALUE(0);
}
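
/*
 * For illustration (all values machine-specific), the file produced by
 * the show routine above reads roughly:
 *
 *   active state: C2
 *   max_cstate: C8
 *   bus master activity: 00000000
 *   states:
 *     C1: type[C1] promotion[C2] demotion[--] latency[000] usage[00000104]
 *    *C2: type[C2] promotion[C3] demotion[C1] latency[095] usage[00562190]
 *     C3: type[C3] promotion[--] demotion[C2] latency[950] usage[00003441]
 */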

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static struct file_operations acpi_processor_power_fops = {
	.open		= acpi_processor_power_open_fs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};


int acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run = 0;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	ACPI_FUNCTION_TRACE("acpi_processor_power_init");

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE "ACPI: processor limited to max C-state %d\n", max_cstate);
		first_run++;
	}

	if (!errata.smp && (pr->id == 0) && acpi_fadt.cst_cnt && !nocst) {
		status = acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Notifying BIOS of _CST ability failed\n"));
		}
	}

	acpi_processor_get_power_info(pr);

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i, pr->power.states[i].type);
		printk(")\n");

		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Unable to create '%s' fs entry\n",
				  ACPI_PROCESSOR_FILE_POWER));
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	pr->flags.power_setup_done = 1;

	return_VALUE(0);
}

int acpi_processor_power_exit(struct acpi_processor *pr, struct acpi_device *device)
{
	ACPI_FUNCTION_TRACE("acpi_processor_power_exit");

	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, acpi_device_dir(device));

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
	}

	return_VALUE(0);
}