author  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-19 16:12:46 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-19 16:12:46 -0400
commit  c4ec20717313daafba59225f812db89595952b83 (patch)
tree  253337453b1dc965c40668e4949337ed1c46cab7 /drivers/acpi/processor_idle.c
parent  ec2626815bf9a9922e49820b03e670e833f3ca3c (diff)
parent  00a2b433557f10736e8a02de619b3e9052556c12 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (41 commits)
ACPICA: hw: Don't carry spinlock over suspend
ACPICA: hw: remove use_lock flag from acpi_hw_register_{read, write}
ACPI: cpuidle: port idle timer suspend/resume workaround to cpuidle
ACPI: clean up acpi_enter_sleep_state_prep
Hibernation: Make sure that ACPI is enabled in acpi_hibernation_finish
ACPI: suppress uninitialized var warning
cpuidle: consolidate 2.6.22 cpuidle branch into one patch
ACPI: thinkpad-acpi: skip blanks before the data when parsing sysfs
ACPI: AC: Add sysfs interface
ACPI: SBS: Add sysfs alarm
ACPI: SBS: Add ACPI_PROCFS around procfs handling code.
ACPI: SBS: Add support for power_supply class (and sysfs)
ACPI: SBS: Make SBS reads table-driven.
ACPI: SBS: Simplify data structures in SBS
ACPI: SBS: Split host controller (ACPI0001) from SBS driver (ACPI0002)
ACPI: EC: Add new query handler to list head.
ACPI: Add acpi_bus_generate_event4() function
ACPI: Battery: add sysfs alarm
ACPI: Battery: Add sysfs support
ACPI: Battery: Misc clean-ups, no functional changes
...
Fix up conflicts in drivers/misc/thinkpad_acpi.[ch] manually
Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--  drivers/acpi/processor_idle.c  488
1 file changed, 449 insertions(+), 39 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 1f6fb38de017..f996d0e37689 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -40,6 +40,7 @@
 #include <linux/sched.h>        /* need_resched() */
 #include <linux/latency.h>
 #include <linux/clockchips.h>
+#include <linux/cpuidle.h>
 
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
@@ -64,14 +65,22 @@ ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER       "power"
 #define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define PM_TIMER_TICK_NS                (1000000000ULL/PM_TIMER_FREQUENCY)
+#ifndef CONFIG_CPU_IDLE
 #define C2_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
 #define C3_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
 static void (*pm_idle_save) (void) __read_mostly;
-module_param(max_cstate, uint, 0644);
+#else
+#define C2_OVERHEAD                     1       /* 1us */
+#define C3_OVERHEAD                     1       /* 1us */
+#endif
+#define PM_TIMER_TICKS_TO_US(p)         (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
 
+static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
+module_param(max_cstate, uint, 0000);
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
 
+#ifndef CONFIG_CPU_IDLE
 /*
  * bm_history -- bit-mask with a bit per jiffy of bus-master activity
  * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
@@ -82,9 +91,10 @@ module_param(nocst, uint, 0000);
 static unsigned int bm_history __read_mostly =
     (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
 module_param(bm_history, uint, 0644);
-/* --------------------------------------------------------------------------
-                                Power Management
-   -------------------------------------------------------------------------- */
+
+static int acpi_processor_set_power_policy(struct acpi_processor *pr);
+
+#endif
 
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
@@ -177,6 +187,18 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
                return ((0xFFFFFFFF - t1) + t2);
 }
 
+static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
+{
+       if (t2 >= t1)
+               return PM_TIMER_TICKS_TO_US(t2 - t1);
+       else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
+               return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
+       else
+               return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
+}
+
+#ifndef CONFIG_CPU_IDLE
+
 static void
 acpi_processor_power_activate(struct acpi_processor *pr,
                              struct acpi_processor_cx *new)
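[Note: the new ticks_elapsed_in_us() helper has to tolerate counter wraparound, because the ACPI PM timer is only 24 bits wide unless the FADT advertises ACPI_FADT_32BIT_TIMER. A minimal standalone sketch of the same arithmetic, with illustrative values that are not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors the patch's 24-bit branch: on wraparound, count the ticks
     * remaining before 0x00FFFFFF plus the ticks after zero, masked back
     * into 24 bits. */
    static uint32_t elapsed_24bit(uint32_t t1, uint32_t t2)
    {
        if (t2 >= t1)
            return t2 - t1;
        return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
    }

    int main(void)
    {
        assert(elapsed_24bit(100, 350) == 250);    /* no wrap */
        assert(elapsed_24bit(0x00FFFFFE, 1) == 2); /* wrapped, per the patch's formula */
        return 0;
    }
]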
@@ -248,6 +270,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
 }
+#endif /* !CONFIG_CPU_IDLE */
 
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
 
@@ -330,6 +353,7 @@ int acpi_processor_resume(struct acpi_device * device)
        return 0;
 }
 
+#ifndef CONFIG_CPU_IDLE
 static void acpi_processor_idle(void)
 {
        struct acpi_processor *pr = NULL;
@@ -427,7 +451,7 @@ static void acpi_processor_idle(void)
         * an SMP system. We do it here instead of doing it at _CST/P_LVL
         * detection phase, to work cleanly with logical CPU hotplug.
         */
-       if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+       if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                cx = &pr->power.states[ACPI_STATE_C1];
 #endif
@@ -727,6 +751,7 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 
        return 0;
 }
+#endif /* !CONFIG_CPU_IDLE */
 
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
@@ -744,7 +769,7 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 #ifndef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
-        * an SMP system.
+        * an SMP system.
         */
        if ((num_online_cpus() > 1) &&
            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
@@ -945,7 +970,12 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
         * Normalize the C2 latency to expidite policy
         */
        cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+       cx->latency_ticks = cx->latency;
+#endif
 
        return;
 }
@@ -1025,7 +1055,12 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
         * use this in our C3 policy
         */
        cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+       cx->latency_ticks = cx->latency;
+#endif
 
        return;
 }
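[Note: both verify hunks change the unit stored in latency_ticks. The legacy idle loop keeps PM-timer ticks, while the cpuidle path stores plain microseconds, since cpuidle does its own time accounting. A quick illustrative spot-check of the legacy conversion, assuming the usual 3.579545 MHz ACPI PM timer (this snippet is not in the patch):

    #include <stdio.h>

    #define PM_TIMER_FREQUENCY      3579545 /* Hz, per the ACPI spec */
    #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)

    int main(void)
    {
        /* 100 us -> (100 * 3579) / 1000 = 357 ticks, ~3.58 ticks/us,
         * matching the "3.579 ticks per us" overhead comment above. */
        printf("%d\n", US_TO_PM_TIMER_TICKS(100));
        return 0;
    }
]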
@@ -1090,6 +1125,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 
        pr->power.count = acpi_processor_power_verify(pr);
 
+#ifndef CONFIG_CPU_IDLE
        /*
         * Set Default Policy
         * ------------------
@@ -1101,6 +1137,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
        result = acpi_processor_set_power_policy(pr);
        if (result)
                return result;
+#endif
 
        /*
         * if one state of type C2 or C3 is available, mark this
@@ -1117,35 +1154,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
        return 0;
 }
 
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
-{
-       int result = 0;
-
-
-       if (!pr)
-               return -EINVAL;
-
-       if (nocst) {
-               return -ENODEV;
-       }
-
-       if (!pr->flags.power_setup_done)
-               return -ENODEV;
-
-       /* Fall back to the default idle loop */
-       pm_idle = pm_idle_save;
-       synchronize_sched();    /* Relies on interrupts forcing exit from idle. */
-
-       pr->flags.power = 0;
-       result = acpi_processor_get_power_info(pr);
-       if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
-               pm_idle = acpi_processor_idle;
-
-       return result;
-}
-
-/* proc interface */
-
 static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 {
        struct acpi_processor *pr = seq->private;
@@ -1227,6 +1235,35 @@ static const struct file_operations acpi_processor_power_fops = {
        .release = single_release,
 };
 
+#ifndef CONFIG_CPU_IDLE
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+       int result = 0;
+
+
+       if (!pr)
+               return -EINVAL;
+
+       if (nocst) {
+               return -ENODEV;
+       }
+
+       if (!pr->flags.power_setup_done)
+               return -ENODEV;
+
+       /* Fall back to the default idle loop */
+       pm_idle = pm_idle_save;
+       synchronize_sched();    /* Relies on interrupts forcing exit from idle. */
+
+       pr->flags.power = 0;
+       result = acpi_processor_get_power_info(pr);
+       if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
+               pm_idle = acpi_processor_idle;
+
+       return result;
+}
+
 #ifdef CONFIG_SMP
 static void smp_callback(void *v)
 {
@@ -1249,7 +1286,366 @@ static int acpi_processor_latency_notify(struct notifier_block *b,
 static struct notifier_block acpi_processor_latency_notifier = {
        .notifier_call = acpi_processor_latency_notify,
 };
+
+#endif
+
+#else /* CONFIG_CPU_IDLE */
+
+/**
+ * acpi_idle_bm_check - checks if bus master activity was detected
+ */
+static int acpi_idle_bm_check(void)
+{
+       u32 bm_status = 0;
+
+       acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+       if (bm_status)
+               acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+       /*
+        * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
+        * the true state of bus mastering activity; forcing us to
+        * manually check the BMIDEA bit of each IDE channel.
+        */
+       else if (errata.piix4.bmisx) {
+               if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
+                   || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
+                       bm_status = 1;
+       }
+       return bm_status;
+}
+
+/**
+ * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
+ * @pr: the processor
+ * @target: the new target state
+ */
+static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
+                                          struct acpi_processor_cx *target)
+{
+       if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
+               acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+               pr->flags.bm_rld_set = 0;
+       }
+
+       if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
+               acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+               pr->flags.bm_rld_set = 1;
+       }
+}
+
+/**
+ * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
+ * @cx: cstate data
+ */
+static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+{
+       if (cx->space_id == ACPI_CSTATE_FFH) {
+               /* Call into architectural FFH based C-state */
+               acpi_processor_ffh_cstate_enter(cx);
+       } else {
+               int unused;
+               /* IO port based C-state */
+               inb(cx->address);
+               /* Dummy wait op - must do something useless after P_LVL2 read
+                  because chipsets cannot guarantee that STPCLK# signal
+                  gets asserted in time to freeze execution properly. */
+               unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
+       }
+}
+
+/**
+ * acpi_idle_enter_c1 - enters an ACPI C1 state-type
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * This is equivalent to the HALT instruction.
+ */
+static int acpi_idle_enter_c1(struct cpuidle_device *dev,
+                             struct cpuidle_state *state)
+{
+       struct acpi_processor *pr;
+       struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+       pr = processors[smp_processor_id()];
+
+       if (unlikely(!pr))
+               return 0;
+
+       if (pr->flags.bm_check)
+               acpi_idle_update_bm_rld(pr, cx);
+
+       current_thread_info()->status &= ~TS_POLLING;
+       /*
+        * TS_POLLING-cleared state must be visible before we test
+        * NEED_RESCHED:
+        */
+       smp_mb();
+       if (!need_resched())
+               safe_halt();
+       current_thread_info()->status |= TS_POLLING;
+
+       cx->usage++;
+
+       return 0;
+}
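[Note: the TS_POLLING dance in acpi_idle_enter_c1() (repeated in the entry paths below) is the standard idle-entry handshake: clear the polling flag, issue a full barrier, and only then test for pending work, so a waker that observes the flag already cleared knows it must send a reschedule IPI rather than merely set NEED_RESCHED. A rough userspace analogue of the ordering, with hypothetical flag names standing in for the kernel's:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool ts_polling = true;  /* stand-in for TS_POLLING */
    static atomic_bool need_resched_flag;  /* stand-in for NEED_RESCHED */

    static void idle_enter(void)
    {
        atomic_store(&ts_polling, false);
        /* full barrier: the cleared flag must be globally visible
         * before we sample need_resched_flag (the smp_mb() above) */
        atomic_thread_fence(memory_order_seq_cst);
        if (!atomic_load(&need_resched_flag)) {
            /* here the driver would safe_halt(): enable IRQs, then HLT */
        }
        atomic_store(&ts_polling, true);
    }
]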
+
+/**
+ * acpi_idle_enter_simple - enters an ACPI state without BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ */
+static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+                                 struct cpuidle_state *state)
+{
+       struct acpi_processor *pr;
+       struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+       u32 t1, t2;
+       pr = processors[smp_processor_id()];
+
+       if (unlikely(!pr))
+               return 0;
+
+       if (acpi_idle_suspend)
+               return(acpi_idle_enter_c1(dev, state));
+
+       if (pr->flags.bm_check)
+               acpi_idle_update_bm_rld(pr, cx);
+
+       local_irq_disable();
+       current_thread_info()->status &= ~TS_POLLING;
+       /*
+        * TS_POLLING-cleared state must be visible before we test
+        * NEED_RESCHED:
+        */
+       smp_mb();
+
+       if (unlikely(need_resched())) {
+               current_thread_info()->status |= TS_POLLING;
+               local_irq_enable();
+               return 0;
+       }
+
+       if (cx->type == ACPI_STATE_C3)
+               ACPI_FLUSH_CPU_CACHE();
+
+       t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+       acpi_state_timer_broadcast(pr, cx, 1);
+       acpi_idle_do_entry(cx);
+       t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+       /* TSC could halt in idle, so notify users */
+       mark_tsc_unstable("TSC halts in idle");;
+#endif
+
+       local_irq_enable();
+       current_thread_info()->status |= TS_POLLING;
+
+       cx->usage++;
+
+       acpi_state_timer_broadcast(pr, cx, 0);
+       cx->time += ticks_elapsed(t1, t2);
+       return ticks_elapsed_in_us(t1, t2);
+}
+
+static int c3_cpu_count;
+static DEFINE_SPINLOCK(c3_lock);
+
+/**
+ * acpi_idle_enter_bm - enters C3 with proper BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * If BM is detected, the deepest non-C3 idle state is entered instead.
+ */
+static int acpi_idle_enter_bm(struct cpuidle_device *dev,
+                             struct cpuidle_state *state)
+{
+       struct acpi_processor *pr;
+       struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+       u32 t1, t2;
+       pr = processors[smp_processor_id()];
+
+       if (unlikely(!pr))
+               return 0;
+
+       if (acpi_idle_suspend)
+               return(acpi_idle_enter_c1(dev, state));
+
+       local_irq_disable();
+       current_thread_info()->status &= ~TS_POLLING;
+       /*
+        * TS_POLLING-cleared state must be visible before we test
+        * NEED_RESCHED:
+        */
+       smp_mb();
+
+       if (unlikely(need_resched())) {
+               current_thread_info()->status |= TS_POLLING;
+               local_irq_enable();
+               return 0;
+       }
+
+       /*
+        * Must be done before busmaster disable as we might need to
+        * access HPET !
+        */
+       acpi_state_timer_broadcast(pr, cx, 1);
+
+       if (acpi_idle_bm_check()) {
+               cx = pr->power.bm_state;
+
+               acpi_idle_update_bm_rld(pr, cx);
+
+               t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+               acpi_idle_do_entry(cx);
+               t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+       } else {
+               acpi_idle_update_bm_rld(pr, cx);
+
+               spin_lock(&c3_lock);
+               c3_cpu_count++;
+               /* Disable bus master arbitration when all CPUs are in C3 */
+               if (c3_cpu_count == num_online_cpus())
+                       acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+               spin_unlock(&c3_lock);
+
+               t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+               acpi_idle_do_entry(cx);
+               t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+               spin_lock(&c3_lock);
+               /* Re-enable bus master arbitration */
+               if (c3_cpu_count == num_online_cpus())
+                       acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+               c3_cpu_count--;
+               spin_unlock(&c3_lock);
+       }
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+       /* TSC could halt in idle, so notify users */
+       mark_tsc_unstable("TSC halts in idle");
+#endif
+
+       local_irq_enable();
+       current_thread_info()->status |= TS_POLLING;
+
+       cx->usage++;
+
+       acpi_state_timer_broadcast(pr, cx, 0);
+       cx->time += ticks_elapsed(t1, t2);
+       return ticks_elapsed_in_us(t1, t2);
+}
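[Note: the c3_lock/c3_cpu_count pair implements last-in/first-out gating: bus-master arbitration is disabled only once every online CPU has reached C3, and re-enabled before the first CPU leaves, since any running CPU needs arbitration for cache coherency. A userspace sketch of that gating pattern (pthread names and arb_disable() are illustrative stand-ins, not the kernel API):

    #include <pthread.h>

    static pthread_mutex_t c3_lock = PTHREAD_MUTEX_INITIALIZER;
    static int c3_cpu_count;

    static void arb_disable(int on) { (void)on; /* stand-in for the ARB_DIS write */ }

    static void c3_enter(int online_cpus)
    {
        pthread_mutex_lock(&c3_lock);
        /* last CPU in: nobody can snoop now, so disabling arbitration is safe */
        if (++c3_cpu_count == online_cpus)
            arb_disable(1);
        pthread_mutex_unlock(&c3_lock);
    }

    static void c3_exit(int online_cpus)
    {
        pthread_mutex_lock(&c3_lock);
        /* first CPU out: restore arbitration before anything runs */
        if (c3_cpu_count == online_cpus)
            arb_disable(0);
        c3_cpu_count--;
        pthread_mutex_unlock(&c3_lock);
    }
]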
+
+struct cpuidle_driver acpi_idle_driver = {
+       .name =         "acpi_idle",
+       .owner =        THIS_MODULE,
+};
+
+/**
+ * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
+{
+       int i, count = 0;
+       struct acpi_processor_cx *cx;
+       struct cpuidle_state *state;
+       struct cpuidle_device *dev = &pr->power.dev;
+
+       if (!pr->flags.power_setup_done)
+               return -EINVAL;
+
+       if (pr->flags.power == 0) {
+               return -EINVAL;
+       }
+
+       for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+               cx = &pr->power.states[i];
+               state = &dev->states[count];
+
+               if (!cx->valid)
+                       continue;
+
+#ifdef CONFIG_HOTPLUG_CPU
+               if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+                   !pr->flags.has_cst &&
+                   !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+                       continue;
 #endif
+               cpuidle_set_statedata(state, cx);
+
+               snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
+               state->exit_latency = cx->latency;
+               state->target_residency = cx->latency * 6;
+               state->power_usage = cx->power;
+
+               state->flags = 0;
+               switch (cx->type) {
+               case ACPI_STATE_C1:
+                       state->flags |= CPUIDLE_FLAG_SHALLOW;
+                       state->enter = acpi_idle_enter_c1;
+                       break;
+
+               case ACPI_STATE_C2:
+                       state->flags |= CPUIDLE_FLAG_BALANCED;
+                       state->flags |= CPUIDLE_FLAG_TIME_VALID;
+                       state->enter = acpi_idle_enter_simple;
+                       break;
+
+               case ACPI_STATE_C3:
+                       state->flags |= CPUIDLE_FLAG_DEEP;
+                       state->flags |= CPUIDLE_FLAG_TIME_VALID;
+                       state->flags |= CPUIDLE_FLAG_CHECK_BM;
+                       state->enter = pr->flags.bm_check ?
+                               acpi_idle_enter_bm :
+                               acpi_idle_enter_simple;
+                       break;
+               }
+
+               count++;
+       }
+
+       dev->state_count = count;
+
+       if (!count)
+               return -EINVAL;
+
+       /* find the deepest state that can handle active BM */
+       if (pr->flags.bm_check) {
+               for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
+                       if (pr->power.states[i].type == ACPI_STATE_C3)
+                               break;
+               pr->power.bm_state = &pr->power.states[i-1];
+       }
+
+       return 0;
+}
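[Note: acpi_processor_setup_cpuidle() translates each validated ACPI C-state into a cpuidle_state table slot. The target_residency = latency * 6 line is a break-even heuristic: _CST reports exit latency but no residency figure, so the driver guesses the sleep must last six times the latency to pay off. A minimal sketch of that one design choice, with invented values and a hypothetical fill_slot() helper:

    /* Illustrative only: how one slot is populated for a hypothetical C3
     * whose _CST entry reports 85 us exit latency. */
    struct idle_state {
        unsigned int exit_latency;     /* us */
        unsigned int target_residency; /* us */
    };

    static void fill_slot(struct idle_state *s, unsigned int latency_us)
    {
        s->exit_latency = latency_us;
        /* no residency data from firmware, so assume break-even at 6x */
        s->target_residency = latency_us * 6;
    }
    /* fill_slot(&s, 85) -> exit_latency 85 us, target_residency 510 us */
]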
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+       int ret;
+
+       if (!pr)
+               return -EINVAL;
+
+       if (nocst) {
+               return -ENODEV;
+       }
+
+       if (!pr->flags.power_setup_done)
+               return -ENODEV;
+
+       cpuidle_pause_and_lock();
+       cpuidle_disable_device(&pr->power.dev);
+       acpi_processor_get_power_info(pr);
+       acpi_processor_setup_cpuidle(pr);
+       ret = cpuidle_enable_device(&pr->power.dev);
+       cpuidle_resume_and_unlock();
+
+       return ret;
+}
+
+#endif /* CONFIG_CPU_IDLE */
 
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                                struct acpi_device *device)
@@ -1267,7 +1663,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                           "ACPI: processor limited to max C-state %d\n",
                           max_cstate);
                first_run++;
-#ifdef CONFIG_SMP
+#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
                register_latency_notifier(&acpi_processor_latency_notifier);
 #endif
        }
@@ -1285,6 +1681,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
        }
 
        acpi_processor_get_power_info(pr);
+       pr->flags.power_setup_done = 1;
 
        /*
         * Install the idle handler if processor power management is supported.
@@ -1292,6 +1689,13 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
         * platforms that only support C1.
         */
        if ((pr->flags.power) && (!boot_option_idle_override)) {
+#ifdef CONFIG_CPU_IDLE
+               acpi_processor_setup_cpuidle(pr);
+               pr->power.dev.cpu = pr->id;
+               if (cpuidle_register_device(&pr->power.dev))
+                       return -EIO;
+#endif
+
                printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
                for (i = 1; i <= pr->power.count; i++)
                        if (pr->power.states[i].valid)
@@ -1299,10 +1703,12 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                                       pr->power.states[i].type);
                printk(")\n");
 
+#ifndef CONFIG_CPU_IDLE
                if (pr->id == 0) {
                        pm_idle_save = pm_idle;
                        pm_idle = acpi_processor_idle;
                }
+#endif
        }
 
        /* 'power' [R] */
@@ -1316,21 +1722,24 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                entry->owner = THIS_MODULE;
        }
 
-       pr->flags.power_setup_done = 1;
-
        return 0;
 }
 
 int acpi_processor_power_exit(struct acpi_processor *pr,
                              struct acpi_device *device)
 {
-
+#ifdef CONFIG_CPU_IDLE
+       if ((pr->flags.power) && (!boot_option_idle_override))
+               cpuidle_unregister_device(&pr->power.dev);
+#endif
        pr->flags.power_setup_done = 0;
 
        if (acpi_device_dir(device))
                remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  acpi_device_dir(device));
 
+#ifndef CONFIG_CPU_IDLE
+
        /* Unregister the idle handler when processor #0 is removed. */
        if (pr->id == 0) {
                pm_idle = pm_idle_save;
@@ -1345,6 +1754,7 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
                unregister_latency_notifier(&acpi_processor_latency_notifier);
 #endif
        }
+#endif
 
        return 0;
 }