Diffstat (limited to 'drivers/acpi/processor_idle.c')

 drivers/acpi/processor_idle.c | 482 ++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 443 insertions(+), 39 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f18261368e76..99da6a790857 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -40,6 +40,7 @@
 #include <linux/sched.h>	/* need_resched() */
 #include <linux/latency.h>
 #include <linux/clockchips.h>
+#include <linux/cpuidle.h>
 
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
@@ -64,14 +65,22 @@ ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER "power"
 #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
+#ifndef CONFIG_CPU_IDLE
 #define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
 #define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */
 static void (*pm_idle_save) (void) __read_mostly;
-module_param(max_cstate, uint, 0644);
+#else
+#define C2_OVERHEAD 1 /* 1us */
+#define C3_OVERHEAD 1 /* 1us */
+#endif
+#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
 
+static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
+module_param(max_cstate, uint, 0000);
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
 
+#ifndef CONFIG_CPU_IDLE
 /*
  * bm_history -- bit-mask with a bit per jiffy of bus-master activity
  * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
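
The pair of conversion macros above is built around the ACPI PM timer clock; PM_TIMER_FREQUENCY is 3,579,545 ticks per second, so one microsecond is about 3.58 ticks — which is why the pre-cpuidle build rounds C2_OVERHEAD/C3_OVERHEAD up to 4 ticks, while the cpuidle build, which keeps latencies in microseconds throughout, uses 1. A standalone sketch of the integer arithmetic (userspace C with the constant inlined; not part of the patch):

#include <stdio.h>

/* Same definitions as the patch; 3579545 is PM_TIMER_FREQUENCY. */
#define PM_TIMER_FREQUENCY	3579545
#define US_TO_PM_TIMER_TICKS(t)	((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICKS_TO_US(p)	(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

int main(void)
{
	unsigned long us = 100;	/* a plausible C2 latency */
	unsigned long ticks = US_TO_PM_TIMER_TICKS(us);

	/* truncating division: 100 us -> 357 ticks -> 99 us */
	printf("%lu us -> %lu ticks -> %lu us\n",
	       us, ticks, PM_TIMER_TICKS_TO_US(ticks));
	return 0;
}

The round trip loses a microsecond to truncation — harmless at these magnitudes, and presumably why each build only ever converts in one direction.
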
@@ -82,9 +91,10 @@ module_param(nocst, uint, 0000);
 static unsigned int bm_history __read_mostly =
     (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
 module_param(bm_history, uint, 0644);
-/* --------------------------------------------------------------------------
-                                Power Management
-   -------------------------------------------------------------------------- */
+
+static int acpi_processor_set_power_policy(struct acpi_processor *pr);
+
+#endif
 
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
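
bm_history, now compiled only for the non-cpuidle build, keeps one bit per jiffy of observed bus-master activity, so the default mask depends on HZ exactly as the comment above it tabulates. A quick standalone evaluation of that expression (illustrative only):

#include <stdio.h>

/* The patch's default: a full 32-bit window at HZ >= 800, otherwise
 * HZ/25 jiffies, i.e. a window of roughly 40 ms. */
static unsigned int bm_mask(unsigned int hz)
{
	return hz >= 800 ? 0xFFFFFFFF : ((1U << (hz / 25)) - 1);
}

int main(void)
{
	unsigned int hz[] = { 100, 250, 1000 };

	for (int i = 0; i < 3; i++)
		printf("HZ=%4u -> 0x%08X\n", hz[i], bm_mask(hz[i]));
	/* prints 0x0000000F, 0x000003FF, 0xFFFFFFFF */
	return 0;
}
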
@@ -177,6 +187,18 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
 	return ((0xFFFFFFFF - t1) + t2);
 }
 
+static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
+{
+	if (t2 >= t1)
+		return PM_TIMER_TICKS_TO_US(t2 - t1);
+	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
+		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
+	else
+		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
+}
+
+#ifndef CONFIG_CPU_IDLE
+
 static void
 acpi_processor_power_activate(struct acpi_processor *pr,
 			      struct acpi_processor_cx *new)
@@ -248,6 +270,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
 }
+#endif /* !CONFIG_CPU_IDLE */
 
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
 
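ticks_elapsed_in_us() is the new microsecond-returning twin of ticks_elapsed(), and both must survive PM-timer rollover: ACPI_FADT_32BIT_TIMER distinguishes a 32-bit counter from the narrower 24-bit one. A standalone check of the 24-bit branch (same conversion constant as above; not part of the patch):

#include <stdio.h>

#define PM_TIMER_FREQUENCY	3579545
#define PM_TIMER_TICKS_TO_US(p)	(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

/* The 24-bit arm of ticks_elapsed_in_us(), with the FADT test elided. */
static unsigned int elapsed_us_24bit(unsigned int t1, unsigned int t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
}

int main(void)
{
	/* counter wrapped between the two samples */
	unsigned int t1 = 0x00FFFF00, t2 = 0x00000166;

	/* (0xFFFFFF - t1) + t2 = 613 ticks, about 171 us */
	printf("%u us\n", elapsed_us_24bit(t1, t2));
	return 0;
}
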
@@ -342,6 +365,7 @@ int acpi_processor_resume(struct acpi_device * device)
 	return 0;
 }
 
+#ifndef CONFIG_CPU_IDLE
 static void acpi_processor_idle(void)
 {
 	struct acpi_processor *pr = NULL;
@@ -439,7 +463,7 @@ static void acpi_processor_idle(void)
 	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
 	 * detection phase, to work cleanly with logical CPU hotplug.
 	 */
-	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
 	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 		cx = &pr->power.states[ACPI_STATE_C1];
 #endif
@@ -739,6 +763,7 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 
 	return 0;
 }
+#endif /* !CONFIG_CPU_IDLE */
 
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
@@ -756,7 +781,7 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 #ifndef CONFIG_HOTPLUG_CPU
 	/*
 	 * Check for P_LVL2_UP flag before entering C2 and above on
-	 * an SMP system.
+	 * an SMP system.
 	 */
 	if ((num_online_cpus() > 1) &&
 	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
@@ -957,7 +982,12 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 	 * Normalize the C2 latency to expedite policy
 	 */
 	cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+	cx->latency_ticks = cx->latency;
+#endif
 
 	return;
 }
@@ -1037,7 +1067,12 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	 * use this in our C3 policy
 	 */
 	cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+	cx->latency_ticks = cx->latency;
+#endif
 
 	return;
 }
@@ -1102,6 +1137,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 
 	pr->power.count = acpi_processor_power_verify(pr);
 
+#ifndef CONFIG_CPU_IDLE
 	/*
 	 * Set Default Policy
 	 * ------------------
@@ -1113,6 +1149,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	result = acpi_processor_set_power_policy(pr);
 	if (result)
 		return result;
+#endif
 
 	/*
 	 * if one state of type C2 or C3 is available, mark this
@@ -1129,35 +1166,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	return 0;
 }
 
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
-{
-	int result = 0;
-
-
-	if (!pr)
-		return -EINVAL;
-
-	if (nocst) {
-		return -ENODEV;
-	}
-
-	if (!pr->flags.power_setup_done)
-		return -ENODEV;
-
-	/* Fall back to the default idle loop */
-	pm_idle = pm_idle_save;
-	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */
-
-	pr->flags.power = 0;
-	result = acpi_processor_get_power_info(pr);
-	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
-		pm_idle = acpi_processor_idle;
-
-	return result;
-}
-
-/* proc interface */
-
 static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 {
 	struct acpi_processor *pr = seq->private;
@@ -1239,6 +1247,35 @@ static const struct file_operations acpi_processor_power_fops = {
 	.release = single_release,
 };
 
+#ifndef CONFIG_CPU_IDLE
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int result = 0;
+
+
+	if (!pr)
+		return -EINVAL;
+
+	if (nocst) {
+		return -ENODEV;
+	}
+
+	if (!pr->flags.power_setup_done)
+		return -ENODEV;
+
+	/* Fall back to the default idle loop */
+	pm_idle = pm_idle_save;
+	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */
+
+	pr->flags.power = 0;
+	result = acpi_processor_get_power_info(pr);
+	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
+		pm_idle = acpi_processor_idle;
+
+	return result;
+}
+
 #ifdef CONFIG_SMP
 static void smp_callback(void *v)
 {
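
Everything up to this point stays on the old design, where ACPI hijacks the kernel's global pm_idle function pointer and acpi_processor_idle() bundles state selection and state entry into one routine. The large hunk below inverts that: each C-state gets its own enter() callback registered with cpuidle, and the choice among them moves out into a cpuidle governor. A toy model of the inversion — every name here is invented for illustration, none of it is kernel code:

#include <stdio.h>

struct toy_state {
	const char *name;
	unsigned int exit_latency_us;
	int (*enter)(void);
};

static int enter_c1(void) { printf("C1: halt\n"); return 0; }
static int enter_c2(void) { printf("C2: stop clock\n"); return 0; }
static int enter_c3(void) { printf("C3: deep sleep\n"); return 0; }

int main(void)
{
	struct toy_state table[] = {
		{ "C1", 1, enter_c1 },
		{ "C2", 20, enter_c2 },
		{ "C3", 200, enter_c3 },
	};
	unsigned int latency_req = 50;	/* pretend QoS bound, in us */
	int pick = 0;

	/* "governor": deepest state whose exit latency fits the bound */
	for (int i = 1; i < 3; i++)
		if (table[i].exit_latency_us <= latency_req)
			pick = i;
	return table[pick].enter();	/* picks C2 here */
}
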
@@ -1261,7 +1298,360 @@ static int acpi_processor_latency_notify(struct notifier_block *b,
 static struct notifier_block acpi_processor_latency_notifier = {
 	.notifier_call = acpi_processor_latency_notify,
 };
+
+#endif
+
+#else /* CONFIG_CPU_IDLE */
+
+/**
+ * acpi_idle_bm_check - checks if bus master activity was detected
+ */
+static int acpi_idle_bm_check(void)
+{
+	u32 bm_status = 0;
+
+	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+	if (bm_status)
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+	/*
+	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
+	 * the true state of bus mastering activity; forcing us to
+	 * manually check the BMIDEA bit of each IDE channel.
+	 */
+	else if (errata.piix4.bmisx) {
+		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
+		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
+			bm_status = 1;
+	}
+	return bm_status;
+}
+
+/**
+ * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
+ * @pr: the processor
+ * @target: the new target state
+ */
+static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
+					   struct acpi_processor_cx *target)
+{
+	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+		pr->flags.bm_rld_set = 0;
+	}
+
+	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+		pr->flags.bm_rld_set = 1;
+	}
+}
+
+/**
+ * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
+ * @cx: cstate data
+ */
+static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+{
+	if (cx->space_id == ACPI_CSTATE_FFH) {
+		/* Call into architectural FFH based C-state */
+		acpi_processor_ffh_cstate_enter(cx);
+	} else {
+		int unused;
+		/* IO port based C-state */
+		inb(cx->address);
+		/* Dummy wait op - must do something useless after P_LVL2 read
+		   because chipsets cannot guarantee that STPCLK# signal
+		   gets asserted in time to freeze execution properly. */
+		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	}
+}
+
+/**
+ * acpi_idle_enter_c1 - enters an ACPI C1 state-type
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * This is equivalent to the HALT instruction.
+ */
+static int acpi_idle_enter_c1(struct cpuidle_device *dev,
+			      struct cpuidle_state *state)
+{
+	struct acpi_processor *pr;
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	pr = processors[smp_processor_id()];
+
+	if (unlikely(!pr))
+		return 0;
+
+	if (pr->flags.bm_check)
+		acpi_idle_update_bm_rld(pr, cx);
+
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we test
+	 * NEED_RESCHED:
+	 */
+	smp_mb();
+	if (!need_resched())
+		safe_halt();
+	current_thread_info()->status |= TS_POLLING;
+
+	cx->usage++;
+
+	return 0;
+}
+
+/**
+ * acpi_idle_enter_simple - enters an ACPI state without BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ */
+static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+				  struct cpuidle_state *state)
+{
+	struct acpi_processor *pr;
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	u32 t1, t2;
+	pr = processors[smp_processor_id()];
+
+	if (unlikely(!pr))
+		return 0;
+
+	if (pr->flags.bm_check)
+		acpi_idle_update_bm_rld(pr, cx);
+
+	local_irq_disable();
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we test
+	 * NEED_RESCHED:
+	 */
+	smp_mb();
+
+	if (unlikely(need_resched())) {
+		current_thread_info()->status |= TS_POLLING;
+		local_irq_enable();
+		return 0;
+	}
+
+	if (cx->type == ACPI_STATE_C3)
+		ACPI_FLUSH_CPU_CACHE();
+
+	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	acpi_state_timer_broadcast(pr, cx, 1);
+	acpi_idle_do_entry(cx);
+	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+	/* TSC could halt in idle, so notify users */
+	mark_tsc_unstable("TSC halts in idle");
+#endif
+
+	local_irq_enable();
+	current_thread_info()->status |= TS_POLLING;
+
+	cx->usage++;
+
+	acpi_state_timer_broadcast(pr, cx, 0);
+	cx->time += ticks_elapsed(t1, t2);
+	return ticks_elapsed_in_us(t1, t2);
+}
+
+static int c3_cpu_count;
+static DEFINE_SPINLOCK(c3_lock);
+
+/**
+ * acpi_idle_enter_bm - enters C3 with proper BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * If BM is detected, the deepest non-C3 idle state is entered instead.
+ */
+static int acpi_idle_enter_bm(struct cpuidle_device *dev,
+			      struct cpuidle_state *state)
+{
+	struct acpi_processor *pr;
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	u32 t1, t2;
+	pr = processors[smp_processor_id()];
+
+	if (unlikely(!pr))
+		return 0;
+
+	local_irq_disable();
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we test
+	 * NEED_RESCHED:
+	 */
+	smp_mb();
+
+	if (unlikely(need_resched())) {
+		current_thread_info()->status |= TS_POLLING;
+		local_irq_enable();
+		return 0;
+	}
+
+	/*
+	 * Must be done before busmaster disable as we might need to
+	 * access HPET !
+	 */
+	acpi_state_timer_broadcast(pr, cx, 1);
+
+	if (acpi_idle_bm_check()) {
+		cx = pr->power.bm_state;
+
+		acpi_idle_update_bm_rld(pr, cx);
+
+		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+		acpi_idle_do_entry(cx);
+		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	} else {
+		acpi_idle_update_bm_rld(pr, cx);
+
+		spin_lock(&c3_lock);
+		c3_cpu_count++;
+		/* Disable bus master arbitration when all CPUs are in C3 */
+		if (c3_cpu_count == num_online_cpus())
+			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+		spin_unlock(&c3_lock);
+
+		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+		acpi_idle_do_entry(cx);
+		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+		spin_lock(&c3_lock);
+		/* Re-enable bus master arbitration */
+		if (c3_cpu_count == num_online_cpus())
+			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+		c3_cpu_count--;
+		spin_unlock(&c3_lock);
+	}
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+	/* TSC could halt in idle, so notify users */
+	mark_tsc_unstable("TSC halts in idle");
+#endif
+
+	local_irq_enable();
+	current_thread_info()->status |= TS_POLLING;
+
+	cx->usage++;
+
+	acpi_state_timer_broadcast(pr, cx, 0);
+	cx->time += ticks_elapsed(t1, t2);
+	return ticks_elapsed_in_us(t1, t2);
+}
+
+struct cpuidle_driver acpi_idle_driver = {
+	.name = "acpi_idle",
+	.owner = THIS_MODULE,
+};
+
+/**
+ * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
+{
+	int i, count = 0;
+	struct acpi_processor_cx *cx;
+	struct cpuidle_state *state;
+	struct cpuidle_device *dev = &pr->power.dev;
+
+	if (!pr->flags.power_setup_done)
+		return -EINVAL;
+
+	if (pr->flags.power == 0) {
+		return -EINVAL;
+	}
+
+	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+		cx = &pr->power.states[i];
+		state = &dev->states[count];
+
+		if (!cx->valid)
+			continue;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+		    !pr->flags.has_cst &&
+		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+			continue;
 #endif
+		cpuidle_set_statedata(state, cx);
+
+		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
+		state->exit_latency = cx->latency;
+		state->target_residency = cx->latency * 6;
+		state->power_usage = cx->power;
+
+		state->flags = 0;
+		switch (cx->type) {
+		case ACPI_STATE_C1:
+			state->flags |= CPUIDLE_FLAG_SHALLOW;
+			state->enter = acpi_idle_enter_c1;
+			break;
+
+		case ACPI_STATE_C2:
+			state->flags |= CPUIDLE_FLAG_BALANCED;
+			state->flags |= CPUIDLE_FLAG_TIME_VALID;
+			state->enter = acpi_idle_enter_simple;
+			break;
+
+		case ACPI_STATE_C3:
+			state->flags |= CPUIDLE_FLAG_DEEP;
+			state->flags |= CPUIDLE_FLAG_TIME_VALID;
+			state->flags |= CPUIDLE_FLAG_CHECK_BM;
+			state->enter = pr->flags.bm_check ?
+				acpi_idle_enter_bm :
+				acpi_idle_enter_simple;
+			break;
+		}
+
+		count++;
+	}
+
+	dev->state_count = count;
+
+	if (!count)
+		return -EINVAL;
+
+	/* find the deepest state that can handle active BM */
+	if (pr->flags.bm_check) {
+		for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
+			if (pr->power.states[i].type == ACPI_STATE_C3)
+				break;
+		pr->power.bm_state = &pr->power.states[i-1];
+	}
+
+	return 0;
+}
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int ret;
+
+	if (!pr)
+		return -EINVAL;
+
+	if (nocst) {
+		return -ENODEV;
+	}
+
+	if (!pr->flags.power_setup_done)
+		return -ENODEV;
+
+	cpuidle_pause_and_lock();
+	cpuidle_disable_device(&pr->power.dev);
+	acpi_processor_get_power_info(pr);
+	acpi_processor_setup_cpuidle(pr);
+	ret = cpuidle_enable_device(&pr->power.dev);
+	cpuidle_resume_and_unlock();
+
+	return ret;
+}
+
+#endif /* CONFIG_CPU_IDLE */
 
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 				struct acpi_device *device)
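
Two details of acpi_processor_setup_cpuidle() above are worth calling out. First, target_residency is seeded as six times the exit latency — a simple break-even heuristic rather than anything measured. Second, when bm_check is set, the closing loop records the deepest non-C3 state as pr->power.bm_state, the fallback acpi_idle_enter_bm() drops to when bus-master activity is detected. A standalone model of that scan, with plain ints standing in for the ACPI_STATE_* constants:

#include <stdio.h>

int main(void)
{
	/* index 0 unused, as in the kernel's states[] table;
	 * 1 = C1, 2 = C2, 3 = C3 */
	int type[] = { 0, 1, 2, 3, 3 };
	int max = 4, i;

	/* stop at the first C3-type state... */
	for (i = 1; i <= max; i++)
		if (type[i] == 3)
			break;

	/* ...and keep the one just before it */
	printf("bm_state = states[%d]\n", i - 1);	/* states[2], a C2 */
	return 0;
}
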
@@ -1279,7 +1669,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 		       "ACPI: processor limited to max C-state %d\n",
 		       max_cstate);
 		first_run++;
-#ifdef CONFIG_SMP
+#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
 		register_latency_notifier(&acpi_processor_latency_notifier);
 #endif
 	}
@@ -1297,6 +1687,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	}
 
 	acpi_processor_get_power_info(pr);
+	pr->flags.power_setup_done = 1;
 
 	/*
 	 * Install the idle handler if processor power management is supported.
@@ -1304,6 +1695,13 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	 * platforms that only support C1.
 	 */
 	if ((pr->flags.power) && (!boot_option_idle_override)) {
+#ifdef CONFIG_CPU_IDLE
+		acpi_processor_setup_cpuidle(pr);
+		pr->power.dev.cpu = pr->id;
+		if (cpuidle_register_device(&pr->power.dev))
+			return -EIO;
+#endif
+
 		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
 		for (i = 1; i <= pr->power.count; i++)
 			if (pr->power.states[i].valid)
@@ -1311,10 +1709,12 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 				pr->power.states[i].type);
 		printk(")\n");
 
+#ifndef CONFIG_CPU_IDLE
 		if (pr->id == 0) {
 			pm_idle_save = pm_idle;
 			pm_idle = acpi_processor_idle;
 		}
+#endif
 	}
 
 	/* 'power' [R] */
@@ -1328,21 +1728,24 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 		entry->owner = THIS_MODULE;
 	}
 
-	pr->flags.power_setup_done = 1;
-
 	return 0;
 }
 
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
-
+#ifdef CONFIG_CPU_IDLE
+	if ((pr->flags.power) && (!boot_option_idle_override))
+		cpuidle_unregister_device(&pr->power.dev);
+#endif
 	pr->flags.power_setup_done = 0;
 
 	if (acpi_device_dir(device))
 		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
 				  acpi_device_dir(device));
 
+#ifndef CONFIG_CPU_IDLE
+
 	/* Unregister the idle handler when processor #0 is removed. */
 	if (pr->id == 0) {
 		pm_idle = pm_idle_save;
@@ -1357,6 +1760,7 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 		unregister_latency_notifier(&acpi_processor_latency_notifier);
 #endif
 	}
+#endif
 
 	return 0;
 }
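
One ordering fix threaded through the last hunks deserves a note: pr->flags.power_setup_done is now set immediately after acpi_processor_get_power_info() and dropped from the tail of acpi_processor_power_init(), because the new acpi_processor_setup_cpuidle() refuses to run while the flag is clear. A minimal model of that constraint (invented names, not kernel code):

#include <stdio.h>

static int setup_done;

static int setup_cpuidle(void)
{
	if (!setup_done)
		return -1;	/* the patch returns -EINVAL here */
	return 0;
}

int main(void)
{
	printf("before flag: %d\n", setup_cpuidle());	/* fails */
	setup_done = 1;
	printf("after flag:  %d\n", setup_cpuidle());	/* succeeds */
	return 0;
}
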