Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile                     |    1
-rw-r--r--  drivers/acpi/osl.c                   |    8
-rw-r--r--  drivers/acpi/processor_core.c        |   22
-rw-r--r--  drivers/acpi/processor_idle.c        |  482
-rw-r--r--  drivers/cpuidle/Kconfig              |   20
-rw-r--r--  drivers/cpuidle/Makefile             |    5
-rw-r--r--  drivers/cpuidle/cpuidle.c            |  295
-rw-r--r--  drivers/cpuidle/cpuidle.h            |   33
-rw-r--r--  drivers/cpuidle/driver.c             |   56
-rw-r--r--  drivers/cpuidle/governor.c           |  141
-rw-r--r--  drivers/cpuidle/governors/Makefile   |    6
-rw-r--r--  drivers/cpuidle/governors/ladder.c   |  166
-rw-r--r--  drivers/cpuidle/governors/menu.c     |  137
-rw-r--r--  drivers/cpuidle/sysfs.c              |  361
-rw-r--r--  drivers/net/wireless/ipw2100.c       |   29
-rw-r--r--  drivers/net/wireless/ipw2100.h       |    2
16 files changed, 1681 insertions, 83 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index f0878b2ec55e..10a9c52c9100 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_MCA)		+= mca/
 obj-$(CONFIG_EISA)		+= eisa/
 obj-$(CONFIG_LGUEST_GUEST)	+= lguest/
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
+obj-$(CONFIG_CPU_IDLE)		+= cpuidle/
 obj-$(CONFIG_MMC)		+= mmc/
 obj-$(CONFIG_NEW_LEDS)		+= leds/
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 12c09fafce9a..5d14d4f10b12 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1043,14 +1043,6 @@ static int __init acpi_wake_gpes_always_on_setup(char *str)
 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
 
 /*
- * max_cstate is defined in the base kernel so modules can
- * change it w/o depending on the state of the processor module.
- */
-unsigned int max_cstate = ACPI_PROCESSOR_MAX_POWER;
-
-EXPORT_SYMBOL(max_cstate);
-
-/*
  * Acquire a spinlock.
  *
  * handle is a pointer to the spinlock_t.
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 9f11dc296cdd..a7351084f2f8 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -44,6 +44,7 @@
 #include <linux/seq_file.h>
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
+#include <linux/cpuidle.h>
 
 #include <asm/io.h>
 #include <asm/system.h>
@@ -1049,11 +1050,13 @@ static int __init acpi_processor_init(void)
 		return -ENOMEM;
 	acpi_processor_dir->owner = THIS_MODULE;
 
+	result = cpuidle_register_driver(&acpi_idle_driver);
+	if (result < 0)
+		goto out_proc;
+
 	result = acpi_bus_register_driver(&acpi_processor_driver);
-	if (result < 0) {
-		remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-		return result;
-	}
+	if (result < 0)
+		goto out_cpuidle;
 
 	acpi_processor_install_hotplug_notify();
 
@@ -1062,11 +1065,18 @@ static int __init acpi_processor_init(void)
 	acpi_processor_ppc_init();
 
 	return 0;
+
+out_cpuidle:
+	cpuidle_unregister_driver(&acpi_idle_driver);
+
+out_proc:
+	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+
+	return result;
 }
 
 static void __exit acpi_processor_exit(void)
 {
-
 	acpi_processor_ppc_exit();
 
 	acpi_thermal_cpufreq_exit();
@@ -1075,6 +1085,8 @@ static void __exit acpi_processor_exit(void)
 
 	acpi_bus_unregister_driver(&acpi_processor_driver);
 
+	cpuidle_unregister_driver(&acpi_idle_driver);
+
 	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
 
 	return;
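
The init-path ordering in the hunks above is deliberate: cpuidle_register_driver() claims the single global driver slot before the ACPI bus driver is registered, and the new goto labels unwind in reverse order. A minimal sketch of the same register/unregister pairing, using a hypothetical out-of-tree module (the module and driver names are illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/cpuidle.h>

/* hypothetical driver, shown only to illustrate the pairing */
static struct cpuidle_driver example_idle_driver = {
	.name =		"example_idle",
	.owner =	THIS_MODULE,
};

static int __init example_init(void)
{
	/* claims the single driver slot; fails with -EBUSY if taken */
	int ret = cpuidle_register_driver(&example_idle_driver);

	if (ret < 0)
		return ret;
	/* ... register per-CPU devices here, unwinding on failure ... */
	return 0;
}

static void __exit example_exit(void)
{
	cpuidle_unregister_driver(&example_idle_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");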
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f18261368e76..99da6a790857 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -40,6 +40,7 @@
 #include <linux/sched.h>	/* need_resched() */
 #include <linux/latency.h>
 #include <linux/clockchips.h>
+#include <linux/cpuidle.h>
 
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
@@ -64,14 +65,22 @@ ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER	"power"
 #define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
+#ifndef CONFIG_CPU_IDLE
 #define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
 #define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
 static void (*pm_idle_save) (void) __read_mostly;
-module_param(max_cstate, uint, 0644);
+#else
+#define C2_OVERHEAD			1	/* 1us */
+#define C3_OVERHEAD			1	/* 1us */
+#endif
+#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
 
+static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
+module_param(max_cstate, uint, 0000);
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
 
+#ifndef CONFIG_CPU_IDLE
 /*
  * bm_history -- bit-mask with a bit per jiffy of bus-master activity
  * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
@@ -82,9 +91,10 @@ module_param(nocst, uint, 0000);
 static unsigned int bm_history __read_mostly =
     (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
 module_param(bm_history, uint, 0644);
-/* --------------------------------------------------------------------------
-                                Power Management
-   -------------------------------------------------------------------------- */
+
+static int acpi_processor_set_power_policy(struct acpi_processor *pr);
+
+#endif
 
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
@@ -177,6 +187,18 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
 		return ((0xFFFFFFFF - t1) + t2);
 }
 
+static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
+{
+	if (t2 >= t1)
+		return PM_TIMER_TICKS_TO_US(t2 - t1);
+	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
+		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
+	else
+		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
+}
+
+#ifndef CONFIG_CPU_IDLE
+
 static void
 acpi_processor_power_activate(struct acpi_processor *pr,
 			      struct acpi_processor_cx *new)
@@ -248,6 +270,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
 }
+#endif /* !CONFIG_CPU_IDLE */
 
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
 
@@ -342,6 +365,7 @@ int acpi_processor_resume(struct acpi_device * device)
 	return 0;
 }
 
+#ifndef CONFIG_CPU_IDLE
 static void acpi_processor_idle(void)
 {
 	struct acpi_processor *pr = NULL;
@@ -439,7 +463,7 @@ static void acpi_processor_idle(void)
 	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
 	 * detection phase, to work cleanly with logical CPU hotplug.
 	 */
-	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
 	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 		cx = &pr->power.states[ACPI_STATE_C1];
 #endif
@@ -739,6 +763,7 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 
 	return 0;
 }
+#endif /* !CONFIG_CPU_IDLE */
 
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
@@ -756,7 +781,7 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 #ifndef CONFIG_HOTPLUG_CPU
 	/*
 	 * Check for P_LVL2_UP flag before entering C2 and above on
-	 * an SMP system.
+	 * an SMP system.
 	 */
 	if ((num_online_cpus() > 1) &&
 	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
@@ -957,7 +982,12 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 	 * Normalize the C2 latency to expidite policy
 	 */
 	cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+	cx->latency_ticks = cx->latency;
+#endif
 
 	return;
 }
@@ -1037,7 +1067,12 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	 * use this in our C3 policy
 	 */
 	cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+	cx->latency_ticks = cx->latency;
+#endif
 
 	return;
 }
@@ -1102,6 +1137,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 
 	pr->power.count = acpi_processor_power_verify(pr);
 
+#ifndef CONFIG_CPU_IDLE
 	/*
 	 * Set Default Policy
 	 * ------------------
@@ -1113,6 +1149,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	result = acpi_processor_set_power_policy(pr);
 	if (result)
 		return result;
+#endif
 
 	/*
 	 * if one state of type C2 or C3 is available, mark this
@@ -1129,35 +1166,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	return 0;
 }
 
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
-{
-	int result = 0;
-
-
-	if (!pr)
-		return -EINVAL;
-
-	if (nocst) {
-		return -ENODEV;
-	}
-
-	if (!pr->flags.power_setup_done)
-		return -ENODEV;
-
-	/* Fall back to the default idle loop */
-	pm_idle = pm_idle_save;
-	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */
-
-	pr->flags.power = 0;
-	result = acpi_processor_get_power_info(pr);
-	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
-		pm_idle = acpi_processor_idle;
-
-	return result;
-}
-
-/* proc interface */
-
 static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 {
 	struct acpi_processor *pr = seq->private;
@@ -1239,6 +1247,35 @@ static const struct file_operations acpi_processor_power_fops = {
 	.release = single_release,
 };
 
+#ifndef CONFIG_CPU_IDLE
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int result = 0;
+
+
+	if (!pr)
+		return -EINVAL;
+
+	if (nocst) {
+		return -ENODEV;
+	}
+
+	if (!pr->flags.power_setup_done)
+		return -ENODEV;
+
+	/* Fall back to the default idle loop */
+	pm_idle = pm_idle_save;
+	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */
+
+	pr->flags.power = 0;
+	result = acpi_processor_get_power_info(pr);
+	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
+		pm_idle = acpi_processor_idle;
+
+	return result;
+}
+
 #ifdef CONFIG_SMP
 static void smp_callback(void *v)
 {
@@ -1261,7 +1298,360 @@ static int acpi_processor_latency_notify(struct notifier_block *b,
 static struct notifier_block acpi_processor_latency_notifier = {
 	.notifier_call = acpi_processor_latency_notify,
 };
+
+#endif
+
+#else /* CONFIG_CPU_IDLE */
+
+/**
+ * acpi_idle_bm_check - checks if bus master activity was detected
+ */
+static int acpi_idle_bm_check(void)
+{
+	u32 bm_status = 0;
+
+	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+	if (bm_status)
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+	/*
+	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
+	 * the true state of bus mastering activity; forcing us to
+	 * manually check the BMIDEA bit of each IDE channel.
+	 */
+	else if (errata.piix4.bmisx) {
+		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
+		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
+			bm_status = 1;
+	}
+	return bm_status;
+}
+
+/**
+ * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
+ * @pr: the processor
+ * @target: the new target state
+ */
+static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
+					   struct acpi_processor_cx *target)
+{
+	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+		pr->flags.bm_rld_set = 0;
+	}
+
+	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+		pr->flags.bm_rld_set = 1;
+	}
+}
+
+/**
+ * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
+ * @cx: cstate data
+ */
+static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+{
+	if (cx->space_id == ACPI_CSTATE_FFH) {
+		/* Call into architectural FFH based C-state */
+		acpi_processor_ffh_cstate_enter(cx);
+	} else {
+		int unused;
+		/* IO port based C-state */
+		inb(cx->address);
+		/* Dummy wait op - must do something useless after P_LVL2 read
+		   because chipsets cannot guarantee that STPCLK# signal
+		   gets asserted in time to freeze execution properly. */
+		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	}
+}
+
+/**
+ * acpi_idle_enter_c1 - enters an ACPI C1 state-type
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * This is equivalent to the HALT instruction.
+ */
+static int acpi_idle_enter_c1(struct cpuidle_device *dev,
+			      struct cpuidle_state *state)
+{
+	struct acpi_processor *pr;
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	pr = processors[smp_processor_id()];
+
+	if (unlikely(!pr))
+		return 0;
+
+	if (pr->flags.bm_check)
+		acpi_idle_update_bm_rld(pr, cx);
+
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we test
+	 * NEED_RESCHED:
+	 */
+	smp_mb();
+	if (!need_resched())
+		safe_halt();
+	current_thread_info()->status |= TS_POLLING;
+
+	cx->usage++;
+
+	return 0;
+}
+
+/**
+ * acpi_idle_enter_simple - enters an ACPI state without BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ */
+static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+				  struct cpuidle_state *state)
+{
+	struct acpi_processor *pr;
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	u32 t1, t2;
+	pr = processors[smp_processor_id()];
+
+	if (unlikely(!pr))
+		return 0;
+
+	if (pr->flags.bm_check)
+		acpi_idle_update_bm_rld(pr, cx);
+
+	local_irq_disable();
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we test
+	 * NEED_RESCHED:
+	 */
+	smp_mb();
+
+	if (unlikely(need_resched())) {
+		current_thread_info()->status |= TS_POLLING;
+		local_irq_enable();
+		return 0;
+	}
+
+	if (cx->type == ACPI_STATE_C3)
+		ACPI_FLUSH_CPU_CACHE();
+
+	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	acpi_state_timer_broadcast(pr, cx, 1);
+	acpi_idle_do_entry(cx);
+	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+	/* TSC could halt in idle, so notify users */
+	mark_tsc_unstable("TSC halts in idle");;
+#endif
+
+	local_irq_enable();
+	current_thread_info()->status |= TS_POLLING;
+
+	cx->usage++;
+
+	acpi_state_timer_broadcast(pr, cx, 0);
+	cx->time += ticks_elapsed(t1, t2);
+	return ticks_elapsed_in_us(t1, t2);
+}
+
+static int c3_cpu_count;
+static DEFINE_SPINLOCK(c3_lock);
+
+/**
+ * acpi_idle_enter_bm - enters C3 with proper BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * If BM is detected, the deepest non-C3 idle state is entered instead.
+ */
+static int acpi_idle_enter_bm(struct cpuidle_device *dev,
+			      struct cpuidle_state *state)
+{
+	struct acpi_processor *pr;
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	u32 t1, t2;
+	pr = processors[smp_processor_id()];
+
+	if (unlikely(!pr))
+		return 0;
+
+	local_irq_disable();
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we test
+	 * NEED_RESCHED:
+	 */
+	smp_mb();
+
+	if (unlikely(need_resched())) {
+		current_thread_info()->status |= TS_POLLING;
+		local_irq_enable();
+		return 0;
+	}
+
+	/*
+	 * Must be done before busmaster disable as we might need to
+	 * access HPET !
+	 */
+	acpi_state_timer_broadcast(pr, cx, 1);
+
+	if (acpi_idle_bm_check()) {
+		cx = pr->power.bm_state;
+
+		acpi_idle_update_bm_rld(pr, cx);
+
+		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+		acpi_idle_do_entry(cx);
+		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	} else {
+		acpi_idle_update_bm_rld(pr, cx);
+
+		spin_lock(&c3_lock);
+		c3_cpu_count++;
+		/* Disable bus master arbitration when all CPUs are in C3 */
+		if (c3_cpu_count == num_online_cpus())
+			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+		spin_unlock(&c3_lock);
+
+		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+		acpi_idle_do_entry(cx);
+		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+		spin_lock(&c3_lock);
+		/* Re-enable bus master arbitration */
+		if (c3_cpu_count == num_online_cpus())
+			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+		c3_cpu_count--;
+		spin_unlock(&c3_lock);
+	}
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+	/* TSC could halt in idle, so notify users */
+	mark_tsc_unstable("TSC halts in idle");
+#endif
+
+	local_irq_enable();
+	current_thread_info()->status |= TS_POLLING;
+
+	cx->usage++;
+
+	acpi_state_timer_broadcast(pr, cx, 0);
+	cx->time += ticks_elapsed(t1, t2);
+	return ticks_elapsed_in_us(t1, t2);
+}
+
+struct cpuidle_driver acpi_idle_driver = {
+	.name =		"acpi_idle",
+	.owner =	THIS_MODULE,
+};
+
+/**
+ * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
+{
+	int i, count = 0;
+	struct acpi_processor_cx *cx;
+	struct cpuidle_state *state;
+	struct cpuidle_device *dev = &pr->power.dev;
+
+	if (!pr->flags.power_setup_done)
+		return -EINVAL;
+
+	if (pr->flags.power == 0) {
+		return -EINVAL;
+	}
+
+	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+		cx = &pr->power.states[i];
+		state = &dev->states[count];
+
+		if (!cx->valid)
+			continue;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+		    !pr->flags.has_cst &&
+		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+			continue;
 #endif
+		cpuidle_set_statedata(state, cx);
+
+		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
+		state->exit_latency = cx->latency;
+		state->target_residency = cx->latency * 6;
+		state->power_usage = cx->power;
+
+		state->flags = 0;
+		switch (cx->type) {
+			case ACPI_STATE_C1:
+			state->flags |= CPUIDLE_FLAG_SHALLOW;
+			state->enter = acpi_idle_enter_c1;
+			break;
+
+			case ACPI_STATE_C2:
+			state->flags |= CPUIDLE_FLAG_BALANCED;
+			state->flags |= CPUIDLE_FLAG_TIME_VALID;
+			state->enter = acpi_idle_enter_simple;
+			break;
+
+			case ACPI_STATE_C3:
+			state->flags |= CPUIDLE_FLAG_DEEP;
+			state->flags |= CPUIDLE_FLAG_TIME_VALID;
+			state->flags |= CPUIDLE_FLAG_CHECK_BM;
+			state->enter = pr->flags.bm_check ?
+					acpi_idle_enter_bm :
+					acpi_idle_enter_simple;
+			break;
+		}
+
+		count++;
+	}
+
+	dev->state_count = count;
+
+	if (!count)
+		return -EINVAL;
+
+	/* find the deepest state that can handle active BM */
+	if (pr->flags.bm_check) {
+		for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
+			if (pr->power.states[i].type == ACPI_STATE_C3)
+				break;
+		pr->power.bm_state = &pr->power.states[i-1];
+	}
+
+	return 0;
+}
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int ret;
+
+	if (!pr)
+		return -EINVAL;
+
+	if (nocst) {
+		return -ENODEV;
+	}
+
+	if (!pr->flags.power_setup_done)
+		return -ENODEV;
+
+	cpuidle_pause_and_lock();
+	cpuidle_disable_device(&pr->power.dev);
+	acpi_processor_get_power_info(pr);
+	acpi_processor_setup_cpuidle(pr);
+	ret = cpuidle_enable_device(&pr->power.dev);
+	cpuidle_resume_and_unlock();
+
+	return ret;
+}
+
+#endif /* CONFIG_CPU_IDLE */
 
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 				struct acpi_device *device)
@@ -1279,7 +1669,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 			"ACPI: processor limited to max C-state %d\n",
 			max_cstate);
 		first_run++;
-#ifdef CONFIG_SMP
+#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
 		register_latency_notifier(&acpi_processor_latency_notifier);
 #endif
 	}
@@ -1297,6 +1687,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	}
 
 	acpi_processor_get_power_info(pr);
+	pr->flags.power_setup_done = 1;
 
 	/*
 	 * Install the idle handler if processor power management is supported.
@@ -1304,6 +1695,13 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	 * platforms that only support C1.
 	 */
 	if ((pr->flags.power) && (!boot_option_idle_override)) {
+#ifdef CONFIG_CPU_IDLE
+		acpi_processor_setup_cpuidle(pr);
+		pr->power.dev.cpu = pr->id;
+		if (cpuidle_register_device(&pr->power.dev))
+			return -EIO;
+#endif
+
 		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
 		for (i = 1; i <= pr->power.count; i++)
 			if (pr->power.states[i].valid)
@@ -1311,10 +1709,12 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 				pr->power.states[i].type);
 		printk(")\n");
 
+#ifndef CONFIG_CPU_IDLE
 		if (pr->id == 0) {
 			pm_idle_save = pm_idle;
 			pm_idle = acpi_processor_idle;
 		}
+#endif
 	}
 
 	/* 'power' [R] */
@@ -1328,21 +1728,24 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 		entry->owner = THIS_MODULE;
 	}
 
-	pr->flags.power_setup_done = 1;
-
 	return 0;
 }
 
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
-
+#ifdef CONFIG_CPU_IDLE
+	if ((pr->flags.power) && (!boot_option_idle_override))
+		cpuidle_unregister_device(&pr->power.dev);
+#endif
 	pr->flags.power_setup_done = 0;
 
 	if (acpi_device_dir(device))
 		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
 				  acpi_device_dir(device));
 
+#ifndef CONFIG_CPU_IDLE
+
 	/* Unregister the idle handler when processor #0 is removed. */
 	if (pr->id == 0) {
 		pm_idle = pm_idle_save;
@@ -1357,6 +1760,7 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 		unregister_latency_notifier(&acpi_processor_latency_notifier);
 #endif
 	}
+#endif
 
 	return 0;
 }
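
The ticks_elapsed_in_us() helper added in this file converts an ACPI PM timer delta to microseconds while coping with wraparound of both the 24-bit and 32-bit timer variants (the timer runs at 3.579545 MHz, hence the PM_TIMER_TICKS_TO_US scaling). A self-contained user-space sketch of the same arithmetic, with invented test values:

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY 3579545 /* ACPI PM timer rate in Hz */
#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000) / (PM_TIMER_FREQUENCY / 1000))

/* mirrors ticks_elapsed_in_us(): t1/t2 are raw timer reads */
static uint32_t ticks_to_us(uint32_t t1, uint32_t t2, int timer_is_32bit)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!timer_is_32bit)	/* 24-bit timer wrapped */
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else				/* 32-bit timer wrapped */
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}

int main(void)
{
	/* 3580 ticks at 3.579545 MHz is almost exactly 1000 us */
	printf("%u\n", ticks_to_us(100, 3680, 1));
	/* a read that wrapped past the 24-bit boundary */
	printf("%u\n", ticks_to_us(0x00FFFF00, 0x80, 0));
	return 0;
}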
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
new file mode 100644
index 000000000000..3bed4127d4ad
--- /dev/null
+++ b/drivers/cpuidle/Kconfig
@@ -0,0 +1,20 @@
+
+config CPU_IDLE
+	bool "CPU idle PM support"
+	help
+	  CPU idle is a generic framework for supporting software-controlled
+	  idle processor power management. It includes modular cross-platform
+	  governors that can be swapped during runtime.
+
+	  If you're using a mobile platform that supports CPU idle PM (e.g.
+	  an ACPI-capable notebook), you should say Y here.
+
+config CPU_IDLE_GOV_LADDER
+	bool
+	depends on CPU_IDLE
+	default y
+
+config CPU_IDLE_GOV_MENU
+	bool
+	depends on CPU_IDLE && NO_HZ
+	default y
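
Note that the two governor symbols carry no prompt, so they cannot be toggled directly; they default to y whenever their dependencies hold. On an ACPI build with dynticks enabled, the resulting .config fragment would look roughly like this (illustrative, not part of this patch):

CONFIG_NO_HZ=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y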
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
new file mode 100644
index 000000000000..5634f88379df
--- /dev/null
+++ b/drivers/cpuidle/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for cpuidle.
+#
+
+obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
new file mode 100644
index 000000000000..fdf4106b817b
--- /dev/null
+++ b/drivers/cpuidle/cpuidle.c
@@ -0,0 +1,295 @@
+/*
+ * cpuidle.c - core cpuidle infrastructure
+ *
+ * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *               Shaohua Li <shaohua.li@intel.com>
+ *               Adam Belay <abelay@novell.com>
+ *
+ * This code is licenced under the GPL.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/latency.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+
+#include "cpuidle.h"
+
+DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
+EXPORT_PER_CPU_SYMBOL_GPL(cpuidle_devices);
+
+DEFINE_MUTEX(cpuidle_lock);
+LIST_HEAD(cpuidle_detected_devices);
+static void (*pm_idle_old)(void);
+
+static int enabled_devices;
+
+/**
+ * cpuidle_idle_call - the main idle loop
+ *
+ * NOTE: no locks or semaphores should be used here
+ */
+static void cpuidle_idle_call(void)
+{
+	struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
+	struct cpuidle_state *target_state;
+	int next_state;
+
+	/* check if the device is ready */
+	if (!dev || !dev->enabled) {
+		if (pm_idle_old)
+			pm_idle_old();
+		else
+			local_irq_enable();
+		return;
+	}
+
+	/* ask the governor for the next state */
+	next_state = cpuidle_curr_governor->select(dev);
+	if (need_resched())
+		return;
+	target_state = &dev->states[next_state];
+
+	/* enter the state and update stats */
+	dev->last_residency = target_state->enter(dev, target_state);
+	dev->last_state = target_state;
+	target_state->time += dev->last_residency;
+	target_state->usage++;
+
+	/* give the governor an opportunity to reflect on the outcome */
+	if (cpuidle_curr_governor->reflect)
+		cpuidle_curr_governor->reflect(dev);
+}
+
+/**
+ * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
+ */
+void cpuidle_install_idle_handler(void)
+{
+	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
+		/* Make sure all changes finished before we switch to new idle */
+		smp_wmb();
+		pm_idle = cpuidle_idle_call;
+	}
+}
+
+/**
+ * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
+ */
+void cpuidle_uninstall_idle_handler(void)
+{
+	if (enabled_devices && (pm_idle != pm_idle_old)) {
+		pm_idle = pm_idle_old;
+		cpu_idle_wait();
+	}
+}
+
+/**
+ * cpuidle_pause_and_lock - temporarily disables CPUIDLE
+ */
+void cpuidle_pause_and_lock(void)
+{
+	mutex_lock(&cpuidle_lock);
+	cpuidle_uninstall_idle_handler();
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);
+
+/**
+ * cpuidle_resume_and_unlock - resumes CPUIDLE operation
+ */
+void cpuidle_resume_and_unlock(void)
+{
+	cpuidle_install_idle_handler();
+	mutex_unlock(&cpuidle_lock);
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
+
+/**
+ * cpuidle_enable_device - enables idle PM for a CPU
+ * @dev: the CPU
+ *
+ * This function must be called between cpuidle_pause_and_lock and
+ * cpuidle_resume_and_unlock when used externally.
+ */
+int cpuidle_enable_device(struct cpuidle_device *dev)
+{
+	int ret, i;
+
+	if (dev->enabled)
+		return 0;
+	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
+		return -EIO;
+	if (!dev->state_count)
+		return -EINVAL;
+
+	if ((ret = cpuidle_add_state_sysfs(dev)))
+		return ret;
+
+	if (cpuidle_curr_governor->enable &&
+	    (ret = cpuidle_curr_governor->enable(dev)))
+		goto fail_sysfs;
+
+	for (i = 0; i < dev->state_count; i++) {
+		dev->states[i].usage = 0;
+		dev->states[i].time = 0;
+	}
+	dev->last_residency = 0;
+	dev->last_state = NULL;
+
+	smp_wmb();
+
+	dev->enabled = 1;
+
+	enabled_devices++;
+	return 0;
+
+fail_sysfs:
+	cpuidle_remove_state_sysfs(dev);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_enable_device);
+
+/**
+ * cpuidle_disable_device - disables idle PM for a CPU
+ * @dev: the CPU
+ *
+ * This function must be called between cpuidle_pause_and_lock and
+ * cpuidle_resume_and_unlock when used externally.
+ */
+void cpuidle_disable_device(struct cpuidle_device *dev)
+{
+	if (!dev->enabled)
+		return;
+	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
+		return;
+
+	dev->enabled = 0;
+
+	if (cpuidle_curr_governor->disable)
+		cpuidle_curr_governor->disable(dev);
+
+	cpuidle_remove_state_sysfs(dev);
+	enabled_devices--;
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_disable_device);
+
+/**
+ * cpuidle_register_device - registers a CPU's idle PM feature
+ * @dev: the cpu
+ */
+int cpuidle_register_device(struct cpuidle_device *dev)
+{
+	int ret;
+	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+
+	if (!sys_dev)
+		return -EINVAL;
+	if (!try_module_get(cpuidle_curr_driver->owner))
+		return -EINVAL;
+
+	init_completion(&dev->kobj_unregister);
+
+	mutex_lock(&cpuidle_lock);
+
+	per_cpu(cpuidle_devices, dev->cpu) = dev;
+	list_add(&dev->device_list, &cpuidle_detected_devices);
+	if ((ret = cpuidle_add_sysfs(sys_dev))) {
+		mutex_unlock(&cpuidle_lock);
+		module_put(cpuidle_curr_driver->owner);
+		return ret;
+	}
+
+	cpuidle_enable_device(dev);
+	cpuidle_install_idle_handler();
+
+	mutex_unlock(&cpuidle_lock);
+
+	return 0;
+
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_register_device);
+
+/**
+ * cpuidle_unregister_device - unregisters a CPU's idle PM feature
+ * @dev: the cpu
+ */
+void cpuidle_unregister_device(struct cpuidle_device *dev)
+{
+	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+
+	cpuidle_pause_and_lock();
+
+	cpuidle_disable_device(dev);
+
+	cpuidle_remove_sysfs(sys_dev);
+	list_del(&dev->device_list);
+	wait_for_completion(&dev->kobj_unregister);
+	per_cpu(cpuidle_devices, dev->cpu) = NULL;
+
+	cpuidle_resume_and_unlock();
+
+	module_put(cpuidle_curr_driver->owner);
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
+
+#ifdef CONFIG_SMP
+
+static void smp_callback(void *v)
+{
+	/* we already woke the CPU up, nothing more to do */
+}
+
+/*
+ * This function gets called when a part of the kernel has a new latency
+ * requirement. This means we need to get all processors out of their C-state,
+ * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
+ * wakes them all right up.
+ */
+static int cpuidle_latency_notify(struct notifier_block *b,
+		unsigned long l, void *v)
+{
+	smp_call_function(smp_callback, NULL, 0, 1);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpuidle_latency_notifier = {
+	.notifier_call = cpuidle_latency_notify,
+};
+
+#define latency_notifier_init(x) do { register_latency_notifier(x); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define latency_notifier_init(x) do { } while (0)
+
+#endif /* CONFIG_SMP */
+
+/**
+ * cpuidle_init - core initializer
+ */
+static int __init cpuidle_init(void)
+{
+	int ret;
+
+	pm_idle_old = pm_idle;
+
+	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
+	if (ret)
+		return ret;
+
+	latency_notifier_init(&cpuidle_latency_notifier);
+
+	return 0;
+}
+
+core_initcall(cpuidle_init);
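
Taken together, the surface a client of this core needs is small: fill in a struct cpuidle_device, point each state's enter hook at a handler, and call cpuidle_register_device(), which enables the device and installs the idle handler. A compressed, hypothetical sketch (the single halt-style state and all names are placeholders, not from this patch; error handling and teardown are elided):

#include <linux/cpuidle.h>

/* placeholder enter routine: halt until the next interrupt */
static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_state *state)
{
	safe_halt();
	return 0;	/* residency in us; 0 when unknown */
}

static DEFINE_PER_CPU(struct cpuidle_device, example_dev);

static int example_setup_cpu(int cpu)
{
	struct cpuidle_device *dev = &per_cpu(example_dev, cpu);

	dev->cpu = cpu;
	dev->state_count = 1;
	snprintf(dev->states[0].name, CPUIDLE_NAME_LEN, "C1");
	dev->states[0].exit_latency = 1;	/* us */
	dev->states[0].target_residency = 1;	/* us */
	dev->states[0].flags = CPUIDLE_FLAG_SHALLOW;
	dev->states[0].enter = example_enter;

	return cpuidle_register_device(dev);
}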
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
new file mode 100644
index 000000000000..9476ba33ee2c
--- /dev/null
+++ b/drivers/cpuidle/cpuidle.h
@@ -0,0 +1,33 @@
+/*
+ * cpuidle.h - The internal header file
+ */
+
+#ifndef __DRIVER_CPUIDLE_H
+#define __DRIVER_CPUIDLE_H
+
+#include <linux/sysdev.h>
+
+/* For internal use only */
+extern struct cpuidle_governor *cpuidle_curr_governor;
+extern struct cpuidle_driver *cpuidle_curr_driver;
+extern struct list_head cpuidle_governors;
+extern struct list_head cpuidle_detected_devices;
+extern struct mutex cpuidle_lock;
+extern spinlock_t cpuidle_driver_lock;
+
+/* idle loop */
+extern void cpuidle_install_idle_handler(void);
+extern void cpuidle_uninstall_idle_handler(void);
+
+/* governors */
+extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
+
+/* sysfs */
+extern int cpuidle_add_class_sysfs(struct sysdev_class *cls);
+extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls);
+extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
+extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
+extern int cpuidle_add_sysfs(struct sys_device *sysdev);
+extern void cpuidle_remove_sysfs(struct sys_device *sysdev);
+
+#endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
new file mode 100644
index 000000000000..2257004fe33d
--- /dev/null
+++ b/drivers/cpuidle/driver.c
@@ -0,0 +1,56 @@
+/*
+ * driver.c - driver support
+ *
+ * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *               Shaohua Li <shaohua.li@intel.com>
+ *               Adam Belay <abelay@novell.com>
+ *
+ * This code is licenced under the GPL.
+ */
+
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/cpuidle.h>
+
+#include "cpuidle.h"
+
+struct cpuidle_driver *cpuidle_curr_driver;
+DEFINE_SPINLOCK(cpuidle_driver_lock);
+
+/**
+ * cpuidle_register_driver - registers a driver
+ * @drv: the driver
+ */
+int cpuidle_register_driver(struct cpuidle_driver *drv)
+{
+	if (!drv)
+		return -EINVAL;
+
+	spin_lock(&cpuidle_driver_lock);
+	if (cpuidle_curr_driver) {
+		spin_unlock(&cpuidle_driver_lock);
+		return -EBUSY;
+	}
+	cpuidle_curr_driver = drv;
+	spin_unlock(&cpuidle_driver_lock);
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_register_driver);
+
+/**
+ * cpuidle_unregister_driver - unregisters a driver
+ * @drv: the driver
+ */
+void cpuidle_unregister_driver(struct cpuidle_driver *drv)
+{
+	if (!drv)
+		return;
+
+	spin_lock(&cpuidle_driver_lock);
+	cpuidle_curr_driver = NULL;
+	spin_unlock(&cpuidle_driver_lock);
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
new file mode 100644
index 000000000000..bb699cb2dc5a
--- /dev/null
+++ b/drivers/cpuidle/governor.c
@@ -0,0 +1,141 @@
+/*
+ * governor.c - governor support
+ *
+ * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *               Shaohua Li <shaohua.li@intel.com>
+ *               Adam Belay <abelay@novell.com>
+ *
+ * This code is licenced under the GPL.
+ */
+
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/cpuidle.h>
+
+#include "cpuidle.h"
+
+LIST_HEAD(cpuidle_governors);
+struct cpuidle_governor *cpuidle_curr_governor;
+
+/**
+ * __cpuidle_find_governor - finds a governor of the specified name
+ * @str: the name
+ *
+ * Must be called with cpuidle_lock aquired.
+ */
+static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
+{
+	struct cpuidle_governor *gov;
+
+	list_for_each_entry(gov, &cpuidle_governors, governor_list)
+		if (!strnicmp(str, gov->name, CPUIDLE_NAME_LEN))
+			return gov;
+
+	return NULL;
+}
+
+/**
+ * cpuidle_switch_governor - changes the governor
+ * @gov: the new target governor
+ *
+ * NOTE: "gov" can be NULL to specify disabled
+ * Must be called with cpuidle_lock aquired.
+ */
+int cpuidle_switch_governor(struct cpuidle_governor *gov)
+{
+	struct cpuidle_device *dev;
+
+	if (gov == cpuidle_curr_governor)
+		return 0;
+
+	cpuidle_uninstall_idle_handler();
+
+	if (cpuidle_curr_governor) {
+		list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
+			cpuidle_disable_device(dev);
+		module_put(cpuidle_curr_governor->owner);
+	}
+
+	cpuidle_curr_governor = gov;
+
+	if (gov) {
+		if (!try_module_get(cpuidle_curr_governor->owner))
+			return -EINVAL;
+		list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
+			cpuidle_enable_device(dev);
+		cpuidle_install_idle_handler();
+		printk(KERN_INFO "cpuidle: using governor %s\n", gov->name);
+	}
+
+	return 0;
+}
+
+/**
+ * cpuidle_register_governor - registers a governor
+ * @gov: the governor
+ */
+int cpuidle_register_governor(struct cpuidle_governor *gov)
+{
+	int ret = -EEXIST;
+
+	if (!gov || !gov->select)
+		return -EINVAL;
+
+	mutex_lock(&cpuidle_lock);
+	if (__cpuidle_find_governor(gov->name) == NULL) {
+		ret = 0;
+		list_add_tail(&gov->governor_list, &cpuidle_governors);
+		if (!cpuidle_curr_governor ||
+		    cpuidle_curr_governor->rating < gov->rating)
+			cpuidle_switch_governor(gov);
+	}
+	mutex_unlock(&cpuidle_lock);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_register_governor);
+
+/**
+ * cpuidle_replace_governor - find a replacement governor
+ * @exclude_rating: the rating that will be skipped while looking for
+ * new governor.
+ */
+static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating)
+{
+	struct cpuidle_governor *gov;
+	struct cpuidle_governor *ret_gov = NULL;
+	unsigned int max_rating = 0;
+
+	list_for_each_entry(gov, &cpuidle_governors, governor_list) {
+		if (gov->rating == exclude_rating)
+			continue;
+		if (gov->rating > max_rating) {
+			max_rating = gov->rating;
+			ret_gov = gov;
+		}
+	}
+
+	return ret_gov;
+}
+
+/**
+ * cpuidle_unregister_governor - unregisters a governor
+ * @gov: the governor
+ */
+void cpuidle_unregister_governor(struct cpuidle_governor *gov)
+{
+	if (!gov)
+		return;
+
+	mutex_lock(&cpuidle_lock);
+	if (gov == cpuidle_curr_governor) {
+		struct cpuidle_governor *new_gov;
+		new_gov = cpuidle_replace_governor(gov->rating);
+		cpuidle_switch_governor(new_gov);
+	}
+	list_del(&gov->governor_list);
+	mutex_unlock(&cpuidle_lock);
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_unregister_governor);
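
For contrast with the two real governors that follow, the smallest possible governor is a select() callback plus registration; cpuidle_register_governor() rejects anything without select and only switches to the newcomer if it outrates the incumbent. A hypothetical sketch (rating 1 keeps it below ladder's 10, so it would register but never be chosen automatically):

#include <linux/cpuidle.h>

/* hypothetical governor: always picks the shallowest state */
static int trivial_select(struct cpuidle_device *dev)
{
	return 0;	/* index into dev->states[] */
}

static struct cpuidle_governor trivial_governor = {
	.name =		"trivial",
	.rating =	1,
	.select =	trivial_select,
	.owner =	THIS_MODULE,
};

static int __init trivial_init(void)
{
	return cpuidle_register_governor(&trivial_governor);
}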
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile
new file mode 100644
index 000000000000..1b512722689f
--- /dev/null
+++ b/drivers/cpuidle/governors/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for cpuidle governors.
+#
+
+obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
+obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c new file mode 100644 index 000000000000..eb666ecae7c9 --- /dev/null +++ b/drivers/cpuidle/governors/ladder.c | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * ladder.c - the residency ladder algorithm | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> | ||
5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | ||
6 | * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> | ||
7 | * | ||
8 | * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
9 | * Shaohua Li <shaohua.li@intel.com> | ||
10 | * Adam Belay <abelay@novell.com> | ||
11 | * | ||
12 | * This code is licenced under the GPL. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/cpuidle.h> | ||
17 | #include <linux/latency.h> | ||
18 | #include <linux/moduleparam.h> | ||
19 | #include <linux/jiffies.h> | ||
20 | |||
21 | #include <asm/io.h> | ||
22 | #include <asm/uaccess.h> | ||
23 | |||
24 | #define PROMOTION_COUNT 4 | ||
25 | #define DEMOTION_COUNT 1 | ||
26 | |||
27 | struct ladder_device_state { | ||
28 | struct { | ||
29 | u32 promotion_count; | ||
30 | u32 demotion_count; | ||
31 | u32 promotion_time; | ||
32 | u32 demotion_time; | ||
33 | } threshold; | ||
34 | struct { | ||
35 | int promotion_count; | ||
36 | int demotion_count; | ||
37 | } stats; | ||
38 | }; | ||
39 | |||
40 | struct ladder_device { | ||
41 | struct ladder_device_state states[CPUIDLE_STATE_MAX]; | ||
42 | int last_state_idx; | ||
43 | }; | ||
44 | |||
45 | static DEFINE_PER_CPU(struct ladder_device, ladder_devices); | ||
46 | |||
47 | /** | ||
48 | * ladder_do_selection - prepares private data for a state change | ||
49 | * @ldev: the ladder device | ||
50 | * @old_idx: the current state index | ||
51 | * @new_idx: the new target state index | ||
52 | */ | ||
53 | static inline void ladder_do_selection(struct ladder_device *ldev, | ||
54 | int old_idx, int new_idx) | ||
55 | { | ||
56 | ldev->states[old_idx].stats.promotion_count = 0; | ||
57 | ldev->states[old_idx].stats.demotion_count = 0; | ||
58 | ldev->last_state_idx = new_idx; | ||
59 | } | ||
60 | |||
61 | /** | ||
62 | * ladder_select_state - selects the next state to enter | ||
63 | * @dev: the CPU | ||
64 | */ | ||
65 | static int ladder_select_state(struct cpuidle_device *dev) | ||
66 | { | ||
67 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | ||
68 | struct ladder_device_state *last_state; | ||
69 | int last_residency, last_idx = ldev->last_state_idx; | ||
70 | |||
71 | if (unlikely(!ldev)) | ||
72 | return 0; | ||
73 | |||
74 | last_state = &ldev->states[last_idx]; | ||
75 | |||
76 | if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) | ||
77 | last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; | ||
78 | else | ||
79 | last_residency = last_state->threshold.promotion_time + 1; | ||
80 | |||
81 | /* consider promotion */ | ||
82 | if (last_idx < dev->state_count - 1 && | ||
83 | last_residency > last_state->threshold.promotion_time && | ||
84 | dev->states[last_idx + 1].exit_latency <= system_latency_constraint()) { | ||
85 | last_state->stats.promotion_count++; | ||
86 | last_state->stats.demotion_count = 0; | ||
87 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { | ||
88 | ladder_do_selection(ldev, last_idx, last_idx + 1); | ||
89 | return last_idx + 1; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | /* consider demotion */ | ||
94 | if (last_idx > 0 && | ||
95 | last_residency < last_state->threshold.demotion_time) { | ||
96 | last_state->stats.demotion_count++; | ||
97 | last_state->stats.promotion_count = 0; | ||
98 | if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) { | ||
99 | ladder_do_selection(ldev, last_idx, last_idx - 1); | ||
100 | return last_idx - 1; | ||
101 | } | ||
102 | } | ||
103 | |||
104 | /* otherwise remain at the current state */ | ||
105 | return last_idx; | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * ladder_enable_device - setup for the governor | ||
110 | * @dev: the CPU | ||
111 | */ | ||
112 | static int ladder_enable_device(struct cpuidle_device *dev) | ||
113 | { | ||
114 | int i; | ||
115 | struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); | ||
116 | struct ladder_device_state *lstate; | ||
117 | struct cpuidle_state *state; | ||
118 | |||
119 | ldev->last_state_idx = 0; | ||
120 | |||
121 | for (i = 0; i < dev->state_count; i++) { | ||
122 | state = &dev->states[i]; | ||
123 | lstate = &ldev->states[i]; | ||
124 | |||
125 | lstate->stats.promotion_count = 0; | ||
126 | lstate->stats.demotion_count = 0; | ||
127 | |||
128 | lstate->threshold.promotion_count = PROMOTION_COUNT; | ||
129 | lstate->threshold.demotion_count = DEMOTION_COUNT; | ||
130 | |||
131 | if (i < dev->state_count - 1) | ||
132 | lstate->threshold.promotion_time = state->exit_latency; | ||
133 | if (i > 0) | ||
134 | lstate->threshold.demotion_time = state->exit_latency; | ||
135 | } | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static struct cpuidle_governor ladder_governor = { | ||
141 | .name = "ladder", | ||
142 | .rating = 10, | ||
143 | .enable = ladder_enable_device, | ||
144 | .select = ladder_select_state, | ||
145 | .owner = THIS_MODULE, | ||
146 | }; | ||
147 | |||
148 | /** | ||
149 | * init_ladder - initializes the governor | ||
150 | */ | ||
151 | static int __init init_ladder(void) | ||
152 | { | ||
153 | return cpuidle_register_governor(&ladder_governor); | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * exit_ladder - exits the governor | ||
158 | */ | ||
159 | static void __exit exit_ladder(void) | ||
160 | { | ||
161 | cpuidle_unregister_governor(&ladder_governor); | ||
162 | } | ||
163 | |||
164 | MODULE_LICENSE("GPL"); | ||
165 | module_init(init_ladder); | ||
166 | module_exit(exit_ladder); | ||
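Read as a whole, ladder is deliberately sticky: a residency longer than a state's promotion_time must repeat PROMOTION_COUNT (4) times before the governor steps one state deeper, while a single residency below demotion_time steps straight back. The following user-space model is illustrative only, with invented thresholds and the system_latency_constraint() check dropped; it mirrors just the promote/demote walk of ladder_select_state():

/* Illustrative only -- a made-up model, not part of the patch. */
#include <stdio.h>

#define NSTATES		3
#define PROMOTION_COUNT	4
#define DEMOTION_COUNT	1

struct model_state {
	unsigned int promotion_time;	/* promote if residency exceeds this */
	unsigned int demotion_time;	/* demote if residency falls below this */
	int promotions, demotions;
};

static struct model_state m[NSTATES] = {
	{ .promotion_time = 1 },			/* C1 */
	{ .promotion_time = 10, .demotion_time = 10 },	/* C2 */
	{ .demotion_time = 100 },			/* C3 */
};

static int ladder_pick(int cur, unsigned int residency_us)
{
	struct model_state *s = &m[cur];

	if (cur < NSTATES - 1 && residency_us > s->promotion_time) {
		s->promotions++;
		s->demotions = 0;
		if (s->promotions >= PROMOTION_COUNT) {
			s->promotions = 0;
			return cur + 1;		/* climb one rung */
		}
	} else if (cur > 0 && residency_us < s->demotion_time) {
		s->demotions++;
		s->promotions = 0;
		if (s->demotions >= DEMOTION_COUNT) {
			s->demotions = 0;
			return cur - 1;		/* fall one rung */
		}
	}
	return cur;				/* hold the current rung */
}

int main(void)
{
	unsigned int sleeps[] = { 200, 200, 200, 200, 200, 0, 0 };
	int i, state = 0;

	/* four long sleeps climb to C2; a single short one falls back */
	for (i = 0; i < 7; i++) {
		state = ladder_pick(state, sleeps[i]);
		printf("slept %3uus -> C%d\n", sleeps[i], state + 1);
	}
	return 0;
}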
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c new file mode 100644 index 000000000000..299d45c3bdd2 --- /dev/null +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -0,0 +1,137 @@ | |||
1 | /* | ||
2 | * menu.c - the menu idle governor | ||
3 | * | ||
4 | * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com> | ||
5 | * | ||
6 | * This code is licensed under the GPL. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/cpuidle.h> | ||
11 | #include <linux/latency.h> | ||
12 | #include <linux/time.h> | ||
13 | #include <linux/ktime.h> | ||
14 | #include <linux/hrtimer.h> | ||
15 | #include <linux/tick.h> | ||
16 | |||
17 | #define BREAK_FUZZ 4 /* 4 us */ | ||
18 | |||
19 | struct menu_device { | ||
20 | int last_state_idx; | ||
21 | |||
22 | unsigned int expected_us; | ||
23 | unsigned int predicted_us; | ||
24 | unsigned int last_measured_us; | ||
25 | unsigned int elapsed_us; | ||
26 | }; | ||
27 | |||
28 | static DEFINE_PER_CPU(struct menu_device, menu_devices); | ||
29 | |||
30 | /** | ||
31 | * menu_select - selects the next idle state to enter | ||
32 | * @dev: the CPU | ||
33 | */ | ||
34 | static int menu_select(struct cpuidle_device *dev) | ||
35 | { | ||
36 | struct menu_device *data = &__get_cpu_var(menu_devices); | ||
37 | int i; | ||
38 | |||
39 | /* determine the expected residency time */ | ||
40 | data->expected_us = | ||
41 | (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; | ||
42 | |||
43 | /* find the deepest idle state that satisfies our constraints */ | ||
44 | for (i = 1; i < dev->state_count; i++) { | ||
45 | struct cpuidle_state *s = &dev->states[i]; | ||
46 | |||
47 | if (s->target_residency > data->expected_us) | ||
48 | break; | ||
49 | if (s->target_residency > data->predicted_us) | ||
50 | break; | ||
51 | if (s->exit_latency > system_latency_constraint()) | ||
52 | break; | ||
53 | } | ||
54 | |||
55 | data->last_state_idx = i - 1; | ||
56 | return i - 1; | ||
57 | } | ||
58 | |||
59 | /** | ||
60 | * menu_reflect - updates the prediction with the measured idle residency | ||
61 | * @dev: the CPU | ||
62 | * | ||
63 | * NOTE: it's important to be fast here because this operation will add to | ||
64 | * the overall exit latency. | ||
65 | */ | ||
66 | static void menu_reflect(struct cpuidle_device *dev) | ||
67 | { | ||
68 | struct menu_device *data = &__get_cpu_var(menu_devices); | ||
69 | int last_idx = data->last_state_idx; | ||
70 | unsigned int measured_us = | ||
71 | cpuidle_get_last_residency(dev) + data->elapsed_us; | ||
72 | struct cpuidle_state *target = &dev->states[last_idx]; | ||
73 | |||
74 | /* | ||
75 | * Ugh, this idle state doesn't support residency measurements, so we | ||
76 | * are basically lost in the dark. As a compromise, assume we slept | ||
77 | * for one full standard timer tick. However, be aware that this | ||
78 | * could potentially result in a suboptimal state transition. | ||
79 | */ | ||
80 | if (!(target->flags & CPUIDLE_FLAG_TIME_VALID)) | ||
81 | measured_us = USEC_PER_SEC / HZ; | ||
82 | |||
83 | /* Predict time remaining until next break event */ | ||
84 | if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) { | ||
85 | data->predicted_us = max(measured_us, data->last_measured_us); | ||
86 | data->last_measured_us = measured_us; | ||
87 | data->elapsed_us = 0; | ||
88 | } else { | ||
89 | if (data->elapsed_us < data->elapsed_us + measured_us) | ||
90 | data->elapsed_us = measured_us; | ||
91 | else | ||
92 | data->elapsed_us = -1; | ||
93 | data->predicted_us = max(measured_us, data->last_measured_us); | ||
94 | } | ||
95 | } | ||
96 | |||
97 | /** | ||
98 | * menu_enable_device - initializes the governor's per-CPU bookkeeping | ||
99 | * @dev: the CPU | ||
100 | */ | ||
101 | static int menu_enable_device(struct cpuidle_device *dev) | ||
102 | { | ||
103 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | ||
104 | |||
105 | memset(data, 0, sizeof(struct menu_device)); | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static struct cpuidle_governor menu_governor = { | ||
111 | .name = "menu", | ||
112 | .rating = 20, | ||
113 | .enable = menu_enable_device, | ||
114 | .select = menu_select, | ||
115 | .reflect = menu_reflect, | ||
116 | .owner = THIS_MODULE, | ||
117 | }; | ||
118 | |||
119 | /** | ||
120 | * init_menu - initializes the governor | ||
121 | */ | ||
122 | static int __init init_menu(void) | ||
123 | { | ||
124 | return cpuidle_register_governor(&menu_governor); | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * exit_menu - exits the governor | ||
129 | */ | ||
130 | static void __exit exit_menu(void) | ||
131 | { | ||
132 | cpuidle_unregister_governor(&menu_governor); | ||
133 | } | ||
134 | |||
135 | MODULE_LICENSE("GPL"); | ||
136 | module_init(init_menu); | ||
137 | module_exit(exit_menu); | ||
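Where ladder climbs one rung per decision, menu jumps straight to its target: it bounds the coming sleep by the next timer expiry from tick_nohz_get_sleep_length(), tempers that with the history kept by menu_reflect(), and takes the deepest state whose target residency and exit latency both fit. A user-space sketch of just that selection rule, illustrative only, with an invented state table standing in for dev->states and plain parameters standing in for the kernel helpers:

/* Illustrative only -- a made-up model, not part of the patch. */
#include <stdio.h>

struct model_state {
	const char *name;
	unsigned int target_residency;	/* minimum worthwhile sleep, in us */
	unsigned int exit_latency;	/* worst-case wakeup cost, in us */
};

static const struct model_state states[] = {
	{ "C1",   1,  1 },
	{ "C2",  20, 10 },
	{ "C3", 150, 60 },
};

static int menu_pick(unsigned int expected_us, unsigned int predicted_us,
		     unsigned int latency_limit_us)
{
	int i;

	/* state 0 is always legal, so the scan starts at 1 */
	for (i = 1; i < 3; i++) {
		if (states[i].target_residency > expected_us)
			break;
		if (states[i].target_residency > predicted_us)
			break;
		if (states[i].exit_latency > latency_limit_us)
			break;
	}
	return i - 1;	/* deepest state that passed every test */
}

int main(void)
{
	/* the tick is 500us away, but history predicts a 30us sleep: C2 */
	printf("picked %s\n", states[menu_pick(500, 30, 1000)].name);
	/* same tick distance with a 5us latency budget: only C1 fits */
	printf("picked %s\n", states[menu_pick(500, 300, 5)].name);
	return 0;
}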
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c new file mode 100644 index 000000000000..0f3515e77d4b --- /dev/null +++ b/drivers/cpuidle/sysfs.c | |||
@@ -0,0 +1,361 @@ | |||
1 | /* | ||
2 | * sysfs.c - sysfs support | ||
3 | * | ||
4 | * (C) 2006-2007 Shaohua Li <shaohua.li@intel.com> | ||
5 | * | ||
6 | * This code is licenced under the GPL. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/cpuidle.h> | ||
11 | #include <linux/sysfs.h> | ||
12 | #include <linux/cpu.h> | ||
13 | |||
14 | #include "cpuidle.h" | ||
15 | |||
16 | static unsigned int sysfs_switch; | ||
17 | static int __init cpuidle_sysfs_setup(char *unused) | ||
18 | { | ||
19 | sysfs_switch = 1; | ||
20 | return 1; | ||
21 | } | ||
22 | __setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); | ||
23 | |||
24 | static ssize_t show_available_governors(struct sys_device *dev, char *buf) | ||
25 | { | ||
26 | ssize_t i = 0; | ||
27 | struct cpuidle_governor *tmp; | ||
28 | |||
29 | mutex_lock(&cpuidle_lock); | ||
30 | list_for_each_entry(tmp, &cpuidle_governors, governor_list) { | ||
31 | if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2)) | ||
32 | goto out; | ||
33 | i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name); | ||
34 | } | ||
35 | |||
36 | out: | ||
37 | i += sprintf(&buf[i], "\n"); | ||
38 | mutex_unlock(&cpuidle_lock); | ||
39 | return i; | ||
40 | } | ||
41 | |||
42 | static ssize_t show_current_driver(struct sys_device *dev, char *buf) | ||
43 | { | ||
44 | ssize_t ret; | ||
45 | |||
46 | spin_lock(&cpuidle_driver_lock); | ||
47 | if (cpuidle_curr_driver) | ||
48 | ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name); | ||
49 | else | ||
50 | ret = sprintf(buf, "none\n"); | ||
51 | spin_unlock(&cpuidle_driver_lock); | ||
52 | |||
53 | return ret; | ||
54 | } | ||
55 | |||
56 | static ssize_t show_current_governor(struct sys_device *dev, char *buf) | ||
57 | { | ||
58 | ssize_t ret; | ||
59 | |||
60 | mutex_lock(&cpuidle_lock); | ||
61 | if (cpuidle_curr_governor) | ||
62 | ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name); | ||
63 | else | ||
64 | ret = sprintf(buf, "none\n"); | ||
65 | mutex_unlock(&cpuidle_lock); | ||
66 | |||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | static ssize_t store_current_governor(struct sys_device *dev, | ||
71 | const char *buf, size_t count) | ||
72 | { | ||
73 | char gov_name[CPUIDLE_NAME_LEN]; | ||
74 | int ret = -EINVAL; | ||
75 | size_t len = count; | ||
76 | struct cpuidle_governor *gov; | ||
77 | |||
78 | if (!len || len >= sizeof(gov_name)) | ||
79 | return -EINVAL; | ||
80 | |||
81 | memcpy(gov_name, buf, len); | ||
82 | gov_name[len] = '\0'; | ||
83 | if (gov_name[len - 1] == '\n') | ||
84 | gov_name[--len] = '\0'; | ||
85 | |||
86 | mutex_lock(&cpuidle_lock); | ||
87 | |||
88 | list_for_each_entry(gov, &cpuidle_governors, governor_list) { | ||
89 | if (strlen(gov->name) == len && !strcmp(gov->name, gov_name)) { | ||
90 | ret = cpuidle_switch_governor(gov); | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | mutex_unlock(&cpuidle_lock); | ||
96 | |||
97 | if (ret) | ||
98 | return ret; | ||
99 | else | ||
100 | return count; | ||
101 | } | ||
102 | |||
103 | static SYSDEV_ATTR(current_driver, 0444, show_current_driver, NULL); | ||
104 | static SYSDEV_ATTR(current_governor_ro, 0444, show_current_governor, NULL); | ||
105 | |||
106 | static struct attribute *cpuclass_default_attrs[] = { | ||
107 | &attr_current_driver.attr, | ||
108 | &attr_current_governor_ro.attr, | ||
109 | NULL | ||
110 | }; | ||
111 | |||
112 | static SYSDEV_ATTR(available_governors, 0444, show_available_governors, NULL); | ||
113 | static SYSDEV_ATTR(current_governor, 0644, show_current_governor, | ||
114 | store_current_governor); | ||
115 | |||
116 | static struct attribute *cpuclass_switch_attrs[] = { | ||
117 | &attr_available_governors.attr, | ||
118 | &attr_current_driver.attr, | ||
119 | &attr_current_governor.attr, | ||
120 | NULL | ||
121 | }; | ||
122 | |||
123 | static struct attribute_group cpuclass_attr_group = { | ||
124 | .attrs = cpuclass_default_attrs, | ||
125 | .name = "cpuidle", | ||
126 | }; | ||
127 | |||
128 | /** | ||
129 | * cpuidle_add_class_sysfs - adds CPU global sysfs attributes | ||
130 | */ | ||
131 | int cpuidle_add_class_sysfs(struct sysdev_class *cls) | ||
132 | { | ||
133 | if (sysfs_switch) | ||
134 | cpuclass_attr_group.attrs = cpuclass_switch_attrs; | ||
135 | |||
136 | return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group); | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * cpuidle_remove_class_sysfs - removes CPU global sysfs attributes | ||
141 | */ | ||
142 | void cpuidle_remove_class_sysfs(struct sysdev_class *cls) | ||
143 | { | ||
144 | sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group); | ||
145 | } | ||
146 | |||
147 | struct cpuidle_attr { | ||
148 | struct attribute attr; | ||
149 | ssize_t (*show)(struct cpuidle_device *, char *); | ||
150 | ssize_t (*store)(struct cpuidle_device *, const char *, size_t count); | ||
151 | }; | ||
152 | |||
153 | #define define_one_ro(_name, show) \ | ||
154 | static struct cpuidle_attr attr_##_name = __ATTR(_name, 0444, show, NULL) | ||
155 | #define define_one_rw(_name, show, store) \ | ||
156 | static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store) | ||
157 | |||
158 | #define kobj_to_cpuidledev(k) container_of(k, struct cpuidle_device, kobj) | ||
159 | #define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr) | ||
160 | static ssize_t cpuidle_show(struct kobject *kobj, struct attribute *attr, char *buf) | ||
161 | { | ||
162 | int ret = -EIO; | ||
163 | struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); | ||
164 | struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr); | ||
165 | |||
166 | if (cattr->show) { | ||
167 | mutex_lock(&cpuidle_lock); | ||
168 | ret = cattr->show(dev, buf); | ||
169 | mutex_unlock(&cpuidle_lock); | ||
170 | } | ||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | static ssize_t cpuidle_store(struct kobject *kobj, struct attribute *attr, | ||
175 | const char *buf, size_t count) | ||
176 | { | ||
177 | int ret = -EIO; | ||
178 | struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); | ||
179 | struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr); | ||
180 | |||
181 | if (cattr->store) { | ||
182 | mutex_lock(&cpuidle_lock); | ||
183 | ret = cattr->store(dev, buf, count); | ||
184 | mutex_unlock(&cpuidle_lock); | ||
185 | } | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | static struct sysfs_ops cpuidle_sysfs_ops = { | ||
190 | .show = cpuidle_show, | ||
191 | .store = cpuidle_store, | ||
192 | }; | ||
193 | |||
194 | static void cpuidle_sysfs_release(struct kobject *kobj) | ||
195 | { | ||
196 | struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); | ||
197 | |||
198 | complete(&dev->kobj_unregister); | ||
199 | } | ||
200 | |||
201 | static struct kobj_type ktype_cpuidle = { | ||
202 | .sysfs_ops = &cpuidle_sysfs_ops, | ||
203 | .release = cpuidle_sysfs_release, | ||
204 | }; | ||
205 | |||
206 | struct cpuidle_state_attr { | ||
207 | struct attribute attr; | ||
208 | ssize_t (*show)(struct cpuidle_state *, char *); | ||
209 | ssize_t (*store)(struct cpuidle_state *, const char *, size_t); | ||
210 | }; | ||
211 | |||
212 | #define define_one_state_ro(_name, show) \ | ||
213 | static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) | ||
214 | |||
215 | #define define_show_state_function(_name) \ | ||
216 | static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ | ||
217 | { \ | ||
218 | return sprintf(buf, "%u\n", state->_name);\ | ||
219 | } | ||
220 | |||
221 | static ssize_t show_state_name(struct cpuidle_state *state, char *buf) | ||
222 | { | ||
223 | return sprintf(buf, "%s\n", state->name); | ||
224 | } | ||
225 | |||
226 | define_show_state_function(exit_latency) | ||
227 | define_show_state_function(power_usage) | ||
228 | define_show_state_function(usage) | ||
229 | define_show_state_function(time) | ||
230 | define_one_state_ro(name, show_state_name); | ||
231 | define_one_state_ro(latency, show_state_exit_latency); | ||
232 | define_one_state_ro(power, show_state_power_usage); | ||
233 | define_one_state_ro(usage, show_state_usage); | ||
234 | define_one_state_ro(time, show_state_time); | ||
235 | |||
236 | static struct attribute *cpuidle_state_default_attrs[] = { | ||
237 | &attr_name.attr, | ||
238 | &attr_latency.attr, | ||
239 | &attr_power.attr, | ||
240 | &attr_usage.attr, | ||
241 | &attr_time.attr, | ||
242 | NULL | ||
243 | }; | ||
244 | |||
245 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) | ||
246 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) | ||
247 | #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) | ||
248 | static ssize_t cpuidle_state_show(struct kobject *kobj, | ||
249 | struct attribute *attr, char *buf) | ||
250 | { | ||
251 | int ret = -EIO; | ||
252 | struct cpuidle_state *state = kobj_to_state(kobj); | ||
253 | struct cpuidle_state_attr *cattr = attr_to_stateattr(attr); | ||
254 | |||
255 | if (cattr->show) | ||
256 | ret = cattr->show(state, buf); | ||
257 | |||
258 | return ret; | ||
259 | } | ||
260 | |||
261 | static struct sysfs_ops cpuidle_state_sysfs_ops = { | ||
262 | .show = cpuidle_state_show, | ||
263 | }; | ||
264 | |||
265 | static void cpuidle_state_sysfs_release(struct kobject *kobj) | ||
266 | { | ||
267 | struct cpuidle_state_kobj *state_obj = kobj_to_state_obj(kobj); | ||
268 | |||
269 | complete(&state_obj->kobj_unregister); | ||
270 | } | ||
271 | |||
272 | static struct kobj_type ktype_state_cpuidle = { | ||
273 | .sysfs_ops = &cpuidle_state_sysfs_ops, | ||
274 | .default_attrs = cpuidle_state_default_attrs, | ||
275 | .release = cpuidle_state_sysfs_release, | ||
276 | }; | ||
277 | |||
278 | static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) | ||
279 | { | ||
280 | kobject_unregister(&device->kobjs[i]->kobj); | ||
281 | wait_for_completion(&device->kobjs[i]->kobj_unregister); | ||
282 | kfree(device->kobjs[i]); | ||
283 | device->kobjs[i] = NULL; | ||
284 | } | ||
285 | |||
286 | /** | ||
287 | * cpuidle_add_state_sysfs - adds per-state sysfs attributes | ||
288 | * @device: the target device | ||
289 | */ | ||
290 | int cpuidle_add_state_sysfs(struct cpuidle_device *device) | ||
291 | { | ||
292 | int i, ret = -ENOMEM; | ||
293 | struct cpuidle_state_kobj *kobj; | ||
294 | |||
295 | /* state statistics */ | ||
296 | for (i = 0; i < device->state_count; i++) { | ||
297 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); | ||
298 | if (!kobj) | ||
299 | goto error_state; | ||
300 | kobj->state = &device->states[i]; | ||
301 | init_completion(&kobj->kobj_unregister); | ||
302 | |||
303 | kobj->kobj.parent = &device->kobj; | ||
304 | kobj->kobj.ktype = &ktype_state_cpuidle; | ||
305 | kobject_set_name(&kobj->kobj, "state%d", i); | ||
306 | ret = kobject_register(&kobj->kobj); | ||
307 | if (ret) { | ||
308 | kfree(kobj); | ||
309 | goto error_state; | ||
310 | } | ||
311 | device->kobjs[i] = kobj; | ||
312 | } | ||
313 | |||
314 | return 0; | ||
315 | |||
316 | error_state: | ||
317 | for (i = i - 1; i >= 0; i--) | ||
318 | cpuidle_free_state_kobj(device, i); | ||
319 | return ret; | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * cpuidle_remove_state_sysfs - removes per-state sysfs attributes | ||
324 | * @device: the target device | ||
325 | */ | ||
326 | void cpuidle_remove_state_sysfs(struct cpuidle_device *device) | ||
327 | { | ||
328 | int i; | ||
329 | |||
330 | for (i = 0; i < device->state_count; i++) | ||
331 | cpuidle_free_state_kobj(device, i); | ||
332 | } | ||
333 | |||
334 | /** | ||
335 | * cpuidle_add_sysfs - creates a sysfs instance for the target device | ||
336 | * @sysdev: the target device | ||
337 | */ | ||
338 | int cpuidle_add_sysfs(struct sys_device *sysdev) | ||
339 | { | ||
340 | int cpu = sysdev->id; | ||
341 | struct cpuidle_device *dev; | ||
342 | |||
343 | dev = per_cpu(cpuidle_devices, cpu); | ||
344 | dev->kobj.parent = &sysdev->kobj; | ||
345 | dev->kobj.ktype = &ktype_cpuidle; | ||
346 | kobject_set_name(&dev->kobj, "%s", "cpuidle"); | ||
347 | return kobject_register(&dev->kobj); | ||
348 | } | ||
349 | |||
350 | /** | ||
351 | * cpuidle_remove_sysfs - deletes a sysfs instance on the target device | ||
352 | * @sysdev: the target device | ||
353 | */ | ||
354 | void cpuidle_remove_sysfs(struct sys_device *sysdev) | ||
355 | { | ||
356 | int cpu = sysdev->id; | ||
357 | struct cpuidle_device *dev; | ||
358 | |||
359 | dev = per_cpu(cpuidle_devices, cpu); | ||
360 | kobject_unregister(&dev->kobj); | ||
361 | } | ||
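The net effect of this file is one class-wide directory plus a per-CPU tree of state statistics. A small user-space sketch of where those attributes appear, assuming the standard "cpu" sysdev class at /sys/devices/system/cpu; the writable current_governor file exists only when the kernel was booted with cpuidle_sysfs_switch:

/* Illustrative only -- not part of the patch. */
#include <stdio.h>

static void dump(const char *path)
{
	char line[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return;		/* attribute (or group) not present */
	if (fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	fclose(f);
}

int main(void)
{
	FILE *f;

	/* class-wide attributes from cpuclass_*_attrs */
	dump("/sys/devices/system/cpu/cpuidle/current_driver");
	dump("/sys/devices/system/cpu/cpuidle/current_governor_ro");

	/* per-state attributes from cpuidle_state_default_attrs */
	dump("/sys/devices/system/cpu/cpu0/cpuidle/state0/name");
	dump("/sys/devices/system/cpu/cpu0/cpuidle/state0/usage");

	/* governor switch: root only, needs cpuidle_sysfs_switch */
	f = fopen("/sys/devices/system/cpu/cpuidle/current_governor", "w");
	if (f) {
		fputs("ladder\n", f);
		fclose(f);
	}
	return 0;
}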
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 8990585bd228..e8a94b746295 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -1858,14 +1858,6 @@ static void ipw2100_down(struct ipw2100_priv *priv) | |||
1858 | 1858 | ||
1859 | modify_acceptable_latency("ipw2100", INFINITE_LATENCY); | 1859 | modify_acceptable_latency("ipw2100", INFINITE_LATENCY); |
1860 | 1860 | ||
1861 | #ifdef ACPI_CSTATE_LIMIT_DEFINED | ||
1862 | if (priv->config & CFG_C3_DISABLED) { | ||
1863 | IPW_DEBUG_INFO(": Resetting C3 transitions.\n"); | ||
1864 | acpi_set_cstate_limit(priv->cstate_limit); | ||
1865 | priv->config &= ~CFG_C3_DISABLED; | ||
1866 | } | ||
1867 | #endif | ||
1868 | |||
1869 | /* We have to signal any supplicant if we are disassociating */ | 1861 | /* We have to signal any supplicant if we are disassociating */ |
1870 | if (associated) | 1862 | if (associated) |
1871 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); | 1863 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); |
@@ -2088,14 +2080,6 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) | |||
2088 | /* RF_KILL is now enabled (else we wouldn't be here) */ | 2080 | /* RF_KILL is now enabled (else we wouldn't be here) */ |
2089 | priv->status |= STATUS_RF_KILL_HW; | 2081 | priv->status |= STATUS_RF_KILL_HW; |
2090 | 2082 | ||
2091 | #ifdef ACPI_CSTATE_LIMIT_DEFINED | ||
2092 | if (priv->config & CFG_C3_DISABLED) { | ||
2093 | IPW_DEBUG_INFO(": Resetting C3 transitions.\n"); | ||
2094 | acpi_set_cstate_limit(priv->cstate_limit); | ||
2095 | priv->config &= ~CFG_C3_DISABLED; | ||
2096 | } | ||
2097 | #endif | ||
2098 | |||
2099 | /* Make sure the RF Kill check timer is running */ | 2083 | /* Make sure the RF Kill check timer is running */ |
2100 | priv->stop_rf_kill = 0; | 2084 | priv->stop_rf_kill = 0; |
2101 | cancel_delayed_work(&priv->rf_kill); | 2085 | cancel_delayed_work(&priv->rf_kill); |
@@ -2326,23 +2310,10 @@ static void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i) | |||
2326 | u32 match, reg; | 2310 | u32 match, reg; |
2327 | int j; | 2311 | int j; |
2328 | #endif | 2312 | #endif |
2329 | #ifdef ACPI_CSTATE_LIMIT_DEFINED | ||
2330 | int limit; | ||
2331 | #endif | ||
2332 | 2313 | ||
2333 | IPW_DEBUG_INFO(": PCI latency error detected at 0x%04zX.\n", | 2314 | IPW_DEBUG_INFO(": PCI latency error detected at 0x%04zX.\n", |
2334 | i * sizeof(struct ipw2100_status)); | 2315 | i * sizeof(struct ipw2100_status)); |
2335 | 2316 | ||
2336 | #ifdef ACPI_CSTATE_LIMIT_DEFINED | ||
2337 | IPW_DEBUG_INFO(": Disabling C3 transitions.\n"); | ||
2338 | limit = acpi_get_cstate_limit(); | ||
2339 | if (limit > 2) { | ||
2340 | priv->cstate_limit = limit; | ||
2341 | acpi_set_cstate_limit(2); | ||
2342 | priv->config |= CFG_C3_DISABLED; | ||
2343 | } | ||
2344 | #endif | ||
2345 | |||
2346 | #ifdef IPW2100_DEBUG_C3 | 2317 | #ifdef IPW2100_DEBUG_C3 |
2347 | /* Halt the firmware so we can get a good image */ | 2318 | /* Halt the firmware so we can get a good image */ |
2348 | write_register(priv->net_dev, IPW_REG_RESET_REG, | 2319 | write_register(priv->net_dev, IPW_REG_RESET_REG, |
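These deletions are the point of the series for ipw2100: the driver no longer reaches into ACPI to clamp C-states when it detects PCI latency errors, because the latency budget it already declares through modify_acceptable_latency() (kept above) is now honored by the cpuidle governors via system_latency_constraint(). A sketch of that driver-side pattern, using the existing linux/latency.h calls; the "example" identifier and function names are made up:

/* Illustrative only -- a hypothetical driver, not part of the patch. */
#include <linux/latency.h>

static int example_probe(void)
{
	/* register with no constraint; INFINITE_LATENCY means "don't care" */
	set_acceptable_latency("example", INFINITE_LATENCY);
	return 0;
}

static void example_latency_critical_start(void)
{
	/* demand sub-100us wakeups while the hardware cannot tolerate stalls */
	modify_acceptable_latency("example", 100);
}

static void example_latency_critical_end(void)
{
	modify_acceptable_latency("example", INFINITE_LATENCY);
}

static void example_remove(void)
{
	remove_acceptable_latency("example");
}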
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h index de7d384d38af..2b8be2418fa7 100644 --- a/drivers/net/wireless/ipw2100.h +++ b/drivers/net/wireless/ipw2100.h | |||
@@ -479,7 +479,6 @@ enum { | |||
479 | #define CFG_ASSOCIATE (1<<6) | 479 | #define CFG_ASSOCIATE (1<<6) |
480 | #define CFG_FIXED_RATE (1<<7) | 480 | #define CFG_FIXED_RATE (1<<7) |
481 | #define CFG_ADHOC_CREATE (1<<8) | 481 | #define CFG_ADHOC_CREATE (1<<8) |
482 | #define CFG_C3_DISABLED (1<<9) | ||
483 | #define CFG_PASSIVE_SCAN (1<<10) | 482 | #define CFG_PASSIVE_SCAN (1<<10) |
484 | #ifdef CONFIG_IPW2100_MONITOR | 483 | #ifdef CONFIG_IPW2100_MONITOR |
485 | #define CFG_CRC_CHECK (1<<11) | 484 | #define CFG_CRC_CHECK (1<<11) |
@@ -508,7 +507,6 @@ struct ipw2100_priv { | |||
508 | u8 bssid[ETH_ALEN]; | 507 | u8 bssid[ETH_ALEN]; |
509 | u8 channel; | 508 | u8 channel; |
510 | int last_mode; | 509 | int last_mode; |
511 | int cstate_limit; | ||
512 | 510 | ||
513 | unsigned long connect_start; | 511 | unsigned long connect_start; |
514 | unsigned long last_reset; | 512 | unsigned long last_reset; |