 arch/i386/Kconfig                  |   2
 arch/x86_64/Kconfig                |   2
 drivers/Makefile                   |   1
 drivers/acpi/osl.c                 |   8
 drivers/acpi/processor_core.c      |  22
 drivers/acpi/processor_idle.c      | 488
 drivers/cpuidle/Kconfig            |  20
 drivers/cpuidle/Makefile           |   5
 drivers/cpuidle/cpuidle.c          | 295
 drivers/cpuidle/cpuidle.h          |  33
 drivers/cpuidle/driver.c           |  56
 drivers/cpuidle/governor.c         | 141
 drivers/cpuidle/governors/Makefile |   6
 drivers/cpuidle/governors/ladder.c | 166
 drivers/cpuidle/governors/menu.c   | 137
 drivers/cpuidle/sysfs.c            | 361
 drivers/net/wireless/ipw2100.c     |  29
 drivers/net/wireless/ipw2100.h     |   2
 include/acpi/processor.h           |   5
 include/linux/acpi.h               |  26
 include/linux/cpuidle.h            | 180
 include/linux/tick.h               |   9
 kernel/time/tick-sched.c           |  16
 23 files changed, 1901 insertions(+), 109 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 97b64d7d6bf6..3328e0ab5428 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1069,6 +1069,8 @@ endif # APM
 
 source "arch/i386/kernel/cpu/cpufreq/Kconfig"
 
+source "drivers/cpuidle/Kconfig"
+
 endmenu
 
 menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index b4d9089a6a06..e8dbb37a29cf 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -706,6 +706,8 @@ source "drivers/acpi/Kconfig"
 
 source "arch/x86_64/kernel/cpufreq/Kconfig"
 
+source "drivers/cpuidle/Kconfig"
+
 endmenu
 
 menu "Bus options (PCI etc.)"
diff --git a/drivers/Makefile b/drivers/Makefile
index f0878b2ec55e..10a9c52c9100 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_MCA)		+= mca/
 obj-$(CONFIG_EISA)		+= eisa/
 obj-$(CONFIG_LGUEST_GUEST)	+= lguest/
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
+obj-$(CONFIG_CPU_IDLE)		+= cpuidle/
 obj-$(CONFIG_MMC)		+= mmc/
 obj-$(CONFIG_NEW_LEDS)		+= leds/
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 12c09fafce9a..5d14d4f10b12 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1043,14 +1043,6 @@ static int __init acpi_wake_gpes_always_on_setup(char *str)
 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
 
 /*
- * max_cstate is defined in the base kernel so modules can
- * change it w/o depending on the state of the processor module.
- */
-unsigned int max_cstate = ACPI_PROCESSOR_MAX_POWER;
-
-EXPORT_SYMBOL(max_cstate);
-
-/*
  * Acquire a spinlock.
  *
  * handle is a pointer to the spinlock_t.
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 9f11dc296cdd..a7351084f2f8 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -44,6 +44,7 @@
 #include <linux/seq_file.h>
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
+#include <linux/cpuidle.h>
 
 #include <asm/io.h>
 #include <asm/system.h>
@@ -1049,11 +1050,13 @@ static int __init acpi_processor_init(void)
 		return -ENOMEM;
 	acpi_processor_dir->owner = THIS_MODULE;
 
+	result = cpuidle_register_driver(&acpi_idle_driver);
+	if (result < 0)
+		goto out_proc;
+
 	result = acpi_bus_register_driver(&acpi_processor_driver);
-	if (result < 0) {
-		remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-		return result;
-	}
+	if (result < 0)
+		goto out_cpuidle;
 
 	acpi_processor_install_hotplug_notify();
 
@@ -1062,11 +1065,18 @@ static int __init acpi_processor_init(void)
 	acpi_processor_ppc_init();
 
 	return 0;
+
+out_cpuidle:
+	cpuidle_unregister_driver(&acpi_idle_driver);
+
+out_proc:
+	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+
+	return result;
 }
 
 static void __exit acpi_processor_exit(void)
 {
-
 	acpi_processor_ppc_exit();
 
 	acpi_thermal_cpufreq_exit();
@@ -1075,6 +1085,8 @@ static void __exit acpi_processor_exit(void)
 
 	acpi_bus_unregister_driver(&acpi_processor_driver);
 
+	cpuidle_unregister_driver(&acpi_idle_driver);
+
 	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
 
 	return;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f18261368e76..0cad56ca342b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -40,6 +40,7 @@
 #include <linux/sched.h>	/* need_resched() */
 #include <linux/latency.h>
 #include <linux/clockchips.h>
+#include <linux/cpuidle.h>
 
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
@@ -64,14 +65,22 @@ ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER	"power"
 #define US_TO_PM_TIMER_TICKS(t)	((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define PM_TIMER_TICK_NS	(1000000000ULL/PM_TIMER_FREQUENCY)
+#ifndef CONFIG_CPU_IDLE
 #define C2_OVERHEAD		4	/* 1us (3.579 ticks per us) */
 #define C3_OVERHEAD		4	/* 1us (3.579 ticks per us) */
 static void (*pm_idle_save) (void) __read_mostly;
-module_param(max_cstate, uint, 0644);
+#else
+#define C2_OVERHEAD		1	/* 1us */
+#define C3_OVERHEAD		1	/* 1us */
+#endif
+#define PM_TIMER_TICKS_TO_US(p)	(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
 
+static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
+module_param(max_cstate, uint, 0000);
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
 
+#ifndef CONFIG_CPU_IDLE
 /*
  * bm_history -- bit-mask with a bit per jiffy of bus-master activity
  *  1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
@@ -82,9 +91,10 @@ module_param(nocst, uint, 0000);
 static unsigned int bm_history __read_mostly =
     (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
 module_param(bm_history, uint, 0644);
-/* --------------------------------------------------------------------------
-                                Power Management
-   -------------------------------------------------------------------------- */
+
+static int acpi_processor_set_power_policy(struct acpi_processor *pr);
+
+#endif
 
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
@@ -177,6 +187,18 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
 		return ((0xFFFFFFFF - t1) + t2);
 }
 
+static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
+{
+	if (t2 >= t1)
+		return PM_TIMER_TICKS_TO_US(t2 - t1);
+	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
+		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
+	else
+		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
+}
+
+#ifndef CONFIG_CPU_IDLE
+
 static void
 acpi_processor_power_activate(struct acpi_processor *pr,
 			      struct acpi_processor_cx *new)
@@ -248,6 +270,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
 }
+#endif /* !CONFIG_CPU_IDLE */
 
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
 
@@ -342,6 +365,7 @@ int acpi_processor_resume(struct acpi_device * device)
 	return 0;
 }
 
+#ifndef CONFIG_CPU_IDLE
 static void acpi_processor_idle(void)
 {
 	struct acpi_processor *pr = NULL;
@@ -439,7 +463,7 @@ static void acpi_processor_idle(void)
 	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
 	 * detection phase, to work cleanly with logical CPU hotplug.
 	 */
-	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
 	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 		cx = &pr->power.states[ACPI_STATE_C1];
 #endif
@@ -739,6 +763,7 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 
 	return 0;
 }
+#endif /* !CONFIG_CPU_IDLE */
 
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
@@ -756,7 +781,7 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 #ifndef CONFIG_HOTPLUG_CPU
 	/*
 	 * Check for P_LVL2_UP flag before entering C2 and above on
-	 * an SMP system.
+	 * an SMP system.
 	 */
 	if ((num_online_cpus() > 1) &&
 	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
@@ -957,7 +982,12 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 	 * Normalize the C2 latency to expedite policy
 	 */
 	cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+	cx->latency_ticks = cx->latency;
+#endif
 
 	return;
 }
@@ -1037,7 +1067,12 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	 * use this in our C3 policy
 	 */
 	cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+	cx->latency_ticks = cx->latency;
+#endif
 
 	return;
 }
@@ -1102,6 +1137,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 
 	pr->power.count = acpi_processor_power_verify(pr);
 
+#ifndef CONFIG_CPU_IDLE
 	/*
 	 * Set Default Policy
 	 * ------------------
@@ -1113,6 +1149,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	result = acpi_processor_set_power_policy(pr);
 	if (result)
 		return result;
+#endif
 
 	/*
 	 * if one state of type C2 or C3 is available, mark this
@@ -1129,35 +1166,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	return 0;
 }
 
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
-{
-	int result = 0;
-
-
-	if (!pr)
-		return -EINVAL;
-
-	if (nocst) {
-		return -ENODEV;
-	}
-
-	if (!pr->flags.power_setup_done)
-		return -ENODEV;
-
-	/* Fall back to the default idle loop */
-	pm_idle = pm_idle_save;
-	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */
-
-	pr->flags.power = 0;
-	result = acpi_processor_get_power_info(pr);
-	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
-		pm_idle = acpi_processor_idle;
-
-	return result;
-}
-
-/* proc interface */
-
 static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 {
 	struct acpi_processor *pr = seq->private;
@@ -1239,6 +1247,35 @@ static const struct file_operations acpi_processor_power_fops = {
 	.release = single_release,
 };
 
+#ifndef CONFIG_CPU_IDLE
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int result = 0;
+
+
+	if (!pr)
+		return -EINVAL;
+
+	if (nocst) {
+		return -ENODEV;
+	}
+
+	if (!pr->flags.power_setup_done)
+		return -ENODEV;
+
+	/* Fall back to the default idle loop */
+	pm_idle = pm_idle_save;
+	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */
+
+	pr->flags.power = 0;
+	result = acpi_processor_get_power_info(pr);
+	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
+		pm_idle = acpi_processor_idle;
+
+	return result;
+}
+
 #ifdef CONFIG_SMP
 static void smp_callback(void *v)
 {
@@ -1261,7 +1298,366 @@ static int acpi_processor_latency_notify(struct notifier_block *b,
 static struct notifier_block acpi_processor_latency_notifier = {
 	.notifier_call = acpi_processor_latency_notify,
 };
+
+#endif
+
+#else /* CONFIG_CPU_IDLE */
+
+/**
+ * acpi_idle_bm_check - checks if bus master activity was detected
+ */
+static int acpi_idle_bm_check(void)
+{
+	u32 bm_status = 0;
+
+	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+	if (bm_status)
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+	/*
+	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
+	 * the true state of bus mastering activity; forcing us to
+	 * manually check the BMIDEA bit of each IDE channel.
+	 */
+	else if (errata.piix4.bmisx) {
+		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
+		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
+			bm_status = 1;
+	}
+	return bm_status;
+}
+
+/**
+ * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
+ * @pr: the processor
+ * @target: the new target state
+ */
+static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
+					   struct acpi_processor_cx *target)
+{
+	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+		pr->flags.bm_rld_set = 0;
+	}
+
+	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+		pr->flags.bm_rld_set = 1;
+	}
+}
+
+/**
+ * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
+ * @cx: cstate data
+ */
+static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+{
+	if (cx->space_id == ACPI_CSTATE_FFH) {
+		/* Call into architectural FFH based C-state */
+		acpi_processor_ffh_cstate_enter(cx);
+	} else {
+		int unused;
+		/* IO port based C-state */
+		inb(cx->address);
+		/* Dummy wait op - must do something useless after P_LVL2 read
+		   because chipsets cannot guarantee that STPCLK# signal
+		   gets asserted in time to freeze execution properly. */
+		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	}
+}
+
+/**
+ * acpi_idle_enter_c1 - enters an ACPI C1 state-type
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * This is equivalent to the HALT instruction.
+ */
+static int acpi_idle_enter_c1(struct cpuidle_device *dev,
+			      struct cpuidle_state *state)
+{
+	struct acpi_processor *pr;
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	pr = processors[smp_processor_id()];
+
+	if (unlikely(!pr))
+		return 0;
+
+	if (pr->flags.bm_check)
+		acpi_idle_update_bm_rld(pr, cx);
+
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we test
+	 * NEED_RESCHED:
+	 */
+	smp_mb();
+	if (!need_resched())
+		safe_halt();
+	current_thread_info()->status |= TS_POLLING;
+
+	cx->usage++;
+
+	return 0;
+}
+
+/**
+ * acpi_idle_enter_simple - enters an ACPI state without BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ */
+static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+				  struct cpuidle_state *state)
+{
+	struct acpi_processor *pr;
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	u32 t1, t2;
+	pr = processors[smp_processor_id()];
+
+	if (unlikely(!pr))
+		return 0;
+
+	if (acpi_idle_suspend)
+		return(acpi_idle_enter_c1(dev, state));
+
+	if (pr->flags.bm_check)
+		acpi_idle_update_bm_rld(pr, cx);
+
+	local_irq_disable();
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we test
+	 * NEED_RESCHED:
+	 */
+	smp_mb();
+
+	if (unlikely(need_resched())) {
+		current_thread_info()->status |= TS_POLLING;
+		local_irq_enable();
+		return 0;
+	}
+
+	if (cx->type == ACPI_STATE_C3)
+		ACPI_FLUSH_CPU_CACHE();
+
+	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	acpi_state_timer_broadcast(pr, cx, 1);
+	acpi_idle_do_entry(cx);
+	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+	/* TSC could halt in idle, so notify users */
+	mark_tsc_unstable("TSC halts in idle");
+#endif
+
+	local_irq_enable();
+	current_thread_info()->status |= TS_POLLING;
+
+	cx->usage++;
+
+	acpi_state_timer_broadcast(pr, cx, 0);
+	cx->time += ticks_elapsed(t1, t2);
+	return ticks_elapsed_in_us(t1, t2);
+}
+
+static int c3_cpu_count;
+static DEFINE_SPINLOCK(c3_lock);
+
+/**
+ * acpi_idle_enter_bm - enters C3 with proper BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * If BM is detected, the deepest non-C3 idle state is entered instead.
+ */
+static int acpi_idle_enter_bm(struct cpuidle_device *dev,
+			      struct cpuidle_state *state)
+{
+	struct acpi_processor *pr;
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	u32 t1, t2;
+	pr = processors[smp_processor_id()];
+
+	if (unlikely(!pr))
+		return 0;
+
+	if (acpi_idle_suspend)
+		return(acpi_idle_enter_c1(dev, state));
+
+	local_irq_disable();
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we test
+	 * NEED_RESCHED:
+	 */
+	smp_mb();
+
+	if (unlikely(need_resched())) {
+		current_thread_info()->status |= TS_POLLING;
+		local_irq_enable();
+		return 0;
+	}
+
+	/*
+	 * Must be done before busmaster disable as we might need to
+	 * access HPET !
+	 */
+	acpi_state_timer_broadcast(pr, cx, 1);
+
+	if (acpi_idle_bm_check()) {
+		cx = pr->power.bm_state;
+
+		acpi_idle_update_bm_rld(pr, cx);
+
+		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+		acpi_idle_do_entry(cx);
+		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	} else {
+		acpi_idle_update_bm_rld(pr, cx);
+
+		spin_lock(&c3_lock);
+		c3_cpu_count++;
+		/* Disable bus master arbitration when all CPUs are in C3 */
+		if (c3_cpu_count == num_online_cpus())
+			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+		spin_unlock(&c3_lock);
+
+		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+		acpi_idle_do_entry(cx);
+		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+		spin_lock(&c3_lock);
+		/* Re-enable bus master arbitration */
+		if (c3_cpu_count == num_online_cpus())
+			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+		c3_cpu_count--;
+		spin_unlock(&c3_lock);
+	}
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+	/* TSC could halt in idle, so notify users */
+	mark_tsc_unstable("TSC halts in idle");
+#endif
+
+	local_irq_enable();
+	current_thread_info()->status |= TS_POLLING;
+
+	cx->usage++;
+
+	acpi_state_timer_broadcast(pr, cx, 0);
+	cx->time += ticks_elapsed(t1, t2);
+	return ticks_elapsed_in_us(t1, t2);
+}
+
+struct cpuidle_driver acpi_idle_driver = {
+	.name =		"acpi_idle",
+	.owner =	THIS_MODULE,
+};
+
+/**
+ * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
+{
+	int i, count = 0;
+	struct acpi_processor_cx *cx;
+	struct cpuidle_state *state;
+	struct cpuidle_device *dev = &pr->power.dev;
+
+	if (!pr->flags.power_setup_done)
+		return -EINVAL;
+
+	if (pr->flags.power == 0) {
+		return -EINVAL;
+	}
+
+	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+		cx = &pr->power.states[i];
+		state = &dev->states[count];
+
+		if (!cx->valid)
+			continue;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+		    !pr->flags.has_cst &&
+		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+			continue;
 #endif
+		cpuidle_set_statedata(state, cx);
+
+		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
+		state->exit_latency = cx->latency;
+		state->target_residency = cx->latency * 6;
+		state->power_usage = cx->power;
+
+		state->flags = 0;
+		switch (cx->type) {
+		case ACPI_STATE_C1:
+			state->flags |= CPUIDLE_FLAG_SHALLOW;
+			state->enter = acpi_idle_enter_c1;
+			break;
+
+		case ACPI_STATE_C2:
+			state->flags |= CPUIDLE_FLAG_BALANCED;
+			state->flags |= CPUIDLE_FLAG_TIME_VALID;
+			state->enter = acpi_idle_enter_simple;
+			break;
+
+		case ACPI_STATE_C3:
+			state->flags |= CPUIDLE_FLAG_DEEP;
+			state->flags |= CPUIDLE_FLAG_TIME_VALID;
+			state->flags |= CPUIDLE_FLAG_CHECK_BM;
+			state->enter = pr->flags.bm_check ?
+					acpi_idle_enter_bm :
+					acpi_idle_enter_simple;
+			break;
+		}
+
+		count++;
+	}
+
+	dev->state_count = count;
+
+	if (!count)
+		return -EINVAL;
+
+	/* find the deepest state that can handle active BM */
+	if (pr->flags.bm_check) {
+		for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
+			if (pr->power.states[i].type == ACPI_STATE_C3)
+				break;
+		pr->power.bm_state = &pr->power.states[i-1];
+	}
+
+	return 0;
+}
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int ret;
+
+	if (!pr)
+		return -EINVAL;
+
+	if (nocst) {
+		return -ENODEV;
+	}
+
+	if (!pr->flags.power_setup_done)
+		return -ENODEV;
+
+	cpuidle_pause_and_lock();
+	cpuidle_disable_device(&pr->power.dev);
+	acpi_processor_get_power_info(pr);
+	acpi_processor_setup_cpuidle(pr);
+	ret = cpuidle_enable_device(&pr->power.dev);
+	cpuidle_resume_and_unlock();
+
+	return ret;
+}
+
+#endif /* CONFIG_CPU_IDLE */
 
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 			struct acpi_device *device)
@@ -1279,7 +1675,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 			       "ACPI: processor limited to max C-state %d\n",
 			       max_cstate);
 		first_run++;
-#ifdef CONFIG_SMP
+#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
 		register_latency_notifier(&acpi_processor_latency_notifier);
 #endif
 	}
@@ -1297,6 +1693,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	}
 
 	acpi_processor_get_power_info(pr);
+	pr->flags.power_setup_done = 1;
 
 	/*
 	 * Install the idle handler if processor power management is supported.
@@ -1304,6 +1701,13 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	 * platforms that only support C1.
 	 */
 	if ((pr->flags.power) && (!boot_option_idle_override)) {
+#ifdef CONFIG_CPU_IDLE
+		acpi_processor_setup_cpuidle(pr);
+		pr->power.dev.cpu = pr->id;
+		if (cpuidle_register_device(&pr->power.dev))
+			return -EIO;
+#endif
+
 		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
 		for (i = 1; i <= pr->power.count; i++)
 			if (pr->power.states[i].valid)
@@ -1311,10 +1715,12 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 				       pr->power.states[i].type);
 		printk(")\n");
 
+#ifndef CONFIG_CPU_IDLE
 		if (pr->id == 0) {
 			pm_idle_save = pm_idle;
 			pm_idle = acpi_processor_idle;
 		}
+#endif
 	}
 
 	/* 'power' [R] */
@@ -1328,21 +1734,24 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 		entry->owner = THIS_MODULE;
 	}
 
-	pr->flags.power_setup_done = 1;
-
 	return 0;
 }
 
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
-
+#ifdef CONFIG_CPU_IDLE
+	if ((pr->flags.power) && (!boot_option_idle_override))
+		cpuidle_unregister_device(&pr->power.dev);
+#endif
 	pr->flags.power_setup_done = 0;
 
 	if (acpi_device_dir(device))
 		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
 				  acpi_device_dir(device));
 
+#ifndef CONFIG_CPU_IDLE
+
 	/* Unregister the idle handler when processor #0 is removed. */
 	if (pr->id == 0) {
 		pm_idle = pm_idle_save;
@@ -1357,6 +1766,7 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 		unregister_latency_notifier(&acpi_processor_latency_notifier);
 #endif
 	}
+#endif
 
 	return 0;
 }
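
The ticks_elapsed_in_us() helper added above has to cope with the ACPI PM timer wrapping at either 24 or 32 bits, depending on the FADT's ACPI_FADT_32BIT_TIMER flag. A minimal userspace sketch of the same arithmetic, for reference (the 3.579545 MHz frequency comes from the ACPI spec; elapsed_us() and its boolean flag parameter are invented stand-ins for the kernel helper and the FADT test):

#include <stdio.h>
#include <stdint.h>

#define PM_TIMER_FREQUENCY 3579545	/* Hz, fixed by the ACPI spec */
/* Widen before multiplying so a large tick count cannot overflow 32 bits. */
#define TICKS_TO_US(p) ((uint32_t)(((uint64_t)(p) * 1000) / (PM_TIMER_FREQUENCY / 1000)))

/* Mirrors ticks_elapsed_in_us(): t2 may have wrapped past t1. */
static uint32_t elapsed_us(uint32_t t1, uint32_t t2, int timer_is_32bit)
{
	if (t2 >= t1)
		return TICKS_TO_US(t2 - t1);
	else if (!timer_is_32bit)	/* 24-bit counter wrapped */
		return TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else				/* 32-bit counter wrapped */
		return TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}

int main(void)
{
	/* Roughly 3580 ticks is 1 ms; here a 24-bit counter wraps mid-interval. */
	printf("%u us\n", elapsed_us(0x00FFF000, 0x00000200, 0));
	return 0;
}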
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
new file mode 100644
index 000000000000..3bed4127d4ad
--- /dev/null
+++ b/drivers/cpuidle/Kconfig
@@ -0,0 +1,20 @@
+
+config CPU_IDLE
+	bool "CPU idle PM support"
+	help
+	  CPU idle is a generic framework for supporting software-controlled
+	  idle processor power management.  It includes modular cross-platform
+	  governors that can be swapped during runtime.
+
+	  If you're using a mobile platform that supports CPU idle PM (e.g.
+	  an ACPI-capable notebook), you should say Y here.
+
+config CPU_IDLE_GOV_LADDER
+	bool
+	depends on CPU_IDLE
+	default y
+
+config CPU_IDLE_GOV_MENU
+	bool
+	depends on CPU_IDLE && NO_HZ
+	default y
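
Since both governor symbols are non-interactive and default to y, enabling the framework on a tickless kernel typically yields a .config fragment like the following (illustrative; CPU_IDLE_GOV_MENU drops out when NO_HZ is not set):

CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y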
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
new file mode 100644
index 000000000000..5634f88379df
--- /dev/null
+++ b/drivers/cpuidle/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for cpuidle.
+#
+
+obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
new file mode 100644
index 000000000000..fdf4106b817b
--- /dev/null
+++ b/drivers/cpuidle/cpuidle.c
@@ -0,0 +1,295 @@
+/*
+ * cpuidle.c - core cpuidle infrastructure
+ *
+ * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *               Shaohua Li <shaohua.li@intel.com>
+ *               Adam Belay <abelay@novell.com>
+ *
+ * This code is licenced under the GPL.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/latency.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+
+#include "cpuidle.h"
+
+DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
+EXPORT_PER_CPU_SYMBOL_GPL(cpuidle_devices);
+
+DEFINE_MUTEX(cpuidle_lock);
+LIST_HEAD(cpuidle_detected_devices);
+static void (*pm_idle_old)(void);
+
+static int enabled_devices;
+
+/**
+ * cpuidle_idle_call - the main idle loop
+ *
+ * NOTE: no locks or semaphores should be used here
+ */
+static void cpuidle_idle_call(void)
+{
+	struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
+	struct cpuidle_state *target_state;
+	int next_state;
+
+	/* check if the device is ready */
+	if (!dev || !dev->enabled) {
+		if (pm_idle_old)
+			pm_idle_old();
+		else
+			local_irq_enable();
+		return;
+	}
+
+	/* ask the governor for the next state */
+	next_state = cpuidle_curr_governor->select(dev);
+	if (need_resched())
+		return;
+	target_state = &dev->states[next_state];
+
+	/* enter the state and update stats */
+	dev->last_residency = target_state->enter(dev, target_state);
+	dev->last_state = target_state;
+	target_state->time += dev->last_residency;
+	target_state->usage++;
+
+	/* give the governor an opportunity to reflect on the outcome */
+	if (cpuidle_curr_governor->reflect)
+		cpuidle_curr_governor->reflect(dev);
+}
+
+/**
+ * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
+ */
+void cpuidle_install_idle_handler(void)
+{
+	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
+		/* Make sure all changes finished before we switch to new idle */
+		smp_wmb();
+		pm_idle = cpuidle_idle_call;
+	}
+}
+
+/**
+ * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
+ */
+void cpuidle_uninstall_idle_handler(void)
+{
+	if (enabled_devices && (pm_idle != pm_idle_old)) {
+		pm_idle = pm_idle_old;
+		cpu_idle_wait();
+	}
+}
+
+/**
+ * cpuidle_pause_and_lock - temporarily disables CPUIDLE
+ */
+void cpuidle_pause_and_lock(void)
+{
+	mutex_lock(&cpuidle_lock);
+	cpuidle_uninstall_idle_handler();
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);
+
+/**
+ * cpuidle_resume_and_unlock - resumes CPUIDLE operation
+ */
+void cpuidle_resume_and_unlock(void)
+{
+	cpuidle_install_idle_handler();
+	mutex_unlock(&cpuidle_lock);
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
+
+/**
+ * cpuidle_enable_device - enables idle PM for a CPU
+ * @dev: the CPU
+ *
+ * This function must be called between cpuidle_pause_and_lock and
+ * cpuidle_resume_and_unlock when used externally.
+ */
+int cpuidle_enable_device(struct cpuidle_device *dev)
+{
+	int ret, i;
+
+	if (dev->enabled)
+		return 0;
+	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
+		return -EIO;
+	if (!dev->state_count)
+		return -EINVAL;
+
+	if ((ret = cpuidle_add_state_sysfs(dev)))
+		return ret;
+
+	if (cpuidle_curr_governor->enable &&
+	    (ret = cpuidle_curr_governor->enable(dev)))
+		goto fail_sysfs;
+
+	for (i = 0; i < dev->state_count; i++) {
+		dev->states[i].usage = 0;
+		dev->states[i].time = 0;
+	}
+	dev->last_residency = 0;
+	dev->last_state = NULL;
+
+	smp_wmb();
+
+	dev->enabled = 1;
+
+	enabled_devices++;
+	return 0;
+
+fail_sysfs:
+	cpuidle_remove_state_sysfs(dev);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_enable_device);
+
+/**
+ * cpuidle_disable_device - disables idle PM for a CPU
+ * @dev: the CPU
+ *
+ * This function must be called between cpuidle_pause_and_lock and
+ * cpuidle_resume_and_unlock when used externally.
+ */
+void cpuidle_disable_device(struct cpuidle_device *dev)
+{
+	if (!dev->enabled)
+		return;
+	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
+		return;
+
+	dev->enabled = 0;
+
+	if (cpuidle_curr_governor->disable)
+		cpuidle_curr_governor->disable(dev);
+
+	cpuidle_remove_state_sysfs(dev);
+	enabled_devices--;
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_disable_device);
+
+/**
+ * cpuidle_register_device - registers a CPU's idle PM feature
+ * @dev: the cpu
+ */
+int cpuidle_register_device(struct cpuidle_device *dev)
+{
+	int ret;
+	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+
+	if (!sys_dev)
+		return -EINVAL;
+	if (!try_module_get(cpuidle_curr_driver->owner))
+		return -EINVAL;
+
+	init_completion(&dev->kobj_unregister);
+
+	mutex_lock(&cpuidle_lock);
+
+	per_cpu(cpuidle_devices, dev->cpu) = dev;
+	list_add(&dev->device_list, &cpuidle_detected_devices);
+	if ((ret = cpuidle_add_sysfs(sys_dev))) {
+		mutex_unlock(&cpuidle_lock);
+		module_put(cpuidle_curr_driver->owner);
+		return ret;
+	}
+
+	cpuidle_enable_device(dev);
+	cpuidle_install_idle_handler();
+
+	mutex_unlock(&cpuidle_lock);
+
+	return 0;
+
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_register_device);
+
+/**
+ * cpuidle_unregister_device - unregisters a CPU's idle PM feature
+ * @dev: the cpu
+ */
+void cpuidle_unregister_device(struct cpuidle_device *dev)
+{
+	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+
+	cpuidle_pause_and_lock();
+
+	cpuidle_disable_device(dev);
+
+	cpuidle_remove_sysfs(sys_dev);
+	list_del(&dev->device_list);
+	wait_for_completion(&dev->kobj_unregister);
+	per_cpu(cpuidle_devices, dev->cpu) = NULL;
+
+	cpuidle_resume_and_unlock();
+
+	module_put(cpuidle_curr_driver->owner);
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
+
+#ifdef CONFIG_SMP
+
+static void smp_callback(void *v)
+{
+	/* we already woke the CPU up, nothing more to do */
+}
+
+/*
+ * This function gets called when a part of the kernel has a new latency
+ * requirement.  This means we need to get all processors out of their C-state,
+ * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
+ * wakes them all right up.
+ */
+static int cpuidle_latency_notify(struct notifier_block *b,
+		unsigned long l, void *v)
+{
+	smp_call_function(smp_callback, NULL, 0, 1);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpuidle_latency_notifier = {
+	.notifier_call = cpuidle_latency_notify,
+};
+
+#define latency_notifier_init(x) do { register_latency_notifier(x); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define latency_notifier_init(x) do { } while (0)
+
+#endif /* CONFIG_SMP */
+
+/**
+ * cpuidle_init - core initializer
+ */
+static int __init cpuidle_init(void)
+{
+	int ret;
+
+	pm_idle_old = pm_idle;
+
+	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
+	if (ret)
+		return ret;
+
+	latency_notifier_init(&cpuidle_latency_notifier);
+
+	return 0;
+}
+
+core_initcall(cpuidle_init);
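
The core above expects exactly one cpuidle_driver plus one cpuidle_device per CPU, with states[] and state_count filled in before cpuidle_register_device() is called, which is the pattern acpi_processor_power_init() follows earlier in this patch. A hypothetical minimal single-CPU driver against this API (the my_* names, the single C1 state, and its latency numbers are invented for illustration):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpuidle.h>

/* Toy C1 entry point: HLT with interrupts enabled. */
static int my_enter_c1(struct cpuidle_device *dev, struct cpuidle_state *state)
{
	safe_halt();
	return 0;	/* residency in us; 0 when we cannot measure it */
}

static struct cpuidle_driver my_idle_driver = {
	.name  = "my_idle",
	.owner = THIS_MODULE,
};

static struct cpuidle_device my_idle_device;	/* CPU 0 only, for brevity */

static int __init my_idle_init(void)
{
	struct cpuidle_state *state = &my_idle_device.states[0];
	int ret;

	/* Fails with -EBUSY if another driver (e.g. acpi_idle) got here first. */
	ret = cpuidle_register_driver(&my_idle_driver);
	if (ret)
		return ret;

	snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
	state->exit_latency = 1;	/* us, made-up value */
	state->target_residency = 6;	/* us, made-up value */
	state->flags = CPUIDLE_FLAG_SHALLOW;
	state->enter = my_enter_c1;

	my_idle_device.state_count = 1;
	my_idle_device.cpu = 0;

	/* Enables the device and installs cpuidle_idle_call() as pm_idle. */
	ret = cpuidle_register_device(&my_idle_device);
	if (ret)
		cpuidle_unregister_driver(&my_idle_driver);
	return ret;
}

static void __exit my_idle_exit(void)
{
	cpuidle_unregister_device(&my_idle_device);
	cpuidle_unregister_driver(&my_idle_driver);
}

module_init(my_idle_init);
module_exit(my_idle_exit);
MODULE_LICENSE("GPL");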
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
new file mode 100644
index 000000000000..9476ba33ee2c
--- /dev/null
+++ b/drivers/cpuidle/cpuidle.h
@@ -0,0 +1,33 @@
+/*
+ * cpuidle.h - The internal header file
+ */
+
+#ifndef __DRIVER_CPUIDLE_H
+#define __DRIVER_CPUIDLE_H
+
+#include <linux/sysdev.h>
+
+/* For internal use only */
+extern struct cpuidle_governor *cpuidle_curr_governor;
+extern struct cpuidle_driver *cpuidle_curr_driver;
+extern struct list_head cpuidle_governors;
+extern struct list_head cpuidle_detected_devices;
+extern struct mutex cpuidle_lock;
+extern spinlock_t cpuidle_driver_lock;
+
+/* idle loop */
+extern void cpuidle_install_idle_handler(void);
+extern void cpuidle_uninstall_idle_handler(void);
+
+/* governors */
+extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
+
+/* sysfs */
+extern int cpuidle_add_class_sysfs(struct sysdev_class *cls);
+extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls);
+extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
+extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
+extern int cpuidle_add_sysfs(struct sys_device *sysdev);
+extern void cpuidle_remove_sysfs(struct sys_device *sysdev);
+
+#endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
new file mode 100644
index 000000000000..2257004fe33d
--- /dev/null
+++ b/drivers/cpuidle/driver.c
@@ -0,0 +1,56 @@
+/*
+ * driver.c - driver support
+ *
+ * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *               Shaohua Li <shaohua.li@intel.com>
+ *               Adam Belay <abelay@novell.com>
+ *
+ * This code is licenced under the GPL.
+ */
+
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/cpuidle.h>
+
+#include "cpuidle.h"
+
+struct cpuidle_driver *cpuidle_curr_driver;
+DEFINE_SPINLOCK(cpuidle_driver_lock);
+
+/**
+ * cpuidle_register_driver - registers a driver
+ * @drv: the driver
+ */
+int cpuidle_register_driver(struct cpuidle_driver *drv)
+{
+	if (!drv)
+		return -EINVAL;
+
+	spin_lock(&cpuidle_driver_lock);
+	if (cpuidle_curr_driver) {
+		spin_unlock(&cpuidle_driver_lock);
+		return -EBUSY;
+	}
+	cpuidle_curr_driver = drv;
+	spin_unlock(&cpuidle_driver_lock);
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_register_driver);
+
+/**
+ * cpuidle_unregister_driver - unregisters a driver
+ * @drv: the driver
+ */
+void cpuidle_unregister_driver(struct cpuidle_driver *drv)
+{
+	if (!drv)
+		return;
+
+	spin_lock(&cpuidle_driver_lock);
+	cpuidle_curr_driver = NULL;
+	spin_unlock(&cpuidle_driver_lock);
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
new file mode 100644
index 000000000000..bb699cb2dc5a
--- /dev/null
+++ b/drivers/cpuidle/governor.c
@@ -0,0 +1,141 @@
+/*
+ * governor.c - governor support
+ *
+ * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *               Shaohua Li <shaohua.li@intel.com>
+ *               Adam Belay <abelay@novell.com>
+ *
+ * This code is licenced under the GPL.
+ */
+
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/cpuidle.h>
+
+#include "cpuidle.h"
+
+LIST_HEAD(cpuidle_governors);
+struct cpuidle_governor *cpuidle_curr_governor;
+
+/**
+ * __cpuidle_find_governor - finds a governor of the specified name
+ * @str: the name
+ *
+ * Must be called with cpuidle_lock acquired.
+ */
+static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
+{
+	struct cpuidle_governor *gov;
+
+	list_for_each_entry(gov, &cpuidle_governors, governor_list)
+		if (!strnicmp(str, gov->name, CPUIDLE_NAME_LEN))
+			return gov;
+
+	return NULL;
+}
+
+/**
+ * cpuidle_switch_governor - changes the governor
+ * @gov: the new target governor
+ *
+ * NOTE: "gov" can be NULL to specify disabled
+ * Must be called with cpuidle_lock acquired.
+ */
+int cpuidle_switch_governor(struct cpuidle_governor *gov)
+{
+	struct cpuidle_device *dev;
+
+	if (gov == cpuidle_curr_governor)
+		return 0;
+
+	cpuidle_uninstall_idle_handler();
+
+	if (cpuidle_curr_governor) {
+		list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
+			cpuidle_disable_device(dev);
+		module_put(cpuidle_curr_governor->owner);
+	}
+
+	cpuidle_curr_governor = gov;
+
+	if (gov) {
+		if (!try_module_get(cpuidle_curr_governor->owner))
+			return -EINVAL;
+		list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
+			cpuidle_enable_device(dev);
+		cpuidle_install_idle_handler();
+		printk(KERN_INFO "cpuidle: using governor %s\n", gov->name);
+	}
+
+	return 0;
+}
+
+/**
+ * cpuidle_register_governor - registers a governor
+ * @gov: the governor
+ */
+int cpuidle_register_governor(struct cpuidle_governor *gov)
+{
+	int ret = -EEXIST;
+
+	if (!gov || !gov->select)
+		return -EINVAL;
+
+	mutex_lock(&cpuidle_lock);
+	if (__cpuidle_find_governor(gov->name) == NULL) {
+		ret = 0;
+		list_add_tail(&gov->governor_list, &cpuidle_governors);
+		if (!cpuidle_curr_governor ||
+		    cpuidle_curr_governor->rating < gov->rating)
+			cpuidle_switch_governor(gov);
+	}
+	mutex_unlock(&cpuidle_lock);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_register_governor);
+
+/**
+ * cpuidle_replace_governor - find a replacement governor
+ * @exclude_rating: the rating that will be skipped while looking for
+ * a new governor.
+ */
+static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating)
+{
+	struct cpuidle_governor *gov;
+	struct cpuidle_governor *ret_gov = NULL;
+	unsigned int max_rating = 0;
+
+	list_for_each_entry(gov, &cpuidle_governors, governor_list) {
+		if (gov->rating == exclude_rating)
+			continue;
+		if (gov->rating > max_rating) {
+			max_rating = gov->rating;
+			ret_gov = gov;
+		}
+	}
+
+	return ret_gov;
+}
+
+/**
+ * cpuidle_unregister_governor - unregisters a governor
+ * @gov: the governor
+ */
+void cpuidle_unregister_governor(struct cpuidle_governor *gov)
+{
+	if (!gov)
+		return;
+
+	mutex_lock(&cpuidle_lock);
+	if (gov == cpuidle_curr_governor) {
+		struct cpuidle_governor *new_gov;
+		new_gov = cpuidle_replace_governor(gov->rating);
+		cpuidle_switch_governor(new_gov);
+	}
+	list_del(&gov->governor_list);
+	mutex_unlock(&cpuidle_lock);
+}
+
+EXPORT_SYMBOL_GPL(cpuidle_unregister_governor);
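
Registration above automatically switches to the newcomer whenever its rating beats the current governor's. A hypothetical minimal governor against this interface (the name, rating, and pick-the-deepest-state policy are invented; unlike ladder, it ignores the latency constraint exposed via system_latency_constraint()):

#include <linux/module.h>
#include <linux/cpuidle.h>

/* Always pick the deepest state the device exposes. */
static int deepest_select(struct cpuidle_device *dev)
{
	return dev->state_count - 1;
}

static struct cpuidle_governor deepest_governor = {
	.name	= "deepest",
	.rating	= 5,	/* below ladder (10), so never auto-selected */
	.select	= deepest_select,
	.owner	= THIS_MODULE,
};

static int __init deepest_init(void)
{
	return cpuidle_register_governor(&deepest_governor);
}

static void __exit deepest_exit(void)
{
	cpuidle_unregister_governor(&deepest_governor);
}

module_init(deepest_init);
module_exit(deepest_exit);
MODULE_LICENSE("GPL");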
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile
new file mode 100644
index 000000000000..1b512722689f
--- /dev/null
+++ b/drivers/cpuidle/governors/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for cpuidle governors.
+#
+
+obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
+obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c new file mode 100644 index 000000000000..eb666ecae7c9 --- /dev/null +++ b/drivers/cpuidle/governors/ladder.c | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * ladder.c - the residency ladder algorithm | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> | ||
5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | ||
6 | * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> | ||
7 | * | ||
8 | * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
9 | * Shaohua Li <shaohua.li@intel.com> | ||
10 | * Adam Belay <abelay@novell.com> | ||
11 | * | ||
12 | * This code is licenced under the GPL. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/cpuidle.h> | ||
17 | #include <linux/latency.h> | ||
18 | #include <linux/moduleparam.h> | ||
19 | #include <linux/jiffies.h> | ||
20 | |||
21 | #include <asm/io.h> | ||
22 | #include <asm/uaccess.h> | ||
23 | |||
24 | #define PROMOTION_COUNT 4 | ||
25 | #define DEMOTION_COUNT 1 | ||
26 | |||
27 | struct ladder_device_state { | ||
28 | struct { | ||
29 | u32 promotion_count; | ||
30 | u32 demotion_count; | ||
31 | u32 promotion_time; | ||
32 | u32 demotion_time; | ||
33 | } threshold; | ||
34 | struct { | ||
35 | int promotion_count; | ||
36 | int demotion_count; | ||
37 | } stats; | ||
38 | }; | ||
39 | |||
40 | struct ladder_device { | ||
41 | struct ladder_device_state states[CPUIDLE_STATE_MAX]; | ||
42 | int last_state_idx; | ||
43 | }; | ||
44 | |||
45 | static DEFINE_PER_CPU(struct ladder_device, ladder_devices); | ||
46 | |||
47 | /** | ||
48 | * ladder_do_selection - prepares private data for a state change | ||
49 | * @ldev: the ladder device | ||
50 | * @old_idx: the current state index | ||
51 | * @new_idx: the new target state index | ||
52 | */ | ||
53 | static inline void ladder_do_selection(struct ladder_device *ldev, | ||
54 | int old_idx, int new_idx) | ||
55 | { | ||
56 | ldev->states[old_idx].stats.promotion_count = 0; | ||
57 | ldev->states[old_idx].stats.demotion_count = 0; | ||
58 | ldev->last_state_idx = new_idx; | ||
59 | } | ||
60 | |||
61 | /** | ||
62 | * ladder_select_state - selects the next state to enter | ||
63 | * @dev: the CPU | ||
64 | */ | ||
65 | static int ladder_select_state(struct cpuidle_device *dev) | ||
66 | { | ||
67 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | ||
68 | struct ladder_device_state *last_state; | ||
69 | int last_residency, last_idx; | ||
70 | |||
71 | if (unlikely(!ldev)) | ||
72 | return 0; | ||
73 | last_idx = ldev->last_state_idx; | ||
74 | last_state = &ldev->states[last_idx]; | ||
75 | |||
76 | if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) | ||
77 | last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; | ||
78 | else | ||
79 | last_residency = last_state->threshold.promotion_time + 1; | ||
80 | |||
81 | /* consider promotion */ | ||
82 | if (last_idx < dev->state_count - 1 && | ||
83 | last_residency > last_state->threshold.promotion_time && | ||
84 | dev->states[last_idx + 1].exit_latency <= system_latency_constraint()) { | ||
85 | last_state->stats.promotion_count++; | ||
86 | last_state->stats.demotion_count = 0; | ||
87 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { | ||
88 | ladder_do_selection(ldev, last_idx, last_idx + 1); | ||
89 | return last_idx + 1; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | /* consider demotion */ | ||
94 | if (last_idx > 0 && | ||
95 | last_residency < last_state->threshold.demotion_time) { | ||
96 | last_state->stats.demotion_count++; | ||
97 | last_state->stats.promotion_count = 0; | ||
98 | if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) { | ||
99 | ladder_do_selection(ldev, last_idx, last_idx - 1); | ||
100 | return last_idx - 1; | ||
101 | } | ||
102 | } | ||
103 | |||
104 | /* otherwise remain at the current state */ | ||
105 | return last_idx; | ||
106 | } | ||
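
The walk above moves at most one rung per idle exit: residencies (after subtracting the state's exit latency) above the promotion threshold for PROMOTION_COUNT consecutive wakeups climb one state, a single residency below the demotion threshold drops one, and promotion is additionally gated on the next state's exit_latency fitting system_latency_constraint(). A self-contained user-space rendition of the same walk, with invented thresholds, illustration only:

    #include <stdio.h>

    #define NSTATES         3
    #define PROMOTION_COUNT 4
    #define DEMOTION_COUNT  1

    int main(void)
    {
        /* invented per-state thresholds, in us */
        unsigned int promo_t[NSTATES] = { 1, 50, 0 };    /* last unused */
        unsigned int demo_t[NSTATES] = { 0, 50, 300 };   /* first unused */
        int promo_c[NSTATES] = { 0 }, demo_c[NSTATES] = { 0 };
        unsigned int residency[] = { 80, 80, 80, 80, 80, 80, 80, 80, 10, 10 };
        unsigned int i, r;
        int idx = 0;

        for (i = 0; i < sizeof(residency) / sizeof(residency[0]); i++) {
            r = residency[i];
            if (idx < NSTATES - 1 && r > promo_t[idx] &&
                ++promo_c[idx] >= PROMOTION_COUNT) {
                promo_c[idx] = demo_c[idx] = 0;
                idx++;
            } else if (idx > 0 && r < demo_t[idx] &&
                       ++demo_c[idx] >= DEMOTION_COUNT) {
                promo_c[idx] = demo_c[idx] = 0;
                idx--;
            }
            printf("residency %3u us -> state %d\n", r, idx);
        }
        return 0;
    }

Climbing two rungs takes eight long residencies here, which is the intended damping; contrast menu below, which can jump straight to the deepest state.
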
107 | |||
108 | /** | ||
109 | * ladder_enable_device - setup for the governor | ||
110 | * @dev: the CPU | ||
111 | */ | ||
112 | static int ladder_enable_device(struct cpuidle_device *dev) | ||
113 | { | ||
114 | int i; | ||
115 | struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); | ||
116 | struct ladder_device_state *lstate; | ||
117 | struct cpuidle_state *state; | ||
118 | |||
119 | ldev->last_state_idx = 0; | ||
120 | |||
121 | for (i = 0; i < dev->state_count; i++) { | ||
122 | state = &dev->states[i]; | ||
123 | lstate = &ldev->states[i]; | ||
124 | |||
125 | lstate->stats.promotion_count = 0; | ||
126 | lstate->stats.demotion_count = 0; | ||
127 | |||
128 | lstate->threshold.promotion_count = PROMOTION_COUNT; | ||
129 | lstate->threshold.demotion_count = DEMOTION_COUNT; | ||
130 | |||
131 | if (i < dev->state_count - 1) | ||
132 | lstate->threshold.promotion_time = state->exit_latency; | ||
133 | if (i > 0) | ||
134 | lstate->threshold.demotion_time = state->exit_latency; | ||
135 | } | ||
136 | |||
137 | return 0; | ||
138 | } | ||
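
The thresholds are seeded from the states' own exit latencies: state i is promoted once adjusted residencies exceed exit_latency[i] and demoted once they fall below it. With hypothetical latencies:

    /* Hypothetical three-state example of the setup above:
     *   state   exit_latency   promotion_time   demotion_time
     *   C1            1 us           1 us        unused (i == 0)
     *   C2           50 us          50 us        50 us
     *   C3          300 us     unused (last)     300 us
     * The ladder leaves C2 upward after PROMOTION_COUNT residencies
     * above 50 us and falls back after DEMOTION_COUNT below 50 us. */
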
139 | |||
140 | static struct cpuidle_governor ladder_governor = { | ||
141 | .name = "ladder", | ||
142 | .rating = 10, | ||
143 | .enable = ladder_enable_device, | ||
144 | .select = ladder_select_state, | ||
145 | .owner = THIS_MODULE, | ||
146 | }; | ||
147 | |||
148 | /** | ||
149 | * init_ladder - initializes the governor | ||
150 | */ | ||
151 | static int __init init_ladder(void) | ||
152 | { | ||
153 | return cpuidle_register_governor(&ladder_governor); | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * exit_ladder - exits the governor | ||
158 | */ | ||
159 | static void __exit exit_ladder(void) | ||
160 | { | ||
161 | cpuidle_unregister_governor(&ladder_governor); | ||
162 | } | ||
163 | |||
164 | MODULE_LICENSE("GPL"); | ||
165 | module_init(init_ladder); | ||
166 | module_exit(exit_ladder); | ||
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c new file mode 100644 index 000000000000..299d45c3bdd2 --- /dev/null +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -0,0 +1,137 @@ | |||
1 | /* | ||
2 | * menu.c - the menu idle governor | ||
3 | * | ||
4 | * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com> | ||
5 | * | ||
6 | * This code is licenced under the GPL. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/cpuidle.h> | ||
11 | #include <linux/latency.h> | ||
12 | #include <linux/time.h> | ||
13 | #include <linux/ktime.h> | ||
14 | #include <linux/hrtimer.h> | ||
15 | #include <linux/tick.h> | ||
16 | |||
17 | #define BREAK_FUZZ 4 /* 4 us */ | ||
18 | |||
19 | struct menu_device { | ||
20 | int last_state_idx; | ||
21 | |||
22 | unsigned int expected_us; | ||
23 | unsigned int predicted_us; | ||
24 | unsigned int last_measured_us; | ||
25 | unsigned int elapsed_us; | ||
26 | }; | ||
27 | |||
28 | static DEFINE_PER_CPU(struct menu_device, menu_devices); | ||
29 | |||
30 | /** | ||
31 | * menu_select - selects the next idle state to enter | ||
32 | * @dev: the CPU | ||
33 | */ | ||
34 | static int menu_select(struct cpuidle_device *dev) | ||
35 | { | ||
36 | struct menu_device *data = &__get_cpu_var(menu_devices); | ||
37 | int i; | ||
38 | |||
39 | /* determine the expected residency time */ | ||
40 | data->expected_us = | ||
41 | (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; | ||
42 | |||
43 | /* find the deepest idle state that satisfies our constraints */ | ||
44 | for (i = 1; i < dev->state_count; i++) { | ||
45 | struct cpuidle_state *s = &dev->states[i]; | ||
46 | |||
47 | if (s->target_residency > data->expected_us) | ||
48 | break; | ||
49 | if (s->target_residency > data->predicted_us) | ||
50 | break; | ||
51 | if (s->exit_latency > system_latency_constraint()) | ||
52 | break; | ||
53 | } | ||
54 | |||
55 | data->last_state_idx = i - 1; | ||
56 | return i - 1; | ||
57 | } | ||
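
menu_select() needs no per-wakeup counters: it takes the nohz sleep-length estimate, bounds it by the recent-history prediction (predicted_us) and the global latency constraint, and jumps directly to the deepest state that still fits; the scan starts at index 1, so state 0 is the unconditional floor. One caveat in the expected_us line: the (u32) cast binds to ktime_to_ns() before the division by 1000, so the nanosecond value is truncated to 32 bits (about 4.29 s) before conversion. A sketch of an overflow-safe variant, illustrative only:

    /* Sketch: ktime_t sleep length -> whole microseconds without first
     * truncating the ns value to 32 bits (do_div() keeps the divide
     * safe on 32-bit architectures). */
    static unsigned int sleep_length_us(void)
    {
        u64 ns = ktime_to_ns(tick_nohz_get_sleep_length());

        do_div(ns, NSEC_PER_USEC);
        return min_t(u64, ns, UINT_MAX);
    }
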
58 | |||
59 | /** | ||
60 | * menu_reflect - attempts to guess what happened after entry | ||
61 | * @dev: the CPU | ||
62 | * | ||
63 | * NOTE: it's important to be fast here because this operation will add to | ||
64 | * the overall exit latency. | ||
65 | */ | ||
66 | static void menu_reflect(struct cpuidle_device *dev) | ||
67 | { | ||
68 | struct menu_device *data = &__get_cpu_var(menu_devices); | ||
69 | int last_idx = data->last_state_idx; | ||
70 | unsigned int measured_us = | ||
71 | cpuidle_get_last_residency(dev) + data->elapsed_us; | ||
72 | struct cpuidle_state *target = &dev->states[last_idx]; | ||
73 | |||
74 | /* | ||
75 | * Ugh, this idle state doesn't support residency measurements, so we | ||
76 | * are basically lost in the dark. As a compromise, assume we slept | ||
77 | * for one full standard timer tick. However, be aware that this | ||
78 | * could potentially result in a suboptimal state transition. | ||
79 | */ | ||
80 | if (!(target->flags & CPUIDLE_FLAG_TIME_VALID)) | ||
81 | measured_us = USEC_PER_SEC / HZ; | ||
82 | |||
83 | /* Predict time remaining until next break event */ | ||
84 | if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) { | ||
85 | data->predicted_us = max(measured_us, data->last_measured_us); | ||
86 | data->last_measured_us = measured_us; | ||
87 | data->elapsed_us = 0; | ||
88 | } else { | ||
89 | if (data->elapsed_us < data->elapsed_us + measured_us) | ||
90 | data->elapsed_us = measured_us; | ||
91 | else | ||
92 | data->elapsed_us = -1; | ||
93 | data->predicted_us = max(measured_us, data->last_measured_us); | ||
94 | } | ||
95 | } | ||
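
The else branch is subtler than it looks: measured_us was initialized above as the last residency plus the previous elapsed_us, so writing it back into elapsed_us accumulates idle time across wakeups that never reached the expected break event (timer ticks, stray interrupts), and the comparison guards against unsigned wraparound, pinning the accumulator at -1 (UINT_MAX) rather than letting it wrap to a small value. The idiom in isolation:

    /* Saturating unsigned add, the same shape as the accumulator
     * update in menu_reflect() above. */
    static unsigned int sat_add(unsigned int acc, unsigned int delta)
    {
        return (acc + delta < acc) ? (unsigned int)-1 : acc + delta;
    }
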
96 | |||
97 | /** | ||
98 | * menu_enable_device - initializes the per-CPU menu governor data | ||
99 | * @dev: the CPU | ||
100 | */ | ||
101 | static int menu_enable_device(struct cpuidle_device *dev) | ||
102 | { | ||
103 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | ||
104 | |||
105 | memset(data, 0, sizeof(struct menu_device)); | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static struct cpuidle_governor menu_governor = { | ||
111 | .name = "menu", | ||
112 | .rating = 20, | ||
113 | .enable = menu_enable_device, | ||
114 | .select = menu_select, | ||
115 | .reflect = menu_reflect, | ||
116 | .owner = THIS_MODULE, | ||
117 | }; | ||
118 | |||
119 | /** | ||
120 | * init_menu - initializes the governor | ||
121 | */ | ||
122 | static int __init init_menu(void) | ||
123 | { | ||
124 | return cpuidle_register_governor(&menu_governor); | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * exit_menu - exits the governor | ||
129 | */ | ||
130 | static void __exit exit_menu(void) | ||
131 | { | ||
132 | cpuidle_unregister_governor(&menu_governor); | ||
133 | } | ||
134 | |||
135 | MODULE_LICENSE("GPL"); | ||
136 | module_init(init_menu); | ||
137 | module_exit(exit_menu); | ||
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c new file mode 100644 index 000000000000..0f3515e77d4b --- /dev/null +++ b/drivers/cpuidle/sysfs.c | |||
@@ -0,0 +1,361 @@ | |||
1 | /* | ||
2 | * sysfs.c - sysfs support | ||
3 | * | ||
4 | * (C) 2006-2007 Shaohua Li <shaohua.li@intel.com> | ||
5 | * | ||
6 | * This code is licenced under the GPL. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/cpuidle.h> | ||
11 | #include <linux/sysfs.h> | ||
12 | #include <linux/cpu.h> | ||
13 | |||
14 | #include "cpuidle.h" | ||
15 | |||
16 | static unsigned int sysfs_switch; | ||
17 | static int __init cpuidle_sysfs_setup(char *unused) | ||
18 | { | ||
19 | sysfs_switch = 1; | ||
20 | return 1; | ||
21 | } | ||
22 | __setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); | ||
23 | |||
24 | static ssize_t show_available_governors(struct sys_device *dev, char *buf) | ||
25 | { | ||
26 | ssize_t i = 0; | ||
27 | struct cpuidle_governor *tmp; | ||
28 | |||
29 | mutex_lock(&cpuidle_lock); | ||
30 | list_for_each_entry(tmp, &cpuidle_governors, governor_list) { | ||
31 | if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2)) | ||
32 | goto out; | ||
33 | i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name); | ||
34 | } | ||
35 | |||
36 | out: | ||
37 | i += sprintf(&buf[i], "\n"); | ||
38 | mutex_unlock(&cpuidle_lock); | ||
39 | return i; | ||
40 | } | ||
41 | |||
42 | static ssize_t show_current_driver(struct sys_device *dev, char *buf) | ||
43 | { | ||
44 | ssize_t ret; | ||
45 | |||
46 | spin_lock(&cpuidle_driver_lock); | ||
47 | if (cpuidle_curr_driver) | ||
48 | ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name); | ||
49 | else | ||
50 | ret = sprintf(buf, "none\n"); | ||
51 | spin_unlock(&cpuidle_driver_lock); | ||
52 | |||
53 | return ret; | ||
54 | } | ||
55 | |||
56 | static ssize_t show_current_governor(struct sys_device *dev, char *buf) | ||
57 | { | ||
58 | ssize_t ret; | ||
59 | |||
60 | mutex_lock(&cpuidle_lock); | ||
61 | if (cpuidle_curr_governor) | ||
62 | ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name); | ||
63 | else | ||
64 | ret = sprintf(buf, "none\n"); | ||
65 | mutex_unlock(&cpuidle_lock); | ||
66 | |||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | static ssize_t store_current_governor(struct sys_device *dev, | ||
71 | const char *buf, size_t count) | ||
72 | { | ||
73 | char gov_name[CPUIDLE_NAME_LEN]; | ||
74 | int ret = -EINVAL; | ||
75 | size_t len = count; | ||
76 | struct cpuidle_governor *gov; | ||
77 | |||
78 | if (!len || len >= sizeof(gov_name)) | ||
79 | return -EINVAL; | ||
80 | |||
81 | memcpy(gov_name, buf, len); | ||
82 | gov_name[len] = '\0'; | ||
83 | if (gov_name[len - 1] == '\n') | ||
84 | gov_name[--len] = '\0'; | ||
85 | |||
86 | mutex_lock(&cpuidle_lock); | ||
87 | |||
88 | list_for_each_entry(gov, &cpuidle_governors, governor_list) { | ||
89 | if (strlen(gov->name) == len && !strcmp(gov->name, gov_name)) { | ||
90 | ret = cpuidle_switch_governor(gov); | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | mutex_unlock(&cpuidle_lock); | ||
96 | |||
97 | if (ret) | ||
98 | return ret; | ||
99 | else | ||
100 | return count; | ||
101 | } | ||
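
The store hook shows the usual hygiene for string-valued sysfs writes: reject empty or oversized input, copy into a bounded buffer, NUL-terminate, and strip the single trailing newline that echo appends before comparing. The same shape as a reusable helper, illustrative only:

    /* Sketch: bounded copy of a sysfs string write; returns the trimmed
     * length or -EINVAL. */
    static ssize_t sysfs_copy_name(char *out, size_t outlen,
                                   const char *buf, size_t count)
    {
        size_t len = count;

        if (!len || len >= outlen)
            return -EINVAL;
        memcpy(out, buf, len);
        out[len] = '\0';
        if (out[len - 1] == '\n')
            out[--len] = '\0';
        return len;
    }
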
102 | |||
103 | static SYSDEV_ATTR(current_driver, 0444, show_current_driver, NULL); | ||
104 | static SYSDEV_ATTR(current_governor_ro, 0444, show_current_governor, NULL); | ||
105 | |||
106 | static struct attribute *cpuclass_default_attrs[] = { | ||
107 | &attr_current_driver.attr, | ||
108 | &attr_current_governor_ro.attr, | ||
109 | NULL | ||
110 | }; | ||
111 | |||
112 | static SYSDEV_ATTR(available_governors, 0444, show_available_governors, NULL); | ||
113 | static SYSDEV_ATTR(current_governor, 0644, show_current_governor, | ||
114 | store_current_governor); | ||
115 | |||
116 | static struct attribute *cpuclass_switch_attrs[] = { | ||
117 | &attr_available_governors.attr, | ||
118 | &attr_current_driver.attr, | ||
119 | &attr_current_governor.attr, | ||
120 | NULL | ||
121 | }; | ||
122 | |||
123 | static struct attribute_group cpuclass_attr_group = { | ||
124 | .attrs = cpuclass_default_attrs, | ||
125 | .name = "cpuidle", | ||
126 | }; | ||
127 | |||
128 | /** | ||
129 | * cpuidle_add_class_sysfs - add CPU global sysfs attributes | ||
130 | */ | ||
131 | int cpuidle_add_class_sysfs(struct sysdev_class *cls) | ||
132 | { | ||
133 | if (sysfs_switch) | ||
134 | cpuclass_attr_group.attrs = cpuclass_switch_attrs; | ||
135 | |||
136 | return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group); | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * cpuidle_remove_class_sysfs - remove CPU global sysfs attributes | ||
141 | */ | ||
142 | void cpuidle_remove_class_sysfs(struct sysdev_class *cls) | ||
143 | { | ||
144 | sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group); | ||
145 | } | ||
146 | |||
147 | struct cpuidle_attr { | ||
148 | struct attribute attr; | ||
149 | ssize_t (*show)(struct cpuidle_device *, char *); | ||
150 | ssize_t (*store)(struct cpuidle_device *, const char *, size_t count); | ||
151 | }; | ||
152 | |||
153 | #define define_one_ro(_name, show) \ | ||
154 | static struct cpuidle_attr attr_##_name = __ATTR(_name, 0444, show, NULL) | ||
155 | #define define_one_rw(_name, show, store) \ | ||
156 | static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store) | ||
157 | |||
158 | #define kobj_to_cpuidledev(k) container_of(k, struct cpuidle_device, kobj) | ||
159 | #define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr) | ||
160 | static ssize_t cpuidle_show(struct kobject *kobj, struct attribute *attr, char *buf) | ||
161 | { | ||
162 | int ret = -EIO; | ||
163 | struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); | ||
164 | struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr); | ||
165 | |||
166 | if (cattr->show) { | ||
167 | mutex_lock(&cpuidle_lock); | ||
168 | ret = cattr->show(dev, buf); | ||
169 | mutex_unlock(&cpuidle_lock); | ||
170 | } | ||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | static ssize_t cpuidle_store(struct kobject *kobj, struct attribute *attr, | ||
175 | const char *buf, size_t count) | ||
176 | { | ||
177 | int ret = -EIO; | ||
178 | struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); | ||
179 | struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr); | ||
180 | |||
181 | if (cattr->store) { | ||
182 | mutex_lock(&cpuidle_lock); | ||
183 | ret = cattr->store(dev, buf, count); | ||
184 | mutex_unlock(&cpuidle_lock); | ||
185 | } | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | static struct sysfs_ops cpuidle_sysfs_ops = { | ||
190 | .show = cpuidle_show, | ||
191 | .store = cpuidle_store, | ||
192 | }; | ||
193 | |||
194 | static void cpuidle_sysfs_release(struct kobject *kobj) | ||
195 | { | ||
196 | struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); | ||
197 | |||
198 | complete(&dev->kobj_unregister); | ||
199 | } | ||
200 | |||
201 | static struct kobj_type ktype_cpuidle = { | ||
202 | .sysfs_ops = &cpuidle_sysfs_ops, | ||
203 | .release = cpuidle_sysfs_release, | ||
204 | }; | ||
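
Everything from here down repeats one pattern, once for the device and once for each state: a kobject embedded in a larger structure, attribute types carrying typed show/store hooks, and a sysfs_ops trampoline that recovers both objects with container_of(). Reduced to its skeleton (names invented):

    struct widget {                         /* any object exposing attrs */
        int value;
        struct kobject kobj;                /* embedded, not a pointer */
    };

    struct widget_attr {
        struct attribute attr;
        ssize_t (*show)(struct widget *, char *);
    };

    static ssize_t widget_show(struct kobject *kobj,
                               struct attribute *attr, char *buf)
    {
        struct widget *w = container_of(kobj, struct widget, kobj);
        struct widget_attr *wa = container_of(attr, struct widget_attr, attr);

        return wa->show ? wa->show(w, buf) : -EIO;
    }

The release hook completing kobj_unregister is what lets cpuidle_free_state_kobj() below block until the kobject's last reference is gone before kfree()ing the containing object.
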
205 | |||
206 | struct cpuidle_state_attr { | ||
207 | struct attribute attr; | ||
208 | ssize_t (*show)(struct cpuidle_state *, char *); | ||
209 | ssize_t (*store)(struct cpuidle_state *, const char *, size_t); | ||
210 | }; | ||
211 | |||
212 | #define define_one_state_ro(_name, show) \ | ||
213 | static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) | ||
214 | |||
215 | #define define_show_state_function(_name) \ | ||
216 | static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ | ||
217 | { \ | ||
218 | return sprintf(buf, "%u\n", state->_name);\ | ||
219 | } | ||
220 | |||
221 | static ssize_t show_state_name(struct cpuidle_state *state, char *buf) | ||
222 | { | ||
223 | return sprintf(buf, "%s\n", state->name); | ||
224 | } | ||
225 | |||
226 | define_show_state_function(exit_latency) | ||
227 | define_show_state_function(power_usage) | ||
228 | define_show_state_function(usage) | ||
229 | define_show_state_function(time) | ||
230 | define_one_state_ro(name, show_state_name); | ||
231 | define_one_state_ro(latency, show_state_exit_latency); | ||
232 | define_one_state_ro(power, show_state_power_usage); | ||
233 | define_one_state_ro(usage, show_state_usage); | ||
234 | define_one_state_ro(time, show_state_time); | ||
235 | |||
236 | static struct attribute *cpuidle_state_default_attrs[] = { | ||
237 | &attr_name.attr, | ||
238 | &attr_latency.attr, | ||
239 | &attr_power.attr, | ||
240 | &attr_usage.attr, | ||
241 | &attr_time.attr, | ||
242 | NULL | ||
243 | }; | ||
244 | |||
245 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) | ||
246 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) | ||
247 | #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) | ||
248 | static ssize_t cpuidle_state_show(struct kobject *kobj, | ||
249 | struct attribute *attr, char *buf) | ||
250 | { | ||
251 | int ret = -EIO; | ||
252 | struct cpuidle_state *state = kobj_to_state(kobj); | ||
253 | struct cpuidle_state_attr *cattr = attr_to_stateattr(attr); | ||
254 | |||
255 | if (cattr->show) | ||
256 | ret = cattr->show(state, buf); | ||
257 | |||
258 | return ret; | ||
259 | } | ||
260 | |||
261 | static struct sysfs_ops cpuidle_state_sysfs_ops = { | ||
262 | .show = cpuidle_state_show, | ||
263 | }; | ||
264 | |||
265 | static void cpuidle_state_sysfs_release(struct kobject *kobj) | ||
266 | { | ||
267 | struct cpuidle_state_kobj *state_obj = kobj_to_state_obj(kobj); | ||
268 | |||
269 | complete(&state_obj->kobj_unregister); | ||
270 | } | ||
271 | |||
272 | static struct kobj_type ktype_state_cpuidle = { | ||
273 | .sysfs_ops = &cpuidle_state_sysfs_ops, | ||
274 | .default_attrs = cpuidle_state_default_attrs, | ||
275 | .release = cpuidle_state_sysfs_release, | ||
276 | }; | ||
277 | |||
278 | static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) | ||
279 | { | ||
280 | kobject_unregister(&device->kobjs[i]->kobj); | ||
281 | wait_for_completion(&device->kobjs[i]->kobj_unregister); | ||
282 | kfree(device->kobjs[i]); | ||
283 | device->kobjs[i] = NULL; | ||
284 | } | ||
285 | |||
286 | /** | ||
287 | * cpuidle_add_state_sysfs - creates per-state sysfs attributes | ||
288 | * @device: the target device | ||
289 | */ | ||
290 | int cpuidle_add_state_sysfs(struct cpuidle_device *device) | ||
291 | { | ||
292 | int i, ret = -ENOMEM; | ||
293 | struct cpuidle_state_kobj *kobj; | ||
294 | |||
295 | /* state statistics */ | ||
296 | for (i = 0; i < device->state_count; i++) { | ||
297 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); | ||
298 | if (!kobj) | ||
299 | goto error_state; | ||
300 | kobj->state = &device->states[i]; | ||
301 | init_completion(&kobj->kobj_unregister); | ||
302 | |||
303 | kobj->kobj.parent = &device->kobj; | ||
304 | kobj->kobj.ktype = &ktype_state_cpuidle; | ||
305 | kobject_set_name(&kobj->kobj, "state%d", i); | ||
306 | ret = kobject_register(&kobj->kobj); | ||
307 | if (ret) { | ||
308 | kfree(kobj); | ||
309 | goto error_state; | ||
310 | } | ||
311 | device->kobjs[i] = kobj; | ||
312 | } | ||
313 | |||
314 | return 0; | ||
315 | |||
316 | error_state: | ||
317 | for (i = i - 1; i >= 0; i--) | ||
318 | cpuidle_free_state_kobj(device, i); | ||
319 | return ret; | ||
320 | } | ||
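
Note the unwind shape: when registration fails at state i the offending kobj has already been kfree()d, so the cleanup loop restarts at i - 1 and tears down only the states that made it in. The generic idiom (register_one/unregister_one are invented names):

    /* Sketch: register N sub-objects or unwind the ones that
     * succeeded. */
    static int register_all(void *obj, int n)
    {
        int i, ret = 0;

        for (i = 0; i < n; i++) {
            ret = register_one(obj, i);
            if (ret)
                break;
        }
        while (ret && --i >= 0)
            unregister_one(obj, i);
        return ret;
    }
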
321 | |||
322 | /** | ||
323 | * cpuidle_remove_state_sysfs - removes per-state sysfs attributes | ||
324 | * @device: the target device | ||
325 | */ | ||
326 | void cpuidle_remove_state_sysfs(struct cpuidle_device *device) | ||
327 | { | ||
328 | int i; | ||
329 | |||
330 | for (i = 0; i < device->state_count; i++) | ||
331 | cpuidle_free_state_kobj(device, i); | ||
332 | } | ||
333 | |||
334 | /** | ||
335 | * cpuidle_add_sysfs - creates a sysfs instance for the target device | ||
336 | * @sysdev: the target device | ||
337 | */ | ||
338 | int cpuidle_add_sysfs(struct sys_device *sysdev) | ||
339 | { | ||
340 | int cpu = sysdev->id; | ||
341 | struct cpuidle_device *dev; | ||
342 | |||
343 | dev = per_cpu(cpuidle_devices, cpu); | ||
344 | dev->kobj.parent = &sysdev->kobj; | ||
345 | dev->kobj.ktype = &ktype_cpuidle; | ||
346 | kobject_set_name(&dev->kobj, "%s", "cpuidle"); | ||
347 | return kobject_register(&dev->kobj); | ||
348 | } | ||
349 | |||
350 | /** | ||
351 | * cpuidle_remove_sysfs - deletes a sysfs instance on the target device | ||
352 | * @sysdev: the target device | ||
353 | */ | ||
354 | void cpuidle_remove_sysfs(struct sys_device *sysdev) | ||
355 | { | ||
356 | int cpu = sysdev->id; | ||
357 | struct cpuidle_device *dev; | ||
358 | |||
359 | dev = per_cpu(cpuidle_devices, cpu); | ||
360 | kobject_unregister(&dev->kobj); | ||
361 | } | ||
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 8990585bd228..e8a94b746295 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -1858,14 +1858,6 @@ static void ipw2100_down(struct ipw2100_priv *priv) | |||
1858 | 1858 | ||
1859 | modify_acceptable_latency("ipw2100", INFINITE_LATENCY); | 1859 | modify_acceptable_latency("ipw2100", INFINITE_LATENCY); |
1860 | 1860 | ||
1861 | #ifdef ACPI_CSTATE_LIMIT_DEFINED | ||
1862 | if (priv->config & CFG_C3_DISABLED) { | ||
1863 | IPW_DEBUG_INFO(": Resetting C3 transitions.\n"); | ||
1864 | acpi_set_cstate_limit(priv->cstate_limit); | ||
1865 | priv->config &= ~CFG_C3_DISABLED; | ||
1866 | } | ||
1867 | #endif | ||
1868 | |||
1869 | /* We have to signal any supplicant if we are disassociating */ | 1861 | /* We have to signal any supplicant if we are disassociating */ |
1870 | if (associated) | 1862 | if (associated) |
1871 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); | 1863 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); |
@@ -2088,14 +2080,6 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) | |||
2088 | /* RF_KILL is now enabled (else we wouldn't be here) */ | 2080 | /* RF_KILL is now enabled (else we wouldn't be here) */ |
2089 | priv->status |= STATUS_RF_KILL_HW; | 2081 | priv->status |= STATUS_RF_KILL_HW; |
2090 | 2082 | ||
2091 | #ifdef ACPI_CSTATE_LIMIT_DEFINED | ||
2092 | if (priv->config & CFG_C3_DISABLED) { | ||
2093 | IPW_DEBUG_INFO(": Resetting C3 transitions.\n"); | ||
2094 | acpi_set_cstate_limit(priv->cstate_limit); | ||
2095 | priv->config &= ~CFG_C3_DISABLED; | ||
2096 | } | ||
2097 | #endif | ||
2098 | |||
2099 | /* Make sure the RF Kill check timer is running */ | 2083 | /* Make sure the RF Kill check timer is running */ |
2100 | priv->stop_rf_kill = 0; | 2084 | priv->stop_rf_kill = 0; |
2101 | cancel_delayed_work(&priv->rf_kill); | 2085 | cancel_delayed_work(&priv->rf_kill); |
@@ -2326,23 +2310,10 @@ static void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i) | |||
2326 | u32 match, reg; | 2310 | u32 match, reg; |
2327 | int j; | 2311 | int j; |
2328 | #endif | 2312 | #endif |
2329 | #ifdef ACPI_CSTATE_LIMIT_DEFINED | ||
2330 | int limit; | ||
2331 | #endif | ||
2332 | 2313 | ||
2333 | IPW_DEBUG_INFO(": PCI latency error detected at 0x%04zX.\n", | 2314 | IPW_DEBUG_INFO(": PCI latency error detected at 0x%04zX.\n", |
2334 | i * sizeof(struct ipw2100_status)); | 2315 | i * sizeof(struct ipw2100_status)); |
2335 | 2316 | ||
2336 | #ifdef ACPI_CSTATE_LIMIT_DEFINED | ||
2337 | IPW_DEBUG_INFO(": Disabling C3 transitions.\n"); | ||
2338 | limit = acpi_get_cstate_limit(); | ||
2339 | if (limit > 2) { | ||
2340 | priv->cstate_limit = limit; | ||
2341 | acpi_set_cstate_limit(2); | ||
2342 | priv->config |= CFG_C3_DISABLED; | ||
2343 | } | ||
2344 | #endif | ||
2345 | |||
2346 | #ifdef IPW2100_DEBUG_C3 | 2317 | #ifdef IPW2100_DEBUG_C3 |
2347 | /* Halt the firmware so we can get a good image */ | 2318 |
2348 | write_register(priv->net_dev, IPW_REG_RESET_REG, | 2319 | write_register(priv->net_dev, IPW_REG_RESET_REG, |
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h index de7d384d38af..2b8be2418fa7 100644 --- a/drivers/net/wireless/ipw2100.h +++ b/drivers/net/wireless/ipw2100.h | |||
@@ -479,7 +479,6 @@ enum { | |||
479 | #define CFG_ASSOCIATE (1<<6) | 479 | #define CFG_ASSOCIATE (1<<6) |
480 | #define CFG_FIXED_RATE (1<<7) | 480 | #define CFG_FIXED_RATE (1<<7) |
481 | #define CFG_ADHOC_CREATE (1<<8) | 481 | #define CFG_ADHOC_CREATE (1<<8) |
482 | #define CFG_C3_DISABLED (1<<9) | ||
483 | #define CFG_PASSIVE_SCAN (1<<10) | 482 | #define CFG_PASSIVE_SCAN (1<<10) |
484 | #ifdef CONFIG_IPW2100_MONITOR | 483 | #ifdef CONFIG_IPW2100_MONITOR |
485 | #define CFG_CRC_CHECK (1<<11) | 484 | #define CFG_CRC_CHECK (1<<11) |
@@ -508,7 +507,6 @@ struct ipw2100_priv { | |||
508 | u8 bssid[ETH_ALEN]; | 507 | u8 bssid[ETH_ALEN]; |
509 | u8 channel; | 508 | u8 channel; |
510 | int last_mode; | 509 | int last_mode; |
511 | int cstate_limit; | ||
512 | 510 | ||
513 | unsigned long connect_start; | 511 | unsigned long connect_start; |
514 | unsigned long last_reset; | 512 | unsigned long last_reset; |
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 99934a999e66..26d79f6db8a0 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/cpu.h> | 5 | #include <linux/cpu.h> |
6 | #include <linux/cpuidle.h> | ||
6 | 7 | ||
7 | #include <asm/acpi.h> | 8 | #include <asm/acpi.h> |
8 | 9 | ||
@@ -75,7 +76,9 @@ struct acpi_processor_cx { | |||
75 | }; | 76 | }; |
76 | 77 | ||
77 | struct acpi_processor_power { | 78 | struct acpi_processor_power { |
79 | struct cpuidle_device dev; | ||
78 | struct acpi_processor_cx *state; | 80 | struct acpi_processor_cx *state; |
81 | struct acpi_processor_cx *bm_state; | ||
79 | unsigned long bm_check_timestamp; | 82 | unsigned long bm_check_timestamp; |
80 | u32 default_state; | 83 | u32 default_state; |
81 | u32 bm_activity; | 84 | u32 bm_activity; |
@@ -199,6 +202,7 @@ struct acpi_processor_flags { | |||
199 | u8 bm_check:1; | 202 | u8 bm_check:1; |
200 | u8 has_cst:1; | 203 | u8 has_cst:1; |
201 | u8 power_setup_done:1; | 204 | u8 power_setup_done:1; |
205 | u8 bm_rld_set:1; | ||
202 | }; | 206 | }; |
203 | 207 | ||
204 | struct acpi_processor { | 208 | struct acpi_processor { |
@@ -322,6 +326,7 @@ int acpi_processor_power_exit(struct acpi_processor *pr, | |||
322 | struct acpi_device *device); | 326 | struct acpi_device *device); |
323 | int acpi_processor_suspend(struct acpi_device * device, pm_message_t state); | 327 | int acpi_processor_suspend(struct acpi_device * device, pm_message_t state); |
324 | int acpi_processor_resume(struct acpi_device * device); | 328 | int acpi_processor_resume(struct acpi_device * device); |
329 | extern struct cpuidle_driver acpi_idle_driver; | ||
325 | 330 | ||
326 | /* in processor_thermal.c */ | 331 | /* in processor_thermal.c */ |
327 | int acpi_processor_get_limit_info(struct acpi_processor *pr); | 332 | int acpi_processor_get_limit_info(struct acpi_processor *pr); |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index bf5e0009de75..8ccedf7a0a5a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -189,32 +189,6 @@ extern int ec_transaction(u8 command, | |||
189 | extern int acpi_blacklisted(void); | 189 | extern int acpi_blacklisted(void); |
190 | extern void acpi_bios_year(char *s); | 190 | extern void acpi_bios_year(char *s); |
191 | 191 | ||
192 | #define ACPI_CSTATE_LIMIT_DEFINED /* for driver builds */ | ||
193 | #ifdef CONFIG_ACPI | ||
194 | |||
195 | /* | ||
196 | * Set highest legal C-state | ||
197 | * 0: C0 okay, but not C1 | ||
198 | * 1: C1 okay, but not C2 | ||
199 | * 2: C2 okay, but not C3 etc. | ||
200 | */ | ||
201 | |||
202 | extern unsigned int max_cstate; | ||
203 | |||
204 | static inline unsigned int acpi_get_cstate_limit(void) | ||
205 | { | ||
206 | return max_cstate; | ||
207 | } | ||
208 | static inline void acpi_set_cstate_limit(unsigned int new_limit) | ||
209 | { | ||
210 | max_cstate = new_limit; | ||
211 | return; | ||
212 | } | ||
213 | #else | ||
214 | static inline unsigned int acpi_get_cstate_limit(void) { return 0; } | ||
215 | static inline void acpi_set_cstate_limit(unsigned int new_limit) { return; } | ||
216 | #endif | ||
217 | |||
218 | #ifdef CONFIG_ACPI_NUMA | 192 | #ifdef CONFIG_ACPI_NUMA |
219 | int acpi_get_pxm(acpi_handle handle); | 193 | int acpi_get_pxm(acpi_handle handle); |
220 | int acpi_get_node(acpi_handle *handle); | 194 | int acpi_get_node(acpi_handle *handle); |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h new file mode 100644 index 000000000000..16a51546db44 --- /dev/null +++ b/include/linux/cpuidle.h | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * cpuidle.h - a generic framework for CPU idle power management | ||
3 | * | ||
4 | * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
5 | * Shaohua Li <shaohua.li@intel.com> | ||
6 | * Adam Belay <abelay@novell.com> | ||
7 | * | ||
8 | * This code is licenced under the GPL. | ||
9 | */ | ||
10 | |||
11 | #ifndef _LINUX_CPUIDLE_H | ||
12 | #define _LINUX_CPUIDLE_H | ||
13 | |||
14 | #include <linux/percpu.h> | ||
15 | #include <linux/list.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/kobject.h> | ||
18 | #include <linux/completion.h> | ||
19 | |||
20 | #define CPUIDLE_STATE_MAX 8 | ||
21 | #define CPUIDLE_NAME_LEN 16 | ||
22 | |||
23 | struct cpuidle_device; | ||
24 | |||
25 | |||
26 | /**************************** | ||
27 | * CPUIDLE DEVICE INTERFACE * | ||
28 | ****************************/ | ||
29 | |||
30 | struct cpuidle_state { | ||
31 | char name[CPUIDLE_NAME_LEN]; | ||
32 | void *driver_data; | ||
33 | |||
34 | unsigned int flags; | ||
35 | unsigned int exit_latency; /* in us */ | ||
36 | unsigned int power_usage; /* in mW */ | ||
37 | unsigned int target_residency; /* in us */ | ||
38 | |||
39 | unsigned int usage; | ||
40 | unsigned int time; /* in us */ | ||
41 | |||
42 | int (*enter) (struct cpuidle_device *dev, | ||
43 | struct cpuidle_state *state); | ||
44 | }; | ||
45 | |||
46 | /* Idle State Flags */ | ||
47 | #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ | ||
48 | #define CPUIDLE_FLAG_CHECK_BM (0x02) /* BM activity will exit state */ | ||
49 | #define CPUIDLE_FLAG_SHALLOW (0x10) /* low latency, minimal savings */ | ||
50 | #define CPUIDLE_FLAG_BALANCED (0x20) /* medium latency, moderate savings */ | ||
51 | #define CPUIDLE_FLAG_DEEP (0x40) /* high latency, large savings */ | ||
52 | |||
53 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) | ||
54 | |||
55 | /** | ||
56 | * cpuidle_get_statedata - retrieves private driver state data | ||
57 | * @state: the state | ||
58 | */ | ||
59 | static inline void *cpuidle_get_statedata(struct cpuidle_state *state) | ||
60 | { | ||
61 | return state->driver_data; | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * cpuidle_set_statedata - stores private driver state data | ||
66 | * @state: the state | ||
67 | * @data: the private data | ||
68 | */ | ||
69 | static inline void | ||
70 | cpuidle_set_statedata(struct cpuidle_state *state, void *data) | ||
71 | { | ||
72 | state->driver_data = data; | ||
73 | } | ||
74 | |||
75 | struct cpuidle_state_kobj { | ||
76 | struct cpuidle_state *state; | ||
77 | struct completion kobj_unregister; | ||
78 | struct kobject kobj; | ||
79 | }; | ||
80 | |||
81 | struct cpuidle_device { | ||
82 | unsigned int enabled:1; | ||
83 | unsigned int cpu; | ||
84 | |||
85 | int last_residency; | ||
86 | int state_count; | ||
87 | struct cpuidle_state states[CPUIDLE_STATE_MAX]; | ||
88 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; | ||
89 | struct cpuidle_state *last_state; | ||
90 | |||
91 | struct list_head device_list; | ||
92 | struct kobject kobj; | ||
93 | struct completion kobj_unregister; | ||
94 | void *governor_data; | ||
95 | }; | ||
96 | |||
97 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); | ||
98 | |||
99 | /** | ||
100 | * cpuidle_get_last_residency - retrieves the last state's residency time | ||
101 | * @dev: the target CPU | ||
102 | * | ||
103 | * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set | ||
104 | */ | ||
105 | static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) | ||
106 | { | ||
107 | return dev->last_residency; | ||
108 | } | ||
109 | |||
110 | |||
111 | /**************************** | ||
112 | * CPUIDLE DRIVER INTERFACE * | ||
113 | ****************************/ | ||
114 | |||
115 | struct cpuidle_driver { | ||
116 | char name[CPUIDLE_NAME_LEN]; | ||
117 | struct module *owner; | ||
118 | }; | ||
119 | |||
120 | #ifdef CONFIG_CPU_IDLE | ||
121 | |||
122 | extern int cpuidle_register_driver(struct cpuidle_driver *drv); | ||
123 | extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); | ||
124 | extern int cpuidle_register_device(struct cpuidle_device *dev); | ||
125 | extern void cpuidle_unregister_device(struct cpuidle_device *dev); | ||
126 | |||
127 | extern void cpuidle_pause_and_lock(void); | ||
128 | extern void cpuidle_resume_and_unlock(void); | ||
129 | extern int cpuidle_enable_device(struct cpuidle_device *dev); | ||
130 | extern void cpuidle_disable_device(struct cpuidle_device *dev); | ||
131 | |||
132 | #else | ||
133 | |||
134 | static inline int cpuidle_register_driver(struct cpuidle_driver *drv) | ||
135 | {return 0;} | ||
136 | static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { } | ||
137 | static inline int cpuidle_register_device(struct cpuidle_device *dev) | ||
138 | {return 0;} | ||
139 | static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { } | ||
140 | |||
141 | static inline void cpuidle_pause_and_lock(void) { } | ||
142 | static inline void cpuidle_resume_and_unlock(void) { } | ||
143 | static inline int cpuidle_enable_device(struct cpuidle_device *dev) | ||
144 | {return 0;} | ||
145 | static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } | ||
146 | |||
147 | #endif | ||
148 | |||
149 | /****************************** | ||
150 | * CPUIDLE GOVERNOR INTERFACE * | ||
151 | ******************************/ | ||
152 | |||
153 | struct cpuidle_governor { | ||
154 | char name[CPUIDLE_NAME_LEN]; | ||
155 | struct list_head governor_list; | ||
156 | unsigned int rating; | ||
157 | |||
158 | int (*enable) (struct cpuidle_device *dev); | ||
159 | void (*disable) (struct cpuidle_device *dev); | ||
160 | |||
161 | int (*select) (struct cpuidle_device *dev); | ||
162 | void (*reflect) (struct cpuidle_device *dev); | ||
163 | |||
164 | struct module *owner; | ||
165 | }; | ||
166 | |||
167 | #ifdef CONFIG_CPU_IDLE | ||
168 | |||
169 | extern int cpuidle_register_governor(struct cpuidle_governor *gov); | ||
170 | extern void cpuidle_unregister_governor(struct cpuidle_governor *gov); | ||
171 | |||
172 | #else | ||
173 | |||
174 | static inline int cpuidle_register_governor(struct cpuidle_governor *gov) | ||
175 | {return 0;} | ||
176 | static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { } | ||
177 | |||
178 | #endif | ||
179 | |||
180 | #endif /* _LINUX_CPUIDLE_H */ | ||
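
Tying the device interface together: a hypothetical minimal driver fills in a cpuidle_device with one state, supplies an enter() hook — the core is expected to record its return value as last_residency, in microseconds (cf. cpuidle_get_last_residency() above) — and registers it. Sketch only; the mydrv_* names are invented and error handling is elided:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/cpuidle.h>

    struct mydrv_cx { u32 address; };       /* invented per-state data */

    static struct mydrv_cx mydrv_c1;

    static int mydrv_enter_c1(struct cpuidle_device *dev,
                              struct cpuidle_state *state)
    {
        struct mydrv_cx *cx = cpuidle_get_statedata(state);

        /* ... enter the hardware idle state described by cx ... */
        return 0;       /* residency in us, if measurable */
    }

    static struct cpuidle_driver mydrv_idle_driver = {
        .name  = "mydrv_idle",
        .owner = THIS_MODULE,
    };

    static int mydrv_setup_cpu(int cpu)
    {
        struct cpuidle_device *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
            return -ENOMEM;

        dev->cpu = cpu;
        dev->state_count = 1;
        snprintf(dev->states[0].name, CPUIDLE_NAME_LEN, "C1");
        dev->states[0].exit_latency = 1;        /* us */
        dev->states[0].target_residency = 2;    /* us */
        dev->states[0].flags = CPUIDLE_FLAG_SHALLOW | CPUIDLE_FLAG_TIME_VALID;
        dev->states[0].enter = mydrv_enter_c1;
        cpuidle_set_statedata(&dev->states[0], &mydrv_c1);

        return cpuidle_register_device(dev);
    }

A real driver would call cpuidle_register_driver(&mydrv_idle_driver) once at init before registering per-CPU devices, mirroring the acpi_idle_driver declared in processor.h above.
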
diff --git a/include/linux/tick.h b/include/linux/tick.h index 9a7252e089b9..f4a1395e05ff 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
@@ -40,6 +40,7 @@ enum tick_nohz_mode { | |||
40 | * @idle_sleeps: Number of idle calls, where the sched tick was stopped | 40 | * @idle_sleeps: Number of idle calls, where the sched tick was stopped |
41 | * @idle_entrytime: Time when the idle call was entered | 41 | * @idle_entrytime: Time when the idle call was entered |
42 | * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped | 42 | * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped |
43 | * @sleep_length: Duration of the current idle sleep | ||
43 | */ | 44 | */ |
44 | struct tick_sched { | 45 | struct tick_sched { |
45 | struct hrtimer sched_timer; | 46 | struct hrtimer sched_timer; |
@@ -52,6 +53,7 @@ struct tick_sched { | |||
52 | unsigned long idle_sleeps; | 53 | unsigned long idle_sleeps; |
53 | ktime_t idle_entrytime; | 54 | ktime_t idle_entrytime; |
54 | ktime_t idle_sleeptime; | 55 | ktime_t idle_sleeptime; |
56 | ktime_t sleep_length; | ||
55 | unsigned long last_jiffies; | 57 | unsigned long last_jiffies; |
56 | unsigned long next_jiffies; | 58 | unsigned long next_jiffies; |
57 | ktime_t idle_expires; | 59 | ktime_t idle_expires; |
@@ -100,10 +102,17 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } | |||
100 | extern void tick_nohz_stop_sched_tick(void); | 102 | extern void tick_nohz_stop_sched_tick(void); |
101 | extern void tick_nohz_restart_sched_tick(void); | 103 | extern void tick_nohz_restart_sched_tick(void); |
102 | extern void tick_nohz_update_jiffies(void); | 104 | extern void tick_nohz_update_jiffies(void); |
105 | extern ktime_t tick_nohz_get_sleep_length(void); | ||
103 | # else | 106 | # else |
104 | static inline void tick_nohz_stop_sched_tick(void) { } | 107 | static inline void tick_nohz_stop_sched_tick(void) { } |
105 | static inline void tick_nohz_restart_sched_tick(void) { } | 108 | static inline void tick_nohz_restart_sched_tick(void) { } |
106 | static inline void tick_nohz_update_jiffies(void) { } | 109 | static inline void tick_nohz_update_jiffies(void) { } |
110 | static inline ktime_t tick_nohz_get_sleep_length(void) | ||
111 | { | ||
112 | ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; | ||
113 | |||
114 | return len; | ||
115 | } | ||
107 | # endif /* !NO_HZ */ | 116 | # endif /* !NO_HZ */ |
108 | 117 | ||
109 | #endif | 118 | #endif |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 8c3fef1db09c..637519af6151 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -153,6 +153,7 @@ void tick_nohz_stop_sched_tick(void) | |||
153 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags; | 153 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags; |
154 | struct tick_sched *ts; | 154 | struct tick_sched *ts; |
155 | ktime_t last_update, expires, now, delta; | 155 | ktime_t last_update, expires, now, delta; |
156 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | ||
156 | int cpu; | 157 | int cpu; |
157 | 158 | ||
158 | local_irq_save(flags); | 159 | local_irq_save(flags); |
@@ -302,11 +303,26 @@ void tick_nohz_stop_sched_tick(void) | |||
302 | out: | 303 | out: |
303 | ts->next_jiffies = next_jiffies; | 304 | ts->next_jiffies = next_jiffies; |
304 | ts->last_jiffies = last_jiffies; | 305 | ts->last_jiffies = last_jiffies; |
306 | ts->sleep_length = ktime_sub(dev->next_event, now); | ||
305 | end: | 307 | end: |
306 | local_irq_restore(flags); | 308 | local_irq_restore(flags); |
307 | } | 309 | } |
308 | 310 | ||
309 | /** | 311 | /** |
312 | * tick_nohz_get_sleep_length - return the length of the current sleep | ||
313 | * | ||
314 | * Called from power state control code with interrupts disabled | ||
315 | */ | ||
316 | ktime_t tick_nohz_get_sleep_length(void) | ||
317 | { | ||
318 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | ||
319 | |||
320 | return ts->sleep_length; | ||
321 | } | ||
322 | |||
323 | EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length); | ||
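
sleep_length is refreshed on the out: path of tick_nohz_stop_sched_tick(), i.e. just before the CPU commits to an idle sleep, so callers are expected to query it from the idle path with interrupts disabled, exactly as menu_select() does above. When NO_HZ is off, the static-inline fallback in tick.h and menu_reflect()'s !TIME_VALID fallback degrade to the same default:

    /* Both fallbacks assume one tick period (values from the code
     * above):
     *   tick.h (!NO_HZ):  sleep length = NSEC_PER_SEC / HZ   (ns)
     *   menu_reflect():   measured_us  = USEC_PER_SEC / HZ   (us)
     * i.e. without better information the next periodic tick is taken
     * to be the next wakeup. */
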
324 | |||
325 | /** | ||
310 | * nohz_restart_sched_tick - restart the idle tick from the idle task | 326 | * nohz_restart_sched_tick - restart the idle tick from the idle task |
311 | * | 327 | * |
312 | * Restart the idle tick when the CPU is woken up from idle | 328 | * Restart the idle tick when the CPU is woken up from idle |