Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/acpi/processor_driver.c     |  20
 -rw-r--r--  drivers/acpi/processor_idle.c       | 191
 -rw-r--r--  drivers/cpuidle/cpuidle.c           |  45
 -rw-r--r--  drivers/cpuidle/driver.c            |  25
 -rw-r--r--  drivers/cpuidle/governors/ladder.c  |  28
 -rw-r--r--  drivers/cpuidle/governors/menu.c    |  20
 -rw-r--r--  drivers/cpuidle/sysfs.c             |   3
 -rw-r--r--  drivers/idle/intel_idle.c           |  80
 8 files changed, 298 insertions, 114 deletions
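
Every hunk below implements the same interface change: idle-state definitions move out of the per-cpu struct cpuidle_device into the single, globally registered struct cpuidle_driver, and the callbacks that need state data gain a driver argument. A minimal sketch of the resulting convention (the include/linux/cpuidle.h side of the change is outside this drivers/ diffstat, so this is illustrative rather than verbatim):

#include <linux/cpuidle.h>

/* Sketch only, not part of the patch: a state-enter callback before and
 * after this series. */

/* before: both definitions and statistics lived in the per-cpu device */
static int old_style_enter(struct cpuidle_device *dev, int index);

/* after: definitions come from the registered driver, per-cpu usage
 * statistics stay in the device */
static int new_style_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *state = &drv->states[index];		/* shared */
	struct cpuidle_state_usage *usage = &dev->states_usage[index];	/* per-cpu */

	/* ... enter the hardware state described by 'state', account the
	 * residency in 'usage', and return the index actually entered ... */
	return index;
}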
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index a4e0f1ba6040..9d7bc9f6b6cc 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -426,7 +426,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
426 426
427 if (action == CPU_ONLINE && pr) { 427 if (action == CPU_ONLINE && pr) {
428 acpi_processor_ppc_has_changed(pr, 0); 428 acpi_processor_ppc_has_changed(pr, 0);
429 acpi_processor_cst_has_changed(pr); 429 acpi_processor_hotplug(pr);
430 acpi_processor_reevaluate_tstate(pr, action); 430 acpi_processor_reevaluate_tstate(pr, action);
431 acpi_processor_tstate_has_changed(pr); 431 acpi_processor_tstate_has_changed(pr);
432 } 432 }
@@ -503,8 +503,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
503 acpi_processor_get_throttling_info(pr); 503 acpi_processor_get_throttling_info(pr);
504 acpi_processor_get_limit_info(pr); 504 acpi_processor_get_limit_info(pr);
505 505
506 506 if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
507 if (cpuidle_get_driver() == &acpi_idle_driver)
508 acpi_processor_power_init(pr, device); 507 acpi_processor_power_init(pr, device);
509 508
510 pr->cdev = thermal_cooling_device_register("Processor", device, 509 pr->cdev = thermal_cooling_device_register("Processor", device,
@@ -800,17 +799,9 @@ static int __init acpi_processor_init(void)
800 799
801 memset(&errata, 0, sizeof(errata)); 800 memset(&errata, 0, sizeof(errata));
802 801
803 if (!cpuidle_register_driver(&acpi_idle_driver)) {
804 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
805 acpi_idle_driver.name);
806 } else {
807 printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
808 cpuidle_get_driver()->name);
809 }
810
811 result = acpi_bus_register_driver(&acpi_processor_driver); 802 result = acpi_bus_register_driver(&acpi_processor_driver);
812 if (result < 0) 803 if (result < 0)
813 goto out_cpuidle; 804 return result;
814 805
815 acpi_processor_install_hotplug_notify(); 806 acpi_processor_install_hotplug_notify();
816 807
@@ -821,11 +812,6 @@ static int __init acpi_processor_init(void)
821 acpi_processor_throttling_init(); 812 acpi_processor_throttling_init();
822 813
823 return 0; 814 return 0;
824
825out_cpuidle:
826 cpuidle_unregister_driver(&acpi_idle_driver);
827
828 return result;
829} 815}
830 816
831static void __exit acpi_processor_exit(void) 817static void __exit acpi_processor_exit(void)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b98c75285690..24fe3afa7119 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -741,11 +741,13 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
741/** 741/**
742 * acpi_idle_enter_c1 - enters an ACPI C1 state-type 742 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
743 * @dev: the target CPU 743 * @dev: the target CPU
744 * @drv: cpuidle driver containing cpuidle state info
744 * @index: index of target state 745 * @index: index of target state
745 * 746 *
746 * This is equivalent to the HALT instruction. 747 * This is equivalent to the HALT instruction.
747 */ 748 */
748static int acpi_idle_enter_c1(struct cpuidle_device *dev, int index) 749static int acpi_idle_enter_c1(struct cpuidle_device *dev,
750 struct cpuidle_driver *drv, int index)
749{ 751{
750 ktime_t kt1, kt2; 752 ktime_t kt1, kt2;
751 s64 idle_time; 753 s64 idle_time;
@@ -787,9 +789,11 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, int index)
787/** 789/**
788 * acpi_idle_enter_simple - enters an ACPI state without BM handling 790 * acpi_idle_enter_simple - enters an ACPI state without BM handling
789 * @dev: the target CPU 791 * @dev: the target CPU
792 * @drv: cpuidle driver with cpuidle state information
790 * @index: the index of suggested state 793 * @index: the index of suggested state
791 */ 794 */
792static int acpi_idle_enter_simple(struct cpuidle_device *dev, int index) 795static int acpi_idle_enter_simple(struct cpuidle_device *dev,
796 struct cpuidle_driver *drv, int index)
793{ 797{
794 struct acpi_processor *pr; 798 struct acpi_processor *pr;
795 struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; 799 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
@@ -869,11 +873,13 @@ static DEFINE_SPINLOCK(c3_lock);
869/** 873/**
870 * acpi_idle_enter_bm - enters C3 with proper BM handling 874 * acpi_idle_enter_bm - enters C3 with proper BM handling
871 * @dev: the target CPU 875 * @dev: the target CPU
876 * @drv: cpuidle driver containing state data
872 * @index: the index of suggested state 877 * @index: the index of suggested state
873 * 878 *
874 * If BM is detected, the deepest non-C3 idle state is entered instead. 879 * If BM is detected, the deepest non-C3 idle state is entered instead.
875 */ 880 */
876static int acpi_idle_enter_bm(struct cpuidle_device *dev, int index) 881static int acpi_idle_enter_bm(struct cpuidle_device *dev,
882 struct cpuidle_driver *drv, int index)
877{ 883{
878 struct acpi_processor *pr; 884 struct acpi_processor *pr;
879 struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; 885 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
@@ -896,9 +902,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, int index)
896 } 902 }
897 903
898 if (!cx->bm_sts_skip && acpi_idle_bm_check()) { 904 if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
899 if (dev->safe_state_index >= 0) { 905 if (drv->safe_state_index >= 0) {
900 return dev->states[dev->safe_state_index].enter(dev, 906 return drv->states[drv->safe_state_index].enter(dev,
901 dev->safe_state_index); 907 drv, drv->safe_state_index);
902 } else { 908 } else {
903 local_irq_disable(); 909 local_irq_disable();
904 acpi_safe_halt(); 910 acpi_safe_halt();
@@ -993,14 +999,15 @@ struct cpuidle_driver acpi_idle_driver = {
993}; 999};
994 1000
995/** 1001/**
996 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE 1002 * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
1003 * device i.e. per-cpu data
1004 *
997 * @pr: the ACPI processor 1005 * @pr: the ACPI processor
998 */ 1006 */
999static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) 1007static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
1000{ 1008{
1001 int i, count = CPUIDLE_DRIVER_STATE_START; 1009 int i, count = CPUIDLE_DRIVER_STATE_START;
1002 struct acpi_processor_cx *cx; 1010 struct acpi_processor_cx *cx;
1003 struct cpuidle_state *state;
1004 struct cpuidle_state_usage *state_usage; 1011 struct cpuidle_state_usage *state_usage;
1005 struct cpuidle_device *dev = &pr->power.dev; 1012 struct cpuidle_device *dev = &pr->power.dev;
1006 1013
@@ -1012,18 +1019,12 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1012 } 1019 }
1013 1020
1014 dev->cpu = pr->id; 1021 dev->cpu = pr->id;
1015 dev->safe_state_index = -1;
1016 for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
1017 dev->states[i].name[0] = '\0';
1018 dev->states[i].desc[0] = '\0';
1019 }
1020 1022
1021 if (max_cstate == 0) 1023 if (max_cstate == 0)
1022 max_cstate = 1; 1024 max_cstate = 1;
1023 1025
1024 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 1026 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
1025 cx = &pr->power.states[i]; 1027 cx = &pr->power.states[i];
1026 state = &dev->states[count];
1027 state_usage = &dev->states_usage[count]; 1028 state_usage = &dev->states_usage[count];
1028 1029
1029 if (!cx->valid) 1030 if (!cx->valid)
@@ -1035,8 +1036,64 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1035 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 1036 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
1036 continue; 1037 continue;
1037#endif 1038#endif
1039
1038 cpuidle_set_statedata(state_usage, cx); 1040 cpuidle_set_statedata(state_usage, cx);
1039 1041
1042 count++;
1043 if (count == CPUIDLE_STATE_MAX)
1044 break;
1045 }
1046
1047 dev->state_count = count;
1048
1049 if (!count)
1050 return -EINVAL;
1051
1052 return 0;
1053}
1054
1055/**
1056 * acpi_processor_setup_cpuidle states- prepares and configures cpuidle
1057 * global state data i.e. idle routines
1058 *
1059 * @pr: the ACPI processor
1060 */
1061static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
1062{
1063 int i, count = CPUIDLE_DRIVER_STATE_START;
1064 struct acpi_processor_cx *cx;
1065 struct cpuidle_state *state;
1066 struct cpuidle_driver *drv = &acpi_idle_driver;
1067
1068 if (!pr->flags.power_setup_done)
1069 return -EINVAL;
1070
1071 if (pr->flags.power == 0)
1072 return -EINVAL;
1073
1074 drv->safe_state_index = -1;
1075 for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
1076 drv->states[i].name[0] = '\0';
1077 drv->states[i].desc[0] = '\0';
1078 }
1079
1080 if (max_cstate == 0)
1081 max_cstate = 1;
1082
1083 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
1084 cx = &pr->power.states[i];
1085
1086 if (!cx->valid)
1087 continue;
1088
1089#ifdef CONFIG_HOTPLUG_CPU
1090 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
1091 !pr->flags.has_cst &&
1092 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
1093 continue;
1094#endif
1095
1096 state = &drv->states[count];
1040 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); 1097 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
1041 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 1098 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
1042 state->exit_latency = cx->latency; 1099 state->exit_latency = cx->latency;
@@ -1049,13 +1106,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1049 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1106 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1050 1107
1051 state->enter = acpi_idle_enter_c1; 1108 state->enter = acpi_idle_enter_c1;
1052 dev->safe_state_index = count; 1109 drv->safe_state_index = count;
1053 break; 1110 break;
1054 1111
1055 case ACPI_STATE_C2: 1112 case ACPI_STATE_C2:
1056 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1113 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1057 state->enter = acpi_idle_enter_simple; 1114 state->enter = acpi_idle_enter_simple;
1058 dev->safe_state_index = count; 1115 drv->safe_state_index = count;
1059 break; 1116 break;
1060 1117
1061 case ACPI_STATE_C3: 1118 case ACPI_STATE_C3:
@@ -1071,7 +1128,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1071 break; 1128 break;
1072 } 1129 }
1073 1130
1074 dev->state_count = count; 1131 drv->state_count = count;
1075 1132
1076 if (!count) 1133 if (!count)
1077 return -EINVAL; 1134 return -EINVAL;
@@ -1079,7 +1136,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1079 return 0; 1136 return 0;
1080} 1137}
1081 1138
1082int acpi_processor_cst_has_changed(struct acpi_processor *pr) 1139int acpi_processor_hotplug(struct acpi_processor *pr)
1083{ 1140{
1084 int ret = 0; 1141 int ret = 0;
1085 1142
@@ -1100,7 +1157,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1100 cpuidle_disable_device(&pr->power.dev); 1157 cpuidle_disable_device(&pr->power.dev);
1101 acpi_processor_get_power_info(pr); 1158 acpi_processor_get_power_info(pr);
1102 if (pr->flags.power) { 1159 if (pr->flags.power) {
1103 acpi_processor_setup_cpuidle(pr); 1160 acpi_processor_setup_cpuidle_cx(pr);
1104 ret = cpuidle_enable_device(&pr->power.dev); 1161 ret = cpuidle_enable_device(&pr->power.dev);
1105 } 1162 }
1106 cpuidle_resume_and_unlock(); 1163 cpuidle_resume_and_unlock();
@@ -1108,10 +1165,72 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1108 return ret; 1165 return ret;
1109} 1166}
1110 1167
1168int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1169{
1170 int cpu;
1171 struct acpi_processor *_pr;
1172
1173 if (disabled_by_idle_boot_param())
1174 return 0;
1175
1176 if (!pr)
1177 return -EINVAL;
1178
1179 if (nocst)
1180 return -ENODEV;
1181
1182 if (!pr->flags.power_setup_done)
1183 return -ENODEV;
1184
1185 /*
1186 * FIXME: Design the ACPI notification to make it once per
1187 * system instead of once per-cpu. This condition is a hack
1188 * to make the code that updates C-States be called once.
1189 */
1190
1191 if (smp_processor_id() == 0 &&
1192 cpuidle_get_driver() == &acpi_idle_driver) {
1193
1194 cpuidle_pause_and_lock();
1195 /* Protect against cpu-hotplug */
1196 get_online_cpus();
1197
1198 /* Disable all cpuidle devices */
1199 for_each_online_cpu(cpu) {
1200 _pr = per_cpu(processors, cpu);
1201 if (!_pr || !_pr->flags.power_setup_done)
1202 continue;
1203 cpuidle_disable_device(&_pr->power.dev);
1204 }
1205
1206 /* Populate Updated C-state information */
1207 acpi_processor_setup_cpuidle_states(pr);
1208
1209 /* Enable all cpuidle devices */
1210 for_each_online_cpu(cpu) {
1211 _pr = per_cpu(processors, cpu);
1212 if (!_pr || !_pr->flags.power_setup_done)
1213 continue;
1214 acpi_processor_get_power_info(_pr);
1215 if (_pr->flags.power) {
1216 acpi_processor_setup_cpuidle_cx(_pr);
1217 cpuidle_enable_device(&_pr->power.dev);
1218 }
1219 }
1220 put_online_cpus();
1221 cpuidle_resume_and_unlock();
1222 }
1223
1224 return 0;
1225}
1226
1227static int acpi_processor_registered;
1228
1111int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, 1229int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1112 struct acpi_device *device) 1230 struct acpi_device *device)
1113{ 1231{
1114 acpi_status status = 0; 1232 acpi_status status = 0;
1233 int retval;
1115 static int first_run; 1234 static int first_run;
1116 1235
1117 if (disabled_by_idle_boot_param()) 1236 if (disabled_by_idle_boot_param())
@@ -1148,9 +1267,26 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1148 * platforms that only support C1. 1267 * platforms that only support C1.
1149 */ 1268 */
1150 if (pr->flags.power) { 1269 if (pr->flags.power) {
1151 acpi_processor_setup_cpuidle(pr); 1270 /* Register acpi_idle_driver if not already registered */
1152 if (cpuidle_register_device(&pr->power.dev)) 1271 if (!acpi_processor_registered) {
1153 return -EIO; 1272 acpi_processor_setup_cpuidle_states(pr);
1273 retval = cpuidle_register_driver(&acpi_idle_driver);
1274 if (retval)
1275 return retval;
1276 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
1277 acpi_idle_driver.name);
1278 }
1279 /* Register per-cpu cpuidle_device. Cpuidle driver
1280 * must already be registered before registering device
1281 */
1282 acpi_processor_setup_cpuidle_cx(pr);
1283 retval = cpuidle_register_device(&pr->power.dev);
1284 if (retval) {
1285 if (acpi_processor_registered == 0)
1286 cpuidle_unregister_driver(&acpi_idle_driver);
1287 return retval;
1288 }
1289 acpi_processor_registered++;
1154 } 1290 }
1155 return 0; 1291 return 0;
1156} 1292}
@@ -1161,8 +1297,13 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
1161 if (disabled_by_idle_boot_param()) 1297 if (disabled_by_idle_boot_param())
1162 return 0; 1298 return 0;
1163 1299
1164 cpuidle_unregister_device(&pr->power.dev); 1300 if (pr->flags.power) {
1165 pr->flags.power_setup_done = 0; 1301 cpuidle_unregister_device(&pr->power.dev);
1302 acpi_processor_registered--;
1303 if (acpi_processor_registered == 0)
1304 cpuidle_unregister_driver(&acpi_idle_driver);
1305 }
1166 1306
1307 pr->flags.power_setup_done = 0;
1167 return 0; 1308 return 0;
1168} 1309}
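
Taken together, the processor_idle.c changes split the old acpi_processor_setup_cpuidle() into a per-system half (acpi_processor_setup_cpuidle_states(), which fills acpi_idle_driver.states once) and a per-cpu half (acpi_processor_setup_cpuidle_cx(), which fills dev->states_usage), and they move driver registration behind a refcount. A condensed sketch of the resulting lifecycle, with error handling dropped:

/* Registration (acpi_processor_power_init(), simplified): the first CPU
 * that reports usable C-states registers the shared driver; every CPU
 * registers its own device. */
if (pr->flags.power) {
	if (!acpi_processor_registered) {
		acpi_processor_setup_cpuidle_states(pr);	/* shared drv->states */
		cpuidle_register_driver(&acpi_idle_driver);
	}
	acpi_processor_setup_cpuidle_cx(pr);			/* per-cpu usage data */
	cpuidle_register_device(&pr->power.dev);
	acpi_processor_registered++;
}

/* Teardown (acpi_processor_power_exit(), simplified): the last device to
 * go away also unregisters the driver. */
if (pr->flags.power) {
	cpuidle_unregister_device(&pr->power.dev);
	if (--acpi_processor_registered == 0)
		cpuidle_unregister_driver(&acpi_idle_driver);
}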
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7127e92fa8a1..7a57b11eaa8d 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -61,6 +61,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
61int cpuidle_idle_call(void) 61int cpuidle_idle_call(void)
62{ 62{
63 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); 63 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
64 struct cpuidle_driver *drv = cpuidle_get_driver();
64 struct cpuidle_state *target_state; 65 struct cpuidle_state *target_state;
65 int next_state, entered_state; 66 int next_state, entered_state;
66 67
@@ -84,18 +85,18 @@ int cpuidle_idle_call(void)
84#endif 85#endif
85 86
86 /* ask the governor for the next state */ 87 /* ask the governor for the next state */
87 next_state = cpuidle_curr_governor->select(dev); 88 next_state = cpuidle_curr_governor->select(drv, dev);
88 if (need_resched()) { 89 if (need_resched()) {
89 local_irq_enable(); 90 local_irq_enable();
90 return 0; 91 return 0;
91 } 92 }
92 93
93 target_state = &dev->states[next_state]; 94 target_state = &drv->states[next_state];
94 95
95 trace_power_start(POWER_CSTATE, next_state, dev->cpu); 96 trace_power_start(POWER_CSTATE, next_state, dev->cpu);
96 trace_cpu_idle(next_state, dev->cpu); 97 trace_cpu_idle(next_state, dev->cpu);
97 98
98 entered_state = target_state->enter(dev, next_state); 99 entered_state = target_state->enter(dev, drv, next_state);
99 100
100 trace_power_end(dev->cpu); 101 trace_power_end(dev->cpu);
101 trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); 102 trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
@@ -163,7 +164,8 @@ void cpuidle_resume_and_unlock(void)
163EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); 164EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
164 165
165#ifdef CONFIG_ARCH_HAS_CPU_RELAX 166#ifdef CONFIG_ARCH_HAS_CPU_RELAX
166static int poll_idle(struct cpuidle_device *dev, int index) 167static int poll_idle(struct cpuidle_device *dev,
168 struct cpuidle_driver *drv, int index)
167{ 169{
168 ktime_t t1, t2; 170 ktime_t t1, t2;
169 s64 diff; 171 s64 diff;
@@ -183,12 +185,9 @@ static int poll_idle(struct cpuidle_device *dev, int index)
183 return index; 185 return index;
184} 186}
185 187
186static void poll_idle_init(struct cpuidle_device *dev) 188static void poll_idle_init(struct cpuidle_driver *drv)
187{ 189{
188 struct cpuidle_state *state = &dev->states[0]; 190 struct cpuidle_state *state = &drv->states[0];
189 struct cpuidle_state_usage *state_usage = &dev->states_usage[0];
190
191 cpuidle_set_statedata(state_usage, NULL);
192 191
193 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); 192 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
194 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); 193 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
@@ -199,7 +198,7 @@ static void poll_idle_init(struct cpuidle_device *dev)
199 state->enter = poll_idle; 198 state->enter = poll_idle;
200} 199}
201#else 200#else
202static void poll_idle_init(struct cpuidle_device *dev) {} 201static void poll_idle_init(struct cpuidle_driver *drv) {}
203#endif /* CONFIG_ARCH_HAS_CPU_RELAX */ 202#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
204 203
205/** 204/**
@@ -226,13 +225,13 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
226 return ret; 225 return ret;
227 } 226 }
228 227
229 poll_idle_init(dev); 228 poll_idle_init(cpuidle_get_driver());
230 229
231 if ((ret = cpuidle_add_state_sysfs(dev))) 230 if ((ret = cpuidle_add_state_sysfs(dev)))
232 return ret; 231 return ret;
233 232
234 if (cpuidle_curr_governor->enable && 233 if (cpuidle_curr_governor->enable &&
235 (ret = cpuidle_curr_governor->enable(dev))) 234 (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
236 goto fail_sysfs; 235 goto fail_sysfs;
237 236
238 for (i = 0; i < dev->state_count; i++) { 237 for (i = 0; i < dev->state_count; i++) {
@@ -273,7 +272,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
273 dev->enabled = 0; 272 dev->enabled = 0;
274 273
275 if (cpuidle_curr_governor->disable) 274 if (cpuidle_curr_governor->disable)
276 cpuidle_curr_governor->disable(dev); 275 cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);
277 276
278 cpuidle_remove_state_sysfs(dev); 277 cpuidle_remove_state_sysfs(dev);
279 enabled_devices--; 278 enabled_devices--;
@@ -301,26 +300,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
301 300
302 init_completion(&dev->kobj_unregister); 301 init_completion(&dev->kobj_unregister);
303 302
304 /*
305 * cpuidle driver should set the dev->power_specified bit
306 * before registering the device if the driver provides
307 * power_usage numbers.
308 *
309 * For those devices whose ->power_specified is not set,
310 * we fill in power_usage with decreasing values as the
311 * cpuidle code has an implicit assumption that state Cn
312 * uses less power than C(n-1).
313 *
314 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
315 * an power value of -1. So we use -2, -3, etc, for other
316 * c-states.
317 */
318 if (!dev->power_specified) {
319 int i;
320 for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
321 dev->states[i].power_usage = -1 - i;
322 }
323
324 per_cpu(cpuidle_devices, dev->cpu) = dev; 303 per_cpu(cpuidle_devices, dev->cpu) = dev;
325 list_add(&dev->device_list, &cpuidle_detected_devices); 304 list_add(&dev->device_list, &cpuidle_detected_devices);
326 if ((ret = cpuidle_add_sysfs(sys_dev))) { 305 if ((ret = cpuidle_add_sysfs(sys_dev))) {
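
In the cpuidle core, the idle path now resolves the registered driver once and threads it through both the governor and the state callback, and poll_idle_init() initializes drv->states[0] instead of a per-device copy. A condensed sketch of the reworked hot path (menu_reflect()'s unchanged per-device signature is visible in the menu.c hunk further down):

/* Sketch of cpuidle_idle_call() after this change (tracing and the
 * need_resched() bail-out omitted). */
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_driver();
int next_state, entered_state;

next_state = cpuidle_curr_governor->select(drv, dev);		/* governor sees drv */
entered_state = drv->states[next_state].enter(dev, drv, next_state);

if (cpuidle_curr_governor->reflect)
	cpuidle_curr_governor->reflect(dev, entered_state);	/* still per-device */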
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 3f7e3cedd133..284d7af5a9c8 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -17,6 +17,30 @@
17static struct cpuidle_driver *cpuidle_curr_driver; 17static struct cpuidle_driver *cpuidle_curr_driver;
18DEFINE_SPINLOCK(cpuidle_driver_lock); 18DEFINE_SPINLOCK(cpuidle_driver_lock);
19 19
20static void __cpuidle_register_driver(struct cpuidle_driver *drv)
21{
22 int i;
23 /*
24 * cpuidle driver should set the drv->power_specified bit
25 * before registering if the driver provides
26 * power_usage numbers.
27 *
28 * If power_specified is not set,
29 * we fill in power_usage with decreasing values as the
30 * cpuidle code has an implicit assumption that state Cn
31 * uses less power than C(n-1).
32 *
33 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
34 * an power value of -1. So we use -2, -3, etc, for other
35 * c-states.
36 */
37 if (!drv->power_specified) {
38 for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
39 drv->states[i].power_usage = -1 - i;
40 }
41}
42
43
20/** 44/**
21 * cpuidle_register_driver - registers a driver 45 * cpuidle_register_driver - registers a driver
22 * @drv: the driver 46 * @drv: the driver
@@ -34,6 +58,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
34 spin_unlock(&cpuidle_driver_lock); 58 spin_unlock(&cpuidle_driver_lock);
35 return -EBUSY; 59 return -EBUSY;
36 } 60 }
61 __cpuidle_register_driver(drv);
37 cpuidle_curr_driver = drv; 62 cpuidle_curr_driver = drv;
38 spin_unlock(&cpuidle_driver_lock); 63 spin_unlock(&cpuidle_driver_lock);
39 64
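
The fallback power_usage initialization moves from per-device registration (__cpuidle_register_device()) to driver registration, so it runs once on the shared state table. The effect, worked through for a driver that leaves power_specified unset:

/*
 * Worked example (assuming CONFIG_ARCH_HAS_CPU_RELAX, so
 * CPUIDLE_DRIVER_STATE_START == 1, and a driver exposing three real
 * C-states behind the poll state):
 *
 *   drv->states[0].power_usage = -1   poll state, preset by the core
 *   drv->states[1].power_usage = -2   from power_usage = -1 - i, i = 1
 *   drv->states[2].power_usage = -3
 *   drv->states[3].power_usage = -4
 *
 * i.e. deeper states report strictly lower power, preserving the
 * governors' implicit "Cn uses less power than C(n-1)" assumption.
 */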
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 6a686a76711f..ef6b9e4727a7 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev,
60 60
61/** 61/**
62 * ladder_select_state - selects the next state to enter 62 * ladder_select_state - selects the next state to enter
63 * @drv: cpuidle driver
63 * @dev: the CPU 64 * @dev: the CPU
64 */ 65 */
65static int ladder_select_state(struct cpuidle_device *dev) 66static int ladder_select_state(struct cpuidle_driver *drv,
67 struct cpuidle_device *dev)
66{ 68{
67 struct ladder_device *ldev = &__get_cpu_var(ladder_devices); 69 struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
68 struct ladder_device_state *last_state; 70 struct ladder_device_state *last_state;
@@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
77 79
78 last_state = &ldev->states[last_idx]; 80 last_state = &ldev->states[last_idx];
79 81
80 if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) 82 if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) {
81 last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; 83 last_residency = cpuidle_get_last_residency(dev) - \
84 drv->states[last_idx].exit_latency;
85 }
82 else 86 else
83 last_residency = last_state->threshold.promotion_time + 1; 87 last_residency = last_state->threshold.promotion_time + 1;
84 88
85 /* consider promotion */ 89 /* consider promotion */
86 if (last_idx < dev->state_count - 1 && 90 if (last_idx < drv->state_count - 1 &&
87 last_residency > last_state->threshold.promotion_time && 91 last_residency > last_state->threshold.promotion_time &&
88 dev->states[last_idx + 1].exit_latency <= latency_req) { 92 drv->states[last_idx + 1].exit_latency <= latency_req) {
89 last_state->stats.promotion_count++; 93 last_state->stats.promotion_count++;
90 last_state->stats.demotion_count = 0; 94 last_state->stats.demotion_count = 0;
91 if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { 95 if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
96 100
97 /* consider demotion */ 101 /* consider demotion */
98 if (last_idx > CPUIDLE_DRIVER_STATE_START && 102 if (last_idx > CPUIDLE_DRIVER_STATE_START &&
99 dev->states[last_idx].exit_latency > latency_req) { 103 drv->states[last_idx].exit_latency > latency_req) {
100 int i; 104 int i;
101 105
102 for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { 106 for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
103 if (dev->states[i].exit_latency <= latency_req) 107 if (drv->states[i].exit_latency <= latency_req)
104 break; 108 break;
105 } 109 }
106 ladder_do_selection(ldev, last_idx, i); 110 ladder_do_selection(ldev, last_idx, i);
@@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
123 127
124/** 128/**
125 * ladder_enable_device - setup for the governor 129 * ladder_enable_device - setup for the governor
130 * @drv: cpuidle driver
126 * @dev: the CPU 131 * @dev: the CPU
127 */ 132 */
128static int ladder_enable_device(struct cpuidle_device *dev) 133static int ladder_enable_device(struct cpuidle_driver *drv,
134 struct cpuidle_device *dev)
129{ 135{
130 int i; 136 int i;
131 struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); 137 struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
@@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev)
134 140
135 ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; 141 ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
136 142
137 for (i = 0; i < dev->state_count; i++) { 143 for (i = 0; i < drv->state_count; i++) {
138 state = &dev->states[i]; 144 state = &drv->states[i];
139 lstate = &ldev->states[i]; 145 lstate = &ldev->states[i];
140 146
141 lstate->stats.promotion_count = 0; 147 lstate->stats.promotion_count = 0;
@@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
144 lstate->threshold.promotion_count = PROMOTION_COUNT; 150 lstate->threshold.promotion_count = PROMOTION_COUNT;
145 lstate->threshold.demotion_count = DEMOTION_COUNT; 151 lstate->threshold.demotion_count = DEMOTION_COUNT;
146 152
147 if (i < dev->state_count - 1) 153 if (i < drv->state_count - 1)
148 lstate->threshold.promotion_time = state->exit_latency; 154 lstate->threshold.promotion_time = state->exit_latency;
149 if (i > 0) 155 if (i > 0)
150 lstate->threshold.demotion_time = state->exit_latency; 156 lstate->threshold.demotion_time = state->exit_latency;
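
Both governors now receive the driver: state attributes (exit_latency, state_count) are read from drv, while per-cpu bookkeeping (last_residency, ladder statistics) remains keyed to dev. The governor hook signatures implied by the call sites, as a sketch:

/* Sketch of the governor hooks after this series (prototypes inferred
 * from the call sites in cpuidle.c, ladder.c and menu.c). */
static int  example_gov_enable (struct cpuidle_driver *drv, struct cpuidle_device *dev);
static void example_gov_disable(struct cpuidle_driver *drv, struct cpuidle_device *dev);
static int  example_gov_select (struct cpuidle_driver *drv, struct cpuidle_device *dev);
static void example_gov_reflect(struct cpuidle_device *dev, int index);	/* unchanged */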
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index af724e823c8e..bcbe88142135 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -182,7 +182,7 @@ static inline int performance_multiplier(void)
182 182
183static DEFINE_PER_CPU(struct menu_device, menu_devices); 183static DEFINE_PER_CPU(struct menu_device, menu_devices);
184 184
185static void menu_update(struct cpuidle_device *dev); 185static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
186 186
187/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ 187/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
188static u64 div_round64(u64 dividend, u32 divisor) 188static u64 div_round64(u64 dividend, u32 divisor)
@@ -228,9 +228,10 @@ static void detect_repeating_patterns(struct menu_device *data)
228 228
229/** 229/**
230 * menu_select - selects the next idle state to enter 230 * menu_select - selects the next idle state to enter
231 * @drv: cpuidle driver containing state data
231 * @dev: the CPU 232 * @dev: the CPU
232 */ 233 */
233static int menu_select(struct cpuidle_device *dev) 234static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
234{ 235{
235 struct menu_device *data = &__get_cpu_var(menu_devices); 236 struct menu_device *data = &__get_cpu_var(menu_devices);
236 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); 237 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
@@ -240,7 +241,7 @@ static int menu_select(struct cpuidle_device *dev)
240 struct timespec t; 241 struct timespec t;
241 242
242 if (data->needs_update) { 243 if (data->needs_update) {
243 menu_update(dev); 244 menu_update(drv, dev);
244 data->needs_update = 0; 245 data->needs_update = 0;
245 } 246 }
246 247
@@ -285,8 +286,8 @@ static int menu_select(struct cpuidle_device *dev)
285 * Find the idle state with the lowest power while satisfying 286 * Find the idle state with the lowest power while satisfying
286 * our constraints. 287 * our constraints.
287 */ 288 */
288 for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) { 289 for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
289 struct cpuidle_state *s = &dev->states[i]; 290 struct cpuidle_state *s = &drv->states[i];
290 291
291 if (s->target_residency > data->predicted_us) 292 if (s->target_residency > data->predicted_us)
292 continue; 293 continue;
@@ -323,14 +324,15 @@ static void menu_reflect(struct cpuidle_device *dev, int index)
323 324
324/** 325/**
325 * menu_update - attempts to guess what happened after entry 326 * menu_update - attempts to guess what happened after entry
327 * @drv: cpuidle driver containing state data
326 * @dev: the CPU 328 * @dev: the CPU
327 */ 329 */
328static void menu_update(struct cpuidle_device *dev) 330static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
329{ 331{
330 struct menu_device *data = &__get_cpu_var(menu_devices); 332 struct menu_device *data = &__get_cpu_var(menu_devices);
331 int last_idx = data->last_state_idx; 333 int last_idx = data->last_state_idx;
332 unsigned int last_idle_us = cpuidle_get_last_residency(dev); 334 unsigned int last_idle_us = cpuidle_get_last_residency(dev);
333 struct cpuidle_state *target = &dev->states[last_idx]; 335 struct cpuidle_state *target = &drv->states[last_idx];
334 unsigned int measured_us; 336 unsigned int measured_us;
335 u64 new_factor; 337 u64 new_factor;
336 338
@@ -384,9 +386,11 @@ static void menu_update(struct cpuidle_device *dev)
384 386
385/** 387/**
386 * menu_enable_device - scans a CPU's states and does setup 388 * menu_enable_device - scans a CPU's states and does setup
389 * @drv: cpuidle driver
387 * @dev: the CPU 390 * @dev: the CPU
388 */ 391 */
389static int menu_enable_device(struct cpuidle_device *dev) 392static int menu_enable_device(struct cpuidle_driver *drv,
393 struct cpuidle_device *dev)
390{ 394{
391 struct menu_device *data = &per_cpu(menu_devices, dev->cpu); 395 struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
392 396
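
menu_select() now walks drv->states, but the selection policy is unchanged: skip any state whose target_residency exceeds the predicted idle time or whose exit_latency exceeds the PM QoS bound, and keep the lowest-power survivor. A worked example with invented numbers (ignoring the performance-multiplier refinement):

/*
 * Hypothetical state table (numbers invented for illustration):
 *
 *   drv->states[1]  C1: exit_latency =   1 us, target_residency =   1 us
 *   drv->states[2]  C2: exit_latency =  20 us, target_residency =  80 us
 *   drv->states[3]  C3: exit_latency = 100 us, target_residency = 400 us
 *
 * With data->predicted_us = 150 and latency_req = 50, C3 fails both
 * checks (400 > 150 and 100 > 50); C1 and C2 both qualify, and C2 wins
 * on power_usage, so menu_select() returns index 2.
 */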
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 8a1ace104476..1e756e160dca 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -322,13 +322,14 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
322{ 322{
323 int i, ret = -ENOMEM; 323 int i, ret = -ENOMEM;
324 struct cpuidle_state_kobj *kobj; 324 struct cpuidle_state_kobj *kobj;
325 struct cpuidle_driver *drv = cpuidle_get_driver();
325 326
326 /* state statistics */ 327 /* state statistics */
327 for (i = 0; i < device->state_count; i++) { 328 for (i = 0; i < device->state_count; i++) {
328 kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); 329 kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
329 if (!kobj) 330 if (!kobj)
330 goto error_state; 331 goto error_state;
331 kobj->state = &device->states[i]; 332 kobj->state = &drv->states[i];
332 kobj->state_usage = &device->states_usage[i]; 333 kobj->state_usage = &device->states_usage[i];
333 init_completion(&kobj->kobj_unregister); 334 init_completion(&kobj->kobj_unregister);
334 335
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3aa8d4cb6dca..5be9d599ff6b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -81,7 +81,8 @@ static unsigned int mwait_substates;
81static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ 81static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
82 82
83static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; 83static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
84static int intel_idle(struct cpuidle_device *dev, int index); 84static int intel_idle(struct cpuidle_device *dev,
85 struct cpuidle_driver *drv, int index);
85 86
86static struct cpuidle_state *cpuidle_state_table; 87static struct cpuidle_state *cpuidle_state_table;
87 88
@@ -227,13 +228,15 @@ static int get_driver_data(int cstate)
227/** 228/**
228 * intel_idle 229 * intel_idle
229 * @dev: cpuidle_device 230 * @dev: cpuidle_device
231 * @drv: cpuidle driver
230 * @index: index of cpuidle state 232 * @index: index of cpuidle state
231 * 233 *
232 */ 234 */
233static int intel_idle(struct cpuidle_device *dev, int index) 235static int intel_idle(struct cpuidle_device *dev,
236 struct cpuidle_driver *drv, int index)
234{ 237{
235 unsigned long ecx = 1; /* break on interrupt flag */ 238 unsigned long ecx = 1; /* break on interrupt flag */
236 struct cpuidle_state *state = &dev->states[index]; 239 struct cpuidle_state *state = &drv->states[index];
237 struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; 240 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
238 unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); 241 unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
239 unsigned int cstate; 242 unsigned int cstate;
@@ -420,6 +423,60 @@ static void intel_idle_cpuidle_devices_uninit(void)
420 return; 423 return;
421} 424}
422/* 425/*
426 * intel_idle_cpuidle_driver_init()
427 * allocate, initialize cpuidle_states
428 */
429static int intel_idle_cpuidle_driver_init(void)
430{
431 int cstate;
432 struct cpuidle_driver *drv = &intel_idle_driver;
433
434 drv->state_count = 1;
435
436 for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
437 int num_substates;
438
439 if (cstate > max_cstate) {
440 printk(PREFIX "max_cstate %d reached\n",
441 max_cstate);
442 break;
443 }
444
445 /* does the state exist in CPUID.MWAIT? */
446 num_substates = (mwait_substates >> ((cstate) * 4))
447 & MWAIT_SUBSTATE_MASK;
448 if (num_substates == 0)
449 continue;
450 /* is the state not enabled? */
451 if (cpuidle_state_table[cstate].enter == NULL) {
452 /* does the driver not know about the state? */
453 if (*cpuidle_state_table[cstate].name == '\0')
454 pr_debug(PREFIX "unaware of model 0x%x"
455 " MWAIT %d please"
456 " contact lenb@kernel.org",
457 boot_cpu_data.x86_model, cstate);
458 continue;
459 }
460
461 if ((cstate > 2) &&
462 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
463 mark_tsc_unstable("TSC halts in idle"
464 " states deeper than C2");
465
466 drv->states[drv->state_count] = /* structure copy */
467 cpuidle_state_table[cstate];
468
469 drv->state_count += 1;
470 }
471
472 if (auto_demotion_disable_flags)
473 smp_call_function(auto_demotion_disable, NULL, 1);
474
475 return 0;
476}
477
478
479/*
423 * intel_idle_cpuidle_devices_init() 480 * intel_idle_cpuidle_devices_init()
424 * allocate, initialize, register cpuidle_devices 481 * allocate, initialize, register cpuidle_devices
425 */ 482 */
@@ -453,23 +510,9 @@ static int intel_idle_cpuidle_devices_init(void)
453 continue; 510 continue;
454 /* is the state not enabled? */ 511 /* is the state not enabled? */
455 if (cpuidle_state_table[cstate].enter == NULL) { 512 if (cpuidle_state_table[cstate].enter == NULL) {
456 /* does the driver not know about the state? */
457 if (*cpuidle_state_table[cstate].name == '\0')
458 pr_debug(PREFIX "unaware of model 0x%x"
459 " MWAIT %d please"
460 " contact lenb@kernel.org",
461 boot_cpu_data.x86_model, cstate);
462 continue; 513 continue;
463 } 514 }
464 515
465 if ((cstate > 2) &&
466 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
467 mark_tsc_unstable("TSC halts in idle"
468 " states deeper than C2");
469
470 dev->states[dev->state_count] = /* structure copy */
471 cpuidle_state_table[cstate];
472
473 dev->states_usage[dev->state_count].driver_data = 516 dev->states_usage[dev->state_count].driver_data =
474 (void *)get_driver_data(cstate); 517 (void *)get_driver_data(cstate);
475 518
@@ -484,8 +527,6 @@ static int intel_idle_cpuidle_devices_init(void)
484 return -EIO; 527 return -EIO;
485 } 528 }
486 } 529 }
487 if (auto_demotion_disable_flags)
488 smp_call_function(auto_demotion_disable, NULL, 1);
489 530
490 return 0; 531 return 0;
491} 532}
@@ -503,6 +544,7 @@ static int __init intel_idle_init(void)
503 if (retval) 544 if (retval)
504 return retval; 545 return retval;
505 546
547 intel_idle_cpuidle_driver_init();
506 retval = cpuidle_register_driver(&intel_idle_driver); 548 retval = cpuidle_register_driver(&intel_idle_driver);
507 if (retval) { 549 if (retval) {
508 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", 550 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",