author	Deepthi Dharwar <deepthi@linux.vnet.ibm.com>	2011-10-28 06:50:42 -0400
committer	Len Brown <len.brown@intel.com>	2011-11-06 21:13:58 -0500
commit	46bcfad7a819bd17ac4e831b04405152d59784ab (patch)
tree	20041e788154d103edff2699f88d4a30320e3ee2	/drivers/cpuidle/cpuidle.c
parent	4202735e8ab6ecfb0381631a0d0b58fefe0bd4e2 (diff)
cpuidle: Single/Global registration of idle states
This patch makes the cpuidle_states structure global (a single copy) instead of per-CPU. The statistics needed on a per-CPU basis by the governor are kept per-CPU. This simplifies the cpuidle subsystem, as state registration is now done by a single CPU only. Having a single copy of cpuidle_states also saves memory. The rare case of asymmetric C-states can be handled within the cpuidle driver, and architectures such as POWER do not have asymmetric C-states.

With single/global registration of all the idle states, dynamic C-state transitions on x86 are handled by the boot CPU. Here, the boot CPU disables all the devices, re-populates the states and later re-enables all the devices, irrespective of which CPU receives the notification first.

Reference: https://lkml.org/lkml/2011/4/25/83

Signed-off-by: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
Signed-off-by: Trinabh Gupta <g.trinabh@gmail.com>
Tested-by: Jean Pihet <j-pihet@ti.com>
Reviewed-by: Kevin Hilman <khilman@ti.com>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Len Brown <len.brown@intel.com>
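To illustrate the model this commit moves to, here is a minimal driver-side sketch, not taken from the patch: idle states are declared once in a global struct cpuidle_driver, and each state's .enter callback uses the (dev, drv, index) signature introduced here. The driver name "example_idle", the state "C1", and the latency/residency values are illustrative assumptions, written against the 3.2-era cpuidle API as modified by this series.

/*
 * Minimal sketch (not part of this patch) of single/global state
 * registration: states live once in a global struct cpuidle_driver,
 * and the .enter callback takes (dev, drv, index).
 * Names and latency values below are illustrative only.
 */
#include <linux/module.h>
#include <linux/cpuidle.h>

static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	/* enter the hardware idle state chosen by the governor */
	return index;	/* report the state actually entered */
}

static struct cpuidle_driver example_idle_driver = {
	.name		= "example_idle",	/* illustrative name */
	.owner		= THIS_MODULE,
	.states = {
		[0] = {
			.name			= "C1",
			.desc			= "example shallow state",
			.exit_latency		= 1,	/* us, illustrative */
			.target_residency	= 1,	/* us, illustrative */
			.enter			= example_enter,
		},
	},
	.state_count = 1,
};

static int __init example_idle_init(void)
{
	int ret;

	/* single/global registration: done once, by one CPU */
	ret = cpuidle_register_driver(&example_idle_driver);
	if (ret)
		return ret;

	/*
	 * Per-CPU cpuidle_device registration is unchanged:
	 * cpuidle_register_device() is still called for each CPU.
	 */
	return 0;
}
module_init(example_idle_init);
MODULE_LICENSE("GPL");

Per-CPU statistics stay in struct cpuidle_device, which each CPU continues to register for itself; the governor and state callbacks now also receive the global driver, as the hunks below show.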
Diffstat (limited to 'drivers/cpuidle/cpuidle.c')
-rw-r--r--	drivers/cpuidle/cpuidle.c	45
1 file changed, 12 insertions, 33 deletions
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7127e92fa8a1..7a57b11eaa8d 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -61,6 +61,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
 int cpuidle_idle_call(void)
 {
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_driver *drv = cpuidle_get_driver();
 	struct cpuidle_state *target_state;
 	int next_state, entered_state;
 
@@ -84,18 +85,18 @@ int cpuidle_idle_call(void)
 #endif
 
 	/* ask the governor for the next state */
-	next_state = cpuidle_curr_governor->select(dev);
+	next_state = cpuidle_curr_governor->select(drv, dev);
 	if (need_resched()) {
 		local_irq_enable();
 		return 0;
 	}
 
-	target_state = &dev->states[next_state];
+	target_state = &drv->states[next_state];
 
 	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
 	trace_cpu_idle(next_state, dev->cpu);
 
-	entered_state = target_state->enter(dev, next_state);
+	entered_state = target_state->enter(dev, drv, next_state);
 
 	trace_power_end(dev->cpu);
 	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
@@ -163,7 +164,8 @@ void cpuidle_resume_and_unlock(void)
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
 
 #ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, int index)
+static int poll_idle(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
 {
 	ktime_t t1, t2;
 	s64 diff;
@@ -183,12 +185,9 @@ static int poll_idle(struct cpuidle_device *dev, int index)
 	return index;
 }
 
-static void poll_idle_init(struct cpuidle_device *dev)
+static void poll_idle_init(struct cpuidle_driver *drv)
 {
-	struct cpuidle_state *state = &dev->states[0];
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[0];
-
-	cpuidle_set_statedata(state_usage, NULL);
+	struct cpuidle_state *state = &drv->states[0];
 
 	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
 	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
@@ -199,7 +198,7 @@ static void poll_idle_init(struct cpuidle_device *dev)
 	state->enter = poll_idle;
 }
 #else
-static void poll_idle_init(struct cpuidle_device *dev) {}
+static void poll_idle_init(struct cpuidle_driver *drv) {}
 #endif /* CONFIG_ARCH_HAS_CPU_RELAX */
 
 /**
@@ -226,13 +225,13 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
 		return ret;
 	}
 
-	poll_idle_init(dev);
+	poll_idle_init(cpuidle_get_driver());
 
 	if ((ret = cpuidle_add_state_sysfs(dev)))
 		return ret;
 
 	if (cpuidle_curr_governor->enable &&
-	    (ret = cpuidle_curr_governor->enable(dev)))
+	    (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
 		goto fail_sysfs;
 
 	for (i = 0; i < dev->state_count; i++) {
@@ -273,7 +272,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
 	dev->enabled = 0;
 
 	if (cpuidle_curr_governor->disable)
-		cpuidle_curr_governor->disable(dev);
+		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);
 
 	cpuidle_remove_state_sysfs(dev);
 	enabled_devices--;
@@ -301,26 +300,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
 
 	init_completion(&dev->kobj_unregister);
 
-	/*
-	 * cpuidle driver should set the dev->power_specified bit
-	 * before registering the device if the driver provides
-	 * power_usage numbers.
-	 *
-	 * For those devices whose ->power_specified is not set,
-	 * we fill in power_usage with decreasing values as the
-	 * cpuidle code has an implicit assumption that state Cn
-	 * uses less power than C(n-1).
-	 *
-	 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
-	 * an power value of -1. So we use -2, -3, etc, for other
-	 * c-states.
-	 */
-	if (!dev->power_specified) {
-		int i;
-		for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
-			dev->states[i].power_usage = -1 - i;
-	}
-
 	per_cpu(cpuidle_devices, dev->cpu) = dev;
 	list_add(&dev->device_list, &cpuidle_detected_devices);
 	if ((ret = cpuidle_add_sysfs(sys_dev))) {